Merge tag 'powerpc-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Jan 2016 21:18:47 +0000 (13:18 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Jan 2016 21:18:47 +0000 (13:18 -0800)
Pull powerpc updates from Michael Ellerman:
 "Core:
   - Ground work for the new Power9 MMU from Aneesh Kumar K.V
   - Optimise FP/VMX/VSX context switching from Anton Blanchard

  Misc:
   - Various cleanups from Krzysztof Kozlowski, John Ogness, Rashmica
     Gupta, Russell Currey, Gavin Shan, Daniel Axtens, Michael Neuling,
     Andrew Donnellan
   - Allow wrapper to work on non-English systems from Laurent Vivier
   - Add rN aliases to the pt_regs_offset table from Rashmica Gupta
   - Fix module autoload for rackmeter & axonram drivers from Luis de
     Bethencourt
   - Include KVM guest test in all interrupt vectors from Paul Mackerras
   - Fix DSCR inheritance over fork() from Anton Blanchard
   - Make value-returning atomics & {cmp}xchg* & their atomic_ versions
     fully ordered from Boqun Feng (see the sketch after this message)
   - Print MSR TM bits in oops messages from Michael Neuling
   - Add TM signal return & invalid stack selftests from Michael Neuling
   - Limit EPOW reset event warnings from Vipin K Parashar
   - Remove the Cell QPACE code from Rashmica Gupta
   - Append linux_banner to exception information in xmon from Rashmica
     Gupta
   - Add selftest to check if VSRs are corrupted from Rashmica Gupta
   - Remove broken GregorianDay() from Daniel Axtens
   - Import Anton's context_switch2 benchmark into selftests from
     Michael Ellerman
   - Add selftest script to test HMI functionality from Daniel Axtens
   - Remove obsolete OPAL v2 support from Stewart Smith
   - Make enter_rtas() private from Michael Ellerman
   - PPR exception cleanups from Michael Ellerman
   - Add page soft dirty tracking from Laurent Dufour
   - Add support for Nvlink NPUs from Alistair Popple
   - Add support for kexec on 476fpe from Alistair Popple
   - Enable kernel CPU dlpar from sysfs from Nathan Fontenot
   - Copy only required pieces of the mm_context_t to the paca from
     Michael Neuling
   - Add a kmsg_dumper that flushes OPAL console output on panic from
     Russell Currey
   - Implement save_stack_trace_regs() to enable kprobe stack tracing
     from Steven Rostedt (sketched after the commit list below)
   - Add HWCAP bits for Power9 from Michael Ellerman
   - Fix _PAGE_PTE breaking swapoff from Aneesh Kumar K.V
   - Fix _PAGE_SWP_SOFT_DIRTY breaking swapoff from Hugh Dickins
   - scripts/recordmcount.pl: support data in text section on powerpc
     from Ulrich Weigand
   - Handle R_PPC64_ENTRY relocations in modules from Ulrich Weigand

  cxl:
   - cxl: Fix possible idr warning when contexts are released from
     Vaibhav Jain
   - cxl: use correct operator when writing pcie config space values
     from Andrew Donnellan
   - cxl: Fix DSI misses when the context owning task exits from Vaibhav
     Jain
   - cxl: fix build for GCC 4.6.x from Brian Norris
   - cxl: use -Werror only with CONFIG_PPC_WERROR from Brian Norris
   - cxl: Enable PCI device ID for future IBM CXL adapter from Uma
     Krishnan

  Freescale:
   - Freescale updates from Scott: Highlights include moving QE code out
     of arch/powerpc (to be shared with arm), device tree updates, and
     minor fixes"

* tag 'powerpc-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (149 commits)
  powerpc/module: Handle R_PPC64_ENTRY relocations
  scripts/recordmcount.pl: support data in text section on powerpc
  powerpc/powernv: Fix OPAL_CONSOLE_FLUSH prototype and usages
  powerpc/mm: fix _PAGE_SWP_SOFT_DIRTY breaking swapoff
  powerpc/mm: Fix _PAGE_PTE breaking swapoff
  cxl: Enable PCI device ID for future IBM CXL adapter
  cxl: use -Werror only with CONFIG_PPC_WERROR
  cxl: fix build for GCC 4.6.x
  powerpc: Add HWCAP bits for Power9
  powerpc/powernv: Reserve PE#0 on NPU
  powerpc/powernv: Change NPU PE# assignment
  powerpc/powernv: Fix update of NVLink DMA mask
  powerpc/powernv: Remove misleading comment in pci.c
  powerpc: Implement save_stack_trace_regs() to enable kprobe stack tracing
  powerpc: Fix build break due to paca mm_context_t changes
  cxl: Fix DSI misses when the context owning task exits
  MAINTAINERS: Update Scott Wood's e-mail address
  powerpc/powernv: Fix minor off-by-one error in opal_mce_check_early_recovery()
  powerpc: Fix style of self-test config prompts
  powerpc/powernv: Only delay opal_rtc_read() retry when necessary
  ...
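
The save_stack_trace_regs() change above is small in scope: kprobes hands the
tracer a struct pt_regs, and on powerpc GPR1 is the stack pointer, so the
trace can be walked from regs->gpr[1] using the context-stack walker that
stacktrace.c already uses for save_stack_trace(). A sketch along those lines
(save_context_stack() is assumed to be the file's existing internal helper;
this is not the verbatim patch):

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	/* GPR1 holds the stack pointer on powerpc; walk from there. */
	save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);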

278 files changed:
Documentation/devicetree/bindings/serial/8250.txt
Documentation/devicetree/bindings/thermal/qoriq-thermal.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
MAINTAINERS
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/boot/Makefile
arch/powerpc/boot/dts/fsl/b4si-post.dtsi
arch/powerpc/boot/dts/fsl/bsc9132qds.dts
arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
arch/powerpc/boot/dts/fsl/bsc9132si-pre.dtsi
arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
arch/powerpc/boot/dts/fsl/t1023rdb.dts
arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
arch/powerpc/boot/dts/fsl/t1024qds.dts
arch/powerpc/boot/dts/fsl/t1024rdb.dts
arch/powerpc/boot/dts/fsl/t1024si-post.dtsi
arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi
arch/powerpc/boot/dts/fsl/t1040d4rdb.dts
arch/powerpc/boot/dts/fsl/t1040qds.dts
arch/powerpc/boot/dts/fsl/t1040rdb.dts
arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
arch/powerpc/boot/dts/fsl/t1042qds.dts
arch/powerpc/boot/dts/fsl/t1042rdb.dts
arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts
arch/powerpc/boot/dts/fsl/t1042si-post.dtsi
arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi
arch/powerpc/boot/wrapper
arch/powerpc/configs/mpc85xx_basic_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/crypto/aes-spe-glue.c
arch/powerpc/crypto/sha1-spe-glue.c
arch/powerpc/crypto/sha256-spe-glue.c
arch/powerpc/include/asm/book3s/32/hash.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/32/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/64/hash-4k.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/64/hash-64k.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/64/hash.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/64/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/cmpxchg.h
arch/powerpc/include/asm/cpm.h
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/firmware.h
arch/powerpc/include/asm/immap_qe.h [deleted file]
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/nohash/32/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/32/pte-40x.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/32/pte-44x.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/32/pte-8xx.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/64/pgtable-4k.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/64/pgtable-64k.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/64/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/pte-book3e.h [new file with mode: 0644]
arch/powerpc/include/asm/opal-api.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/pci.h
arch/powerpc/include/asm/pgalloc-32.h
arch/powerpc/include/asm/pgalloc-64.h
arch/powerpc/include/asm/pgtable-ppc32.h [deleted file]
arch/powerpc/include/asm/pgtable-ppc64-4k.h [deleted file]
arch/powerpc/include/asm/pgtable-ppc64-64k.h [deleted file]
arch/powerpc/include/asm/pgtable-ppc64.h [deleted file]
arch/powerpc/include/asm/pgtable.h
arch/powerpc/include/asm/plpar_wrappers.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/pte-40x.h [deleted file]
arch/powerpc/include/asm/pte-44x.h [deleted file]
arch/powerpc/include/asm/pte-8xx.h [deleted file]
arch/powerpc/include/asm/pte-book3e.h [deleted file]
arch/powerpc/include/asm/pte-common.h
arch/powerpc/include/asm/pte-fsl-booke.h [deleted file]
arch/powerpc/include/asm/pte-hash32.h [deleted file]
arch/powerpc/include/asm/pte-hash64-4k.h [deleted file]
arch/powerpc/include/asm/pte-hash64-64k.h [deleted file]
arch/powerpc/include/asm/pte-hash64.h [deleted file]
arch/powerpc/include/asm/qe.h [deleted file]
arch/powerpc/include/asm/qe_ic.h [deleted file]
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/rtas.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/include/asm/synch.h
arch/powerpc/include/asm/time.h
arch/powerpc/include/asm/ucc.h [deleted file]
arch/powerpc/include/asm/ucc_fast.h [deleted file]
arch/powerpc/include/asm/ucc_slow.h [deleted file]
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/asm/vdso_datapage.h
arch/powerpc/include/uapi/asm/cputable.h
arch/powerpc/include/uapi/asm/elf.h
arch/powerpc/kernel/align.c
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/fpu.S
arch/powerpc/kernel/head_fsl_booke.S
arch/powerpc/kernel/idle_power7.S
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/stacktrace.c
arch/powerpc/kernel/swsusp.c
arch/powerpc/kernel/systbl_chk.c
arch/powerpc/kernel/systbl_chk.sh
arch/powerpc/kernel/time.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vdso.c
arch/powerpc/kernel/vdso32/datapage.S
arch/powerpc/kernel/vdso64/datapage.S
arch/powerpc/kernel/vector.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_paired_singles.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/lib/vmx-helper.c
arch/powerpc/lib/xor_vmx.c
arch/powerpc/mm/40x_mmu.c
arch/powerpc/mm/Makefile
arch/powerpc/mm/hash64_4k.c [new file with mode: 0644]
arch/powerpc/mm/hash64_64k.c [new file with mode: 0644]
arch/powerpc/mm/hash_low_64.S [deleted file]
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugepage-hash64.c
arch/powerpc/mm/hugetlbpage-book3e.c
arch/powerpc/mm/hugetlbpage-hash64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/pgtable.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/slb.c
arch/powerpc/mm/slice.c
arch/powerpc/platforms/83xx/km83xx.c
arch/powerpc/platforms/83xx/misc.c
arch/powerpc/platforms/83xx/mpc832x_mds.c
arch/powerpc/platforms/83xx/mpc832x_rdb.c
arch/powerpc/platforms/83xx/mpc836x_mds.c
arch/powerpc/platforms/83xx/mpc836x_rdk.c
arch/powerpc/platforms/85xx/bsc913x_qds.c
arch/powerpc/platforms/85xx/common.c
arch/powerpc/platforms/85xx/corenet_generic.c
arch/powerpc/platforms/85xx/mpc85xx_ads.c
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
arch/powerpc/platforms/85xx/twr_p102x.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/cell/Kconfig
arch/powerpc/platforms/cell/Makefile
arch/powerpc/platforms/cell/qpace_setup.c [deleted file]
arch/powerpc/platforms/cell/spufs/run.c
arch/powerpc/platforms/maple/time.c
arch/powerpc/platforms/powermac/bootx_init.c
arch/powerpc/platforms/powermac/pic.c
arch/powerpc/platforms/powernv/Makefile
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/idle.c
arch/powerpc/platforms/powernv/npu-dma.c [new file with mode: 0644]
arch/powerpc/platforms/powernv/opal-kmsg.c [new file with mode: 0644]
arch/powerpc/platforms/powernv/opal-prd.c
arch/powerpc/platforms/powernv/opal-rtc.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/powernv/opal-xscom.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/powernv/pci.h
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/pseries.h
arch/powerpc/platforms/pseries/ras.c
arch/powerpc/sysdev/Makefile
arch/powerpc/sysdev/axonram.c
arch/powerpc/sysdev/cpm_common.c
arch/powerpc/sysdev/fsl_lbc.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/qe_lib/Kconfig [deleted file]
arch/powerpc/sysdev/qe_lib/Makefile [deleted file]
arch/powerpc/sysdev/qe_lib/gpio.c [deleted file]
arch/powerpc/sysdev/qe_lib/qe.c [deleted file]
arch/powerpc/sysdev/qe_lib/qe_ic.c [deleted file]
arch/powerpc/sysdev/qe_lib/qe_ic.h [deleted file]
arch/powerpc/sysdev/qe_lib/qe_io.c [deleted file]
arch/powerpc/sysdev/qe_lib/ucc.c [deleted file]
arch/powerpc/sysdev/qe_lib/ucc_fast.c [deleted file]
arch/powerpc/sysdev/qe_lib/ucc_slow.c [deleted file]
arch/powerpc/sysdev/qe_lib/usb.c [deleted file]
arch/powerpc/xmon/xmon.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpuidle/cpuidle-powernv.c
drivers/crypto/vmx/aes.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c
drivers/crypto/vmx/ghash.c
drivers/macintosh/rack-meter.c
drivers/macintosh/via-pmu.c
drivers/misc/cxl/Makefile
drivers/misc/cxl/api.c
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/fault.c
drivers/misc/cxl/file.c
drivers/misc/cxl/pci.c
drivers/misc/cxl/vphb.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/ucc_geth.h
drivers/rtc/rtc-opal.c
drivers/soc/Kconfig
drivers/soc/Makefile
drivers/soc/fsl/Makefile [new file with mode: 0644]
drivers/soc/fsl/qe/Kconfig [new file with mode: 0644]
drivers/soc/fsl/qe/Makefile [new file with mode: 0644]
drivers/soc/fsl/qe/gpio.c [new file with mode: 0644]
drivers/soc/fsl/qe/qe.c [new file with mode: 0644]
drivers/soc/fsl/qe/qe_common.c [new file with mode: 0644]
drivers/soc/fsl/qe/qe_ic.c [new file with mode: 0644]
drivers/soc/fsl/qe/qe_ic.h [new file with mode: 0644]
drivers/soc/fsl/qe/qe_io.c [new file with mode: 0644]
drivers/soc/fsl/qe/ucc.c [new file with mode: 0644]
drivers/soc/fsl/qe/ucc_fast.c [new file with mode: 0644]
drivers/soc/fsl/qe/ucc_slow.c [new file with mode: 0644]
drivers/soc/fsl/qe/usb.c [new file with mode: 0644]
drivers/spi/spi-fsl-cpm.c
drivers/tty/serial/ucc_uart.c
drivers/usb/gadget/udc/fsl_qe_udc.c
drivers/usb/host/fhci-hcd.c
drivers/usb/host/fhci-hub.c
drivers/usb/host/fhci-sched.c
drivers/usb/host/fhci.h
include/linux/genalloc.h
include/soc/fsl/qe/immap_qe.h [new file with mode: 0644]
include/soc/fsl/qe/qe.h [new file with mode: 0644]
include/soc/fsl/qe/qe_ic.h [new file with mode: 0644]
include/soc/fsl/qe/ucc.h [new file with mode: 0644]
include/soc/fsl/qe/ucc_fast.h [new file with mode: 0644]
include/soc/fsl/qe/ucc_slow.h [new file with mode: 0644]
lib/genalloc.c
lib/raid6/altivec.uc
scripts/recordmcount.pl
tools/testing/selftests/powerpc/benchmarks/.gitignore
tools/testing/selftests/powerpc/benchmarks/Makefile
tools/testing/selftests/powerpc/benchmarks/context_switch.c [new file with mode: 0644]
tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
tools/testing/selftests/powerpc/harness.c
tools/testing/selftests/powerpc/pmu/Makefile
tools/testing/selftests/powerpc/pmu/ebb/Makefile
tools/testing/selftests/powerpc/pmu/ebb/ebb.c
tools/testing/selftests/powerpc/pmu/lib.c
tools/testing/selftests/powerpc/pmu/lib.h
tools/testing/selftests/powerpc/scripts/hmi.sh [new file with mode: 0755]
tools/testing/selftests/powerpc/tm/.gitignore
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/tm-signal-stack.c [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/tm-syscall.c
tools/testing/selftests/powerpc/tm/tm-vmxcopy.c [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/tm.h [new file with mode: 0644]
tools/testing/selftests/powerpc/utils.c [new file with mode: 0644]
tools/testing/selftests/powerpc/utils.h

index 91d5ab0..936ab5b 100644 (file)
@@ -14,7 +14,6 @@ Required properties:
          tegra132, or tegra210.
        - "nxp,lpc3220-uart"
        - "ralink,rt2880-uart"
-       - "ibm,qpace-nwp-serial"
        - "altr,16550-FIFO32"
        - "altr,16550-FIFO64"
        - "altr,16550-FIFO128"
diff --git a/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt b/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt
new file mode 100644 (file)
index 0000000..66223d5
--- /dev/null
@@ -0,0 +1,63 @@
+* Thermal Monitoring Unit (TMU) on Freescale QorIQ SoCs
+
+Required properties:
+- compatible : Must include "fsl,qoriq-tmu". The version of the device is
+       determined by the TMU IP Block Revision Register (IPBRR0) at
+       offset 0x0BF8.
+       Table of correspondences between IPBRR0 values and example  chips:
+               Value           Device
+               ----------      -----
+               0x01900102      T1040
+- reg : Address range of TMU registers.
+- interrupts : Contains the interrupt for TMU.
+- fsl,tmu-range : The values to be programmed into TTRnCR, as specified by
+       the SoC reference manual. The first cell is TTR0CR, the second is
+       TTR1CR, etc.
+- fsl,tmu-calibration : A list of cell pairs containing temperature
+       calibration data, as specified by the SoC reference manual.
+       The first cell of each pair is the value to be written to TTCFGR,
+       and the second is the value to be written to TSCFGR.
+
+Example:
+
+tmu@f0000 {
+       compatible = "fsl,qoriq-tmu";
+       reg = <0xf0000 0x1000>;
+       interrupts = <18 2 0 0>;
+       fsl,tmu-range = <0x000a0000 0x00090026 0x0008004a 0x0001006a>;
+       fsl,tmu-calibration = <0x00000000 0x00000025
+                              0x00000001 0x00000028
+                              0x00000002 0x0000002d
+                              0x00000003 0x00000031
+                              0x00000004 0x00000036
+                              0x00000005 0x0000003a
+                              0x00000006 0x00000040
+                              0x00000007 0x00000044
+                              0x00000008 0x0000004a
+                              0x00000009 0x0000004f
+                              0x0000000a 0x00000054
+
+                              0x00010000 0x0000000d
+                              0x00010001 0x00000013
+                              0x00010002 0x00000019
+                              0x00010003 0x0000001f
+                              0x00010004 0x00000025
+                              0x00010005 0x0000002d
+                              0x00010006 0x00000033
+                              0x00010007 0x00000043
+                              0x00010008 0x0000004b
+                              0x00010009 0x00000053
+
+                              0x00020000 0x00000010
+                              0x00020001 0x00000017
+                              0x00020002 0x0000001f
+                              0x00020003 0x00000029
+                              0x00020004 0x00000031
+                              0x00020005 0x0000003c
+                              0x00020006 0x00000042
+                              0x00020007 0x0000004d
+                              0x00020008 0x00000056
+
+                              0x00030000 0x00000012
+                              0x00030001 0x0000001d>;
+};
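
The fsl,tmu-calibration cells above arrive as one flat u32 array alternating
TTCFGR and TSCFGR values. A hypothetical consumer could walk the pairs with
the standard OF helpers; the register offsets below are placeholders for
illustration, not taken from a reference manual:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/of.h>

#define TMU_TTCFGR	0x080		/* placeholder offsets */
#define TMU_TSCFGR	0x084

static int tmu_write_calibration(struct device_node *np, void __iomem *base)
{
	int i, len;
	u32 ttcfgr, tscfgr;

	len = of_property_count_u32_elems(np, "fsl,tmu-calibration");
	if (len < 0 || len % 2)
		return -EINVAL;

	for (i = 0; i < len; i += 2) {
		/* first cell of the pair -> TTCFGR, second -> TSCFGR */
		of_property_read_u32_index(np, "fsl,tmu-calibration",
					   i, &ttcfgr);
		of_property_read_u32_index(np, "fsl,tmu-calibration",
					   i + 1, &tscfgr);
		iowrite32be(ttcfgr, base + TMU_TTCFGR);
		iowrite32be(tscfgr, base + TMU_TSCFGR);
	}
	return 0;
}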
index 168fd79..5a6235e 100644 (file)
@@ -2993,6 +2993,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        may be specified.
                        Format: <port>,<port>....
 
+       ppc_strict_facility_enable
+                       [PPC] This option catches any kernel floating point,
+                       Altivec, VSX and SPE outside of regions specifically
+                       allowed (eg kernel_enable_fpu()/kernel_disable_fpu()).
+                       There is some performance impact when enabling this.
+
        print-fatal-signals=
                        [KNL] debug: print fatal signals
 
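The region discipline this option polices is the begin/end bracketing visible
in the crypto driver hunks further down. A minimal sketch for the FP case,
using the powerpc enable_kernel_fp() helper and the disable_kernel_fp()
counterpart this series adds (a sketch, not code from the patch):

#include <linux/preempt.h>
#include <asm/switch_to.h>

static void fp_begin(void)
{
	preempt_disable();	/* facility state is per-CPU */
	enable_kernel_fp();
}

static void fp_end(void)
{
	disable_kernel_fp();	/* with ppc_strict_facility_enable, FP use
				 * after this point is caught */
	preempt_enable();
}
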
index d14baa1..04d62b1 100644 (file)
@@ -4490,8 +4490,9 @@ F:        include/linux/fs_enet_pd.h
 FREESCALE QUICC ENGINE LIBRARY
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Orphan
-F:     arch/powerpc/sysdev/qe_lib/
-F:     arch/powerpc/include/asm/*qe.h
+F:     drivers/soc/fsl/qe/
+F:     include/soc/fsl/*qe*.h
+F:     include/soc/fsl/*ucc*.h
 
 FREESCALE USB PERIPHERAL DRIVERS
 M:     Li Yang <leoli@freescale.com>
@@ -6444,7 +6445,7 @@ S:        Maintained
 F:     arch/powerpc/platforms/8xx/
 
 LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
-M:     Scott Wood <scottwood@freescale.com>
+M:     Scott Wood <oss@buserror.net>
 M:     Kumar Gala <galak@kernel.crashing.org>
 W:     http://www.penguinppc.org/
 L:     linuxppc-dev@lists.ozlabs.org
index 85eabc4..7d5a835 100644 (file)
@@ -560,6 +560,7 @@ choice
 
 config PPC_4K_PAGES
        bool "4k page size"
+       select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
 
 config PPC_16K_PAGES
        bool "16k page size"
@@ -568,6 +569,7 @@ config PPC_16K_PAGES
 config PPC_64K_PAGES
        bool "64k page size"
        depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
+       select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
 
 config PPC_256K_PAGES
        bool "256k page size"
@@ -1075,8 +1077,6 @@ source "drivers/Kconfig"
 
 source "fs/Kconfig"
 
-source "arch/powerpc/sysdev/qe_lib/Kconfig"
-
 source "lib/Kconfig"
 
 source "arch/powerpc/Kconfig.debug"
index a0e44a9..638f9ce 100644 (file)
@@ -64,17 +64,17 @@ config PPC_EMULATED_STATS
          emulated.
 
 config CODE_PATCHING_SELFTEST
-       bool "Run self-tests of the code-patching code."
+       bool "Run self-tests of the code-patching code"
        depends on DEBUG_KERNEL
        default n
 
 config FTR_FIXUP_SELFTEST
-       bool "Run self-tests of the feature-fixup code."
+       bool "Run self-tests of the feature-fixup code"
        depends on DEBUG_KERNEL
        default n
 
 config MSI_BITMAP_SELFTEST
-       bool "Run self-tests of the MSI bitmap code."
+       bool "Run self-tests of the MSI bitmap code"
        depends on DEBUG_KERNEL
        default n
 
index 99e4487..6116510 100644 (file)
@@ -113,7 +113,6 @@ src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S
 src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
 src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
-src-plat-$(CONFIG_PPC_CELL_QPACE) += pseries-head.S
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))
@@ -217,7 +216,6 @@ image-$(CONFIG_PPC_POWERNV)         += zImage.pseries
 image-$(CONFIG_PPC_MAPLE)              += zImage.maple
 image-$(CONFIG_PPC_IBM_CELL_BLADE)     += zImage.pseries
 image-$(CONFIG_PPC_PS3)                        += dtbImage.ps3
-image-$(CONFIG_PPC_CELL_QPACE)         += zImage.pseries
 image-$(CONFIG_PPC_CHRP)               += zImage.chrp
 image-$(CONFIG_PPC_EFIKA)              += zImage.chrp
 image-$(CONFIG_PPC_PMAC)               += zImage.pmac
index 74866ac..1b33f51 100644 (file)
        fman@400000 {
                interrupts = <96 2 0 0>, <16 2 1 30>;
 
+               muram@0 {
+                       compatible = "fsl,fman-muram";
+                       reg = <0x0 0x80000>;
+               };
+
                enet0: ethernet@e0000 {
                };
 
index 70882ad..56e6f13 100644 (file)
        soc: soc@ff700000 {
                ranges = <0x0 0x0 0xff700000 0x100000>;
        };
+
+       pci0: pcie@ff70a000 {
+               reg = <0 0xff70a000 0 0x1000>;
+               ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x20000000
+                         0x1000000 0x0 0x00000000 0 0xc0010000 0x0 0x10000>;
+               pcie@0 {
+                       ranges = <0x2000000 0x0 0x90000000
+                                 0x2000000 0x0 0x90000000
+                                 0x0 0x20000000
+
+                                 0x1000000 0x0 0x0
+                                 0x1000000 0x0 0x0
+                                 0x0 0x100000>;
+               };
+       };
 };
 
 /include/ "bsc9132qds.dtsi"
index c723071..b5f0715 100644 (file)
        interrupts = <16 2 0 0 20 2 0 0>;
 };
 
+/* controller at 0xa000 */
+&pci0 {
+       compatible = "fsl,bsc9132-pcie", "fsl,qoriq-pcie-v2.2";
+       device_type = "pci";
+       #size-cells = <2>;
+       #address-cells = <3>;
+       bus-range = <0 255>;
+       interrupts = <16 2 0 0>;
+
+       pcie@0 {
+               reg = <0 0 0 0 0>;
+               #interrupt-cells = <1>;
+               #size-cells = <2>;
+               #address-cells = <3>;
+               device_type = "pci";
+               interrupts = <16 2 0 0>;
+               interrupt-map-mask = <0xf800 0 0 7>;
+
+               interrupt-map = <
+                       /* IDSEL 0x0 */
+                       0000 0x0 0x0 0x1 &mpic 0x0 0x2 0x0 0x0
+                       0000 0x0 0x0 0x2 &mpic 0x1 0x2 0x0 0x0
+                       0000 0x0 0x0 0x3 &mpic 0x2 0x2 0x0 0x0
+                       0000 0x0 0x0 0x4 &mpic 0x3 0x2 0x0 0x0
+                       >;
+       };
+};
+
 &soc {
        #address-cells = <1>;
        #size-cells = <1>;
index 301a9db..90f7949 100644 (file)
@@ -45,6 +45,7 @@
                serial0 = &serial0;
                ethernet0 = &enet0;
                ethernet1 = &enet1;
+               pci0 = &pci0;
        };
 
        cpus {
index 0f0ced6..14b6295 100644 (file)
                phy-connection-type = "sgmii";
        };
 };
+
+&pci0 {
+       pcie@0 {
+               interrupt-map = <
+                       /* IDSEL 0x0 */
+                       /*
+                        *irq[4:5] are active-high
+                        *irq[6:7] are active-low
+                        */
+                       0000 0x0 0x0 0x1 &mpic 0x4 0x2 0x0 0x0
+                       0000 0x0 0x0 0x2 &mpic 0x5 0x2 0x0 0x0
+                       0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+                       0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+                       >;
+       };
+};
index 2b2fff4..6bd842b 100644 (file)
        };
 };
 
-/include/ "t1023si-post.dtsi"
+#include "t1023si-post.dtsi"
index 518ddaa..99e421d 100644 (file)
@@ -32,6 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <dt-bindings/thermal/thermal.h>
+
 &ifc {
        #address-cells = <2>;
        #size-cells = <1>;
                reg = <0xea000 0x4000>;
        };
 
+       tmu: tmu@f0000 {
+               compatible = "fsl,qoriq-tmu";
+               reg = <0xf0000 0x1000>;
+               interrupts = <18 2 0 0>;
+               fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x30061>;
+               fsl,tmu-calibration = <0x00000000 0x0000000f
+                                      0x00000001 0x00000017
+                                      0x00000002 0x0000001e
+                                      0x00000003 0x00000026
+                                      0x00000004 0x0000002e
+                                      0x00000005 0x00000035
+                                      0x00000006 0x0000003d
+                                      0x00000007 0x00000044
+                                      0x00000008 0x0000004c
+                                      0x00000009 0x00000053
+                                      0x0000000a 0x0000005b
+                                      0x0000000b 0x00000064
+
+                                      0x00010000 0x00000011
+                                      0x00010001 0x0000001c
+                                      0x00010002 0x00000024
+                                      0x00010003 0x0000002b
+                                      0x00010004 0x00000034
+                                      0x00010005 0x00000039
+                                      0x00010006 0x00000042
+                                      0x00010007 0x0000004c
+                                      0x00010008 0x00000051
+                                      0x00010009 0x0000005a
+                                      0x0001000a 0x00000063
+
+                                      0x00020000 0x00000013
+                                      0x00020001 0x00000019
+                                      0x00020002 0x00000024
+                                      0x00020003 0x0000002c
+                                      0x00020004 0x00000035
+                                      0x00020005 0x0000003d
+                                      0x00020006 0x00000046
+                                      0x00020007 0x00000050
+                                      0x00020008 0x00000059
+
+                                      0x00030000 0x00000002
+                                      0x00030001 0x0000000d
+                                      0x00030002 0x00000019
+                                      0x00030003 0x00000024>;
+               #thermal-sensor-cells = <0>;
+       };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       polling-delay-passive = <1000>;
+                       polling-delay = <5000>;
+
+                       thermal-sensors = <&tmu>;
+
+                       trips {
+                               cpu_alert: cpu-alert {
+                                       temperature = <85000>;
+                                       hysteresis = <2000>;
+                                       type = "passive";
+                               };
+                               cpu_crit: cpu-crit {
+                                       temperature = <95000>;
+                                       hysteresis = <2000>;
+                                       type = "critical";
+                               };
+                       };
+
+                       cooling-maps {
+                               map0 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu0 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                               map1 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu1 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                       };
+               };
+       };
+
        scfg: global-utilities@fc000 {
                compatible = "fsl,t1023-scfg";
                reg = <0xfc000 0x1000>;
index 43cd5b5..6a3581b 100644 (file)
        };
 };
 
-/include/ "t1024si-post.dtsi"
+#include "t1024si-post.dtsi"
index 429d8c7..0ccc7d0 100644 (file)
        };
 };
 
-/include/ "t1024si-post.dtsi"
+#include "t1024si-post.dtsi"
index 95e3af8..bb48034 100644 (file)
@@ -32,7 +32,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "t1023si-post.dtsi"
+#include "t1023si-post.dtsi"
 
 / {
        aliases {
index 3e1528a..9d08a36 100644 (file)
@@ -76,6 +76,7 @@
                        reg = <0>;
                        clocks = <&mux0>;
                        next-level-cache = <&L2_1>;
+                       #cooling-cells = <2>;
                        L2_1: l2-cache {
                                next-level-cache = <&cpc>;
                        };
@@ -85,6 +86,7 @@
                        reg = <1>;
                        clocks = <&mux1>;
                        next-level-cache = <&L2_2>;
+                       #cooling-cells = <2>;
                        L2_2: l2-cache {
                                next-level-cache = <&cpc>;
                        };
index 681746e..fb6bc02 100644 (file)
@@ -43,4 +43,4 @@
        interrupt-parent = <&mpic>;
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
index 4d29865..5f76edc 100644 (file)
@@ -43,4 +43,4 @@
        interrupt-parent = <&mpic>;
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
index 8f9e65b..cf19415 100644 (file)
@@ -45,4 +45,4 @@
        };
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
index d30b3de..e0f4da5 100644 (file)
@@ -32,6 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <dt-bindings/thermal/thermal.h>
+
 &bman_fbpr {
        compatible = "fsl,bman-fbpr";
        alloc-ranges = <0 0 0x10000 0>;
                reg        = <0xea000 0x4000>;
        };
 
+       tmu: tmu@f0000 {
+               compatible = "fsl,qoriq-tmu";
+               reg = <0xf0000 0x1000>;
+               interrupts = <18 2 0 0>;
+               fsl,tmu-range = <0xa0000 0x90026 0x8004a 0x1006a>;
+               fsl,tmu-calibration = <0x00000000 0x00000025
+                                      0x00000001 0x00000028
+                                      0x00000002 0x0000002d
+                                      0x00000003 0x00000031
+                                      0x00000004 0x00000036
+                                      0x00000005 0x0000003a
+                                      0x00000006 0x00000040
+                                      0x00000007 0x00000044
+                                      0x00000008 0x0000004a
+                                      0x00000009 0x0000004f
+                                      0x0000000a 0x00000054
+
+                                      0x00010000 0x0000000d
+                                      0x00010001 0x00000013
+                                      0x00010002 0x00000019
+                                      0x00010003 0x0000001f
+                                      0x00010004 0x00000025
+                                      0x00010005 0x0000002d
+                                      0x00010006 0x00000033
+                                      0x00010007 0x00000043
+                                      0x00010008 0x0000004b
+                                      0x00010009 0x00000053
+
+                                      0x00020000 0x00000010
+                                      0x00020001 0x00000017
+                                      0x00020002 0x0000001f
+                                      0x00020003 0x00000029
+                                      0x00020004 0x00000031
+                                      0x00020005 0x0000003c
+                                      0x00020006 0x00000042
+                                      0x00020007 0x0000004d
+                                      0x00020008 0x00000056
+
+                                      0x00030000 0x00000012
+                                      0x00030001 0x0000001d>;
+               #thermal-sensor-cells = <0>;
+       };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       polling-delay-passive = <1000>;
+                       polling-delay = <5000>;
+
+                       thermal-sensors = <&tmu>;
+
+                       trips {
+                               cpu_alert: cpu-alert {
+                                       temperature = <85000>;
+                                       hysteresis = <2000>;
+                                       type = "passive";
+                               };
+                               cpu_crit: cpu-crit {
+                                       temperature = <95000>;
+                                       hysteresis = <2000>;
+                                       type = "critical";
+                               };
+                       };
+
+                       cooling-maps {
+                               map0 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu0 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                               map1 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu1 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                               map2 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu2 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                               map3 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu3 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                       };
+               };
+       };
+
        scfg: global-utilities@fc000 {
                compatible = "fsl,t1040-scfg";
                reg = <0xfc000 0x1000>;
index b245b31..2a5a90d 100644 (file)
@@ -50,4 +50,4 @@
        };
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1042si-post.dtsi"
index 4ab9bbe..90a4a73 100644 (file)
@@ -43,4 +43,4 @@
        interrupt-parent = <&mpic>;
 };
 
-/include/ "t1042si-post.dtsi"
+#include "t1042si-post.dtsi"
index 67af56b..8d908e7 100644 (file)
@@ -45,4 +45,4 @@
        };
 };
 
-/include/ "t1042si-post.dtsi"
+#include "t1042si-post.dtsi"
index 2f67677..98c0010 100644 (file)
@@ -54,4 +54,4 @@
        };
 };
 
-/include/ "t1042si-post.dtsi"
+#include "t1042si-post.dtsi"
index 319b74f..a5544f9 100644 (file)
@@ -32,6 +32,6 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
 
 /* Place holder for ethernet related device tree nodes */
index fcfa38a..6db0ee8 100644 (file)
@@ -76,6 +76,7 @@
                        reg = <0>;
                        clocks = <&mux0>;
                        next-level-cache = <&L2_1>;
+                       #cooling-cells = <2>;
                        L2_1: l2-cache {
                                next-level-cache = <&cpc>;
                        };
@@ -85,6 +86,7 @@
                        reg = <1>;
                        clocks = <&mux1>;
                        next-level-cache = <&L2_2>;
+                       #cooling-cells = <2>;
                        L2_2: l2-cache {
                                next-level-cache = <&cpc>;
                        };
@@ -94,6 +96,7 @@
                        reg = <2>;
                        clocks = <&mux2>;
                        next-level-cache = <&L2_3>;
+                       #cooling-cells = <2>;
                        L2_3: l2-cache {
                                next-level-cache = <&cpc>;
                        };
                        reg = <3>;
                        clocks = <&mux3>;
                        next-level-cache = <&L2_4>;
+                       #cooling-cells = <2>;
                        L2_4: l2-cache {
                                next-level-cache = <&cpc>;
                        };
index ceaa75d..6a19fce 100755 (executable)
@@ -154,7 +154,7 @@ if [ -z "$kernel" ]; then
     kernel=vmlinux
 fi
 
-elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
+LANG=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
 case "$elfformat" in
     elf64-powerpcle)   format=elf64lppc        ;;
    elf64-powerpc)     format=elf64ppc ;;
index 850bd19..b1593fe 100644 (file)
@@ -12,6 +12,7 @@ CONFIG_P1010_RDB=y
 CONFIG_P1022_DS=y
 CONFIG_P1022_RDK=y
 CONFIG_P1023_RDB=y
+CONFIG_TWR_P102x=y
 CONFIG_SBC8548=y
 CONFIG_SOCRATES=y
 CONFIG_STX_GP3=y
index 2c041b5..b041fb6 100644 (file)
@@ -36,7 +36,6 @@ CONFIG_PS3_ROM=m
 CONFIG_PS3_FLASH=m
 CONFIG_PS3_LPM=m
 CONFIG_PPC_IBM_CELL_BLADE=y
-CONFIG_PPC_CELL_QPACE=y
 CONFIG_RTAS_FLASH=m
 CONFIG_IBMEBUS=y
 CONFIG_CPU_FREQ_PMAC64=y
index bd5e63f..93ee046 100644 (file)
@@ -85,6 +85,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+       disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
 }
index 3e1d222..f9ebc38 100644 (file)
@@ -46,6 +46,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+       disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
 }
index f4a616f..718a079 100644 (file)
@@ -47,6 +47,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+       disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
 }
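
All three hunks above complete the same pairing: kernel SPE use is bracketed
by a begin/end pair, and the new disable_kernel_spe() closes the region that
enable_kernel_spe() opened. Reassembled from the visible context, the pattern
in these drivers reads as follows (a sketch, not a verbatim quote of the
files):

static void spe_begin(void)
{
	/* disable preemption and enable the SPE unit for kernel use */
	preempt_disable();
	enable_kernel_spe();
}

static void spe_end(void)
{
	disable_kernel_spe();	/* new: close the SPE region explicitly */
	/* reenable preemption */
	preempt_enable();
}
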
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
new file mode 100644 (file)
index 0000000..264b754
--- /dev/null
@@ -0,0 +1,46 @@
+#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
+#define _ASM_POWERPC_BOOK3S_32_HASH_H
+#ifdef __KERNEL__
+
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings.  We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code.  Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT  0x001   /* software: pte contains a translation */
+#define _PAGE_HASHPTE  0x002   /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER     0x004   /* usermode access allowed */
+#define _PAGE_GUARDED  0x008   /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010   /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020   /* I: cache inhibit */
+#define _PAGE_WRITETHRU        0x040   /* W: cache write-through */
+#define _PAGE_DIRTY    0x080   /* C: page changed */
+#define _PAGE_ACCESSED 0x100   /* R: page referenced */
+#define _PAGE_RW       0x400   /* software: user write access allowed */
+#define _PAGE_SPECIAL  0x800   /* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+#endif
+
+#define _PMD_PRESENT   0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD       (~PAGE_MASK)
+
+/* Hash table based platforms need atomic updates of the linux PTE */
+#define PTE_ATOMIC_UPDATES     1
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
new file mode 100644 (file)
index 0000000..38b33dc
--- /dev/null
@@ -0,0 +1,482 @@
+#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#include <asm/book3s/32/hash.h>
+
+/* And here we include common definitions */
+#include <asm/pte-common.h>
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table.  The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT    (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+#define PTRS_PER_PTE   (1 << PTE_SHIFT)
+#define PTRS_PER_PMD   1
+#define PTRS_PER_PGD   (1 << (32 - PGDIR_SHIFT))
+
+#define USER_PTRS_PER_PGD      (TASK_SIZE / PGDIR_SIZE)
+/*
+ * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
+ * value (for now) on others, from where we can start layout kernel
+ * virtual space that goes below PKMAP and FIXMAP
+ */
+#ifdef CONFIG_HIGHMEM
+#define KVIRT_TOP      PKMAP_BASE
+#else
+#define KVIRT_TOP      (0xfe000000UL)  /* for now, could be FIXMAP_BASE ? */
+#endif
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP    ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
+#define IOREMAP_TOP    KVIRT_TOP
+#endif
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 16MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from ioremap_base being run into the VM area allocations (growing upwards
+ * from VMALLOC_START).  For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system.  This really does become a problem for machines with good amounts
+ * of RAM.  -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+#ifdef PPC_PIN_SIZE
+#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#else
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#endif
+#define VMALLOC_END    ioremap_bot
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/io.h>                    /* For sub-arch specific PPC_PIN_SIZE */
+
+extern unsigned long ioremap_bot;
+
+/*
+ * entries per page directory level: our page-table tree is two-level, so
+ * we don't really have any PMD directory.
+ */
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+
+#define pte_ERROR(e) \
+       pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+               (unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+/*
+ * Bits in a linux-style PTE.  These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#define pte_clear(mm, addr, ptep) \
+       do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+
+#define pmd_none(pmd)          (!pmd_val(pmd))
+#define        pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
+#define        pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       *pmdp = __pmd(0);
+}
+
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_pages(unsigned context, unsigned long va,
+                           unsigned long pmdval, int count);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va,
+                         unsigned long pmdval);
+
+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+                            unsigned long address);
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the later case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ */
+#ifndef CONFIG_PTE_64BIT
+static inline unsigned long pte_update(pte_t *p,
+                                      unsigned long clr,
+                                      unsigned long set)
+{
+       unsigned long old, tmp;
+
+       __asm__ __volatile__("\
+1:     lwarx   %0,0,%3\n\
+       andc    %1,%0,%4\n\
+       or      %1,%1,%5\n"
+       PPC405_ERR77(0,%3)
+"      stwcx.  %1,0,%3\n\
+       bne-    1b"
+       : "=&r" (old), "=&r" (tmp), "=m" (*p)
+       : "r" (p), "r" (clr), "r" (set), "m" (*p)
+       : "cc" );
+
+       return old;
+}
+#else /* CONFIG_PTE_64BIT */
+static inline unsigned long long pte_update(pte_t *p,
+                                           unsigned long clr,
+                                           unsigned long set)
+{
+       unsigned long long old;
+       unsigned long tmp;
+
+       __asm__ __volatile__("\
+1:     lwarx   %L0,0,%4\n\
+       lwzx    %0,0,%3\n\
+       andc    %1,%L0,%5\n\
+       or      %1,%1,%6\n"
+       PPC405_ERR77(0,%3)
+"      stwcx.  %1,0,%4\n\
+       bne-    1b"
+       : "=&r" (old), "=&r" (tmp), "=m" (*p)
+       : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+       : "cc" );
+
+       return old;
+}
+#endif /* CONFIG_PTE_64BIT */
+
+/*
+ * 2.6 calls this without flushing the TLB entry; this is wrong
+ * for our hash-based implementation, we fix that up here.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+{
+       unsigned long old;
+       old = pte_update(ptep, _PAGE_ACCESSED, 0);
+       if (old & _PAGE_HASHPTE) {
+               unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+               flush_hash_pages(context, addr, ptephys, 1);
+       }
+       return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+       __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                                      pte_t *ptep)
+{
+       return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                                     pte_t *ptep)
+{
+       pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+}
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+{
+       unsigned long set = pte_val(entry) &
+               (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+       unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+       pte_update(ptep, clr, set);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)  (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+
+/*
+ * Note that on Book E processors, the pmd contains the kernel virtual
+ * (lowmem) address of the pte page.  The physical address is less useful
+ * because everything runs with translation enabled (even the TLB miss
+ * handler).  On everything else the pmd contains the physical address
+ * of the pte page.  -- paulus
+ */
+#ifndef CONFIG_BOOKE
+#define pmd_page_vaddr(pmd)    \
+       ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)          \
+       pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#else
+#define pmd_page_vaddr(pmd)    \
+       ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)          \
+       pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#endif
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address)      ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address)         ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address)             \
+       (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr)   \
+       ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir, addr)              \
+       ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)         kunmap_atomic(pte)
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ *   -- paulus
+ */
+#define __swp_type(entry)              ((entry).val & 0x1f)
+#define __swp_offset(entry)            ((entry).val >> 5)
+#define __swp_entry(type, offset)      ((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x)          ((pte_t) { (x).val << 3 })
+
+#ifndef CONFIG_PPC_4K_PAGES
+void pgtable_cache_init(void);
+#else
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+#endif
+
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+                     pmd_t **pmdp);
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)         { return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_dirty(pte_t pte)         { return !!(pte_val(pte) & _PAGE_DIRTY); }
+static inline int pte_young(pte_t pte)         { return !!(pte_val(pte) & _PAGE_ACCESSED); }
+static inline int pte_special(pte_t pte)       { return !!(pte_val(pte) & _PAGE_SPECIAL); }
+static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)   { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+static inline int pte_present(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+       return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+                    pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+       return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * an horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+                               pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+       /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+        * helper pte_update() which does an atomic update. We need to do that
+        * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+        * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+        * the hash bits instead (ie, same as the non-SMP case)
+        */
+       if (percpu)
+               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                             | (pte_val(pte) & ~_PAGE_HASHPTE));
+       else
+               pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+       /* Second case is 32-bit with 64-bit PTE.  In this case, we
+        * can just store as long as we do the two halves in the right order
+        * with a barrier in between. This is possible because we take care,
+        * in the hash code, to pre-invalidate if the PTE was already hashed,
+        * which synchronizes us with any concurrent invalidation.
+        * In the percpu case, we also fall back to the simple update
+        * preserving the hash bits.
+        */
+       if (percpu) {
+               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                             | (pte_val(pte) & ~_PAGE_HASHPTE));
+               return;
+       }
+       if (pte_val(*ptep) & _PAGE_HASHPTE)
+               flush_hash_entry(mm, ptep, addr);
+       __asm__ __volatile__("\
+               stw%U0%X0 %2,%0\n\
+               eieio\n\
+               stw%U0%X0 %L2,%1"
+       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+       : "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+       /* Third case is 32-bit hash table in UP mode; we need to preserve
+        * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+        * translation in the hash yet (done in a subsequent flush_tlb_xxx())
+        * and we need to keep track that this PTE needs invalidating.
+        */
+       *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                     | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+#error "Not supported"
+#endif
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL        (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+                        _PAGE_WRITETHRU)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_NO_CACHE);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_COHERENT);
+}
+
+#define pgprot_cached_wthru pgprot_cached_wthru
+static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_COHERENT | _PAGE_WRITETHRU);
+}
+
+#define pgprot_cached_noncoherent pgprot_cached_noncoherent
+static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
+{
+       return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+       return pgprot_noncached_wc(prot);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
new file mode 100644 (file)
index 0000000..ea0414d
--- /dev/null
@@ -0,0 +1,132 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H
+/*
+ * Entries per page directory level.  The PTE level must use a 64b record
+ * for each page table entry.  The PMD and PGD level use a 32b record for
+ * each entry by assuming that each entry is page aligned.
+ */
+#define PTE_INDEX_SIZE  9
+#define PMD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  9
+#define PGD_INDEX_SIZE  9
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE   (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD   (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD   (1 << PUD_INDEX_SIZE)
+#define PTRS_PER_PGD   (1 << PGD_INDEX_SIZE)
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT      (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+
+/* With 4k base page size, hugepage PTEs go at the PMD level */
+#define MIN_HUGEPTE_SHIFT      PMD_SHIFT
+
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT      (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE       (1UL << PUD_SHIFT)
+#define PUD_MASK       (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT    (PUD_SHIFT + PUD_INDEX_SIZE)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
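+/*
+ * Worked example (illustrative, following from the defines above): with
+ * 4K pages (PAGE_SHIFT = 12), PMD_SHIFT = 21 (2M per PMD entry),
+ * PUD_SHIFT = 28 (256M per PUD entry) and PGDIR_SHIFT = 37 (128G per
+ * PGD entry); the four levels together map 37 + 9 = 46 bits, i.e.
+ * 64TB of effective address space.
+ */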
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS                0
+/* Bits to mask out from a PUD to get to the PMD page */
+#define PUD_MASKED_BITS                0
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS                0
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
+                        _PAGE_F_SECOND | _PAGE_F_GIX)
+
+/* shift to put page number into pte */
+#define PTE_RPN_SHIFT  (18)
+
+#define _PAGE_4K_PFN           0
+#ifndef __ASSEMBLY__
+/*
+ * Bits related to 4-level page tables
+ */
+
+#define pgd_none(pgd)          (!pgd_val(pgd))
+#define pgd_bad(pgd)           (pgd_val(pgd) == 0)
+#define pgd_present(pgd)       (pgd_val(pgd) != 0)
+#define pgd_page_vaddr(pgd)    (pgd_val(pgd) & ~PGD_MASKED_BITS)
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+       *pgdp = __pgd(0);
+}
+
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+       return __pte(pgd_val(pgd));
+}
+
+static inline pgd_t pte_pgd(pte_t pte)
+{
+       return __pgd(pte_val(pte));
+}
+extern struct page *pgd_page(pgd_t pgd);
+
+#define pud_offset(pgdp, addr) \
+  (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
+    (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+
+#define pud_ERROR(e) \
+       pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+
+/*
+ * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
+ */
+#define remap_4k_pfn(vma, addr, pfn, prot)     \
+       remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * For 4k page size, we support explicit hugepages via hugepd
+ */
+static inline int pmd_huge(pmd_t pmd)
+{
+       return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+       return 0;
+}
+
+static inline int pgd_huge(pgd_t pgd)
+{
+       return 0;
+}
+#define pgd_huge pgd_huge
+
+static inline int hugepd_ok(hugepd_t hpd)
+{
+       /*
+        * if it is not a PTE and has the hugepd shift mask
+        * set, then it is a hugepd directory pointer
+        */
+       if (!(hpd.pd & _PAGE_PTE) &&
+           ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+               return true;
+       return false;
+}
+#define is_hugepd(hpd)         (hugepd_ok(hpd))
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
new file mode 100644 (file)
index 0000000..9e55e3b
--- /dev/null
@@ -0,0 +1,312 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+#define PTE_INDEX_SIZE  8
+#define PMD_INDEX_SIZE  10
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE  12
+
+#define PTRS_PER_PTE   (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD   (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PGD   (1 << PGD_INDEX_SIZE)
+
+/* With 64k base page size, hugepage PTEs can sit as low as the PTE level */
+#define MIN_HUGEPTE_SHIFT      PAGE_SHIFT
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT      (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT    (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+#define _PAGE_COMBO    0x00040000 /* this is a combo 4k page */
+#define _PAGE_4K_PFN   0x00080000 /* PFN is for a single 4k page */
+/*
+ * Used to track subpage group valid if _PAGE_COMBO is set
+ * This overloads _PAGE_F_GIX and _PAGE_F_SECOND
+ */
+#define _PAGE_COMBO_VALID      (_PAGE_F_GIX | _PAGE_F_SECOND)
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_F_SECOND | \
+                        _PAGE_F_GIX | _PAGE_HASHPTE | _PAGE_COMBO)
+
+/* Shift to put page number into pte.
+ *
+ * That gives us a max RPN of 34 bits, which means a max of 50 bits
+ * of addressable physical space, or 46 bits for the special 4k PFNs.
+ */
+#define PTE_RPN_SHIFT  (30)
+/*
+ * We support 16 fragments per 64K PTE page.
+ */
+#define PTE_FRAG_NR    16
+/*
+ * We use a 2K PTE page fragment and another 2K for storing the
+ * real_pte_t hash index.
+ */
+#define PTE_FRAG_SIZE_SHIFT  12
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
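+/*
+ * Illustrative arithmetic: PTRS_PER_PTE = 256 entries of 8 bytes is 2K
+ * of PTEs, plus 2K of hash index, giving the 4K (1 << 12) fragment
+ * size above; 64K / 4K = 16 fragments per page, matching PTE_FRAG_NR.
+ */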
+
+/*
+ * Bits to mask out from a PMD to get to the PTE page
+ * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned.
+ */
+#define PMD_MASKED_BITS                (PTE_FRAG_SIZE - 1)
+/* Bits to mask out from a PGD/PUD to get to the PMD page */
+#define PUD_MASKED_BITS                0x1ff
+
+#ifndef __ASSEMBLY__
+
+/*
+ * With 64K pages on hash table, we have a special PTE format that
+ * uses a second "half" of the page table to encode sub-page information
+ * in order to deal with 64K made of 4K HW pages. Thus we override the
+ * generic accessors and iterators here
+ */
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+{
+       real_pte_t rpte;
+       unsigned long *hidxp;
+
+       rpte.pte = pte;
+       rpte.hidx = 0;
+       if (pte_val(pte) & _PAGE_COMBO) {
+               /*
+                * Make sure we order the hidx load against the _PAGE_COMBO
+                * check. The store side ordering is done in __hash_page_4K
+                */
+               smp_rmb();
+               hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+               rpte.hidx = *hidxp;
+       }
+       return rpte;
+}
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+       if ((pte_val(rpte.pte) & _PAGE_COMBO))
+               return (rpte.hidx >> (index<<2)) & 0xf;
+       return (pte_val(rpte.pte) >> _PAGE_F_GIX_SHIFT) & 0xf;
+}
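+/*
+ * Illustrative note: each 4K subpage gets a 4-bit hidx nibble in
+ * rpte.hidx, hence the (index << 2) shift above; subpage 3's hidx,
+ * for example, occupies bits 12-15.
+ */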
+
+#define __rpte_to_pte(r)       ((r).pte)
+extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
+/*
+ * Trick: we set __end to va + 64k, which happens to work for
+ * a 16M page as well, since we want only one iteration
+ */
+#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)    \
+       do {                                                            \
+               unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));  \
+               unsigned __split = (psize == MMU_PAGE_4K ||             \
+                                   psize == MMU_PAGE_64K_AP);          \
+               shift = mmu_psize_defs[psize].shift;                    \
+               for (index = 0; vpn < __end; index++,                   \
+                            vpn += (1L << (shift - VPN_SHIFT))) {      \
+                       if (!__split || __rpte_sub_valid(rpte, index))  \
+                               do {
+
+#define pte_iterate_hashed_end() } while(0); } } while(0)
+
+#define pte_pagesize_index(mm, addr, pte)      \
+       (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
+
+#define remap_4k_pfn(vma, addr, pfn, prot)                             \
+       (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL :  \
+               remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,        \
+                       __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))
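+/*
+ * Illustrative note: the WARN_ON above enforces the 34-bit RPN limit
+ * mentioned earlier -- with PTE_RPN_SHIFT = 30 a PFN must fit in
+ * 64 - 30 = 34 bits, i.e. 34 + 12 = 46 bits of physical address for
+ * the special 4k PFNs.
+ */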
+
+#define PTE_TABLE_SIZE PTE_FRAG_SIZE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_TABLE_SIZE ((sizeof(pmd_t) << PMD_INDEX_SIZE) + (sizeof(unsigned long) << PMD_INDEX_SIZE))
+#else
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#endif
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define pgd_pte(pgd)   (pud_pte(((pud_t){ pgd })))
+#define pte_pgd(pte)   ((pgd_t)pte_pud(pte))
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
+ * 16GB hugepage PTEs in the PGD and 16MB hugepage PTEs at the PMD.
+ *
+ * Defined in such a way that the code block can be optimized away at build time
+ * if CONFIG_HUGETLB_PAGE=n.
+ */
+static inline int pmd_huge(pmd_t pmd)
+{
+       /*
+        * leaf pte for huge page
+        */
+       return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
+static inline int pud_huge(pud_t pud)
+{
+       /*
+        * leaf pte for huge page
+        */
+       return !!(pud_val(pud) & _PAGE_PTE);
+}
+
+static inline int pgd_huge(pgd_t pgd)
+{
+       /*
+        * leaf pte for huge page
+        */
+       return !!(pgd_val(pgd) & _PAGE_PTE);
+}
+#define pgd_huge pgd_huge
+
+#ifdef CONFIG_DEBUG_VM
+extern int hugepd_ok(hugepd_t hpd);
+#define is_hugepd(hpd)               (hugepd_ok(hpd))
+#else
+/*
+ * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
+ * need to setup hugepage directory for them. Our pte and page directory format
+ * enable us to have this enabled.
+ */
+static inline int hugepd_ok(hugepd_t hpd)
+{
+       return 0;
+}
+#define is_hugepd(pdep)                        0
+#endif /* CONFIG_DEBUG_VM */
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
+                                        unsigned long addr,
+                                        pmd_t *pmdp,
+                                        unsigned long clr,
+                                        unsigned long set);
+static inline char *get_hpte_slot_array(pmd_t *pmdp)
+{
+       /*
+        * The hpte hindex is stored in the pgtable whose address is in the
+        * second half of the PMD
+        *
+        * Order this load with the test for pmd_trans_huge in the caller
+        */
+       smp_rmb();
+       return *(char **)(pmdp + PTRS_PER_PMD);
+}
+/*
+ * The linux hugepage PMD now includes the pmd entries followed by the address
+ * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits:
+ * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte for
+ * each HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries and
+ * with 4K HPTEs we need 4096 entries. Both will fit in a 4K pgtable_t.
+ *
+ * The last three bits are intentionally left as zero. These memory locations
+ * are also used as normal page PTE pointers, so if we have any pointers
+ * left around while we collapse a hugepage, we need to make sure the
+ * _PAGE_PRESENT bit of them is zero when we look at them.
+ */
+static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
+{
+       return (hpte_slot_array[index] >> 3) & 0x1;
+}
+
+static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
+                                          int index)
+{
+       return hpte_slot_array[index] >> 4;
+}
+
+static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
+                                       unsigned int index, unsigned int hidx)
+{
+       hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
+}
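+/*
+ * Worked example (illustrative values): for a slot value of 0xb
+ * (secondary bit set, group index 3), mark_hpte_slot_valid() stores
+ * 0xb << 4 | 1 << 3 = 0xb8; hpte_valid() then reads (0xb8 >> 3) & 1 = 1
+ * and hpte_hash_index() reads 0xb8 >> 4 = 0xb, matching the
+ * [ secondary | hidx | valid | 000 ] layout described above.
+ */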
+
+/*
+ * For core kernel code, by design pmd_trans_huge is never run on any hugetlbfs
+ * page. The hugetlbfs page table walking and mangling paths are totally
+ * separated from the core VM paths, and they're differentiated by
+ * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
+ *
+ * pmd_trans_huge() is defined as false at build time if
+ * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
+ * time in that case.
+ *
+ * For ppc64 we need to differentiate explicit hugepages from THP, because
+ * for THP we also track the subpage details at the pmd level. We don't do
+ * that for explicit huge pages.
+ */
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+       return !!((pmd_val(pmd) & (_PAGE_PTE | _PAGE_THP_HUGE)) ==
+                 (_PAGE_PTE | _PAGE_THP_HUGE));
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+       if (pmd_trans_huge(pmd))
+               return pmd_val(pmd) & _PAGE_SPLITTING;
+       return 0;
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+       return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
+}
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+       return __pmd(pmd_val(pmd) | _PAGE_SPLITTING);
+}
+
+#define __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+       return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
+}
+
+static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
+                                             unsigned long addr, pmd_t *pmdp)
+{
+       unsigned long old;
+
+       if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+               return 0;
+       old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
+       return ((old & _PAGE_ACCESSED) != 0);
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                                     pmd_t *pmdp)
+{
+       if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
+               return;
+
+       pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
+}
+
+#endif /*  CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
new file mode 100644 (file)
index 0000000..2ff8b3d
--- /dev/null
@@ -0,0 +1,551 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_H
+#ifdef __KERNEL__
+
+/*
+ * Common bits between 4K and 64K pages in a linux-style PTE.
+ * These match the bits in the (hardware-defined) PowerPC PTE as closely
+ * as possible. Additional bits may be defined in pgtable-hash64-*.h
+ *
+ * Note: We only support user read/write permissions. The supervisor always
+ * has full read/write access to pages above PAGE_OFFSET (pages below that
+ * always use the user access permissions).
+ *
+ * We could create a separate kernel read-only permission if we used the 3 PP
+ * bit combinations that newer processors provide, but we currently don't.
+ */
+#define _PAGE_PTE              0x00001
+#define _PAGE_PRESENT          0x00002 /* software: pte contains a translation */
+#define _PAGE_BIT_SWAP_TYPE    2
+#define _PAGE_USER             0x00004 /* matches one of the PP bits */
+#define _PAGE_EXEC             0x00008 /* No execute on POWER4 and newer (we invert) */
+#define _PAGE_GUARDED          0x00010
+/* We can derive Memory coherence from _PAGE_NO_CACHE */
+#define _PAGE_COHERENT         0x0
+#define _PAGE_NO_CACHE         0x00020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU                0x00040 /* W: cache write-through */
+#define _PAGE_DIRTY            0x00080 /* C: page changed */
+#define _PAGE_ACCESSED         0x00100 /* R: page referenced */
+#define _PAGE_RW               0x00200 /* software: user write access allowed */
+#define _PAGE_HASHPTE          0x00400 /* software: pte has an associated HPTE */
+#define _PAGE_BUSY             0x00800 /* software: PTE & hash are busy */
+#define _PAGE_F_GIX            0x07000 /* full page: hidx bits */
+#define _PAGE_F_GIX_SHIFT      12
+#define _PAGE_F_SECOND         0x08000 /* Whether to use secondary hash or not */
+#define _PAGE_SPECIAL          0x10000 /* software: special page */
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SOFT_DIRTY       0x20000 /* software: software dirty tracking */
+#else
+#define _PAGE_SOFT_DIRTY       0x00000
+#endif
+
+/*
+ * THP pages can't be special, so we reuse _PAGE_SPECIAL
+ */
+#define _PAGE_SPLITTING _PAGE_SPECIAL
+
+/*
+ * We need to differentiate between explicit huge pages and THP huge
+ * pages, since THP huge pages also need to track real subpage details.
+ */
+#define _PAGE_THP_HUGE  _PAGE_4K_PFN
+
+/*
+ * Set of bits not changed in pmd_modify().
+ */
+#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS |              \
+                        _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
+                        _PAGE_THP_HUGE | _PAGE_PTE | _PAGE_SOFT_DIRTY)
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/book3s/64/hash-64k.h>
+#else
+#include <asm/book3s/64/hash-4k.h>
+#endif
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EADDR_SIZE     (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+                                PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE          (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
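+/*
+ * Illustrative arithmetic: both configs come out at 46 bits --
+ * 9 + 7 + 9 + 9 + 12 for 4K pages and 8 + 10 + 0 + 12 + 16 for 64K
+ * pages -- so PGTABLE_RANGE is 64TB either way.
+ */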
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_CACHE_INDEX        (PMD_INDEX_SIZE + 1)
+#else
+#define PMD_CACHE_INDEX        PMD_INDEX_SIZE
+#endif
+/*
+ * Define the address range of the kernel non-linear virtual area
+ */
+#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
+#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
+ */
+#define VMALLOC_START  KERN_VIRT_START
+#define VMALLOC_SIZE   (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END    (VMALLOC_START + VMALLOC_SIZE)
+
+/*
+ * Region IDs
+ */
+#define REGION_SHIFT           60UL
+#define REGION_MASK            (0xfUL << REGION_SHIFT)
+#define REGION_ID(ea)          (((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID      (REGION_ID(VMALLOC_START))
+#define KERNEL_REGION_ID       (REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID      (0xfUL) /* Server only */
+#define USER_REGION_ID         (0UL)
+
+/*
+ * Defines the address of the vmemmap area, in its own region on
+ * hash table CPUs.
+ */
+#define VMEMMAP_BASE           (VMEMMAP_REGION_ID << REGION_SHIFT)
+
+#ifdef CONFIG_PPC_MM_SLICES
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#endif /* CONFIG_PPC_MM_SLICES */
+
+/* No separate kernel read-only */
+#define _PAGE_KERNEL_RW                (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+#define _PAGE_KERNEL_RO                 _PAGE_KERNEL_RW
+#define _PAGE_KERNEL_RWX       (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
+/* Strong Access Ordering */
+#define _PAGE_SAO              (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
+
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE            0
+
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY      0x8
+#define _PTEIDX_GROUP_IX       0x7
+
+/* Hash table based platforms need atomic updates of the linux PTE */
+#define PTE_ATOMIC_UPDATES     1
+#define _PTE_NONE_MASK _PAGE_HPTEFLAGS
+/*
+ * The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs
+ */
+#define PTE_RPN_MASK   (~((1UL << PTE_RPN_SHIFT) - 1))
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+                        _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
+                        _PAGE_SOFT_DIRTY)
+/*
+ * Mask of bits returned by pte_pgprot()
+ */
+#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+                        _PAGE_WRITETHRU | _PAGE_4K_PFN | \
+                        _PAGE_USER | _PAGE_ACCESSED |  \
+                        _PAGE_RW |  _PAGE_DIRTY | _PAGE_EXEC | \
+                        _PAGE_SOFT_DIRTY)
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non-cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled, or when
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC  (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE     (_PAGE_BASE_NC | _PAGE_COHERENT)
+
+/* Permission masks used to generate the __P and __S table,
+ *
+ * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now (we could make write-only
+ * pages on BookE but we don't bother for now). Execute permission control is
+ * possible on platforms that define _PAGE_EXEC
+ *
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+#define PAGE_NONE      __pgprot(_PAGE_BASE)
+#define PAGE_SHARED    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X  __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
+                                _PAGE_EXEC)
+#define PAGE_COPY      __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY  __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X        __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_X
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY_X
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_X
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED_X
+#define __S111 PAGE_SHARED_X
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL    __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+                                _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG        __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+                                _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX        __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/* Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+       defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT       PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT       PAGE_KERNEL_ROX
+#endif
+
+/* Make module code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC       PAGE_KERNEL_X
+#define PAGE_AGP               (PAGE_KERNEL_NC)
+
+#define PMD_BAD_BITS           (PTE_TABLE_SIZE-1)
+#define PUD_BAD_BITS           (PMD_TABLE_SIZE-1)
+
+#ifndef __ASSEMBLY__
+#define        pmd_bad(pmd)            (!is_kernel_addr(pmd_val(pmd)) \
+                                || (pmd_val(pmd) & PMD_BAD_BITS))
+#define pmd_page_vaddr(pmd)    (pmd_val(pmd) & ~PMD_MASKED_BITS)
+
+#define        pud_bad(pud)            (!is_kernel_addr(pud_val(pud)) \
+                                || (pud_val(pud) & PUD_BAD_BITS))
+#define pud_page_vaddr(pud)    (pud_val(pud) & ~PUD_MASKED_BITS)
+
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
+#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
+#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
+
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+                           pte_t *ptep, unsigned long pte, int huge);
+extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
+/* Atomic PTE updates */
+static inline unsigned long pte_update(struct mm_struct *mm,
+                                      unsigned long addr,
+                                      pte_t *ptep, unsigned long clr,
+                                      unsigned long set,
+                                      int huge)
+{
+       unsigned long old, tmp;
+
+       __asm__ __volatile__(
+       "1:     ldarx   %0,0,%3         # pte_update\n\
+       andi.   %1,%0,%6\n\
+       bne-    1b \n\
+       andc    %1,%0,%4 \n\
+       or      %1,%1,%7\n\
+       stdcx.  %1,0,%3 \n\
+       bne-    1b"
+       : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
+       : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
+       : "cc" );
+       /* huge pages use the old page table lock */
+       if (!huge)
+               assert_pte_locked(mm, addr);
+
+       if (old & _PAGE_HASHPTE)
+               hpte_need_flush(mm, addr, ptep, old, huge);
+
+       return old;
+}
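+/*
+ * Illustrative note: the ldarx/stdcx. loop above spins while _PAGE_BUSY
+ * is set, then stores (old & ~clr) | set; the accessors below, for
+ * example, clear _PAGE_ACCESSED or _PAGE_RW by passing them in 'clr'
+ * with 'set' = 0.
+ */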
+
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+                                             unsigned long addr, pte_t *ptep)
+{
+       unsigned long old;
+
+       if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+               return 0;
+       old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+       return (old & _PAGE_ACCESSED) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __addr, __ptep)                  \
+({                                                                        \
+       int __r;                                                           \
+       __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
+       __r;                                                               \
+})
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                                     pte_t *ptep)
+
+       if ((pte_val(*ptep) & _PAGE_RW) == 0)
+               return;
+
+       pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       if ((pte_val(*ptep) & _PAGE_RW) == 0)
+               return;
+
+       pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+}
+
+/*
+ * We currently remove entries from the hashtable regardless of whether
+ * the entry was young or dirty. The generic routines only flush if the
+ * entry was young or dirty, which is not good enough.
+ *
+ * We should be more intelligent about this, but for the moment we override
+ * these functions and force a TLB flush unconditionally.
+ */
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep)               \
+({                                                                     \
+       int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
+                                                 __ptep);              \
+       __young;                                                        \
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+                                      unsigned long addr, pte_t *ptep)
+{
+       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+       return __pte(old);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+                            pte_t * ptep)
+{
+       pte_update(mm, addr, ptep, ~0UL, 0, 0);
+}
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE; this
+ * function doesn't need to flush the hash entry.
+ */
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+{
+       unsigned long bits = pte_val(entry) &
+               (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC |
+                _PAGE_SOFT_DIRTY);
+
+       unsigned long old, tmp;
+
+       __asm__ __volatile__(
+       "1:     ldarx   %0,0,%4\n\
+               andi.   %1,%0,%6\n\
+               bne-    1b \n\
+               or      %0,%3,%0\n\
+               stdcx.  %0,0,%4\n\
+               bne-    1b"
+       :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
+       :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
+       :"cc");
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)  (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)         { return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_dirty(pte_t pte)         { return !!(pte_val(pte) & _PAGE_DIRTY); }
+static inline int pte_young(pte_t pte)         { return !!(pte_val(pte) & _PAGE_ACCESSED); }
+static inline int pte_special(pte_t pte)       { return !!(pte_val(pte) & _PAGE_SPECIAL); }
+static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)   { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline bool pte_soft_dirty(pte_t pte)
+{
+       return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
+}
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
+}
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h. On powerpc, this will only
+ * work for user pages and always return true for kernel pages.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+       return (pte_val(pte) &
+               (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline int pte_present(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+       return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+                    pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+       return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* This low-level function performs the actual PTE insertion.
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now, but
+ * I'm keeping it in one place rather than spread around.
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+                               pte_t *ptep, pte_t pte, int percpu)
+{
+       /*
+        * Anything else just stores the PTE normally. That covers all 64-bit
+        * cases, and 32-bit non-hash with 32-bit PTEs.
+        */
+       *ptep = pte;
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL        (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+                        _PAGE_WRITETHRU)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_NO_CACHE);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_COHERENT);
+}
+
+#define pgprot_cached_wthru pgprot_cached_wthru
+static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_COHERENT | _PAGE_WRITETHRU);
+}
+
+#define pgprot_cached_noncoherent pgprot_cached_noncoherent
+static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
+{
+       return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+       return pgprot_noncached_wc(prot);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+                                  pmd_t *pmdp, unsigned long old_pmd);
+#else
+static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
+                                         unsigned long addr, pmd_t *pmdp,
+                                         unsigned long old_pmd)
+{
+       WARN(1, "%s called with THP disabled\n", __func__);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
new file mode 100644 (file)
index 0000000..b3a5bad
--- /dev/null
@@ -0,0 +1,300 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ppc64 hashed page table.
+ */
+
+#include <asm/book3s/64/hash.h>
+#include <asm/barrier.h>
+
+/*
+ * The second half of the kernel virtual space is used for IO mappings,
+ * it's itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
+ *
+ *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
+ *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
+ */
+#define KERN_IO_START  (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
+#define FULL_IO_SIZE   0x80000000ul
+#define  ISA_IO_BASE   (KERN_IO_START)
+#define  ISA_IO_END    (KERN_IO_START + 0x10000ul)
+#define  PHB_IO_BASE   (ISA_IO_END)
+#define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
+#define IOREMAP_BASE   (PHB_IO_END)
+#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE)
+
+#define vmemmap                        ((struct page *)VMEMMAP_BASE)
+
+/* Advertise special mapping type for AGP */
+#define HAVE_PAGE_AGP
+
+/* Advertise support for _PAGE_SPECIAL */
+#define __HAVE_ARCH_PTE_SPECIAL
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This is the default implementation of various PTE accessors; it's
+ * used in all cases except Book3S with 64K pages where we have a
+ * concept of sub-pages
+ */
+#ifndef __real_pte
+
+#ifdef CONFIG_STRICT_MM_TYPECHECKS
+#define __real_pte(e,p)                ((real_pte_t){(e)})
+#define __rpte_to_pte(r)       ((r).pte)
+#else
+#define __real_pte(e,p)                (e)
+#define __rpte_to_pte(r)       (__pte(r))
+#endif
+#define __rpte_to_hidx(r,index)        (pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
+       do {                                                             \
+               index = 0;                                               \
+               shift = mmu_psize_defs[psize].shift;                     \
+
+#define pte_iterate_hashed_end() } while(0)
+
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte)      MMU_PAGE_4K
+
+#endif /* __real_pte */
+
+static inline void pmd_set(pmd_t *pmdp, unsigned long val)
+{
+       *pmdp = __pmd(val);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       *pmdp = __pmd(0);
+}
+
+#define pmd_none(pmd)          (!pmd_val(pmd))
+#define        pmd_present(pmd)        (!pmd_none(pmd))
+
+static inline void pud_set(pud_t *pudp, unsigned long val)
+{
+       *pudp = __pud(val);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+       *pudp = __pud(0);
+}
+
+#define pud_none(pud)          (!pud_val(pud))
+#define pud_present(pud)       (pud_val(pud) != 0)
+
+extern struct page *pud_page(pud_t pud);
+extern struct page *pmd_page(pmd_t pmd);
+static inline pte_t pud_pte(pud_t pud)
+{
+       return __pte(pud_val(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+       return __pud(pte_val(pte));
+}
+#define pud_write(pud)         pte_write(pud_pte(pud))
+#define pgd_write(pgd)         pte_write(pgd_pte(pgd))
+static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+{
+       *pgdp = __pgd(val);
+}
+
+/*
+ * Find an entry in a page-table-directory.  We combine the address region
+ * (the high order N bits) and the pgd portion of the address.
+ */
+
+#define pgd_offset(mm, address)         ((mm)->pgd + pgd_index(address))
+
+#define pmd_offset(pudp,addr) \
+       (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
+
+#define pte_offset_kernel(dir,addr) \
+       (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
+
+#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)                 do { } while(0)
+
+/* to find an entry in a kernel page-table-directory */
+/* This now only contains the vmalloc pages */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define pte_ERROR(e) \
+       pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+       pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* Encode and de-code a swap entry */
+#define MAX_SWAPFILES_CHECK() do { \
+       BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
+       /*                                                      \
+        * Don't have overlapping bits with _PAGE_HPTEFLAGS     \
+        * We filter HPTEFLAGS on set_pte.                      \
+        */                                                     \
+       BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
+       BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);   \
+       } while (0)
+/*
+ * On a PTE we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
+ */
+#define SWP_TYPE_BITS 5
+#define __swp_type(x)          (((x).val >> _PAGE_BIT_SWAP_TYPE) \
+                               & ((1UL << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x)                ((x).val >> PTE_RPN_SHIFT)
+#define __swp_entry(type, offset)      ((swp_entry_t) { \
+                                       ((type) << _PAGE_BIT_SWAP_TYPE) \
+                                       | ((offset) << PTE_RPN_SHIFT) })
+/*
+ * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
+ * the swap type and offset we get from swap, and convert that to a pte to
+ * find a matching pte in the linux page table.
+ * Clear bits not found in swap entries here.
+ */
+#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
+#define __swp_entry_to_pte(x)  __pte((x).val | _PAGE_PTE)
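+/*
+ * Worked example (illustrative values): with _PAGE_BIT_SWAP_TYPE = 2 and
+ * SWP_TYPE_BITS = 5, the type occupies PTE bits 2-6 and the offset starts
+ * at PTE_RPN_SHIFT, so __swp_entry(3, 0x10) on a 4K config
+ * (PTE_RPN_SHIFT = 18) is (3 << 2) | (0x10 << 18) = 0x40000c, and
+ * __swp_entry_to_pte() just sets _PAGE_PTE on top.
+ */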
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY   (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
+#else
+#define _PAGE_SWP_SOFT_DIRTY   0UL
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
+}
+static inline bool pte_swp_soft_dirty(pte_t pte)
+{
+       return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
+}
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
+}
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_init(void);
+
+struct page *realmode_pfn_to_page(unsigned long pfn);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                      pmd_t *pmdp, pmd_t pmd);
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+                                pmd_t *pmd);
+extern int has_transparent_hugepage(void);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+       return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+       return __pmd(pte_val(pte));
+}
+
+static inline pte_t *pmdp_ptep(pmd_t *pmd)
+{
+       return (pte_t *)pmd;
+}
+
+#define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
+#define pmd_dirty(pmd)         pte_dirty(pmd_pte(pmd))
+#define pmd_young(pmd)         pte_young(pmd_pte(pmd))
+#define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
+#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
+#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline int pmd_protnone(pmd_t pmd)
+{
+       return pte_protnone(pmd_pte(pmd));
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd)         pte_write(pmd_pte(pmd))
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+       return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
+}
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+                                unsigned long address, pmd_t *pmdp,
+                                pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+                                    unsigned long address, pmd_t *pmdp);
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+                                 unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                    unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+                                unsigned long address, pmd_t *pmdp);
+
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+                                unsigned long address, pmd_t *pmdp);
+#define pmdp_collapse_flush pmdp_collapse_flush
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+                                      pgtable_t pgtable);
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                           pmd_t *pmdp);
+
+#define pmd_move_must_withdraw pmd_move_must_withdraw
+struct spinlock;
+static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+                                        struct spinlock *old_pmd_ptl)
+{
+       /*
+        * Archs like ppc64 use the pgtable to store per-pmd
+        * specific information. So when we switch the pmd,
+        * we should also withdraw and deposit the pgtable.
+        */
+       return true;
+}
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
new file mode 100644 (file)
index 0000000..8b0f4a2
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H
+#define _ASM_POWERPC_BOOK3S_PGTABLE_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/pgtable.h>
+#else
+#include <asm/book3s/32/pgtable.h>
+#endif
+
+#define FIRST_USER_ADDRESS     0UL
+#ifndef __ASSEMBLY__
+/* Insert a PTE; the top-level function is out of line. It uses an inline
+ * low-level function in the respective pgtable-* files.
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+                      pte_t pte);
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+                                pte_t *ptep, pte_t entry, int dirty);
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                    unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#endif /* __ASSEMBLY__ */
+#endif
index ad6263c..d1a8d93 100644 (file)
@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
        unsigned long prev;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %3,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");
@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
        unsigned long prev;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
 "      stdcx.  %3,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");
@@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
        unsigned int prev;
 
        __asm__ __volatile__ (
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %4,0,%2\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        "\n\
 2:"
        : "=&r" (prev), "+m" (*p)
@@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
        unsigned long prev;
 
        __asm__ __volatile__ (
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        "\n\
 2:"
        : "=&r" (prev), "+m" (*p)
index 4398a6c..2c5c5b4 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/of.h>
+#include <soc/fsl/qe/qe.h>
 
 /*
  * SPI Parameter RAM common to QE and CPM.
@@ -155,49 +156,6 @@ typedef struct cpm_buf_desc {
  */
 #define BD_I2C_START           (0x0400)
 
-int cpm_muram_init(void);
-
-#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
-unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
-int cpm_muram_free(unsigned long offset);
-unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
-void __iomem *cpm_muram_addr(unsigned long offset);
-unsigned long cpm_muram_offset(void __iomem *addr);
-dma_addr_t cpm_muram_dma(void __iomem *addr);
-#else
-static inline unsigned long cpm_muram_alloc(unsigned long size,
-                                           unsigned long align)
-{
-       return -ENOSYS;
-}
-
-static inline int cpm_muram_free(unsigned long offset)
-{
-       return -ENOSYS;
-}
-
-static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset,
-                                                 unsigned long size)
-{
-       return -ENOSYS;
-}
-
-static inline void __iomem *cpm_muram_addr(unsigned long offset)
-{
-       return NULL;
-}
-
-static inline unsigned long cpm_muram_offset(void __iomem *addr)
-{
-       return -ENOSYS;
-}
-
-static inline dma_addr_t cpm_muram_dma(void __iomem *addr)
-{
-       return 0;
-}
-#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */
-
 #ifdef CONFIG_CPM
 int cpm_command(u32 command, u8 opcode);
 #else
index 77f52b2..93ae809 100644 (file)
@@ -129,15 +129,6 @@ BEGIN_FTR_SECTION_NESTED(941)                                              \
        mtspr   SPRN_PPR,ra;                                            \
 END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
 
-/*
- * Increase the priority on systems where PPR save/restore is not
- * implemented/ supported.
- */
-#define HMT_MEDIUM_PPR_DISCARD                                         \
-BEGIN_FTR_SECTION_NESTED(942)                                          \
-       HMT_MEDIUM;                                                     \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,0,942)  /*non P7*/              
-
 /*
  * Get an SPR into a register if the CPU has the given feature
  */
@@ -263,17 +254,6 @@ do_kvm_##n:                                                                \
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
-#define KVMTEST_PR(n)                  __KVMTEST(n)
-#define KVM_HANDLER_PR(area, h, n)     __KVM_HANDLER(area, h, n)
-#define KVM_HANDLER_PR_SKIP(area, h, n)        __KVM_HANDLER_SKIP(area, h, n)
-
-#else
-#define KVMTEST_PR(n)
-#define KVM_HANDLER_PR(area, h, n)
-#define KVM_HANDLER_PR_SKIP(area, h, n)
-#endif
-
 #define NOTEST(n)
 
 /*
@@ -353,27 +333,25 @@ do_kvm_##n:                                                               \
 /*
  * Exception vectors.
  */
-#define STD_EXCEPTION_PSERIES(loc, vec, label)         \
-       . = loc;                                        \
+#define STD_EXCEPTION_PSERIES(vec, label)              \
+       . = vec;                                        \
        .globl label##_pSeries;                         \
 label##_pSeries:                                       \
-       HMT_MEDIUM_PPR_DISCARD;                         \
        SET_SCRATCH0(r13);              /* save r13 */          \
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,    \
-                                EXC_STD, KVMTEST_PR, vec)
+                                EXC_STD, KVMTEST, vec)
 
 /* Version of above for when we have to branch out-of-line */
 #define STD_EXCEPTION_PSERIES_OOL(vec, label)                  \
        .globl label##_pSeries;                                 \
 label##_pSeries:                                               \
-       EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);        \
+       EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec);   \
        EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD)
 
 #define STD_EXCEPTION_HV(loc, vec, label)              \
        . = loc;                                        \
        .globl label##_hv;                              \
 label##_hv:                                            \
-       HMT_MEDIUM_PPR_DISCARD;                         \
        SET_SCRATCH0(r13);      /* save r13 */                  \
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,    \
                                 EXC_HV, KVMTEST, vec)
@@ -389,7 +367,6 @@ label##_hv:                                         \
        . = loc;                                        \
        .globl label##_relon_pSeries;                   \
 label##_relon_pSeries:                                 \
-       HMT_MEDIUM_PPR_DISCARD;                         \
        /* No guest interrupts come through here */     \
        SET_SCRATCH0(r13);              /* save r13 */  \
        EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
@@ -405,7 +382,6 @@ label##_relon_pSeries:                                              \
        . = loc;                                        \
        .globl label##_relon_hv;                        \
 label##_relon_hv:                                      \
-       HMT_MEDIUM_PPR_DISCARD;                         \
        /* No guest interrupts come through here */     \
        SET_SCRATCH0(r13);      /* save r13 */          \
        EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
@@ -436,17 +412,13 @@ label##_relon_hv:                                         \
 #define _SOFTEN_TEST(h, vec)   __SOFTEN_TEST(h, vec)
 
 #define SOFTEN_TEST_PR(vec)                                            \
-       KVMTEST_PR(vec);                                                \
+       KVMTEST(vec);                                                   \
        _SOFTEN_TEST(EXC_STD, vec)
 
 #define SOFTEN_TEST_HV(vec)                                            \
        KVMTEST(vec);                                                   \
        _SOFTEN_TEST(EXC_HV, vec)
 
-#define SOFTEN_TEST_HV_201(vec)                                                \
-       KVMTEST(vec);                                                   \
-       _SOFTEN_TEST(EXC_STD, vec)
-
 #define SOFTEN_NOTEST_PR(vec)          _SOFTEN_TEST(EXC_STD, vec)
 #define SOFTEN_NOTEST_HV(vec)          _SOFTEN_TEST(EXC_HV, vec)
 
@@ -463,7 +435,6 @@ label##_relon_hv:                                           \
        . = loc;                                                        \
        .globl label##_pSeries;                                         \
 label##_pSeries:                                                       \
-       HMT_MEDIUM_PPR_DISCARD;                                         \
        _MASKABLE_EXCEPTION_PSERIES(vec, label,                         \
                                    EXC_STD, SOFTEN_TEST_PR)
 
@@ -481,7 +452,6 @@ label##_hv:                                                         \
        EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
 
 #define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra)       \
-       HMT_MEDIUM_PPR_DISCARD;                                         \
        SET_SCRATCH0(r13);    /* save r13 */                            \
        EXCEPTION_PROLOG_0(PACA_EXGEN);                                 \
        __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec);           \
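After this rework the fixed-offset vector macros take the vector address once instead of a separate location argument. A hedged sketch of an invocation from the vector code, with 0x700/program_check chosen as an assumed example:

    STD_EXCEPTION_PSERIES(0x700, program_check)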
index e05808a..b062924 100644
 #define FW_FEATURE_VPHN                ASM_CONST(0x0000000004000000)
 #define FW_FEATURE_XCMO                ASM_CONST(0x0000000008000000)
 #define FW_FEATURE_OPAL                ASM_CONST(0x0000000010000000)
-#define FW_FEATURE_OPALv2      ASM_CONST(0x0000000020000000)
 #define FW_FEATURE_SET_MODE    ASM_CONST(0x0000000040000000)
 #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN                ASM_CONST(0x0000000200000000)
-#define FW_FEATURE_OPALv3      ASM_CONST(0x0000000400000000)
 
 #ifndef __ASSEMBLY__
 
@@ -70,8 +68,7 @@ enum {
                FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
                FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
        FW_FEATURE_PSERIES_ALWAYS = 0,
-       FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
-               FW_FEATURE_OPALv3,
+       FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
        FW_FEATURE_POWERNV_ALWAYS = 0,
        FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
        FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
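With the OPALv2/v3 bits retired, detecting an OPAL machine reduces to a single feature test. A hedged sketch using the existing firmware_has_feature() accessor; the called helper is hypothetical:

    #include <asm/firmware.h>

    if (firmware_has_feature(FW_FEATURE_OPAL))
            opal_setup_console();   /* hypothetical caller */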
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
deleted file mode 100644
index bedbff8..0000000
--- a/arch/powerpc/include/asm/immap_qe.h
+++ /dev/null
@@ -1,491 +0,0 @@
-/*
- * QUICC Engine (QE) Internal Memory Map.
- * The Internal Memory Map for devices with QE on them. This
- * is the superset of all QE devices (8360, etc.).
-
- * Copyright (C) 2006. Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#ifndef _ASM_POWERPC_IMMAP_QE_H
-#define _ASM_POWERPC_IMMAP_QE_H
-#ifdef __KERNEL__
-
-#include <linux/kernel.h>
-#include <asm/io.h>
-
-#define QE_IMMAP_SIZE  (1024 * 1024)   /* 1MB from 1MB+IMMR */
-
-/* QE I-RAM */
-struct qe_iram {
-       __be32  iadd;           /* I-RAM Address Register */
-       __be32  idata;          /* I-RAM Data Register */
-       u8      res0[0x04];
-       __be32  iready;         /* I-RAM Ready Register */
-       u8      res1[0x70];
-} __attribute__ ((packed));
-
-/* QE Interrupt Controller */
-struct qe_ic_regs {
-       __be32  qicr;
-       __be32  qivec;
-       __be32  qripnr;
-       __be32  qipnr;
-       __be32  qipxcc;
-       __be32  qipycc;
-       __be32  qipwcc;
-       __be32  qipzcc;
-       __be32  qimr;
-       __be32  qrimr;
-       __be32  qicnr;
-       u8      res0[0x4];
-       __be32  qiprta;
-       __be32  qiprtb;
-       u8      res1[0x4];
-       __be32  qricr;
-       u8      res2[0x20];
-       __be32  qhivec;
-       u8      res3[0x1C];
-} __attribute__ ((packed));
-
-/* Communications Processor */
-struct cp_qe {
-       __be32  cecr;           /* QE command register */
-       __be32  ceccr;          /* QE controller configuration register */
-       __be32  cecdr;          /* QE command data register */
-       u8      res0[0xA];
-       __be16  ceter;          /* QE timer event register */
-       u8      res1[0x2];
-       __be16  cetmr;          /* QE timers mask register */
-       __be32  cetscr;         /* QE time-stamp timer control register */
-       __be32  cetsr1;         /* QE time-stamp register 1 */
-       __be32  cetsr2;         /* QE time-stamp register 2 */
-       u8      res2[0x8];
-       __be32  cevter;         /* QE virtual tasks event register */
-       __be32  cevtmr;         /* QE virtual tasks mask register */
-       __be16  cercr;          /* QE RAM control register */
-       u8      res3[0x2];
-       u8      res4[0x24];
-       __be16  ceexe1;         /* QE external request 1 event register */
-       u8      res5[0x2];
-       __be16  ceexm1;         /* QE external request 1 mask register */
-       u8      res6[0x2];
-       __be16  ceexe2;         /* QE external request 2 event register */
-       u8      res7[0x2];
-       __be16  ceexm2;         /* QE external request 2 mask register */
-       u8      res8[0x2];
-       __be16  ceexe3;         /* QE external request 3 event register */
-       u8      res9[0x2];
-       __be16  ceexm3;         /* QE external request 3 mask register */
-       u8      res10[0x2];
-       __be16  ceexe4;         /* QE external request 4 event register */
-       u8      res11[0x2];
-       __be16  ceexm4;         /* QE external request 4 mask register */
-       u8      res12[0x3A];
-       __be32  ceurnr;         /* QE microcode revision number register */
-       u8      res13[0x244];
-} __attribute__ ((packed));
-
-/* QE Multiplexer */
-struct qe_mux {
-       __be32  cmxgcr;         /* CMX general clock route register */
-       __be32  cmxsi1cr_l;     /* CMX SI1 clock route low register */
-       __be32  cmxsi1cr_h;     /* CMX SI1 clock route high register */
-       __be32  cmxsi1syr;      /* CMX SI1 SYNC route register */
-       __be32  cmxucr[4];      /* CMX UCCx clock route registers */
-       __be32  cmxupcr;        /* CMX UPC clock route register */
-       u8      res0[0x1C];
-} __attribute__ ((packed));
-
-/* QE Timers */
-struct qe_timers {
-       u8      gtcfr1;         /* Timer 1 and Timer 2 global config register*/
-       u8      res0[0x3];
-       u8      gtcfr2;         /* Timer 3 and timer 4 global config register*/
-       u8      res1[0xB];
-       __be16  gtmdr1;         /* Timer 1 mode register */
-       __be16  gtmdr2;         /* Timer 2 mode register */
-       __be16  gtrfr1;         /* Timer 1 reference register */
-       __be16  gtrfr2;         /* Timer 2 reference register */
-       __be16  gtcpr1;         /* Timer 1 capture register */
-       __be16  gtcpr2;         /* Timer 2 capture register */
-       __be16  gtcnr1;         /* Timer 1 counter */
-       __be16  gtcnr2;         /* Timer 2 counter */
-       __be16  gtmdr3;         /* Timer 3 mode register */
-       __be16  gtmdr4;         /* Timer 4 mode register */
-       __be16  gtrfr3;         /* Timer 3 reference register */
-       __be16  gtrfr4;         /* Timer 4 reference register */
-       __be16  gtcpr3;         /* Timer 3 capture register */
-       __be16  gtcpr4;         /* Timer 4 capture register */
-       __be16  gtcnr3;         /* Timer 3 counter */
-       __be16  gtcnr4;         /* Timer 4 counter */
-       __be16  gtevr1;         /* Timer 1 event register */
-       __be16  gtevr2;         /* Timer 2 event register */
-       __be16  gtevr3;         /* Timer 3 event register */
-       __be16  gtevr4;         /* Timer 4 event register */
-       __be16  gtps;           /* Timer 1 prescale register */
-       u8 res2[0x46];
-} __attribute__ ((packed));
-
-/* BRG */
-struct qe_brg {
-       __be32  brgc[16];       /* BRG configuration registers */
-       u8      res0[0x40];
-} __attribute__ ((packed));
-
-/* SPI */
-struct spi {
-       u8      res0[0x20];
-       __be32  spmode;         /* SPI mode register */
-       u8      res1[0x2];
-       u8      spie;           /* SPI event register */
-       u8      res2[0x1];
-       u8      res3[0x2];
-       u8      spim;           /* SPI mask register */
-       u8      res4[0x1];
-       u8      res5[0x1];
-       u8      spcom;          /* SPI command register */
-       u8      res6[0x2];
-       __be32  spitd;          /* SPI transmit data register (cpu mode) */
-       __be32  spird;          /* SPI receive data register (cpu mode) */
-       u8      res7[0x8];
-} __attribute__ ((packed));
-
-/* SI */
-struct si1 {
-       __be16  siamr1;         /* SI1 TDMA mode register */
-       __be16  sibmr1;         /* SI1 TDMB mode register */
-       __be16  sicmr1;         /* SI1 TDMC mode register */
-       __be16  sidmr1;         /* SI1 TDMD mode register */
-       u8      siglmr1_h;      /* SI1 global mode register high */
-       u8      res0[0x1];
-       u8      sicmdr1_h;      /* SI1 command register high */
-       u8      res2[0x1];
-       u8      sistr1_h;       /* SI1 status register high */
-       u8      res3[0x1];
-       __be16  sirsr1_h;       /* SI1 RAM shadow address register high */
-       u8      sitarc1;        /* SI1 RAM counter Tx TDMA */
-       u8      sitbrc1;        /* SI1 RAM counter Tx TDMB */
-       u8      sitcrc1;        /* SI1 RAM counter Tx TDMC */
-       u8      sitdrc1;        /* SI1 RAM counter Tx TDMD */
-       u8      sirarc1;        /* SI1 RAM counter Rx TDMA */
-       u8      sirbrc1;        /* SI1 RAM counter Rx TDMB */
-       u8      sircrc1;        /* SI1 RAM counter Rx TDMC */
-       u8      sirdrc1;        /* SI1 RAM counter Rx TDMD */
-       u8      res4[0x8];
-       __be16  siemr1;         /* SI1 TDME mode register 16 bits */
-       __be16  sifmr1;         /* SI1 TDMF mode register 16 bits */
-       __be16  sigmr1;         /* SI1 TDMG mode register 16 bits */
-       __be16  sihmr1;         /* SI1 TDMH mode register 16 bits */
-       u8      siglmg1_l;      /* SI1 global mode register low 8 bits */
-       u8      res5[0x1];
-       u8      sicmdr1_l;      /* SI1 command register low 8 bits */
-       u8      res6[0x1];
-       u8      sistr1_l;       /* SI1 status register low 8 bits */
-       u8      res7[0x1];
-       __be16  sirsr1_l;       /* SI1 RAM shadow address register low 16 bits*/
-       u8      siterc1;        /* SI1 RAM counter Tx TDME 8 bits */
-       u8      sitfrc1;        /* SI1 RAM counter Tx TDMF 8 bits */
-       u8      sitgrc1;        /* SI1 RAM counter Tx TDMG 8 bits */
-       u8      sithrc1;        /* SI1 RAM counter Tx TDMH 8 bits */
-       u8      sirerc1;        /* SI1 RAM counter Rx TDME 8 bits */
-       u8      sirfrc1;        /* SI1 RAM counter Rx TDMF 8 bits */
-       u8      sirgrc1;        /* SI1 RAM counter Rx TDMG 8 bits */
-       u8      sirhrc1;        /* SI1 RAM counter Rx TDMH 8 bits */
-       u8      res8[0x8];
-       __be32  siml1;          /* SI1 multiframe limit register */
-       u8      siedm1;         /* SI1 extended diagnostic mode register */
-       u8      res9[0xBB];
-} __attribute__ ((packed));
-
-/* SI Routing Tables */
-struct sir {
-       u8      tx[0x400];
-       u8      rx[0x400];
-       u8      res0[0x800];
-} __attribute__ ((packed));
-
-/* USB Controller */
-struct qe_usb_ctlr {
-       u8      usb_usmod;
-       u8      usb_usadr;
-       u8      usb_uscom;
-       u8      res1[1];
-       __be16  usb_usep[4];
-       u8      res2[4];
-       __be16  usb_usber;
-       u8      res3[2];
-       __be16  usb_usbmr;
-       u8      res4[1];
-       u8      usb_usbs;
-       __be16  usb_ussft;
-       u8      res5[2];
-       __be16  usb_usfrn;
-       u8      res6[0x22];
-} __attribute__ ((packed));
-
-/* MCC */
-struct qe_mcc {
-       __be32  mcce;           /* MCC event register */
-       __be32  mccm;           /* MCC mask register */
-       __be32  mccf;           /* MCC configuration register */
-       __be32  merl;           /* MCC emergency request level register */
-       u8      res0[0xF0];
-} __attribute__ ((packed));
-
-/* QE UCC Slow */
-struct ucc_slow {
-       __be32  gumr_l;         /* UCCx general mode register (low) */
-       __be32  gumr_h;         /* UCCx general mode register (high) */
-       __be16  upsmr;          /* UCCx protocol-specific mode register */
-       u8      res0[0x2];
-       __be16  utodr;          /* UCCx transmit on demand register */
-       __be16  udsr;           /* UCCx data synchronization register */
-       __be16  ucce;           /* UCCx event register */
-       u8      res1[0x2];
-       __be16  uccm;           /* UCCx mask register */
-       u8      res2[0x1];
-       u8      uccs;           /* UCCx status register */
-       u8      res3[0x24];
-       __be16  utpt;
-       u8      res4[0x52];
-       u8      guemr;          /* UCC general extended mode register */
-} __attribute__ ((packed));
-
-/* QE UCC Fast */
-struct ucc_fast {
-       __be32  gumr;           /* UCCx general mode register */
-       __be32  upsmr;          /* UCCx protocol-specific mode register */
-       __be16  utodr;          /* UCCx transmit on demand register */
-       u8      res0[0x2];
-       __be16  udsr;           /* UCCx data synchronization register */
-       u8      res1[0x2];
-       __be32  ucce;           /* UCCx event register */
-       __be32  uccm;           /* UCCx mask register */
-       u8      uccs;           /* UCCx status register */
-       u8      res2[0x7];
-       __be32  urfb;           /* UCC receive FIFO base */
-       __be16  urfs;           /* UCC receive FIFO size */
-       u8      res3[0x2];
-       __be16  urfet;          /* UCC receive FIFO emergency threshold */
-       __be16  urfset;         /* UCC receive FIFO special emergency
-                                  threshold */
-       __be32  utfb;           /* UCC transmit FIFO base */
-       __be16  utfs;           /* UCC transmit FIFO size */
-       u8      res4[0x2];
-       __be16  utfet;          /* UCC transmit FIFO emergency threshold */
-       u8      res5[0x2];
-       __be16  utftt;          /* UCC transmit FIFO transmit threshold */
-       u8      res6[0x2];
-       __be16  utpt;           /* UCC transmit polling timer */
-       u8      res7[0x2];
-       __be32  urtry;          /* UCC retry counter register */
-       u8      res8[0x4C];
-       u8      guemr;          /* UCC general extended mode register */
-} __attribute__ ((packed));
-
-struct ucc {
-       union {
-               struct  ucc_slow slow;
-               struct  ucc_fast fast;
-               u8      res[0x200];     /* UCC blocks are 512 bytes each */
-       };
-} __attribute__ ((packed));
-
-/* MultiPHY UTOPIA POS Controllers (UPC) */
-struct upc {
-       __be32  upgcr;          /* UTOPIA/POS general configuration register */
-       __be32  uplpa;          /* UTOPIA/POS last PHY address */
-       __be32  uphec;          /* ATM HEC register */
-       __be32  upuc;           /* UTOPIA/POS UCC configuration */
-       __be32  updc1;          /* UTOPIA/POS device 1 configuration */
-       __be32  updc2;          /* UTOPIA/POS device 2 configuration */
-       __be32  updc3;          /* UTOPIA/POS device 3 configuration */
-       __be32  updc4;          /* UTOPIA/POS device 4 configuration */
-       __be32  upstpa;         /* UTOPIA/POS STPA threshold */
-       u8      res0[0xC];
-       __be32  updrs1_h;       /* UTOPIA/POS device 1 rate select */
-       __be32  updrs1_l;       /* UTOPIA/POS device 1 rate select */
-       __be32  updrs2_h;       /* UTOPIA/POS device 2 rate select */
-       __be32  updrs2_l;       /* UTOPIA/POS device 2 rate select */
-       __be32  updrs3_h;       /* UTOPIA/POS device 3 rate select */
-       __be32  updrs3_l;       /* UTOPIA/POS device 3 rate select */
-       __be32  updrs4_h;       /* UTOPIA/POS device 4 rate select */
-       __be32  updrs4_l;       /* UTOPIA/POS device 4 rate select */
-       __be32  updrp1;         /* UTOPIA/POS device 1 receive priority low */
-       __be32  updrp2;         /* UTOPIA/POS device 2 receive priority low */
-       __be32  updrp3;         /* UTOPIA/POS device 3 receive priority low */
-       __be32  updrp4;         /* UTOPIA/POS device 4 receive priority low */
-       __be32  upde1;          /* UTOPIA/POS device 1 event */
-       __be32  upde2;          /* UTOPIA/POS device 2 event */
-       __be32  upde3;          /* UTOPIA/POS device 3 event */
-       __be32  upde4;          /* UTOPIA/POS device 4 event */
-       __be16  uprp1;
-       __be16  uprp2;
-       __be16  uprp3;
-       __be16  uprp4;
-       u8      res1[0x8];
-       __be16  uptirr1_0;      /* Device 1 transmit internal rate 0 */
-       __be16  uptirr1_1;      /* Device 1 transmit internal rate 1 */
-       __be16  uptirr1_2;      /* Device 1 transmit internal rate 2 */
-       __be16  uptirr1_3;      /* Device 1 transmit internal rate 3 */
-       __be16  uptirr2_0;      /* Device 2 transmit internal rate 0 */
-       __be16  uptirr2_1;      /* Device 2 transmit internal rate 1 */
-       __be16  uptirr2_2;      /* Device 2 transmit internal rate 2 */
-       __be16  uptirr2_3;      /* Device 2 transmit internal rate 3 */
-       __be16  uptirr3_0;      /* Device 3 transmit internal rate 0 */
-       __be16  uptirr3_1;      /* Device 3 transmit internal rate 1 */
-       __be16  uptirr3_2;      /* Device 3 transmit internal rate 2 */
-       __be16  uptirr3_3;      /* Device 3 transmit internal rate 3 */
-       __be16  uptirr4_0;      /* Device 4 transmit internal rate 0 */
-       __be16  uptirr4_1;      /* Device 4 transmit internal rate 1 */
-       __be16  uptirr4_2;      /* Device 4 transmit internal rate 2 */
-       __be16  uptirr4_3;      /* Device 4 transmit internal rate 3 */
-       __be32  uper1;          /* Device 1 port enable register */
-       __be32  uper2;          /* Device 2 port enable register */
-       __be32  uper3;          /* Device 3 port enable register */
-       __be32  uper4;          /* Device 4 port enable register */
-       u8      res2[0x150];
-} __attribute__ ((packed));
-
-/* SDMA */
-struct sdma {
-       __be32  sdsr;           /* Serial DMA status register */
-       __be32  sdmr;           /* Serial DMA mode register */
-       __be32  sdtr1;          /* SDMA system bus threshold register */
-       __be32  sdtr2;          /* SDMA secondary bus threshold register */
-       __be32  sdhy1;          /* SDMA system bus hysteresis register */
-       __be32  sdhy2;          /* SDMA secondary bus hysteresis register */
-       __be32  sdta1;          /* SDMA system bus address register */
-       __be32  sdta2;          /* SDMA secondary bus address register */
-       __be32  sdtm1;          /* SDMA system bus MSNUM register */
-       __be32  sdtm2;          /* SDMA secondary bus MSNUM register */
-       u8      res0[0x10];
-       __be32  sdaqr;          /* SDMA address bus qualify register */
-       __be32  sdaqmr;         /* SDMA address bus qualify mask register */
-       u8      res1[0x4];
-       __be32  sdebcr;         /* SDMA CAM entries base register */
-       u8      res2[0x38];
-} __attribute__ ((packed));
-
-/* Debug Space */
-struct dbg {
-       __be32  bpdcr;          /* Breakpoint debug command register */
-       __be32  bpdsr;          /* Breakpoint debug status register */
-       __be32  bpdmr;          /* Breakpoint debug mask register */
-       __be32  bprmrr0;        /* Breakpoint request mode risc register 0 */
-       __be32  bprmrr1;        /* Breakpoint request mode risc register 1 */
-       u8      res0[0x8];
-       __be32  bprmtr0;        /* Breakpoint request mode trb register 0 */
-       __be32  bprmtr1;        /* Breakpoint request mode trb register 1 */
-       u8      res1[0x8];
-       __be32  bprmir;         /* Breakpoint request mode immediate register */
-       __be32  bprmsr;         /* Breakpoint request mode serial register */
-       __be32  bpemr;          /* Breakpoint exit mode register */
-       u8      res2[0x48];
-} __attribute__ ((packed));
-
-/*
- * RISC Special Registers (Trap and Breakpoint).  These are described in
- * the QE Developer's Handbook.
- */
-struct rsp {
-       __be32 tibcr[16];       /* Trap/instruction breakpoint control regs */
-       u8 res0[64];
-       __be32 ibcr0;
-       __be32 ibs0;
-       __be32 ibcnr0;
-       u8 res1[4];
-       __be32 ibcr1;
-       __be32 ibs1;
-       __be32 ibcnr1;
-       __be32 npcr;
-       __be32 dbcr;
-       __be32 dbar;
-       __be32 dbamr;
-       __be32 dbsr;
-       __be32 dbcnr;
-       u8 res2[12];
-       __be32 dbdr_h;
-       __be32 dbdr_l;
-       __be32 dbdmr_h;
-       __be32 dbdmr_l;
-       __be32 bsr;
-       __be32 bor;
-       __be32 bior;
-       u8 res3[4];
-       __be32 iatr[4];
-       __be32 eccr;            /* Exception control configuration register */
-       __be32 eicr;
-       u8 res4[0x100-0xf8];
-} __attribute__ ((packed));
-
-struct qe_immap {
-       struct qe_iram          iram;           /* I-RAM */
-       struct qe_ic_regs       ic;             /* Interrupt Controller */
-       struct cp_qe            cp;             /* Communications Processor */
-       struct qe_mux           qmx;            /* QE Multiplexer */
-       struct qe_timers        qet;            /* QE Timers */
-       struct spi              spi[0x2];       /* spi */
-       struct qe_mcc           mcc;            /* mcc */
-       struct qe_brg           brg;            /* brg */
-       struct qe_usb_ctlr      usb;            /* USB */
-       struct si1              si1;            /* SI */
-       u8                      res11[0x800];
-       struct sir              sir;            /* SI Routing Tables */
-       struct ucc              ucc1;           /* ucc1 */
-       struct ucc              ucc3;           /* ucc3 */
-       struct ucc              ucc5;           /* ucc5 */
-       struct ucc              ucc7;           /* ucc7 */
-       u8                      res12[0x600];
-       struct upc              upc1;           /* MultiPHY UTOPIA POS Ctrlr 1*/
-       struct ucc              ucc2;           /* ucc2 */
-       struct ucc              ucc4;           /* ucc4 */
-       struct ucc              ucc6;           /* ucc6 */
-       struct ucc              ucc8;           /* ucc8 */
-       u8                      res13[0x600];
-       struct upc              upc2;           /* MultiPHY UTOPIA POS Ctrlr 2*/
-       struct sdma             sdma;           /* SDMA */
-       struct dbg              dbg;            /* 0x104080 - 0x1040FF
-                                                  Debug Space */
-       struct rsp              rsp[0x2];       /* 0x104100 - 0x1042FF
-                                                  RISC Special Registers
-                                                  (Trap and Breakpoint) */
-       u8                      res14[0x300];   /* 0x104300 - 0x1045FF */
-       u8                      res15[0x3A00];  /* 0x104600 - 0x107FFF */
-       u8                      res16[0x8000];  /* 0x108000 - 0x110000 */
-       u8                      muram[0xC000];  /* 0x110000 - 0x11C000
-                                                  Multi-user RAM */
-       u8                      res17[0x24000]; /* 0x11C000 - 0x140000 */
-       u8                      res18[0xC0000]; /* 0x140000 - 0x200000 */
-} __attribute__ ((packed));
-
-extern struct qe_immap __iomem *qe_immr;
-extern phys_addr_t get_qe_base(void);
-
-/*
- * Returns the offset within the QE address space of the given pointer.
- *
- * Note that the QE does not support 36-bit physical addresses, so if
- * get_qe_base() returns a number above 4GB, the caller will probably fail.
- */
-static inline phys_addr_t immrbar_virt_to_phys(void *address)
-{
-       void *q = (void *)qe_immr;
-
-       /* Is it a MURAM address? */
-       if ((address >= q) && (address < (q + QE_IMMAP_SIZE)))
-               return get_qe_base() + (address - q);
-
-       /* It's an address returned by kmalloc */
-       return virt_to_phys(address);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_IMMAP_QE_H */
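The register map above is consumed through the qe_immr pointer it declares. A hedged sketch of how that pointer is typically established at boot (the standard ioremap pattern; not part of this diff):

    #include <asm/io.h>

    /* Map the 1MB internal memory map starting at the QE base. */
    qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);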
index 5879fde..6c1297e 100644
@@ -385,6 +385,17 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
 {
        *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
 }
+
+/*
+ * Real mode version of the above. stdcix is only supposed to be used
+ * in hypervisor real mode as per the architecture spec.
+ */
+static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
+{
+       __asm__ __volatile__("stdcix %0,0,%1"
+               : : "r" (val), "r" (paddr) : "memory");
+}
+
 #endif /* __powerpc64__ */
 
 /*
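A hedged sketch of the new real-mode store helper in use; 'db_mmio' is an assumed cache-inhibited MMIO mapping, and the caller must already be running in hypervisor real mode, as the comment above requires:

    /* Post a doorbell value without going through the MMU. */
    __raw_rm_writeq(val, db_mmio);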
index ba3342b..7352d3f 100644
@@ -21,7 +21,7 @@
  * need for various slices related matters. Note that this isn't the
  * complete pgtable.h but only a portion of it.
  */
-#include <asm/pgtable-ppc64.h>
+#include <asm/book3s/64/pgtable.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
 
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
new file mode 100644
index 0000000..c82cbf5
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -0,0 +1,343 @@
+#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/io.h>                    /* For sub-arch specific PPC_PIN_SIZE */
+
+extern unsigned long ioremap_bot;
+
+#ifdef CONFIG_44x
+extern int icache_44x_need_flush;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table.  The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT    (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
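As a hedged sanity check of the geometry, assuming the usual 32-bit values PAGE_SHIFT = 12 and PTE_SHIFT = 10 (neither is defined in this header):

    PGDIR_SHIFT  = 12 + 10 = 22, so one pgd entry maps 1 << 22 = 4 MB
    PTRS_PER_PGD = 1 << (32 - 22) = 1024 top-level entries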
+
+/*
+ * entries per page directory level: our page-table tree is two-level, so
+ * we don't really have any PMD directory.
+ */
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE   (1 << PTE_SHIFT)
+#define PTRS_PER_PMD   1
+#define PTRS_PER_PGD   (1 << (32 - PGDIR_SHIFT))
+
+#define USER_PTRS_PER_PGD      (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS     0UL
+
+#define pte_ERROR(e) \
+       pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+               (unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
+ * value (for now) on others, from where we can start laying out the
+ * kernel virtual space that goes below PKMAP and FIXMAP
+ */
+#ifdef CONFIG_HIGHMEM
+#define KVIRT_TOP      PKMAP_BASE
+#else
+#define KVIRT_TOP      (0xfe000000UL)  /* for now, could be FIXMAP_BASE ? */
+#endif
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP    ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
+#define IOREMAP_TOP    KVIRT_TOP
+#endif
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 16MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from ioremap_base being run into the VM area allocations (growing upwards
+ * from VMALLOC_START).  For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system.  This really does become a problem for machines with good amounts
+ * of RAM.  -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+#ifdef PPC_PIN_SIZE
+#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#else
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#endif
+#define VMALLOC_END    ioremap_bot
+
+/*
+ * Bits in a linux-style PTE.  These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#if defined(CONFIG_40x)
+#include <asm/nohash/32/pte-40x.h>
+#elif defined(CONFIG_44x)
+#include <asm/nohash/32/pte-44x.h>
+#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
+#include <asm/nohash/pte-book3e.h>
+#elif defined(CONFIG_FSL_BOOKE)
+#include <asm/nohash/32/pte-fsl-booke.h>
+#elif defined(CONFIG_8xx)
+#include <asm/nohash/32/pte-8xx.h>
+#endif
+
+/* And here we include common definitions */
+#include <asm/pte-common.h>
+
+#ifndef __ASSEMBLY__
+
+#define pte_clear(mm, addr, ptep) \
+       do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+
+#define pmd_none(pmd)          (!pmd_val(pmd))
+#define        pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
+#define        pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       *pmdp = __pmd(0);
+}
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_pages(unsigned context, unsigned long va,
+                           unsigned long pmdval, int count);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va,
+                         unsigned long pmdval);
+
+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+                            unsigned long address);
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the latter case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ */
+#ifndef CONFIG_PTE_64BIT
+static inline unsigned long pte_update(pte_t *p,
+                                      unsigned long clr,
+                                      unsigned long set)
+{
+#ifdef PTE_ATOMIC_UPDATES
+       unsigned long old, tmp;
+
+       __asm__ __volatile__("\
+1:     lwarx   %0,0,%3\n\
+       andc    %1,%0,%4\n\
+       or      %1,%1,%5\n"
+       PPC405_ERR77(0,%3)
+"      stwcx.  %1,0,%3\n\
+       bne-    1b"
+       : "=&r" (old), "=&r" (tmp), "=m" (*p)
+       : "r" (p), "r" (clr), "r" (set), "m" (*p)
+       : "cc" );
+#else /* PTE_ATOMIC_UPDATES */
+       unsigned long old = pte_val(*p);
+       *p = __pte((old & ~clr) | set);
+#endif /* !PTE_ATOMIC_UPDATES */
+
+#ifdef CONFIG_44x
+       if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
+               icache_44x_need_flush = 1;
+#endif
+       return old;
+}
+#else /* CONFIG_PTE_64BIT */
+static inline unsigned long long pte_update(pte_t *p,
+                                           unsigned long clr,
+                                           unsigned long set)
+{
+#ifdef PTE_ATOMIC_UPDATES
+       unsigned long long old;
+       unsigned long tmp;
+
+       __asm__ __volatile__("\
+1:     lwarx   %L0,0,%4\n\
+       lwzx    %0,0,%3\n\
+       andc    %1,%L0,%5\n\
+       or      %1,%1,%6\n"
+       PPC405_ERR77(0,%3)
+"      stwcx.  %1,0,%4\n\
+       bne-    1b"
+       : "=&r" (old), "=&r" (tmp), "=m" (*p)
+       : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+       : "cc" );
+#else /* PTE_ATOMIC_UPDATES */
+       unsigned long long old = pte_val(*p);
+       *p = __pte((old & ~(unsigned long long)clr) | set);
+#endif /* !PTE_ATOMIC_UPDATES */
+
+#ifdef CONFIG_44x
+       if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
+               icache_44x_need_flush = 1;
+#endif
+       return old;
+}
+#endif /* CONFIG_PTE_64BIT */
+
+/*
+ * 2.6 calls this without flushing the TLB entry; this is wrong
+ * for our hash-based implementation, so we fix that up here.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+{
+       unsigned long old;
+       old = pte_update(ptep, _PAGE_ACCESSED, 0);
+#if _PAGE_HASHPTE != 0
+       if (old & _PAGE_HASHPTE) {
+               unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+               flush_hash_pages(context, addr, ptephys, 1);
+       }
+#endif
+       return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+       __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                                      pte_t *ptep)
+{
+       return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                                     pte_t *ptep)
+{
+       pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+}
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+{
+       unsigned long set = pte_val(entry) &
+               (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+       unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+       pte_update(ptep, clr, set);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)  (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+
+/*
+ * Note that on Book E processors, the pmd contains the kernel virtual
+ * (lowmem) address of the pte page.  The physical address is less useful
+ * because everything runs with translation enabled (even the TLB miss
+ * handler).  On everything else the pmd contains the physical address
+ * of the pte page.  -- paulus
+ */
+#ifndef CONFIG_BOOKE
+#define pmd_page_vaddr(pmd)    \
+       ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)          \
+       pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#else
+#define pmd_page_vaddr(pmd)    \
+       ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)          \
+       pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#endif
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address)      ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address)         ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address)             \
+       (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr)   \
+       ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir, addr)              \
+       ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)         kunmap_atomic(pte)
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ *   -- paulus
+ */
+#define __swp_type(entry)              ((entry).val & 0x1f)
+#define __swp_offset(entry)            ((entry).val >> 5)
+#define __swp_entry(type, offset)      ((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x)          ((pte_t) { (x).val << 3 })
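A hedged round-trip check of the encoding above, with type 2 and offset 100 as arbitrary example values:

    __swp_entry(2, 100).val = 2 | (100 << 5) = 0xc82
    __swp_type(entry)       = 0xc82 & 0x1f   = 2
    __swp_offset(entry)     = 0xc82 >> 5     = 100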
+
+#ifndef CONFIG_PPC_4K_PAGES
+void pgtable_cache_init(void);
+#else
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+#endif
+
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+                     pmd_t **pmdp);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
new file mode 100644
index 0000000..9624ebd
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H
+#define _ASM_POWERPC_NOHASH_32_PTE_40x_H
+#ifdef __KERNEL__
+
+/*
+ * At present, all PowerPC 400-class processors share a similar TLB
+ * architecture. The instruction and data sides share a unified,
+ * 64-entry, fully-associative TLB which is maintained totally under
+ * software control. In addition, the instruction side has a
+ * hardware-managed, 4-entry, fully-associative TLB which serves as a
+ * first level to the shared TLB. These two TLBs are known as the UTLB
+ * and ITLB, respectively (see "mmu.h" for definitions).
+ *
+ * There are several potential gotchas here.  The 40x hardware TLBLO
+ * field looks like this:
+ *
+ * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
+ *
+ * Where possible we make the Linux PTE bits match up with this
+ *
+ * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
+ *   support down to 1k pages); this is done in the TLBMiss exception
+ *   handler.
+ * - We use only zones 0 (for kernel pages) and 1 (for user pages)
+ *   of the 16 available.  Bit 24-26 of the TLB are cleared in the TLB
+ *   miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
+ *   zone.
+ * - PRESENT *must* be in the bottom two bits because swap cache
+ *   entries use the top 30 bits.  Because 40x doesn't support SMP
+ *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
+ *   is cleared in the TLB miss handler before the TLB entry is loaded.
+ * - All other bits of the PTE are loaded into TLBLO without
+ *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
+ *   software PTE bits.  We actually use bits 21, 24, 25, and
+ *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
+ *   PRESENT.
+ */
+
+#define        _PAGE_GUARDED   0x001   /* G: page is guarded from prefetch */
+#define _PAGE_PRESENT  0x002   /* software: PTE contains a translation */
+#define        _PAGE_NO_CACHE  0x004   /* I: caching is inhibited */
+#define        _PAGE_WRITETHRU 0x008   /* W: caching is write-through */
+#define        _PAGE_USER      0x010   /* matches one of the zone permission bits */
+#define        _PAGE_SPECIAL   0x020   /* software: Special page */
+#define        _PAGE_RW        0x040   /* software: Writes permitted */
+#define        _PAGE_DIRTY     0x080   /* software: dirty page */
+#define _PAGE_HWWRITE  0x100   /* hardware: Dirty & RW, set in exception */
+#define _PAGE_EXEC     0x200   /* hardware: EX permission */
+#define _PAGE_ACCESSED 0x400   /* software: R: page referenced */
+
+#define _PMD_PRESENT   0x400   /* PMD points to page of PTEs */
+#define _PMD_BAD       0x802
+#define _PMD_SIZE      0x0e0   /* size field, != 0 for large-page PMD entry */
+#define _PMD_SIZE_4M   0x0c0
+#define _PMD_SIZE_16M  0x0e0
+
+#define PMD_PAGE_SIZE(pmdval)  (1024 << (((pmdval) & _PMD_SIZE) >> 4))
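A hedged check of PMD_PAGE_SIZE() against the size encodings above:

    _PMD_SIZE_4M  = 0x0c0: 1024 << (0xc0 >> 4) = 1024 << 12 =  4 MB
    _PMD_SIZE_16M = 0x0e0: 1024 << (0xe0 >> 4) = 1024 << 14 = 16 MB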
+
+/* Until my rework is finished, 40x still needs atomic PTE updates */
+#define PTE_ATOMIC_UPDATES     1
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
new file mode 100644
index 0000000..fdab41c
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -0,0 +1,97 @@
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H
+#define _ASM_POWERPC_NOHASH_32_PTE_44x_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for PPC440
+ *
+ * Because of the 3 word TLB entries to support 36-bit addressing,
+ * the attributes are difficult to map in such a fashion that they
+ * are easily loaded during exception processing.  I decided to
+ * organize the entry so the ERPN is the only portion in the
+ * upper word of the PTE and the attribute bits below are packed
+ * in as sensibly as they can be in the area below a 4KB page size
+ * oriented RPN.  This at least makes it easy to load the RPN and
+ * ERPN fields in the TLB. -Matt
+ *
+ * This isn't entirely true anymore; at least some bits are now
+ * easier to move into the TLB from the PTE. -BenH.
+ *
+ * Note that these bits preclude future use of a page size
+ * less than 4KB.
+ *
+ *
+ * The PPC 440 core has the following TLB attribute fields:
+ *
+ *   TLB1:
+ *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ *   RPN.................................  -  -  -  -  -  - ERPN.......
+ *
+ *   TLB2:
+ *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ *   -  -  -  -  -    - U0 U1 U2 U3 W  I  M  G  E   - UX UW UR SX SW SR
+ *
+ * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
+ * TLB2 storage attribute fields. Those are:
+ *
+ *   TLB2:
+ *   0...10    11   12   13   14   15   16...31
+ *   no change WL1  IL1I IL1D IL2I IL2D no change
+ *
+ * There are some constraints and options to decide on mapping software bits
+ * into a TLB entry.
+ *
+ *   - PRESENT *must* be in the bottom three bits because swap cache
+ *     entries use the top 29 bits for TLB2.
+ *
+ *   - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
+ *     because it doesn't support SMP. However, some later 460 variants
+ *     have -some- form of SMP support and so I keep the bit there for
+ *     future use
+ *
+ * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
+ * for memory protection related functions (see PTE structure in
+ * include/asm-ppc/mmu.h).  The _PAGE_XXX definitions in this file map to the
+ * above bits.  Note that the bit values are CPU specific, not architecture
+ * specific.
+ *
+ * The kernel PTE entry holds an arch-dependent swp_entry structure under
+ * certain situations. In other words, in such situations some portion of
+ * the PTE bits are used as a swp_entry. In the PPC implementation, the
+ * 3rd-24th LSBs are shared with the swp_entry; however, the 0-2nd LSBs still
+ * hold protection values. That means the three protection bits are
+ * reserved for both the PTE and the SWAP entry in the three least
+ * significant bits.
+ *
+ * The protection bits available for a SWAP entry are:
+ *     _PAGE_PRESENT
+ *     _PAGE_HASHPTE (if HW has)
+ *
+ * So those bits have to be inside the 0-2nd LSBs of the PTE.
+ *
+ */
+
+#define _PAGE_PRESENT  0x00000001              /* S: PTE valid */
+#define _PAGE_RW       0x00000002              /* S: Write permission */
+#define _PAGE_EXEC     0x00000004              /* H: Execute permission */
+#define _PAGE_ACCESSED 0x00000008              /* S: Page referenced */
+#define _PAGE_DIRTY    0x00000010              /* S: Page dirty */
+#define _PAGE_SPECIAL  0x00000020              /* S: Special page */
+#define _PAGE_USER     0x00000040              /* S: User page */
+#define _PAGE_ENDIAN   0x00000080              /* H: E bit */
+#define _PAGE_GUARDED  0x00000100              /* H: G bit */
+#define _PAGE_COHERENT 0x00000200              /* H: M bit */
+#define _PAGE_NO_CACHE 0x00000400              /* H: I bit */
+#define _PAGE_WRITETHRU        0x00000800              /* H: W bit */
+
+/* TODO: Add large page lowmem mapping support */
+#define _PMD_PRESENT   0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD       (~PAGE_MASK)
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_44x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
new file mode 100644
index 0000000..3742b19
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -0,0 +1,65 @@
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_8xx_H
+#define _ASM_POWERPC_NOHASH_32_PTE_8xx_H
+#ifdef __KERNEL__
+
+/*
+ * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
+ * We also use the two level tables, but we can put the real bits in them
+ * needed for the TLB and tablewalk.  These definitions require Mx_CTR.PPM = 0,
+ * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
+ * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
+ * based upon user/super access.  The TLB does not have accessed nor write
+ * protect.  We assume that if the TLB gets loaded with an entry it is
+ * accessed, and overload the changed bit for write protect.  We use
+ * two bits in the software pte that are supposed to be set to zero in
+ * the TLB entry (24 and 25) for these indicators.  Although the level 1
+ * descriptor contains the guarded and writethrough/copyback bits, we can
+ * set these at the page level since they get copied from the Mx_TWC
+ * register when the TLB entry is loaded.  We will use bit 27 for guard, since
+ * that is where it exists in the MD_TWC, and bit 26 for writethrough.
+ * These will get masked from the level 2 descriptor at TLB load time, and
+ * copied to the MD_TWC before it gets loaded.
+ * Large page sizes added.  We currently support two sizes, 4K and 8M.
+ * This also allows a TLB handler optimization because we can directly
+ * load the PMD into MD_TWC.  The 8M pages are only used for kernel
+ * mapping of well known areas.  The PMD (PGD) entries contain control
+ * flags in addition to the address, so care must be taken that the
+ * software no longer assumes these are only pointers.
+ */
+
+/* Definitions for 8xx embedded chips. */
+#define _PAGE_PRESENT  0x0001  /* Page is valid */
+#define _PAGE_NO_CACHE 0x0002  /* I: cache inhibit */
+#define _PAGE_SHARED   0x0004  /* No ASID (context) compare */
+#define _PAGE_SPECIAL  0x0008  /* SW entry, forced to 0 by the TLB miss */
+#define _PAGE_DIRTY    0x0100  /* C: page changed */
+
+/* These 4 software bits must be masked out when the L2 entry is loaded
+ * into the TLB.
+ */
+#define _PAGE_GUARDED  0x0010  /* Copied to L1 G entry in DTLB */
+#define _PAGE_USER     0x0020  /* Copied to L1 APG lsb */
+#define _PAGE_EXEC     0x0040  /* Copied to L1 APG */
+#define _PAGE_WRITETHRU        0x0080  /* software: caching is write through */
+#define _PAGE_ACCESSED 0x0800  /* software: page referenced */
+
+#define _PAGE_RO       0x0600  /* Supervisor RO, User no access */
+
+#define _PMD_PRESENT   0x0001
+#define _PMD_BAD       0x0ff0
+#define _PMD_PAGE_MASK 0x000c
+#define _PMD_PAGE_8M   0x000c
+
+/* Until my rework is finished, 8xx still needs atomic PTE updates */
+#define PTE_ATOMIC_UPDATES     1
+
+/* We need to add _PAGE_SHARED to kernel pages */
+#define _PAGE_KERNEL_RO                (_PAGE_SHARED | _PAGE_RO)
+#define _PAGE_KERNEL_ROX       (_PAGE_SHARED | _PAGE_RO | _PAGE_EXEC)
+#define _PAGE_KERNEL_RW                (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
+                                _PAGE_HWWRITE)
+#define _PAGE_KERNEL_RWX       (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
+                                _PAGE_HWWRITE | _PAGE_EXEC)
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
new file mode 100644
index 0000000..5422d00
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
+#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
+ * processors
+ *
+   MMU Assist Register 3:
+
+   32 33 34 35 36  ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+   RPN......................  0  0 U0 U1 U2 U3 UX SX UW SW UR SR
+
+   - PRESENT *must* be in the bottom three bits because swap cache
+     entries use the top 29 bits.
+
+*/
+
+/* Definitions for FSL Book-E Cores */
+#define _PAGE_PRESENT  0x00001 /* S: PTE contains a translation */
+#define _PAGE_USER     0x00002 /* S: User page (maps to UR) */
+#define _PAGE_RW       0x00004 /* S: Write permission (SW) */
+#define _PAGE_DIRTY    0x00008 /* S: Page dirty */
+#define _PAGE_EXEC     0x00010 /* H: SX permission */
+#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
+
+#define _PAGE_ENDIAN   0x00040 /* H: E bit */
+#define _PAGE_GUARDED  0x00080 /* H: G bit */
+#define _PAGE_COHERENT 0x00100 /* H: M bit */
+#define _PAGE_NO_CACHE 0x00200 /* H: I bit */
+#define _PAGE_WRITETHRU        0x00400 /* H: W bit */
+#define _PAGE_SPECIAL  0x00800 /* S: Special page */
+
+#define _PMD_PRESENT   0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD       (~PAGE_MASK)
+
+#define PTE_WIMGE_SHIFT (6)
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
new file mode 100644
index 0000000..fc7d517
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -0,0 +1,92 @@
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+/*
+ * Entries per page directory level.  The PTE level must use a 64b record
+ * for each page table entry.  The PMD and PGD level use a 32b record for
+ * each entry by assuming that each entry is page aligned.
+ */
+#define PTE_INDEX_SIZE  9
+#define PMD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  9
+#define PGD_INDEX_SIZE  9
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE   (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD   (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD   (1 << PUD_INDEX_SIZE)
+#define PTRS_PER_PGD   (1 << PGD_INDEX_SIZE)
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT      (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+
+/* With 4k base page size, hugepage PTEs go at the PMD level */
+#define MIN_HUGEPTE_SHIFT      PMD_SHIFT
+
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT      (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE       (1UL << PUD_SHIFT)
+#define PUD_MASK       (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT    (PUD_SHIFT + PUD_INDEX_SIZE)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
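Working the shifts through, with the usual 64-bit PAGE_SHIFT = 12 assumed (it is not defined in this file):

    PMD_SHIFT   = 12 + 9 = 21, so a pmd entry maps   2 MB
    PUD_SHIFT   = 21 + 7 = 28, so a pud entry maps 256 MB
    PGDIR_SHIFT = 28 + 9 = 37, so a pgd entry maps 128 GB
    mapped EA   = 37 + 9 = 46 bits, i.e. 64 TB in total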
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS                0
+/* Bits to mask out from a PUD to get to the PMD page */
+#define PUD_MASKED_BITS                0
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS                0
+
+
+/*
+ * 4-level page tables related bits
+ */
+
+#define pgd_none(pgd)          (!pgd_val(pgd))
+#define pgd_bad(pgd)           (pgd_val(pgd) == 0)
+#define pgd_present(pgd)       (pgd_val(pgd) != 0)
+#define pgd_page_vaddr(pgd)    (pgd_val(pgd) & ~PGD_MASKED_BITS)
+
+#ifndef __ASSEMBLY__
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+       *pgdp = __pgd(0);
+}
+
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+       return __pte(pgd_val(pgd));
+}
+
+static inline pgd_t pte_pgd(pte_t pte)
+{
+       return __pgd(pte_val(pte));
+}
+extern struct page *pgd_page(pgd_t pgd);
+
+#endif /* !__ASSEMBLY__ */
+
+#define pud_offset(pgdp, addr) \
+  (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
+    (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+
+#define pud_ERROR(e) \
+       pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+
+/* On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */
+#define remap_4k_pfn(vma, addr, pfn, prot)     \
+       remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+
+#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
new file mode 100644
index 0000000..570fb30
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+
+#define PTE_INDEX_SIZE  8
+#define PMD_INDEX_SIZE  10
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE  12
+
+/*
+ * we support 32 fragments per PTE page of 64K size
+ */
+#define PTE_FRAG_NR    32
+/*
+ * We use a 2K PTE page fragment and another 2K for storing
+ * real_pte_t hash index
+ */
+#define PTE_FRAG_SIZE_SHIFT  11
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
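The two constants are consistent; a hedged check against the 64K base page size this file is built for:

    PTE_FRAG_SIZE = 1 << 11 = 2 KB, and 64 KB / 2 KB = 32 = PTE_FRAG_NR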
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE PTE_FRAG_SIZE
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE   (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD   (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PGD   (1 << PGD_INDEX_SIZE)
+
+/* With 64k base page size, hugepage PTEs go at the PTE level */
+#define MIN_HUGEPTE_SHIFT      PAGE_SHIFT
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT      (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT    (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+/*
+ * Bits to mask out from a PMD to get to the PTE page
+ * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned.
+ */
+#define PMD_MASKED_BITS                (PTE_FRAG_SIZE - 1)
+/* Bits to mask out from a PGD/PUD to get to the PMD page */
+#define PUD_MASKED_BITS                0x1ff
+
+#define pgd_pte(pgd)   (pud_pte(((pud_t){ pgd })))
+#define pte_pgd(pte)   ((pgd_t)pte_pud(pte))
+
+#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H */
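
The fragment numbers above are self-consistent: 32 fragments of 1UL << 11 = 2K bytes fill exactly one 64K page, and PMD_MASKED_BITS strips the sub-page offset to recover a fragment's base, as pmd_page_vaddr() does. A minimal sketch (the PMD value is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    #define PTE_FRAG_SIZE_SHIFT 11
    #define PTE_FRAG_SIZE       (1UL << PTE_FRAG_SIZE_SHIFT)    /* 2K */
    #define PTE_FRAG_NR         32
    #define PMD_MASKED_BITS     (PTE_FRAG_SIZE - 1)

    int main(void)
    {
            /* 32 fragments of 2K fill one 64K page exactly */
            assert(PTE_FRAG_NR * PTE_FRAG_SIZE == 64 * 1024);

            /* clear the low bits to get the 2K-aligned PTE table
             * fragment this PMD points at */
            uint64_t pmd  = 0xc000000000035800ULL;      /* hypothetical */
            uint64_t frag = pmd & ~(uint64_t)PMD_MASKED_BITS;
            assert((frag & PMD_MASKED_BITS) == 0);
            return 0;
    }
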
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
new file mode 100644 (file)
index 0000000..b9f734d
--- /dev/null
@@ -0,0 +1,364 @@
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_H
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ppc64 non-hashed (Book3E) page table.
+ */
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/nohash/64/pgtable-64k.h>
+#else
+#include <asm/nohash/64/pgtable-4k.h>
+#endif
+#include <asm/barrier.h>
+
+#define FIRST_USER_ADDRESS     0UL
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+                           PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_CACHE_INDEX        (PMD_INDEX_SIZE + 1)
+#else
+#define PMD_CACHE_INDEX        PMD_INDEX_SIZE
+#endif
+/*
+ * Define the address range of the kernel non-linear virtual area
+ */
+
+#ifdef CONFIG_PPC_BOOK3E
+#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
+#else
+#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
+#endif
+#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
+ */
+#define VMALLOC_START  KERN_VIRT_START
+#ifdef CONFIG_PPC_BOOK3E
+#define VMALLOC_SIZE   (KERN_VIRT_SIZE >> 2)
+#else
+#define VMALLOC_SIZE   (KERN_VIRT_SIZE >> 1)
+#endif
+#define VMALLOC_END    (VMALLOC_START + VMALLOC_SIZE)
+
+/*
+ * The second half of the kernel virtual space is used for IO mappings,
+ * it's itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
+ *
+ *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
+ *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
+ */
+#define KERN_IO_START  (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
+#define FULL_IO_SIZE   0x80000000ul
+#define  ISA_IO_BASE   (KERN_IO_START)
+#define  ISA_IO_END    (KERN_IO_START + 0x10000ul)
+#define  PHB_IO_BASE   (ISA_IO_END)
+#define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
+#define IOREMAP_BASE   (PHB_IO_END)
+#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE)
+
+
+/*
+ * Region IDs
+ */
+#define REGION_SHIFT           60UL
+#define REGION_MASK            (0xfUL << REGION_SHIFT)
+#define REGION_ID(ea)          (((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID      (REGION_ID(VMALLOC_START))
+#define KERNEL_REGION_ID       (REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID      (0xfUL) /* Server only */
+#define USER_REGION_ID         (0UL)
+
+/*
+ * Defines the address of the vmemmap area, in its own region on
+ * hash table CPUs and after the vmalloc space on Book3E
+ */
+#ifdef CONFIG_PPC_BOOK3E
+#define VMEMMAP_BASE           VMALLOC_END
+#define VMEMMAP_END            KERN_IO_START
+#else
+#define VMEMMAP_BASE           (VMEMMAP_REGION_ID << REGION_SHIFT)
+#endif
+#define vmemmap                        ((struct page *)VMEMMAP_BASE)
+
+
+/*
+ * Include the PTE bits definitions
+ */
+#include <asm/nohash/pte-book3e.h>
+#include <asm/pte-common.h>
+
+#ifdef CONFIG_PPC_MM_SLICES
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#endif /* CONFIG_PPC_MM_SLICES */
+
+#ifndef __ASSEMBLY__
+/* pte_clear moved to later in this file */
+
+#define PMD_BAD_BITS           (PTE_TABLE_SIZE-1)
+#define PUD_BAD_BITS           (PMD_TABLE_SIZE-1)
+
+static inline void pmd_set(pmd_t *pmdp, unsigned long val)
+{
+       *pmdp = __pmd(val);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       *pmdp = __pmd(0);
+}
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+       return __pte(pmd_val(pmd));
+}
+
+#define pmd_none(pmd)          (!pmd_val(pmd))
+#define        pmd_bad(pmd)            (!is_kernel_addr(pmd_val(pmd)) \
+                                || (pmd_val(pmd) & PMD_BAD_BITS))
+#define        pmd_present(pmd)        (!pmd_none(pmd))
+#define pmd_page_vaddr(pmd)    (pmd_val(pmd) & ~PMD_MASKED_BITS)
+extern struct page *pmd_page(pmd_t pmd);
+
+static inline void pud_set(pud_t *pudp, unsigned long val)
+{
+       *pudp = __pud(val);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+       *pudp = __pud(0);
+}
+
+#define pud_none(pud)          (!pud_val(pud))
+#define        pud_bad(pud)            (!is_kernel_addr(pud_val(pud)) \
+                                || (pud_val(pud) & PUD_BAD_BITS))
+#define pud_present(pud)       (pud_val(pud) != 0)
+#define pud_page_vaddr(pud)    (pud_val(pud) & ~PUD_MASKED_BITS)
+
+extern struct page *pud_page(pud_t pud);
+
+static inline pte_t pud_pte(pud_t pud)
+{
+       return __pte(pud_val(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+       return __pud(pte_val(pte));
+}
+#define pud_write(pud)         pte_write(pud_pte(pud))
+#define pgd_write(pgd)         pte_write(pgd_pte(pgd))
+
+static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+{
+       *pgdp = __pgd(val);
+}
+
+/*
+ * Find an entry in a page-table-directory.  We combine the address region
+ * (the high order N bits) and the pgd portion of the address.
+ */
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
+
+#define pgd_offset(mm, address)         ((mm)->pgd + pgd_index(address))
+
+#define pmd_offset(pudp,addr) \
+  (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+#define pte_offset_kernel(dir,addr) \
+  (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+
+#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)                 do { } while(0)
+
+/* to find an entry in a kernel page-table-directory */
+/* This now only contains the vmalloc pages */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+                           pte_t *ptep, unsigned long pte, int huge);
+
+/* Atomic PTE updates */
+static inline unsigned long pte_update(struct mm_struct *mm,
+                                      unsigned long addr,
+                                      pte_t *ptep, unsigned long clr,
+                                      unsigned long set,
+                                      int huge)
+{
+#ifdef PTE_ATOMIC_UPDATES
+       unsigned long old, tmp;
+
+       __asm__ __volatile__(
+       "1:     ldarx   %0,0,%3         # pte_update\n\
+       andi.   %1,%0,%6\n\
+       bne-    1b \n\
+       andc    %1,%0,%4 \n\
+       or      %1,%1,%7\n\
+       stdcx.  %1,0,%3 \n\
+       bne-    1b"
+       : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
+       : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
+       : "cc" );
+#else
+       unsigned long old = pte_val(*ptep);
+       *ptep = __pte((old & ~clr) | set);
+#endif
+       /* huge pages use the old page table lock */
+       if (!huge)
+               assert_pte_locked(mm, addr);
+
+#ifdef CONFIG_PPC_STD_MMU_64
+       if (old & _PAGE_HASHPTE)
+               hpte_need_flush(mm, addr, ptep, old, huge);
+#endif
+
+       return old;
+}
+
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+                                             unsigned long addr, pte_t *ptep)
+{
+       unsigned long old;
+
+       if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+               return 0;
+       old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+       return (old & _PAGE_ACCESSED) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __addr, __ptep)                  \
+({                                                                        \
+       int __r;                                                           \
+       __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
+       __r;                                                               \
+})
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                                     pte_t *ptep)
+{
+       if ((pte_val(*ptep) & _PAGE_RW) == 0)
+               return;
+
+       pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       if ((pte_val(*ptep) & _PAGE_RW) == 0)
+               return;
+
+       pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+}
+
+/*
+ * We currently remove entries from the hashtable regardless of whether
+ * the entry was young or dirty. The generic routines only flush if the
+ * entry was young or dirty which is not good enough.
+ *
+ * We should be more intelligent about this but for the moment we override
+ * these functions and force a tlb flush unconditionally
+ */
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep)               \
+({                                                                     \
+       int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
+                                                 __ptep);              \
+       __young;                                                        \
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+                                      unsigned long addr, pte_t *ptep)
+{
+       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+       return __pte(old);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+                            pte_t * ptep)
+{
+       pte_update(mm, addr, ptep, ~0UL, 0, 0);
+}
+
+
+/* Set the dirty and/or accessed bits atomically in a Linux PTE; this
+ * function doesn't need to flush the hash entry
+ */
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+{
+       unsigned long bits = pte_val(entry) &
+               (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+
+#ifdef PTE_ATOMIC_UPDATES
+       unsigned long old, tmp;
+
+       __asm__ __volatile__(
+       "1:     ldarx   %0,0,%4\n\
+               andi.   %1,%0,%6\n\
+               bne-    1b \n\
+               or      %0,%3,%0\n\
+               stdcx.  %0,0,%4\n\
+               bne-    1b"
+       :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
+       :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
+       :"cc");
+#else
+       unsigned long old = pte_val(*ptep);
+       *ptep = __pte(old | bits);
+#endif
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)  (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+
+#define pte_ERROR(e) \
+       pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+       pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* Encode and de-code a swap entry */
+#define MAX_SWAPFILES_CHECK() do { \
+       BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
+       /*                                                      \
+        * Don't have overlapping bits with _PAGE_HPTEFLAGS     \
+        * We filter HPTEFLAGS on set_pte.                      \
+        */                                                     \
+       BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
+       } while (0)
+/*
+ * On PTEs we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT
+ */
+#define SWP_TYPE_BITS 5
+#define __swp_type(x)          (((x).val >> _PAGE_BIT_SWAP_TYPE) \
+                               & ((1UL << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x)                ((x).val >> PTE_RPN_SHIFT)
+#define __swp_entry(type, offset)      ((swp_entry_t) { \
+                                       ((type) << _PAGE_BIT_SWAP_TYPE) \
+                                       | ((offset) << PTE_RPN_SHIFT) })
+
+#define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val((pte)) })
+#define __swp_entry_to_pte(x)          __pte((x).val)
+
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_init(void);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */
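
The swap-entry packing above keeps the type in SWP_TYPE_BITS bits starting at _PAGE_BIT_SWAP_TYPE and the offset above PTE_RPN_SHIFT, so neither collides with _PAGE_PRESENT. A user-space model of the round trip (bit positions are assumptions taken from the Book3E PTE definitions with the 4K-page PTE_RPN_SHIFT of 24, and a 64-bit unsigned long is assumed):

    #include <assert.h>

    #define _PAGE_BIT_SWAP_TYPE 2
    #define PTE_RPN_SHIFT       24
    #define SWP_TYPE_BITS       5

    typedef struct { unsigned long val; } swp_entry_t;

    #define __swp_entry(type, offset)                                      \
            ((swp_entry_t){ ((unsigned long)(type) << _PAGE_BIT_SWAP_TYPE) \
                          | ((unsigned long)(offset) << PTE_RPN_SHIFT) })
    #define __swp_type(x)   (((x).val >> _PAGE_BIT_SWAP_TYPE) &            \
                             ((1UL << SWP_TYPE_BITS) - 1))
    #define __swp_offset(x) ((x).val >> PTE_RPN_SHIFT)

    int main(void)
    {
            /* encode and decode a (type, offset) pair losslessly */
            swp_entry_t e = __swp_entry(3, 0x12345);
            assert(__swp_type(e) == 3);
            assert(__swp_offset(e) == 0x12345);
            return 0;
    }
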
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
new file mode 100644 (file)
index 0000000..1263c22
--- /dev/null
@@ -0,0 +1,252 @@
+#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_PGTABLE_H
+
+#if defined(CONFIG_PPC64)
+#include <asm/nohash/64/pgtable.h>
+#else
+#include <asm/nohash/32/pgtable.h>
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)
+{
+       return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
+}
+static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)         { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte)       { return pte_val(pte) & _PAGE_SPECIAL; }
+static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)   { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h. On powerpc, this will only
+ * work for user pages and always return true for kernel pages.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+       return (pte_val(pte) &
+               (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+       return pte_protnone(pmd_pte(pmd));
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline int pte_present(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
+       return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+                    pgprot_val(pgprot)); }
+static inline unsigned long pte_pfn(pte_t pte) {
+       return pte_val(pte) >> PTE_RPN_SHIFT; }
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       pte_basic_t ptev;
+
+       ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
+       ptev |= _PAGE_RO;
+       return __pte(ptev);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       pte_basic_t ptev;
+
+       ptev = pte_val(pte) & ~_PAGE_RO;
+       ptev |= _PAGE_RW;
+       return __pte(ptev);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Insert a PTE, top-level function is out of line. It uses an inline
+ * low level function in the respective pgtable-* files
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+                      pte_t pte);
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+                               pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+       /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+        * helper pte_update() which does an atomic update. We need to do that
+        * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+        * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+        * the hash bits instead (ie, same as the non-SMP case)
+        */
+       if (percpu)
+               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                             | (pte_val(pte) & ~_PAGE_HASHPTE));
+       else
+               pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+       /* Second case is 32-bit with 64-bit PTE.  In this case, we
+        * can just store as long as we do the two halves in the right order
+        * with a barrier in between. This is possible because we take care,
+        * in the hash code, to pre-invalidate if the PTE was already hashed,
+        * which synchronizes us with any concurrent invalidation.
+        * In the percpu case, we also fallback to the simple update preserving
+        * the hash bits
+        */
+       if (percpu) {
+               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                             | (pte_val(pte) & ~_PAGE_HASHPTE));
+               return;
+       }
+#if _PAGE_HASHPTE != 0
+       if (pte_val(*ptep) & _PAGE_HASHPTE)
+               flush_hash_entry(mm, ptep, addr);
+#endif
+       __asm__ __volatile__("\
+               stw%U0%X0 %2,%0\n\
+               eieio\n\
+               stw%U0%X0 %L2,%1"
+       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+       : "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+       /* Third case is 32-bit hash table in UP mode, we need to preserve
+        * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+        * translation in the hash yet (done in a subsequent flush_tlb_xxx())
+        * and we need to keep track that this PTE needs invalidating
+        */
+       *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                     | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+       /* Anything else just stores the PTE normally. That covers all 64-bit
+        * cases, and 32-bit non-hash with 32-bit PTEs.
+        */
+       *ptep = pte;
+
+#ifdef CONFIG_PPC_BOOK3E_64
+       /*
+        * With hardware tablewalk, a sync is needed to ensure that
+        * subsequent accesses see the PTE we just wrote.  Unlike userspace
+        * mappings, we can't tolerate spurious faults, so make sure
+        * the new PTE will be seen the first time.
+        */
+       if (is_kernel_addr(addr))
+               mb();
+#endif
+#endif
+}
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+                                pte_t *ptep, pte_t entry, int dirty);
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL        (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+                        _PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot)   (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                           _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                           _PAGE_NO_CACHE))
+
+#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                           _PAGE_COHERENT))
+
+#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                           _PAGE_COHERENT | _PAGE_WRITETHRU))
+
+#define pgprot_cached_noncoherent(prot) \
+               (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
+
+#define pgprot_writecombine pgprot_noncached_wc
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                    unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int hugepd_ok(hugepd_t hpd)
+{
+       return (hpd.pd > 0);
+}
+
+static inline int pmd_huge(pmd_t pmd)
+{
+       return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+       return 0;
+}
+
+static inline int pgd_huge(pgd_t pgd)
+{
+       return 0;
+}
+#define pgd_huge               pgd_huge
+
+#define is_hugepd(hpd)         (hugepd_ok(hpd))
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif
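
The write-permission accessors above cover both conventions at once: parts that set _PAGE_RW on writable pages and parts (8xx) that instead set _PAGE_RO on read-only ones. A user-space model, assuming the Book3E values where _PAGE_RW is BAP_SW | BAP_UW and _PAGE_RO/_PAGE_HWWRITE default to 0 as pte-common.h arranges:

    #include <assert.h>

    #define _PAGE_RW        0x30UL  /* assumed: BAP_SW | BAP_UW */
    #define _PAGE_RO        0x0UL
    #define _PAGE_HWWRITE   0x0UL

    typedef struct { unsigned long pte; } pte_t;
    #define __pte(x)   ((pte_t){ (x) })
    #define pte_val(x) ((x).pte)

    static int pte_write(pte_t pte)
    {
            return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
    }

    static pte_t pte_wrprotect(pte_t pte)
    {
            return __pte((pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE))
                         | _PAGE_RO);
    }

    int main(void)
    {
            pte_t p = __pte(_PAGE_RW);
            assert(pte_write(p));
            assert(!pte_write(pte_wrprotect(p)));
            return 0;
    }
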
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
new file mode 100644 (file)
index 0000000..e16807b
--- /dev/null
@@ -0,0 +1,87 @@
+#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
+#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for processors compliant to the Book3E
+ * architecture 2.06 or later. The position of the PTE bits
+ * matches the HW definition of the optional Embedded Page Table
+ * category.
+ */
+
+/* Architected bits */
+#define _PAGE_PRESENT  0x000001 /* software: pte contains a translation */
+#define _PAGE_SW1      0x000002
+#define _PAGE_BIT_SWAP_TYPE    2
+#define _PAGE_BAP_SR   0x000004
+#define _PAGE_BAP_UR   0x000008
+#define _PAGE_BAP_SW   0x000010
+#define _PAGE_BAP_UW   0x000020
+#define _PAGE_BAP_SX   0x000040
+#define _PAGE_BAP_UX   0x000080
+#define _PAGE_PSIZE_MSK        0x000f00
+#define _PAGE_PSIZE_4K 0x000200
+#define _PAGE_PSIZE_8K 0x000300
+#define _PAGE_PSIZE_16K        0x000400
+#define _PAGE_PSIZE_32K        0x000500
+#define _PAGE_PSIZE_64K        0x000600
+#define _PAGE_PSIZE_128K       0x000700
+#define _PAGE_PSIZE_256K       0x000800
+#define _PAGE_PSIZE_512K       0x000900
+#define _PAGE_PSIZE_1M 0x000a00
+#define _PAGE_PSIZE_2M 0x000b00
+#define _PAGE_PSIZE_4M 0x000c00
+#define _PAGE_PSIZE_8M 0x000d00
+#define _PAGE_PSIZE_16M        0x000e00
+#define _PAGE_PSIZE_32M        0x000f00
+#define _PAGE_DIRTY    0x001000 /* C: page changed */
+#define _PAGE_SW0      0x002000
+#define _PAGE_U3       0x004000
+#define _PAGE_U2       0x008000
+#define _PAGE_U1       0x010000
+#define _PAGE_U0       0x020000
+#define _PAGE_ACCESSED 0x040000
+#define _PAGE_ENDIAN   0x080000
+#define _PAGE_GUARDED  0x100000
+#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
+#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
+#define _PAGE_WRITETHRU        0x800000 /* W: cache write-through */
+
+/* "Higher level" linux bit combinations */
+#define _PAGE_EXEC             _PAGE_BAP_UX /* .. and was cache cleaned */
+#define _PAGE_RW               (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
+#define _PAGE_KERNEL_RW                (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO                (_PAGE_BAP_SR)
+#define _PAGE_KERNEL_RWX       (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
+#define _PAGE_KERNEL_ROX       (_PAGE_BAP_SR | _PAGE_BAP_SX)
+#define _PAGE_USER             (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
+
+#define _PAGE_HASHPTE  0
+#define _PAGE_BUSY     0
+
+#define _PAGE_SPECIAL  _PAGE_SW0
+
+/* Flags to be preserved on PTE modifications */
+#define _PAGE_HPTEFLAGS        _PAGE_BUSY
+
+/* Base page size */
+#ifdef CONFIG_PPC_64K_PAGES
+#define _PAGE_PSIZE    _PAGE_PSIZE_64K
+#define PTE_RPN_SHIFT  (28)
+#else
+#define _PAGE_PSIZE    _PAGE_PSIZE_4K
+#define        PTE_RPN_SHIFT   (24)
+#endif
+
+#define PTE_WIMGE_SHIFT (19)
+#define PTE_BAP_SHIFT  (2)
+
+/* On 32-bit, we never clear the top part of the PTE */
+#ifdef CONFIG_PPC32
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+#define _PMD_PRESENT   0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD       (~PAGE_MASK)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /*  _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */
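
The six architected BAP bits split read/write/execute by privilege level; the "higher level" combinations above are simply unions of them. A small decoder (bit values copied from the header) makes the split visible:

    #include <stdio.h>

    #define _PAGE_BAP_SR 0x000004UL
    #define _PAGE_BAP_UR 0x000008UL
    #define _PAGE_BAP_SW 0x000010UL
    #define _PAGE_BAP_UW 0x000020UL
    #define _PAGE_BAP_SX 0x000040UL
    #define _PAGE_BAP_UX 0x000080UL

    static void show_perms(unsigned long pte)
    {
            printf("kernel %c%c%c  user %c%c%c\n",
                   (pte & _PAGE_BAP_SR) ? 'r' : '-',
                   (pte & _PAGE_BAP_SW) ? 'w' : '-',
                   (pte & _PAGE_BAP_SX) ? 'x' : '-',
                   (pte & _PAGE_BAP_UR) ? 'r' : '-',
                   (pte & _PAGE_BAP_UW) ? 'w' : '-',
                   (pte & _PAGE_BAP_UX) ? 'x' : '-');
    }

    int main(void)
    {
            /* _PAGE_KERNEL_RWX without _PAGE_DIRTY: kernel rwx, user --- */
            show_perms(_PAGE_BAP_SR | _PAGE_BAP_SW | _PAGE_BAP_SX);
            /* _PAGE_USER | _PAGE_RW: readable and writable from user space */
            show_perms(_PAGE_BAP_UR | _PAGE_BAP_SR |
                       _PAGE_BAP_UW | _PAGE_BAP_SW);
            return 0;
    }
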
index 8374afe..f8faaae 100644 (file)
 #define OPAL_LEDS_GET_INDICATOR                        114
 #define OPAL_LEDS_SET_INDICATOR                        115
 #define OPAL_CEC_REBOOT2                       116
-#define OPAL_LAST                              116
+#define OPAL_CONSOLE_FLUSH                     117
+#define OPAL_LAST                              117
 
 /* Device tree flags */
 
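
The new OPAL_CONSOLE_FLUSH token backs the opal_console_flush() wrapper and opal_kmsg_init() declared below, which let a kmsg dumper push firmware-buffered console output out on panic. A hedged sketch of how such a dumper can be wired up (an illustration, not the merged opal-kmsg.c):

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/kmsg_dump.h>
    #include <asm/opal.h>

    static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper,
                                             enum kmsg_dump_reason reason)
    {
            /* outside of a panic the normal console path drains itself */
            if (reason != KMSG_DUMP_PANIC)
                    return;
            opal_console_flush(0);      /* terminal 0: the primary console */
    }

    static struct kmsg_dumper opal_kmsg_dumper = {
            .dump = kmsg_dump_opal_console_flush,
    };

    void __init opal_kmsg_init(void)
    {
            if (kmsg_dump_register(&opal_kmsg_dumper))
                    pr_err("opal: kmsg_dumper registration failed\n");
    }
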
index 8001159..07a99e6 100644 (file)
@@ -35,6 +35,7 @@ int64_t opal_console_read(int64_t term_number, __be64 *length,
                          uint8_t *buffer);
 int64_t opal_console_write_buffer_space(int64_t term_number,
                                        __be64 *length);
+int64_t opal_console_flush(int64_t term_number);
 int64_t opal_rtc_read(__be32 *year_month_day,
                      __be64 *hour_minute_second_millisecond);
 int64_t opal_rtc_write(uint32_t year_month_day,
@@ -262,6 +263,8 @@ extern int opal_resync_timebase(void);
 
 extern void opal_lpc_init(void);
 
+extern void opal_kmsg_init(void);
+
 extern int opal_event_request(unsigned int opal_event_nr);
 
 struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
index 70bd438..546540b 100644 (file)
@@ -16,6 +16,7 @@
 
 #ifdef CONFIG_PPC64
 
+#include <linux/string.h>
 #include <asm/types.h>
 #include <asm/lppaca.h>
 #include <asm/mmu.h>
@@ -131,7 +132,16 @@ struct paca_struct {
        struct tlb_core_data tcd;
 #endif /* CONFIG_PPC_BOOK3E */
 
-       mm_context_t context;
+#ifdef CONFIG_PPC_BOOK3S
+       mm_context_id_t mm_ctx_id;
+#ifdef CONFIG_PPC_MM_SLICES
+       u64 mm_ctx_low_slices_psize;
+       unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
+#else
+       u16 mm_ctx_user_psize;
+       u16 mm_ctx_sllp;
+#endif
+#endif
 
        /*
         * then miscellaneous read-write fields
@@ -194,6 +204,23 @@ struct paca_struct {
 #endif
 };
 
+#ifdef CONFIG_PPC_BOOK3S
+static inline void copy_mm_to_paca(mm_context_t *context)
+{
+       get_paca()->mm_ctx_id = context->id;
+#ifdef CONFIG_PPC_MM_SLICES
+       get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
+       memcpy(&get_paca()->mm_ctx_high_slices_psize,
+              &context->high_slices_psize, SLICE_ARRAY_SIZE);
+#else
+       get_paca()->mm_ctx_user_psize = context->user_psize;
+       get_paca()->mm_ctx_sllp = context->sllp;
+#endif
+}
+#else
+static inline void copy_mm_to_paca(mm_context_t *context) { }
+#endif
+
 extern struct paca_struct *paca;
 extern void initialise_paca(struct paca_struct *new_paca, int cpu);
 extern void setup_paca(struct paca_struct *new_paca);
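
With copy_mm_to_paca() only the fields the low-level hash MMU code actually needs are mirrored into the PACA, instead of the whole mm_context_t. A hedged sketch of a caller (hypothetical; the real call sites are in the SLB switching path):

    #include <linux/mm_types.h>
    #include <asm/paca.h>

    /* illustration only: mirror the next task's context into the
     * PACA before switching translation to it */
    static void example_switch_in(struct mm_struct *next)
    {
            copy_mm_to_paca(&next->context);
            /* ... then rebuild/reload the SLB entries for "next" ... */
    }
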
index 3140c19..e34124f 100644 (file)
@@ -286,8 +286,11 @@ extern long long virt_phys_offset;
 
 /* PTE level */
 typedef struct { pte_basic_t pte; } pte_t;
-#define pte_val(x)     ((x).pte)
 #define __pte(x)       ((pte_t) { (x) })
+static inline pte_basic_t pte_val(pte_t x)
+{
+       return x.pte;
+}
 
 /* 64k pages additionally define a bigger "real PTE" type that gathers
  * the "second half" part of the PTE for pseudo 64k pages
@@ -301,21 +304,30 @@ typedef struct { pte_t pte; } real_pte_t;
 /* PMD level */
 #ifdef CONFIG_PPC64
 typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x)     ((x).pmd)
 #define __pmd(x)       ((pmd_t) { (x) })
+static inline unsigned long pmd_val(pmd_t x)
+{
+       return x.pmd;
+}
 
 /* PUD level exists only on 4k pages */
 #ifndef CONFIG_PPC_64K_PAGES
 typedef struct { unsigned long pud; } pud_t;
-#define pud_val(x)     ((x).pud)
 #define __pud(x)       ((pud_t) { (x) })
+static inline unsigned long pud_val(pud_t x)
+{
+       return x.pud;
+}
 #endif /* !CONFIG_PPC_64K_PAGES */
 #endif /* CONFIG_PPC64 */
 
 /* PGD level */
 typedef struct { unsigned long pgd; } pgd_t;
-#define pgd_val(x)     ((x).pgd)
 #define __pgd(x)       ((pgd_t) { (x) })
+static inline unsigned long pgd_val(pgd_t x)
+{
+       return x.pgd;
+}
 
 /* Page protection bits */
 typedef struct { unsigned long pgprot; } pgprot_t;
@@ -329,8 +341,11 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  */
 
 typedef pte_basic_t pte_t;
-#define pte_val(x)     (x)
 #define __pte(x)       (x)
+static inline pte_basic_t pte_val(pte_t pte)
+{
+       return pte;
+}
 
 #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
@@ -341,67 +356,42 @@ typedef pte_t real_pte_t;
 
 #ifdef CONFIG_PPC64
 typedef unsigned long pmd_t;
-#define pmd_val(x)     (x)
 #define __pmd(x)       (x)
+static inline unsigned long pmd_val(pmd_t pmd)
+{
+       return pmd;
+}
 
 #ifndef CONFIG_PPC_64K_PAGES
 typedef unsigned long pud_t;
-#define pud_val(x)     (x)
 #define __pud(x)       (x)
+static inline unsigned long pud_val(pud_t pud)
+{
+       return pud;
+}
 #endif /* !CONFIG_PPC_64K_PAGES */
 #endif /* CONFIG_PPC64 */
 
 typedef unsigned long pgd_t;
-#define pgd_val(x)     (x)
-#define pgprot_val(x)  (x)
+#define __pgd(x)       (x)
+static inline unsigned long pgd_val(pgd_t pgd)
+{
+       return pgd;
+}
 
 typedef unsigned long pgprot_t;
-#define __pgd(x)       (x)
+#define pgprot_val(x)  (x)
 #define __pgprot(x)    (x)
 
 #endif
 
 typedef struct { signed long pd; } hugepd_t;
 
-#ifdef CONFIG_HUGETLB_PAGE
-#ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_PPC_64K_PAGES
-/*
- * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
- * need to setup hugepage directory for them. Our pte and page directory format
- * enable us to have this enabled. But to avoid errors when implementing new
- * features disable hugepd for 64K. We enable a debug version here, So we catch
- * wrong usage.
- */
-#ifdef CONFIG_DEBUG_VM
-extern int hugepd_ok(hugepd_t hpd);
-#else
-#define hugepd_ok(x)   (0)
-#endif
-#else
-static inline int hugepd_ok(hugepd_t hpd)
-{
-       /*
-        * hugepd pointer, bottom two bits == 00 and next 4 bits
-        * indicate size of table
-        */
-       return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
-}
-#endif
-#else
-static inline int hugepd_ok(hugepd_t hpd)
-{
-       return (hpd.pd > 0);
-}
-#endif
-
-#define is_hugepd(hpd)               (hugepd_ok(hpd))
-#define pgd_huge pgd_huge
-int pgd_huge(pgd_t pgd);
-#else /* CONFIG_HUGETLB_PAGE */
-#define is_hugepd(pdep)                        0
-#define pgd_huge(pgd)                  0
+#ifndef CONFIG_HUGETLB_PAGE
+#define is_hugepd(pdep)                (0)
+#define pgd_huge(pgd)          (0)
 #endif /* CONFIG_HUGETLB_PAGE */
+
 #define __hugepd(x) ((hugepd_t) { (x) })
 
 struct page;
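
Turning the *_val() accessors from macros into static inline functions makes them genuine rvalues in both page-table-type configurations, so stray lvalue uses (such as assigning through pte_val()) now fail to compile everywhere. A user-space model:

    #include <assert.h>

    typedef unsigned long pte_basic_t;
    typedef struct { pte_basic_t pte; } pte_t;
    #define __pte(x) ((pte_t){ (x) })

    static inline pte_basic_t pte_val(pte_t x)
    {
            return x.pte;
    }

    int main(void)
    {
            pte_t pte = __pte(0x5);
            assert(pte_val(pte) == 0x5);
            /* "pte_val(pte) = 0x6;" no longer compiles anywhere: a
             * function call is not an lvalue, so new values must go
             * through the __pte() constructor */
            pte = __pte(0x6);
            assert(pte_val(pte) == 0x6);
            return 0;
    }
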
index 37fc535..54843ca 100644 (file)
@@ -205,6 +205,7 @@ struct pci_dn {
 
        int     pci_ext_config_space;   /* for pci devices */
 
+       struct  pci_dev *pcidev;        /* back-pointer to the pci device */
 #ifdef CONFIG_EEH
        struct eeh_dev *edev;           /* eeh device */
 #endif
index 3453bd8..6f8065a 100644 (file)
@@ -149,4 +149,8 @@ extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
 extern void pcibios_scan_phb(struct pci_controller *hose);
 
 #endif /* __KERNEL__ */
+
+extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev);
+extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index);
+
 #endif /* __ASM_POWERPC_PCI_H */
index 842846c..76d6b9e 100644 (file)
@@ -21,16 +21,34 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 /* #define pgd_populate(mm, pmd, pte)      BUG() */
 
 #ifndef CONFIG_BOOKE
-#define pmd_populate_kernel(mm, pmd, pte)      \
-               (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
-#define pmd_populate(mm, pmd, pte)     \
-               (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+                                      pte_t *pte)
+{
+       *pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+                               pgtable_t pte_page)
+{
+       *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
+}
+
 #define pmd_pgtable(pmd) pmd_page(pmd)
 #else
-#define pmd_populate_kernel(mm, pmd, pte)      \
-               (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
-#define pmd_populate(mm, pmd, pte)     \
-               (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+                                      pte_t *pte)
+{
+       *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+                               pgtable_t pte_page)
+{
+       *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+}
+
 #define pmd_pgtable(pmd) pmd_page(pmd)
 #endif
 
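
Besides type-checking the arguments, the inline versions make the stored value explicit: on non-BOOKE the PMD slot holds the PTE table's physical address plus a present bit. A user-space model (the identity __pa() stand-in and the bit value are assumptions for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define _PMD_PRESENT 0x1UL          /* stand-in value */

    typedef struct { uint64_t pmd; } pmd_t;
    #define __pmd(x) ((pmd_t){ (x) })

    /* stand-in for __pa(): identity mapping for the demo */
    static uint64_t fake_pa(const void *va)
    {
            return (uint64_t)(uintptr_t)va;
    }

    static void pmd_populate_kernel(pmd_t *pmdp, void *pte_table)
    {
            *pmdp = __pmd(fake_pa(pte_table) | _PMD_PRESENT);
    }

    int main(void)
    {
            static uint64_t pte_table[512]
                    __attribute__((aligned(4096)));
            pmd_t pmd;

            pmd_populate_kernel(&pmd, pte_table);
            assert(pmd.pmd & _PMD_PRESENT);
            assert((pmd.pmd & ~(uint64_t)0xfff) == fake_pa(pte_table));
            return 0;
    }
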
index 4b0be20..69ef28a 100644 (file)
@@ -53,7 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 #ifndef CONFIG_PPC_64K_PAGES
 
-#define pgd_populate(MM, PGD, PUD)     pgd_set(PGD, PUD)
+#define pgd_populate(MM, PGD, PUD)     pgd_set(PGD, (unsigned long)PUD)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -71,9 +71,18 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
        pud_set(pud, (unsigned long)pmd);
 }
 
-#define pmd_populate(mm, pmd, pte_page) \
-       pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+                                      pte_t *pte)
+{
+       pmd_set(pmd, (unsigned long)pte);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+                               pgtable_t pte_page)
+{
+       pmd_set(pmd, (unsigned long)page_address(pte_page));
+}
+
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -154,16 +163,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 }
 
 #else /* if CONFIG_PPC_64K_PAGES */
-/*
- * we support 16 fragments per PTE page.
- */
-#define PTE_FRAG_NR    16
-/*
- * We use a 2K PTE page fragment and another 2K for storing
- * real_pte_t hash index
- */
-#define PTE_FRAG_SIZE_SHIFT  12
-#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
 
 extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
 extern void page_table_free(struct mm_struct *, unsigned long *, int);
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
deleted file mode 100644 (file)
index 9c32656..0000000
+++ /dev/null
@@ -1,340 +0,0 @@
-#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
-#define _ASM_POWERPC_PGTABLE_PPC32_H
-
-#include <asm-generic/pgtable-nopmd.h>
-
-#ifndef __ASSEMBLY__
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <asm/io.h>                    /* For sub-arch specific PPC_PIN_SIZE */
-
-extern unsigned long ioremap_bot;
-
-#ifdef CONFIG_44x
-extern int icache_44x_need_flush;
-#endif
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * The normal case is that PTEs are 32-bits and we have a 1-page
- * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
- *
- * For any >32-bit physical address platform, we can use the following
- * two level page table layout where the pgdir is 8KB and the MS 13 bits
- * are an index to the second level table.  The combined pgdir/pmd first
- * level has 2048 entries and the second level has 512 64-bit PTE entries.
- * -Matt
- */
-/* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT    (PAGE_SHIFT + PTE_SHIFT)
-#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK     (~(PGDIR_SIZE-1))
-
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
-#endif /* __ASSEMBLY__ */
-
-#define PTRS_PER_PTE   (1 << PTE_SHIFT)
-#define PTRS_PER_PMD   1
-#define PTRS_PER_PGD   (1 << (32 - PGDIR_SHIFT))
-
-#define USER_PTRS_PER_PGD      (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS     0UL
-
-#define pte_ERROR(e) \
-       pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
-               (unsigned long long)pte_val(e))
-#define pgd_ERROR(e) \
-       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
- * value (for now) on others, from where we can start layout kernel
- * virtual space that goes below PKMAP and FIXMAP
- */
-#ifdef CONFIG_HIGHMEM
-#define KVIRT_TOP      PKMAP_BASE
-#else
-#define KVIRT_TOP      (0xfe000000UL)  /* for now, could be FIXMAP_BASE ? */
-#endif
-
-/*
- * ioremap_bot starts at that address. Early ioremaps move down from there,
- * until mem_init() at which point this becomes the top of the vmalloc
- * and ioremap space
- */
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define IOREMAP_TOP    ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
-#else
-#define IOREMAP_TOP    KVIRT_TOP
-#endif
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 16MB value just means that there will be a 64MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- *
- * We no longer map larger than phys RAM with the BATs so we don't have
- * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
- * about clashes between our early calls to ioremap() that start growing down
- * from ioremap_base being run into the VM area allocations (growing upwards
- * from VMALLOC_START).  For this reason we have ioremap_bot to check when
- * we actually run into our mappings setup in the early boot with the VM
- * system.  This really does become a problem for machines with good amounts
- * of RAM.  -- Cort
- */
-#define VMALLOC_OFFSET (0x1000000) /* 16M */
-#ifdef PPC_PIN_SIZE
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#else
-#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#endif
-#define VMALLOC_END    ioremap_bot
-
-/*
- * Bits in a linux-style PTE.  These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible.
- */
-
-#if defined(CONFIG_40x)
-#include <asm/pte-40x.h>
-#elif defined(CONFIG_44x)
-#include <asm/pte-44x.h>
-#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
-#include <asm/pte-book3e.h>
-#elif defined(CONFIG_FSL_BOOKE)
-#include <asm/pte-fsl-booke.h>
-#elif defined(CONFIG_8xx)
-#include <asm/pte-8xx.h>
-#else /* CONFIG_6xx */
-#include <asm/pte-hash32.h>
-#endif
-
-/* And here we include common definitions */
-#include <asm/pte-common.h>
-
-#ifndef __ASSEMBLY__
-
-#define pte_clear(mm, addr, ptep) \
-       do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
-
-#define pmd_none(pmd)          (!pmd_val(pmd))
-#define        pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
-#define        pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
-#define        pmd_clear(pmdp)         do { pmd_val(*(pmdp)) = 0; } while (0)
-
-/*
- * When flushing the tlb entry for a page, we also need to flush the hash
- * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
- */
-extern int flush_hash_pages(unsigned context, unsigned long va,
-                           unsigned long pmdval, int count);
-
-/* Add an HPTE to the hash table */
-extern void add_hash_page(unsigned context, unsigned long va,
-                         unsigned long pmdval);
-
-/* Flush an entry from the TLB/hash table */
-extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
-                            unsigned long address);
-
-/*
- * PTE updates. This function is called whenever an existing
- * valid PTE is updated. This does -not- include set_pte_at()
- * which nowadays only sets a new PTE.
- *
- * Depending on the type of MMU, we may need to use atomic updates
- * and the PTE may be either 32 or 64 bit wide. In the later case,
- * when using atomic updates, only the low part of the PTE is
- * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
- */
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
-                                      unsigned long clr,
-                                      unsigned long set)
-{
-#ifdef PTE_ATOMIC_UPDATES
-       unsigned long old, tmp;
-
-       __asm__ __volatile__("\
-1:     lwarx   %0,0,%3\n\
-       andc    %1,%0,%4\n\
-       or      %1,%1,%5\n"
-       PPC405_ERR77(0,%3)
-"      stwcx.  %1,0,%3\n\
-       bne-    1b"
-       : "=&r" (old), "=&r" (tmp), "=m" (*p)
-       : "r" (p), "r" (clr), "r" (set), "m" (*p)
-       : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
-       unsigned long old = pte_val(*p);
-       *p = __pte((old & ~clr) | set);
-#endif /* !PTE_ATOMIC_UPDATES */
-
-#ifdef CONFIG_44x
-       if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
-               icache_44x_need_flush = 1;
-#endif
-       return old;
-}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
-                                           unsigned long clr,
-                                           unsigned long set)
-{
-#ifdef PTE_ATOMIC_UPDATES
-       unsigned long long old;
-       unsigned long tmp;
-
-       __asm__ __volatile__("\
-1:     lwarx   %L0,0,%4\n\
-       lwzx    %0,0,%3\n\
-       andc    %1,%L0,%5\n\
-       or      %1,%1,%6\n"
-       PPC405_ERR77(0,%3)
-"      stwcx.  %1,0,%4\n\
-       bne-    1b"
-       : "=&r" (old), "=&r" (tmp), "=m" (*p)
-       : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
-       : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
-       unsigned long long old = pte_val(*p);
-       *p = __pte((old & ~(unsigned long long)clr) | set);
-#endif /* !PTE_ATOMIC_UPDATES */
-
-#ifdef CONFIG_44x
-       if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
-               icache_44x_need_flush = 1;
-#endif
-       return old;
-}
-#endif /* CONFIG_PTE_64BIT */
-
-/*
- * 2.6 calls this without flushing the TLB entry; this is wrong
- * for our hash-based implementation, we fix that up here.
- */
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
-{
-       unsigned long old;
-       old = pte_update(ptep, _PAGE_ACCESSED, 0);
-#if _PAGE_HASHPTE != 0
-       if (old & _PAGE_HASHPTE) {
-               unsigned long ptephys = __pa(ptep) & PAGE_MASK;
-               flush_hash_pages(context, addr, ptephys, 1);
-       }
-#endif
-       return (old & _PAGE_ACCESSED) != 0;
-}
-#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-       __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                                      pte_t *ptep)
-{
-       return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
-                                     pte_t *ptep)
-{
-       pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
-}
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-                                          unsigned long addr, pte_t *ptep)
-{
-       ptep_set_wrprotect(mm, addr, ptep);
-}
-
-
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
-{
-       unsigned long set = pte_val(entry) &
-               (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-       unsigned long clr = ~pte_val(entry) & _PAGE_RO;
-
-       pte_update(ptep, clr, set);
-}
-
-#define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B)  (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
-
-/*
- * Note that on Book E processors, the pmd contains the kernel virtual
- * (lowmem) address of the pte page.  The physical address is less useful
- * because everything runs with translation enabled (even the TLB miss
- * handler).  On everything else the pmd contains the physical address
- * of the pte page.  -- paulus
- */
-#ifndef CONFIG_BOOKE
-#define pmd_page_vaddr(pmd)    \
-       ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-#define pmd_page(pmd)          \
-       pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-#else
-#define pmd_page_vaddr(pmd)    \
-       ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
-#define pmd_page(pmd)          \
-       pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
-#endif
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address)      ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address)         ((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address)             \
-       (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr)   \
-       ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr)              \
-       ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
-#define pte_unmap(pte)         kunmap_atomic(pte)
-
-/*
- * Encode and decode a swap entry.
- * Note that the bits we use in a PTE for representing a swap entry
- * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
- *   -- paulus
- */
-#define __swp_type(entry)              ((entry).val & 0x1f)
-#define __swp_offset(entry)            ((entry).val >> 5)
-#define __swp_entry(type, offset)      ((swp_entry_t) { (type) | ((offset) << 5) })
-#define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val(pte) >> 3 })
-#define __swp_entry_to_pte(x)          ((pte_t) { (x).val << 3 })
-
-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-#endif
-
-extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
-                     pmd_t **pmdp);
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */
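
For reference, the two-level 32-bit layout this deleted file described (its content is now split into the new nohash and book3s 32-bit headers, with asm/nohash/32/pgtable.h included above) covers the full 4GB with a single-page pgdir. A sketch of the arithmetic, assuming 4K pages and PTE_SHIFT = 10 (1024 32-bit PTEs per page):

    #include <assert.h>

    #define PAGE_SHIFT  12
    #define PTE_SHIFT   10              /* assumed: 1024 PTEs per page */
    #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)

    int main(void)
    {
            /* 1024 pgdir entries, each mapping 4MB, cover 32 bits */
            assert((1u << (32 - PGDIR_SHIFT)) == 1024);
            assert((1ul << PGDIR_SHIFT) == 4ul * 1024 * 1024);
            return 0;
    }
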
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
deleted file mode 100644 (file)
index 132ee1d..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-#ifndef _ASM_POWERPC_PGTABLE_PPC64_4K_H
-#define _ASM_POWERPC_PGTABLE_PPC64_4K_H
-/*
- * Entries per page directory level.  The PTE level must use a 64b record
- * for each page table entry.  The PMD and PGD level use a 32b record for
- * each entry by assuming that each entry is page aligned.
- */
-#define PTE_INDEX_SIZE  9
-#define PMD_INDEX_SIZE  7
-#define PUD_INDEX_SIZE  9
-#define PGD_INDEX_SIZE  9
-
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
-#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
-#endif /* __ASSEMBLY__ */
-
-#define PTRS_PER_PTE   (1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD   (1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PUD   (1 << PUD_INDEX_SIZE)
-#define PTRS_PER_PGD   (1 << PGD_INDEX_SIZE)
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT      (PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE       (1UL << PMD_SHIFT)
-#define PMD_MASK       (~(PMD_SIZE-1))
-
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT      PMD_SHIFT
-
-/* PUD_SHIFT determines what a third-level page table entry can map */
-#define PUD_SHIFT      (PMD_SHIFT + PMD_INDEX_SIZE)
-#define PUD_SIZE       (1UL << PUD_SHIFT)
-#define PUD_MASK       (~(PUD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
-#define PGDIR_SHIFT    (PUD_SHIFT + PUD_INDEX_SIZE)
-#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK     (~(PGDIR_SIZE-1))
-
-/* Bits to mask out from a PMD to get to the PTE page */
-#define PMD_MASKED_BITS                0
-/* Bits to mask out from a PUD to get to the PMD page */
-#define PUD_MASKED_BITS                0
-/* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS                0
-
-
-/*
- * 4-level page tables related bits
- */
-
-#define pgd_none(pgd)          (!pgd_val(pgd))
-#define pgd_bad(pgd)           (pgd_val(pgd) == 0)
-#define pgd_present(pgd)       (pgd_val(pgd) != 0)
-#define pgd_clear(pgdp)                (pgd_val(*(pgdp)) = 0)
-#define pgd_page_vaddr(pgd)    (pgd_val(pgd) & ~PGD_MASKED_BITS)
-
-#ifndef __ASSEMBLY__
-
-static inline pte_t pgd_pte(pgd_t pgd)
-{
-       return __pte(pgd_val(pgd));
-}
-
-static inline pgd_t pte_pgd(pte_t pte)
-{
-       return __pgd(pte_val(pte));
-}
-extern struct page *pgd_page(pgd_t pgd);
-
-#endif /* !__ASSEMBLY__ */
-
-#define pud_offset(pgdp, addr) \
-  (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
-    (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
-#define pud_ERROR(e) \
-       pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
-
-/*
- * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */
-#define remap_4k_pfn(vma, addr, pfn, prot)     \
-       remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
-
-#endif /* _ASM_POWERPC_PGTABLE_PPC64_4K_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
deleted file mode 100644 (file)
index 1de35bb..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _ASM_POWERPC_PGTABLE_PPC64_64K_H
-#define _ASM_POWERPC_PGTABLE_PPC64_64K_H
-
-#include <asm-generic/pgtable-nopud.h>
-
-
-#define PTE_INDEX_SIZE  8
-#define PMD_INDEX_SIZE  10
-#define PUD_INDEX_SIZE 0
-#define PGD_INDEX_SIZE  12
-
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
-#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
-#endif /* __ASSEMBLY__ */
-
-#define PTRS_PER_PTE   (1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD   (1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PGD   (1 << PGD_INDEX_SIZE)
-
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT      PAGE_SHIFT
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT      (PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE       (1UL << PMD_SHIFT)
-#define PMD_MASK       (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT    (PMD_SHIFT + PMD_INDEX_SIZE)
-#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK     (~(PGDIR_SIZE-1))
-
-/* Bits to mask out from a PMD to get to the PTE page */
-/* PMDs point to PTE table fragments which are 4K aligned.  */
-#define PMD_MASKED_BITS                0xfff
-/* Bits to mask out from a PGD/PUD to get to the PMD page */
-#define PUD_MASKED_BITS                0x1ff
-
-#define pgd_pte(pgd)   (pud_pte(((pud_t){ pgd })))
-#define pte_pgd(pte)   ((pgd_t)pte_pud(pte))
-
-#endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
deleted file mode 100644 (file)
index 3245f2d..0000000
+++ /dev/null
@@ -1,626 +0,0 @@
-#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
-#define _ASM_POWERPC_PGTABLE_PPC64_H_
-/*
- * This file contains the functions and defines necessary to modify and use
- * the ppc64 hashed page table.
- */
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pgtable-ppc64-64k.h>
-#else
-#include <asm/pgtable-ppc64-4k.h>
-#endif
-#include <asm/barrier.h>
-
-#define FIRST_USER_ADDRESS     0UL
-
-/*
- * Size of EA range mapped by our pagetables.
- */
-#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-                           PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
-#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define PMD_CACHE_INDEX        (PMD_INDEX_SIZE + 1)
-#else
-#define PMD_CACHE_INDEX        PMD_INDEX_SIZE
-#endif
-/*
- * Define the address range of the kernel non-linear virtual area
- */
-
-#ifdef CONFIG_PPC_BOOK3E
-#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
-#else
-#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
-#endif
-#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
-
-/*
- * The vmalloc space starts at the beginning of that region, and
- * occupies half of it on hash CPUs and a quarter of it on Book3E
- * (we keep a quarter for the virtual memmap)
- */
-#define VMALLOC_START  KERN_VIRT_START
-#ifdef CONFIG_PPC_BOOK3E
-#define VMALLOC_SIZE   (KERN_VIRT_SIZE >> 2)
-#else
-#define VMALLOC_SIZE   (KERN_VIRT_SIZE >> 1)
-#endif
-#define VMALLOC_END    (VMALLOC_START + VMALLOC_SIZE)
-
-/*
- * The second half of the kernel virtual space is used for IO mappings,
- * it's itself carved into the PIO region (ISA and PHB IO space) and
- * the ioremap space
- *
- *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
- *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
- * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
- */
-#define KERN_IO_START  (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
-#define FULL_IO_SIZE   0x80000000ul
-#define  ISA_IO_BASE   (KERN_IO_START)
-#define  ISA_IO_END    (KERN_IO_START + 0x10000ul)
-#define  PHB_IO_BASE   (ISA_IO_END)
-#define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
-#define IOREMAP_BASE   (PHB_IO_END)
-#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE)
-
-
-/*
- * Region IDs
- */
-#define REGION_SHIFT           60UL
-#define REGION_MASK            (0xfUL << REGION_SHIFT)
-#define REGION_ID(ea)          (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define VMALLOC_REGION_ID      (REGION_ID(VMALLOC_START))
-#define KERNEL_REGION_ID       (REGION_ID(PAGE_OFFSET))
-#define VMEMMAP_REGION_ID      (0xfUL) /* Server only */
-#define USER_REGION_ID         (0UL)
-
-/*
- * Defines the address of the vmemap area, in its own region on
- * hash table CPUs and after the vmalloc space on Book3E
- */
-#ifdef CONFIG_PPC_BOOK3E
-#define VMEMMAP_BASE           VMALLOC_END
-#define VMEMMAP_END            KERN_IO_START
-#else
-#define VMEMMAP_BASE           (VMEMMAP_REGION_ID << REGION_SHIFT)
-#endif
-#define vmemmap                        ((struct page *)VMEMMAP_BASE)
-
-
-/*
- * Include the PTE bits definitions
- */
-#ifdef CONFIG_PPC_BOOK3S
-#include <asm/pte-hash64.h>
-#else
-#include <asm/pte-book3e.h>
-#endif
-#include <asm/pte-common.h>
-
-#ifdef CONFIG_PPC_MM_SLICES
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#endif /* CONFIG_PPC_MM_SLICES */
-
-#ifndef __ASSEMBLY__
-
-/*
- * This is the default implementation of various PTE accessors; it's
- * used in all cases except Book3S with 64K pages, where we have a
- * concept of sub-pages
- */
-#ifndef __real_pte
-
-#ifdef CONFIG_STRICT_MM_TYPECHECKS
-#define __real_pte(e,p)                ((real_pte_t){(e)})
-#define __rpte_to_pte(r)       ((r).pte)
-#else
-#define __real_pte(e,p)                (e)
-#define __rpte_to_pte(r)       (__pte(r))
-#endif
-#define __rpte_to_hidx(r,index)        (pte_val(__rpte_to_pte(r)) >> 12)
-
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
-       do {                                                             \
-               index = 0;                                               \
-               shift = mmu_psize_defs[psize].shift;                     \
-
-#define pte_iterate_hashed_end() } while(0)
-
-/*
- * We expect this to be called only for user addresses or kernel virtual
- * addresses other than the linear mapping.
- */
-#define pte_pagesize_index(mm, addr, pte)      MMU_PAGE_4K
-
-#endif /* __real_pte */
-
-
-/* pte_clear moved to later in this file */
-
-#define PMD_BAD_BITS           (PTE_TABLE_SIZE-1)
-#define PUD_BAD_BITS           (PMD_TABLE_SIZE-1)
-
-#define pmd_set(pmdp, pmdval)  (pmd_val(*(pmdp)) = (pmdval))
-#define pmd_none(pmd)          (!pmd_val(pmd))
-#define        pmd_bad(pmd)            (!is_kernel_addr(pmd_val(pmd)) \
-                                || (pmd_val(pmd) & PMD_BAD_BITS))
-#define        pmd_present(pmd)        (!pmd_none(pmd))
-#define        pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0)
-#define pmd_page_vaddr(pmd)    (pmd_val(pmd) & ~PMD_MASKED_BITS)
-extern struct page *pmd_page(pmd_t pmd);
-
-#define pud_set(pudp, pudval)  (pud_val(*(pudp)) = (pudval))
-#define pud_none(pud)          (!pud_val(pud))
-#define        pud_bad(pud)            (!is_kernel_addr(pud_val(pud)) \
-                                || (pud_val(pud) & PUD_BAD_BITS))
-#define pud_present(pud)       (pud_val(pud) != 0)
-#define pud_clear(pudp)                (pud_val(*(pudp)) = 0)
-#define pud_page_vaddr(pud)    (pud_val(pud) & ~PUD_MASKED_BITS)
-
-extern struct page *pud_page(pud_t pud);
-
-static inline pte_t pud_pte(pud_t pud)
-{
-       return __pte(pud_val(pud));
-}
-
-static inline pud_t pte_pud(pte_t pte)
-{
-       return __pud(pte_val(pte));
-}
-#define pud_write(pud)         pte_write(pud_pte(pud))
-#define pgd_set(pgdp, pudp)    ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
-#define pgd_write(pgd)         pte_write(pgd_pte(pgd))
-
-/*
- * Find an entry in a page-table-directory.  We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset(mm, address)         ((mm)->pgd + pgd_index(address))
-
-#define pmd_offset(pudp,addr) \
-  (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-#define pte_offset_kernel(dir,addr) \
-  (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte)                 do { } while(0)
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
-                           pte_t *ptep, unsigned long pte, int huge);
-
-/* Atomic PTE updates */
-static inline unsigned long pte_update(struct mm_struct *mm,
-                                      unsigned long addr,
-                                      pte_t *ptep, unsigned long clr,
-                                      unsigned long set,
-                                      int huge)
-{
-#ifdef PTE_ATOMIC_UPDATES
-       unsigned long old, tmp;
-
-       __asm__ __volatile__(
-       "1:     ldarx   %0,0,%3         # pte_update\n\
-       andi.   %1,%0,%6\n\
-       bne-    1b \n\
-       andc    %1,%0,%4 \n\
-       or      %1,%1,%7\n\
-       stdcx.  %1,0,%3 \n\
-       bne-    1b"
-       : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
-       : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
-       : "cc" );
-#else
-       unsigned long old = pte_val(*ptep);
-       *ptep = __pte((old & ~clr) | set);
-#endif
-       /* huge pages use the old page table lock */
-       if (!huge)
-               assert_pte_locked(mm, addr);
-
-#ifdef CONFIG_PPC_STD_MMU_64
-       if (old & _PAGE_HASHPTE)
-               hpte_need_flush(mm, addr, ptep, old, huge);
-#endif
-
-       return old;
-}
-
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
-                                             unsigned long addr, pte_t *ptep)
-{
-       unsigned long old;
-
-       if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
-               return 0;
-       old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
-       return (old & _PAGE_ACCESSED) != 0;
-}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __addr, __ptep)                  \
-({                                                                        \
-       int __r;                                                           \
-       __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
-       __r;                                                               \
-})
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
-                                     pte_t *ptep)
-{
-
-       if ((pte_val(*ptep) & _PAGE_RW) == 0)
-               return;
-
-       pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
-}
-
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-                                          unsigned long addr, pte_t *ptep)
-{
-       if ((pte_val(*ptep) & _PAGE_RW) == 0)
-               return;
-
-       pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
-}
-
-/*
- * We currently remove entries from the hashtable regardless of whether
- * the entry was young or dirty. The generic routines only flush if the
- * entry was young or dirty, which is not good enough.
- *
- * We should be more intelligent about this but for the moment we override
- * these functions and force a tlb flush unconditionally
- */
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep)               \
-({                                                                     \
-       int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
-                                                 __ptep);              \
-       __young;                                                        \
-})
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
-                                      unsigned long addr, pte_t *ptep)
-{
-       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
-       return __pte(old);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
-                            pte_t * ptep)
-{
-       pte_update(mm, addr, ptep, ~0UL, 0, 0);
-}
-
-
-/* Set the dirty and/or accessed bits atomically in a linux PTE; this
- * function doesn't need to flush the hash entry
- */
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
-{
-       unsigned long bits = pte_val(entry) &
-               (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-
-#ifdef PTE_ATOMIC_UPDATES
-       unsigned long old, tmp;
-
-       __asm__ __volatile__(
-       "1:     ldarx   %0,0,%4\n\
-               andi.   %1,%0,%6\n\
-               bne-    1b \n\
-               or      %0,%3,%0\n\
-               stdcx.  %0,0,%4\n\
-               bne-    1b"
-       :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
-       :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
-       :"cc");
-#else
-       unsigned long old = pte_val(*ptep);
-       *ptep = __pte(old | bits);
-#endif
-}
-
-#define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B)  (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
-
-#define pte_ERROR(e) \
-       pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pmd_ERROR(e) \
-       pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pgd_ERROR(e) \
-       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/* Encode and de-code a swap entry */
-#define MAX_SWAPFILES_CHECK() do { \
-       BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
-       /*                                                      \
-        * Don't have overlapping bits with _PAGE_HPTEFLAGS.    \
-        * We filter HPTEFLAGS on set_pte.                      \
-        */                                                     \
-       BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
-       } while (0)
-/*
- * on pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT;
- */
-#define SWP_TYPE_BITS 5
-#define __swp_type(x)          (((x).val >> _PAGE_BIT_SWAP_TYPE) \
-                               & ((1UL << SWP_TYPE_BITS) - 1))
-#define __swp_offset(x)                ((x).val >> PTE_RPN_SHIFT)
-#define __swp_entry(type, offset)      ((swp_entry_t) { \
-                                       ((type) << _PAGE_BIT_SWAP_TYPE) \
-                                       | ((offset) << PTE_RPN_SHIFT) })
-
-#define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val((pte)) })
-#define __swp_entry_to_pte(x)          __pte((x).val)
-
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
-#endif /* __ASSEMBLY__ */
-
-/*
- * THP pages can't be special, so we reuse _PAGE_SPECIAL for splitting.
- */
-#define _PAGE_SPLITTING _PAGE_SPECIAL
-
-/*
- * We need to differentiate between explicit huge pages and THP huge
- * pages, since a THP huge page also needs to track real subpage details
- */
-#define _PAGE_THP_HUGE  _PAGE_4K_PFN
-
-/*
- * set of bits not changed in pmd_modify.
- */
-#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS |              \
-                        _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
-                        _PAGE_THP_HUGE)
-
-#ifndef __ASSEMBLY__
-/*
- * The linux hugepage PMD now includes the pmd entries followed by the address
- * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits.
- * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte for
- * each HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries and
- * with 4K HPTEs we need 4096 entries. Both will fit in a 4K pgtable_t.
- *
- * The last three bits are intentionally left as zero. These memory locations
- * are also used as normal page PTE pointers. So if we have any pointers
- * left around while we collapse a hugepage, we need to make sure the
- * _PAGE_PRESENT bit of them is zero when we look at them.
- */
-static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
-{
-       return (hpte_slot_array[index] >> 3) & 0x1;
-}
-
-static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
-                                          int index)
-{
-       return hpte_slot_array[index] >> 4;
-}
-
-static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
-                                       unsigned int index, unsigned int hidx)
-{
-       hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
-}
-
-struct page *realmode_pfn_to_page(unsigned long pfn);
-
-static inline char *get_hpte_slot_array(pmd_t *pmdp)
-{
-       /*
- * The hpte hidx is stored in the pgtable whose address is in the
-        * second half of the PMD
-        *
-        * Order this load with the test for pmd_trans_huge in the caller
-        */
-       smp_rmb();
-       return *(char **)(pmdp + PTRS_PER_PMD);
-
-
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
-                                  pmd_t *pmdp, unsigned long old_pmd);
-extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
-extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
-extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
-extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                      pmd_t *pmdp, pmd_t pmd);
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-                                pmd_t *pmd);
-/*
- *
- * For core kernel code, by design, pmd_trans_huge is never run on any
- * hugetlbfs page. The hugetlbfs page table walking and mangling paths are
- * totally separated from the core VM paths and they're differentiated by
- * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
- *
- * pmd_trans_huge() is defined as false at build time if
- * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build
- * time in such case.
- *
- * For ppc64 we need to differentiate explicit hugepages from THP, because
- * for THP we also track the subpage details at the pmd level. We don't do
- * that for explicit huge pages.
- *
- */
-static inline int pmd_trans_huge(pmd_t pmd)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
-}
-
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
-       if (pmd_trans_huge(pmd))
-               return pmd_val(pmd) & _PAGE_SPLITTING;
-       return 0;
-}
-
-extern int has_transparent_hugepage(void);
-#else
-static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
-                                         unsigned long addr, pmd_t *pmdp,
-                                         unsigned long old_pmd)
-{
-
-       WARN(1, "%s called with THP disabled\n", __func__);
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-static inline int pmd_large(pmd_t pmd)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return ((pmd_val(pmd) & 0x3) != 0x0);
-}
-
-static inline pte_t pmd_pte(pmd_t pmd)
-{
-       return __pte(pmd_val(pmd));
-}
-
-static inline pmd_t pte_pmd(pte_t pte)
-{
-       return __pmd(pte_val(pte));
-}
-
-static inline pte_t *pmdp_ptep(pmd_t *pmd)
-{
-       return (pte_t *)pmd;
-}
-
-#define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
-#define pmd_dirty(pmd)         pte_dirty(pmd_pte(pmd))
-#define pmd_young(pmd)         pte_young(pmd_pte(pmd))
-#define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
-#define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
-#define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
-#define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
-
-#define __HAVE_ARCH_PMD_WRITE
-#define pmd_write(pmd)         pte_write(pmd_pte(pmd))
-
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
-{
-       /* Do nothing, mk_pmd() does this part.  */
-       return pmd;
-}
-
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-       pmd_val(pmd) &= ~_PAGE_PRESENT;
-       return pmd;
-}
-
-static inline pmd_t pmd_mksplitting(pmd_t pmd)
-{
-       pmd_val(pmd) |= _PAGE_SPLITTING;
-       return pmd;
-}
-
-#define __HAVE_ARCH_PMD_SAME
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-       return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
-}
-
-#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
-extern int pmdp_set_access_flags(struct vm_area_struct *vma,
-                                unsigned long address, pmd_t *pmdp,
-                                pmd_t entry, int dirty);
-
-extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
-                                        unsigned long addr,
-                                        pmd_t *pmdp,
-                                        unsigned long clr,
-                                        unsigned long set);
-
-static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
-                                             unsigned long addr, pmd_t *pmdp)
-{
-       unsigned long old;
-
-       if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
-               return 0;
-       old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
-       return ((old & _PAGE_ACCESSED) != 0);
-}
-
-#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
-                                    unsigned long address, pmd_t *pmdp);
-#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
-                                 unsigned long address, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
-extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
-                                    unsigned long addr, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_SET_WRPROTECT
-static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
-                                     pmd_t *pmdp)
-{
-
-       if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
-               return;
-
-       pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
-}
-
-#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern void pmdp_splitting_flush(struct vm_area_struct *vma,
-                                unsigned long address, pmd_t *pmdp);
-
-extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
-                                unsigned long address, pmd_t *pmdp);
-#define pmdp_collapse_flush pmdp_collapse_flush
-
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                                      pgtable_t pgtable);
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
-                           pmd_t *pmdp);
-
-#define pmd_move_must_withdraw pmd_move_must_withdraw
-struct spinlock;
-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
-                                        struct spinlock *old_pmd_ptl)
-{
-       /*
-        * Archs like ppc64 use pgtable to store per pmd
-        * specific information. So when we switch the pmd,
-        * we should also withdraw and deposit the pgtable
-        */
-       return true;
-}
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
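
The swap-entry packing deleted above is easy to get wrong, so here is a hedged, userspace-only sketch of the round-trip (the bit positions are the 4K hash64 values seen later in this diff; the swp_* helpers are local stand-ins, not the kernel macros):

    #include <assert.h>
    #include <stdio.h>

    #define SWP_TYPE_BITS       5   /* as in the deleted header */
    #define PAGE_BIT_SWAP_TYPE  2   /* _PAGE_BIT_SWAP_TYPE, hash64 value */
    #define PTE_RPN_SHIFT       17  /* 4K hash64 value; 64K differs */

    /* pack a (type, offset) pair the way __swp_entry() did */
    static unsigned long swp_entry(unsigned long type, unsigned long offset)
    {
            return (type << PAGE_BIT_SWAP_TYPE) | (offset << PTE_RPN_SHIFT);
    }

    static unsigned long swp_type(unsigned long val)
    {
            return (val >> PAGE_BIT_SWAP_TYPE) & ((1UL << SWP_TYPE_BITS) - 1);
    }

    static unsigned long swp_offset(unsigned long val)
    {
            return val >> PTE_RPN_SHIFT;
    }

    int main(void)
    {
            unsigned long e = swp_entry(3, 0x12345);

            assert(swp_type(e) == 3 && swp_offset(e) == 0x12345);
            printf("entry=%#lx\n", e);
            return 0;
    }

The MAX_SWAPFILES_CHECK() above exists precisely because the type field must stay clear of _PAGE_HPTEFLAGS.
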
index b64b421..ac9fb11 100644 (file)
@@ -1,6 +1,5 @@
 #ifndef _ASM_POWERPC_PGTABLE_H
 #define _ASM_POWERPC_PGTABLE_H
-#ifdef __KERNEL__
 
 #ifndef __ASSEMBLY__
 #include <linux/mmdebug.h>
@@ -13,210 +12,20 @@ struct mm_struct;
 
 #endif /* !__ASSEMBLY__ */
 
-#if defined(CONFIG_PPC64)
-#  include <asm/pgtable-ppc64.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/pgtable.h>
 #else
-#  include <asm/pgtable-ppc32.h>
-#endif
-
-/*
- * We save the slot number & secondary bit in the second half of the
- * PTE page. We use the 8 bytes per each pte entry.
- */
-#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)
+#include <asm/nohash/pgtable.h>
+#endif /* !CONFIG_PPC_BOOK3S */
 
 #ifndef __ASSEMBLY__
 
 #include <asm/tlbflush.h>
 
-/* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte)
-{      return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; }
-static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)         { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_special(pte_t pte)       { return pte_val(pte) & _PAGE_SPECIAL; }
-static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte)   { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
-
-#ifdef CONFIG_NUMA_BALANCING
-/*
- * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
- * work for user pages and always return true for kernel pages.
- */
-static inline int pte_protnone(pte_t pte)
-{
-       return (pte_val(pte) &
-               (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
-}
-
-static inline int pmd_protnone(pmd_t pmd)
-{
-       return pte_protnone(pmd_pte(pmd));
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
-static inline int pte_present(pte_t pte)
-{
-       return pte_val(pte) & _PAGE_PRESENT;
-}
-
-/* Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * Even if PTEs can be unsigned long long, a PFN is always an unsigned
- * long for now.
- */
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
-       return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
-                    pgprot_val(pgprot)); }
-static inline unsigned long pte_pfn(pte_t pte) {
-       return pte_val(pte) >> PTE_RPN_SHIFT; }
-
 /* Keep these as a macros to avoid include dependency mess */
 #define pte_page(x)            pfn_to_page(pte_pfn(x))
 #define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
 
-/* Generic modifiers for PTE bits */
-static inline pte_t pte_wrprotect(pte_t pte) {
-       pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE);
-       pte_val(pte) |= _PAGE_RO; return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
-       pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
-       pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) {
-       pte_val(pte) &= ~_PAGE_RO;
-       pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
-       pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
-       pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
-       pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) {
-       return pte; }
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-       pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
-       return pte;
-}
-
-
-/* Insert a PTE, top-level function is out of line. It uses an inline
- * low level function in the respective pgtable-* files
- */
-extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
-                      pte_t pte);
-
-/* This low level function performs the actual PTE insertion.
- * Setting the PTE depends on the MMU type and other factors. It's
- * a horrible mess that I'm not going to try to clean up now but
- * I'm keeping it in one place rather than spread around
- */
-static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
-                               pte_t *ptep, pte_t pte, int percpu)
-{
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
-       /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
-        * helper pte_update() which does an atomic update. We need to do that
-        * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
-        * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
-        * the hash bits instead (ie, same as the non-SMP case)
-        */
-       if (percpu)
-               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-                             | (pte_val(pte) & ~_PAGE_HASHPTE));
-       else
-               pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
-       /* Second case is 32-bit with 64-bit PTE.  In this case, we
-        * can just store as long as we do the two halves in the right order
-        * with a barrier in between. This is possible because we take care,
-        * in the hash code, to pre-invalidate if the PTE was already hashed,
-        * which synchronizes us with any concurrent invalidation.
-        * In the percpu case, we also fallback to the simple update preserving
-        * the hash bits
-        */
-       if (percpu) {
-               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-                             | (pte_val(pte) & ~_PAGE_HASHPTE));
-               return;
-       }
-#if _PAGE_HASHPTE != 0
-       if (pte_val(*ptep) & _PAGE_HASHPTE)
-               flush_hash_entry(mm, ptep, addr);
-#endif
-       __asm__ __volatile__("\
-               stw%U0%X0 %2,%0\n\
-               eieio\n\
-               stw%U0%X0 %L2,%1"
-       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
-       : "r" (pte) : "memory");
-
-#elif defined(CONFIG_PPC_STD_MMU_32)
-       /* Third case is 32-bit hash table in UP mode; we need to preserve
-        * the _PAGE_HASHPTE bit since we may not have invalidated the previous
-        * translation in the hash yet (done in a subsequent flush_tlb_xxx())
-        * and so we need to keep track that this PTE needs invalidating
-        */
-       *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-                     | (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
-       /* Anything else just stores the PTE normally. That covers all 64-bit
-        * cases, and 32-bit non-hash with 32-bit PTEs.
-        */
-       *ptep = pte;
-
-#ifdef CONFIG_PPC_BOOK3E_64
-       /*
-        * With hardware tablewalk, a sync is needed to ensure that
-        * subsequent accesses see the PTE we just wrote.  Unlike userspace
-        * mappings, we can't tolerate spurious faults, so make sure
-        * the new PTE will be seen the first time.
-        */
-       if (is_kernel_addr(addr))
-               mb();
-#endif
-#endif
-}
-
-
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
-                                pte_t *ptep, pte_t entry, int dirty);
-
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-
-#define _PAGE_CACHE_CTL        (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
-                        _PAGE_WRITETHRU)
-
-#define pgprot_noncached(prot)   (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
-                                           _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
-                                           _PAGE_NO_CACHE))
-
-#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
-                                           _PAGE_COHERENT))
-
-#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
-                                           _PAGE_COHERENT | _PAGE_WRITETHRU))
-
-#define pgprot_cached_noncoherent(prot) \
-               (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
-
-#define pgprot_writecombine pgprot_noncached_wc
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-                                    unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -271,5 +80,4 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 }
 #endif /* __ASSEMBLY__ */
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGTABLE_H */
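
One subtlety in the accessors moved out of this file: pte_write() tests the _PAGE_RW/_PAGE_RO pair so a single definition covers both RW-flag platforms and 8xx, where read-only is the flagged state. A hedged, standalone model (bit values illustrative, not any platform's real layout):

    #include <assert.h>

    /* RW-based platform: _PAGE_RO is 0 (pte-common.h provides that fallback).
     * An 8xx-style build would instead have PAGE_RW == 0 and PAGE_RO != 0. */
    #define PAGE_RW 0x040
    #define PAGE_RO 0x000

    static int pte_write(unsigned long pte)
    {
            /* writable unless the RW/RO pair reads as "read-only" */
            return (pte & (PAGE_RW | PAGE_RO)) != PAGE_RO;
    }

    int main(void)
    {
            assert(pte_write(PAGE_RW));
            assert(!pte_write(0));
            return 0;
    }
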
index 67859ed..1b39424 100644 (file)
@@ -201,6 +201,23 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
        return rc;
 }
 
+/*
+ * ptes must be 8*sizeof(unsigned long)
+ */
+static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
+                                   unsigned long *ptes)
+{
+       long rc;
+       unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+       rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);
+
+       memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+       return rc;
+}
+
 /*
  * plpar_pte_read_4_raw can be called in real mode.
  * ptes must be 8*sizeof(unsigned long)
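
A hedged usage sketch for the plpar_pte_read_4() added above (the caller below is hypothetical, not part of this patch): one H_READ with the H_READ_4 flag returns four HPTE pairs, i.e. eight unsigned longs:

    /* kernel-context sketch; assumes the usual hvcall.h definitions */
    static void dump_hpte_group(unsigned long ptex)
    {
            unsigned long ptes[8];   /* 4 x {hpte_v, hpte_r} */
            int i;

            if (plpar_pte_read_4(0, ptex, ptes) != H_SUCCESS)
                    return;
            for (i = 0; i < 4; i++)
                    pr_info("hpte %lu: v=%#lx r=%#lx\n",
                            ptex + i, ptes[2 * i], ptes[2 * i + 1]);
    }
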
index dd0fc18..499d9f8 100644 (file)
@@ -413,24 +413,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
        FTR_SECTION_ELSE_NESTED(848);   \
        mtocrf (FXM), RS;               \
        ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
-
-/*
- * PPR restore macros used in entry_64.S
- * Used for P7 or later processors
- */
-#define HMT_MEDIUM_LOW_HAS_PPR                                         \
-BEGIN_FTR_SECTION_NESTED(944)                                          \
-       HMT_MEDIUM_LOW;                                                 \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,944)
-
-#define SET_DEFAULT_THREAD_PPR(ra, rb)                                 \
-BEGIN_FTR_SECTION_NESTED(945)                                          \
-       lis     ra,INIT_PPR@highest;    /* default ppr=3 */             \
-       ld      rb,PACACURRENT(r13);                                    \
-       sldi    ra,ra,32;       /* 11- 13 bits are used for ppr */      \
-       std     ra,TASKTHREADPPR(rb);                                   \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
-
 #endif
 
 /*
index 5afea36..ac23308 100644 (file)
@@ -88,12 +88,6 @@ struct task_struct;
 void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
 void release_thread(struct task_struct *);
 
-/* Lazy FPU handling on uni-processor */
-extern struct task_struct *last_task_used_math;
-extern struct task_struct *last_task_used_altivec;
-extern struct task_struct *last_task_used_vsx;
-extern struct task_struct *last_task_used_spe;
-
 #ifdef CONFIG_PPC32
 
 #if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
@@ -294,6 +288,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_PPC64
        unsigned long   dscr;
+       unsigned long   fscr;
        /*
         * This member element dscr_inherit indicates that the process
         * has explicitly attempted and changed the DSCR register value
@@ -385,8 +380,6 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
 extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
 extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
 
-extern void fp_enable(void);
-extern void vec_enable(void);
 extern void load_fp_state(struct thread_fp_state *fp);
 extern void store_fp_state(struct thread_fp_state *fp);
 extern void load_vr_state(struct thread_vr_state *vr);
diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/pte-40x.h
deleted file mode 100644 (file)
index 486b1ef..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef _ASM_POWERPC_PTE_40x_H
-#define _ASM_POWERPC_PTE_40x_H
-#ifdef __KERNEL__
-
-/*
- * At present, all PowerPC 400-class processors share a similar TLB
- * architecture. The instruction and data sides share a unified,
- * 64-entry, fully-associative TLB which is maintained totally under
- * software control. In addition, the instruction side has a
- * hardware-managed, 4-entry, fully-associative TLB which serves as a
- * first level to the shared TLB. These two TLBs are known as the UTLB
- * and ITLB, respectively (see "mmu.h" for definitions).
- *
- * There are several potential gotchas here.  The 40x hardware TLBLO
- * field looks like this:
- *
- * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
- *
- * Where possible we make the Linux PTE bits match up with this
- *
- * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
- *   support down to 1k pages), this is done in the TLBMiss exception
- *   handler.
- * - We use only zones 0 (for kernel pages) and 1 (for user pages)
- *   of the 16 available.  Bit 24-26 of the TLB are cleared in the TLB
- *   miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
- *   zone.
- * - PRESENT *must* be in the bottom two bits because swap cache
- *   entries use the top 30 bits.  Because 40x doesn't support SMP
- *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
- *   is cleared in the TLB miss handler before the TLB entry is loaded.
- * - All other bits of the PTE are loaded into TLBLO without
- *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
- *   software PTE bits.  We actually use bits 21, 24, 25, and
- *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
- *   PRESENT.
- */
-
-#define        _PAGE_GUARDED   0x001   /* G: page is guarded from prefetch */
-#define _PAGE_PRESENT  0x002   /* software: PTE contains a translation */
-#define        _PAGE_NO_CACHE  0x004   /* I: caching is inhibited */
-#define        _PAGE_WRITETHRU 0x008   /* W: caching is write-through */
-#define        _PAGE_USER      0x010   /* matches one of the zone permission bits */
-#define        _PAGE_SPECIAL   0x020   /* software: Special page */
-#define        _PAGE_RW        0x040   /* software: Writes permitted */
-#define        _PAGE_DIRTY     0x080   /* software: dirty page */
-#define _PAGE_HWWRITE  0x100   /* hardware: Dirty & RW, set in exception */
-#define _PAGE_EXEC     0x200   /* hardware: EX permission */
-#define _PAGE_ACCESSED 0x400   /* software: R: page referenced */
-
-#define _PMD_PRESENT   0x400   /* PMD points to page of PTEs */
-#define _PMD_BAD       0x802
-#define _PMD_SIZE      0x0e0   /* size field, != 0 for large-page PMD entry */
-#define _PMD_SIZE_4M   0x0c0
-#define _PMD_SIZE_16M  0x0e0
-
-#define PMD_PAGE_SIZE(pmdval)  (1024 << (((pmdval) & _PMD_SIZE) >> 4))
-
-/* Until my rework is finished, 40x still needs atomic PTE updates */
-#define PTE_ATOMIC_UPDATES     1
-
-#endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_40x_H */
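
The PMD_PAGE_SIZE() formula in the deleted 40x header packs log2(size/1K) into bits 4-7 of the PMD value; a hedged standalone check of that arithmetic:

    #include <assert.h>

    #define PMD_SIZE 0x0e0   /* the _PMD_SIZE field mask */
    #define PMD_PAGE_SIZE(pmdval)  (1024UL << (((pmdval) & PMD_SIZE) >> 4))

    int main(void)
    {
            assert(PMD_PAGE_SIZE(0x0c0) == 4UL << 20);    /* _PMD_SIZE_4M  */
            assert(PMD_PAGE_SIZE(0x0e0) == 16UL << 20);   /* _PMD_SIZE_16M */
            return 0;
    }
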
diff --git a/arch/powerpc/include/asm/pte-44x.h b/arch/powerpc/include/asm/pte-44x.h
deleted file mode 100644 (file)
index 36f75fa..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef _ASM_POWERPC_PTE_44x_H
-#define _ASM_POWERPC_PTE_44x_H
-#ifdef __KERNEL__
-
-/*
- * Definitions for PPC440
- *
- * Because of the 3 word TLB entries to support 36-bit addressing,
- * the attributes are difficult to map in such a fashion that they
- * are easily loaded during exception processing.  I decided to
- * organize the entry so the ERPN is the only portion in the
- * upper word of the PTE and the attribute bits below are packed
- * in as sensibly as they can be in the area below a 4KB page size
- * oriented RPN.  This at least makes it easy to load the RPN and
- * ERPN fields in the TLB. -Matt
- *
- * This isn't entirely true anymore; at least some bits are now
- * easier to move into the TLB from the PTE. -BenH.
- *
- * Note that these bits preclude future use of a page size
- * less than 4KB.
- *
- *
- * PPC 440 core has following TLB attribute fields;
- *
- *   TLB1:
- *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- *   RPN.................................  -  -  -  -  -  - ERPN.......
- *
- *   TLB2:
- *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- *   -  -  -  -  -    - U0 U1 U2 U3 W  I  M  G  E   - UX UW UR SX SW SR
- *
- * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
- * TLB2 storage attribute fields. Those are:
- *
- *   TLB2:
- *   0...10    11   12   13   14   15   16...31
- *   no change WL1  IL1I IL1D IL2I IL2D no change
- *
- * There are some constraints and options when deciding how to map software
- * bits into the TLB entry.
- *
- *   - PRESENT *must* be in the bottom three bits because swap cache
- *     entries use the top 29 bits for TLB2.
- *
- *   - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
- *     because they don't support SMP. However, some later 460 variants
- *     have -some- form of SMP support and so I keep the bit there for
- *     future use
- *
- * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
- * for memory protection related functions (see PTE structure in
- * include/asm-ppc/mmu.h).  The _PAGE_XXX definitions in this file map to the
- * above bits.  Note that the bit values are CPU specific, not architecture
- * specific.
- *
- * The kernel PTE entry holds an arch-dependent swp_entry structure under
- * certain situations. In other words, in such situations some portion of
- * the PTE bits are used as a swp_entry. In the PPC implementation, the
- * 3rd-24th LSBs are shared with the swp_entry; however, the lowest three
- * LSBs (0-2) still hold protection values. That means the three protection
- * bits are reserved for both the PTE and the SWAP entry at the three least
- * significant bits.
- *
- * There are three protection bits available for SWAP entry:
- *     _PAGE_PRESENT
- *     _PAGE_HASHPTE (if the HW has it)
- *
- * So those three bits have to be inside the 0-2nd LSBs of the PTE.
- *
- */
-
-#define _PAGE_PRESENT  0x00000001              /* S: PTE valid */
-#define _PAGE_RW       0x00000002              /* S: Write permission */
-#define _PAGE_EXEC     0x00000004              /* H: Execute permission */
-#define _PAGE_ACCESSED 0x00000008              /* S: Page referenced */
-#define _PAGE_DIRTY    0x00000010              /* S: Page dirty */
-#define _PAGE_SPECIAL  0x00000020              /* S: Special page */
-#define _PAGE_USER     0x00000040              /* S: User page */
-#define _PAGE_ENDIAN   0x00000080              /* H: E bit */
-#define _PAGE_GUARDED  0x00000100              /* H: G bit */
-#define _PAGE_COHERENT 0x00000200              /* H: M bit */
-#define _PAGE_NO_CACHE 0x00000400              /* H: I bit */
-#define _PAGE_WRITETHRU        0x00000800              /* H: W bit */
-
-/* TODO: Add large page lowmem mapping support */
-#define _PMD_PRESENT   0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD       (~PAGE_MASK)
-
-/* ERPN in a PTE never gets cleared, ignore it */
-#define _PTE_NONE_MASK 0xffffffff00000000ULL
-
-
-#endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_44x_H */
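
The _PTE_NONE_MASK at the end of the deleted 44x header is what lets pte_none() ignore a stale ERPN in the upper word; a hedged standalone model:

    #include <assert.h>

    #define PTE_NONE_MASK 0xffffffff00000000ULL   /* ERPN is never cleared */

    static int pte_none(unsigned long long pte)
    {
            return (pte & ~PTE_NONE_MASK) == 0;
    }

    int main(void)
    {
            assert(pte_none(0x0000000500000000ULL));    /* stale ERPN only */
            assert(!pte_none(0x0000000500000001ULL));   /* _PAGE_PRESENT set */
            return 0;
    }
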
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
deleted file mode 100644 (file)
index a0e2ba9..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef _ASM_POWERPC_PTE_8xx_H
-#define _ASM_POWERPC_PTE_8xx_H
-#ifdef __KERNEL__
-
-/*
- * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
- * We also use the two level tables, but we can put the real bits in them
- * needed for the TLB and tablewalk.  These definitions require Mx_CTR.PPM = 0,
- * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
- * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
- * based upon user/super access.  The TLB does not have accessed or write
- * protect bits.  We assume that if the TLB gets loaded with an entry it is
- * accessed, and overload the changed bit for write protect.  We use
- * two bits in the software pte that are supposed to be set to zero in
- * the TLB entry (24 and 25) for these indicators.  Although the level 1
- * descriptor contains the guarded and writethrough/copyback bits, we can
- * set these at the page level since they get copied from the Mx_TWC
- * register when the TLB entry is loaded.  We will use bit 27 for guard, since
- * that is where it exists in the MD_TWC, and bit 26 for writethrough.
- * These will get masked from the level 2 descriptor at TLB load time, and
- * copied to the MD_TWC before it gets loaded.
- * Large page sizes added.  We currently support two sizes, 4K and 8M.
- * This also allows a TLB hander optimization because we can directly
- * load the PMD into MD_TWC.  The 8M pages are only used for kernel
- * mapping of well known areas.  The PMD (PGD) entries contain control
- * flags in addition to the address, so care must be taken that the
- * software no longer assumes these are only pointers.
- */
-
-/* Definitions for 8xx embedded chips. */
-#define _PAGE_PRESENT  0x0001  /* Page is valid */
-#define _PAGE_NO_CACHE 0x0002  /* I: cache inhibit */
-#define _PAGE_SHARED   0x0004  /* No ASID (context) compare */
-#define _PAGE_SPECIAL  0x0008  /* SW entry, forced to 0 by the TLB miss */
-#define _PAGE_DIRTY    0x0100  /* C: page changed */
-
-/* These 4 software bits must be masked out when the L2 entry is loaded
- * into the TLB.
- */
-#define _PAGE_GUARDED  0x0010  /* Copied to L1 G entry in DTLB */
-#define _PAGE_USER     0x0020  /* Copied to L1 APG lsb */
-#define _PAGE_EXEC     0x0040  /* Copied to L1 APG */
-#define _PAGE_WRITETHRU        0x0080  /* software: caching is write through */
-#define _PAGE_ACCESSED 0x0800  /* software: page referenced */
-
-#define _PAGE_RO       0x0600  /* Supervisor RO, User no access */
-
-#define _PMD_PRESENT   0x0001
-#define _PMD_BAD       0x0ff0
-#define _PMD_PAGE_MASK 0x000c
-#define _PMD_PAGE_8M   0x000c
-
-/* Until my rework is finished, 8xx still needs atomic PTE updates */
-#define PTE_ATOMIC_UPDATES     1
-
-/* We need to add _PAGE_SHARED to kernel pages */
-#define _PAGE_KERNEL_RO                (_PAGE_SHARED | _PAGE_RO)
-#define _PAGE_KERNEL_ROX       (_PAGE_SHARED | _PAGE_RO | _PAGE_EXEC)
-#define _PAGE_KERNEL_RW                (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
-                                _PAGE_HWWRITE)
-#define _PAGE_KERNEL_RWX       (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
-                                _PAGE_HWWRITE | _PAGE_EXEC)
-
-#endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_8xx_H */
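
Because kernel mappings on 8xx must bypass the ASID (context) compare, every _PAGE_KERNEL_* combination above carries _PAGE_SHARED; a hedged check that the RO/ROX pair then differ only in the EX permission:

    #include <assert.h>

    #define PAGE_SHARED 0x0004
    #define PAGE_EXEC   0x0040
    #define PAGE_RO     0x0600

    #define PAGE_KERNEL_RO   (PAGE_SHARED | PAGE_RO)
    #define PAGE_KERNEL_ROX  (PAGE_SHARED | PAGE_RO | PAGE_EXEC)

    int main(void)
    {
            assert(PAGE_KERNEL_ROX == (PAGE_KERNEL_RO | PAGE_EXEC));
            assert(PAGE_KERNEL_RO & PAGE_SHARED);   /* no ASID compare */
            return 0;
    }
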
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
deleted file mode 100644 (file)
index 8d84732..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef _ASM_POWERPC_PTE_BOOK3E_H
-#define _ASM_POWERPC_PTE_BOOK3E_H
-#ifdef __KERNEL__
-
-/* PTE bit definitions for processors compliant to the Book3E
- * architecture 2.06 or later. The position of the PTE bits
- * matches the HW definition of the optional Embedded Page Table
- * category.
- */
-
-/* Architected bits */
-#define _PAGE_PRESENT  0x000001 /* software: pte contains a translation */
-#define _PAGE_SW1      0x000002
-#define _PAGE_BIT_SWAP_TYPE    2
-#define _PAGE_BAP_SR   0x000004
-#define _PAGE_BAP_UR   0x000008
-#define _PAGE_BAP_SW   0x000010
-#define _PAGE_BAP_UW   0x000020
-#define _PAGE_BAP_SX   0x000040
-#define _PAGE_BAP_UX   0x000080
-#define _PAGE_PSIZE_MSK        0x000f00
-#define _PAGE_PSIZE_4K 0x000200
-#define _PAGE_PSIZE_8K 0x000300
-#define _PAGE_PSIZE_16K        0x000400
-#define _PAGE_PSIZE_32K        0x000500
-#define _PAGE_PSIZE_64K        0x000600
-#define _PAGE_PSIZE_128K       0x000700
-#define _PAGE_PSIZE_256K       0x000800
-#define _PAGE_PSIZE_512K       0x000900
-#define _PAGE_PSIZE_1M 0x000a00
-#define _PAGE_PSIZE_2M 0x000b00
-#define _PAGE_PSIZE_4M 0x000c00
-#define _PAGE_PSIZE_8M 0x000d00
-#define _PAGE_PSIZE_16M        0x000e00
-#define _PAGE_PSIZE_32M        0x000f00
-#define _PAGE_DIRTY    0x001000 /* C: page changed */
-#define _PAGE_SW0      0x002000
-#define _PAGE_U3       0x004000
-#define _PAGE_U2       0x008000
-#define _PAGE_U1       0x010000
-#define _PAGE_U0       0x020000
-#define _PAGE_ACCESSED 0x040000
-#define _PAGE_ENDIAN   0x080000
-#define _PAGE_GUARDED  0x100000
-#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
-#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
-#define _PAGE_WRITETHRU        0x800000 /* W: cache write-through */
-
-/* "Higher level" linux bit combinations */
-#define _PAGE_EXEC             _PAGE_BAP_UX /* .. and was cache cleaned */
-#define _PAGE_RW               (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
-#define _PAGE_KERNEL_RW                (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
-#define _PAGE_KERNEL_RO                (_PAGE_BAP_SR)
-#define _PAGE_KERNEL_RWX       (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
-#define _PAGE_KERNEL_ROX       (_PAGE_BAP_SR | _PAGE_BAP_SX)
-#define _PAGE_USER             (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
-
-#define _PAGE_HASHPTE  0
-#define _PAGE_BUSY     0
-
-#define _PAGE_SPECIAL  _PAGE_SW0
-
-/* Flags to be preserved on PTE modifications */
-#define _PAGE_HPTEFLAGS        _PAGE_BUSY
-
-/* Base page size */
-#ifdef CONFIG_PPC_64K_PAGES
-#define _PAGE_PSIZE    _PAGE_PSIZE_64K
-#define PTE_RPN_SHIFT  (28)
-#else
-#define _PAGE_PSIZE    _PAGE_PSIZE_4K
-#define        PTE_RPN_SHIFT   (24)
-#endif
-
-#define PTE_WIMGE_SHIFT (19)
-#define PTE_BAP_SHIFT  (2)
-
-/* On 32-bit, we never clear the top part of the PTE */
-#ifdef CONFIG_PPC32
-#define _PTE_NONE_MASK 0xffffffff00000000ULL
-#define _PMD_PRESENT   0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD       (~PAGE_MASK)
-#endif
-
-#endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_BOOK3E_H */
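
The _PAGE_PSIZE_* ladder in the deleted Book3E header encodes log2(size in KB) in bits 8-11, matching the architected TSIZE field; a hedged standalone decode:

    #include <assert.h>

    #define PAGE_PSIZE_MSK 0x000f00

    static unsigned long psize_bytes(unsigned long pte)
    {
            /* the 4-bit field at bits 8-11 holds log2(size in KB) */
            return 1024UL << ((pte & PAGE_PSIZE_MSK) >> 8);
    }

    int main(void)
    {
            assert(psize_bytes(0x000200) == 4096);        /* _PAGE_PSIZE_4K  */
            assert(psize_bytes(0x000f00) == 32UL << 20);  /* _PAGE_PSIZE_32M */
            return 0;
    }
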
index 71537a3..1ec67b0 100644 (file)
 #else
 #define _PAGE_RW 0
 #endif
+
+#ifndef _PAGE_PTE
+#define _PAGE_PTE 0
+#endif
+
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK      _PMD_PRESENT
 #endif
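
The #ifndef fallbacks being extended here (including the new _PAGE_PTE one) follow a standard trick: a bit a platform lacks is defined as 0 so common code can test it unconditionally and the compiler folds the dead branch away. A hedged illustration:

    #include <assert.h>

    #define PAGE_PTE 0   /* fallback: this platform lacks the marker bit */

    static int pte_is_marked(unsigned long pte)
    {
            return (pte & PAGE_PTE) != 0;   /* constant-folds to 0 */
    }

    int main(void)
    {
            assert(!pte_is_marked(~0UL));   /* never set on this platform */
            return 0;
    }
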
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/pte-fsl-booke.h
deleted file mode 100644 (file)
index 9f5c3d0..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef _ASM_POWERPC_PTE_FSL_BOOKE_H
-#define _ASM_POWERPC_PTE_FSL_BOOKE_H
-#ifdef __KERNEL__
-
-/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
- * processors
- *
-   MMU Assist Register 3:
-
-   32 33 34 35 36  ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
-   RPN......................  0  0 U0 U1 U2 U3 UX SX UW SW UR SR
-
-   - PRESENT *must* be in the bottom three bits because swap cache
-     entries use the top 29 bits.
-
-*/
-
-/* Definitions for FSL Book-E Cores */
-#define _PAGE_PRESENT  0x00001 /* S: PTE contains a translation */
-#define _PAGE_USER     0x00002 /* S: User page (maps to UR) */
-#define _PAGE_RW       0x00004 /* S: Write permission (SW) */
-#define _PAGE_DIRTY    0x00008 /* S: Page dirty */
-#define _PAGE_EXEC     0x00010 /* H: SX permission */
-#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
-
-#define _PAGE_ENDIAN   0x00040 /* H: E bit */
-#define _PAGE_GUARDED  0x00080 /* H: G bit */
-#define _PAGE_COHERENT 0x00100 /* H: M bit */
-#define _PAGE_NO_CACHE 0x00200 /* H: I bit */
-#define _PAGE_WRITETHRU        0x00400 /* H: W bit */
-#define _PAGE_SPECIAL  0x00800 /* S: Special page */
-
-#define _PMD_PRESENT   0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD       (~PAGE_MASK)
-
-#define PTE_WIMGE_SHIFT (6)
-
-#endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_FSL_BOOKE_H */
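
PTE_WIMGE_SHIFT in the deleted FSL BookE header is chosen so the E/G/M/I/W PTE bits line up with the MAS2 WIMGE layout after a single shift; a hedged standalone check using the masks above:

    #include <assert.h>

    #define PAGE_GUARDED    0x00080   /* G */
    #define PAGE_NO_CACHE   0x00200   /* I */
    #define PTE_WIMGE_SHIFT 6

    int main(void)
    {
            unsigned long pte = PAGE_NO_CACHE | PAGE_GUARDED;
            unsigned long wimge = (pte >> PTE_WIMGE_SHIFT) & 0x1f;

            assert(wimge == 0x0a);   /* I in bit 3, G in bit 1 */
            return 0;
    }
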
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
deleted file mode 100644 (file)
index 62cfb0c..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _ASM_POWERPC_PTE_HASH32_H
-#define _ASM_POWERPC_PTE_HASH32_H
-#ifdef __KERNEL__
-
-/*
- * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
- * table containing PTEs, together with a set of 16 segment registers,
- * to define the virtual to physical address mapping.
- *
- * We use the hash table as an extended TLB, i.e. a cache of currently
- * active mappings.  We maintain a two-level page table tree, much
- * like that used by the i386, for the sake of the Linux memory
- * management code.  Low-level assembler code in hash_low_32.S
- * (procedure hash_page) is responsible for extracting ptes from the
- * tree and putting them into the hash table when necessary, and
- * updating the accessed and modified bits in the page table tree.
- */
-
-#define _PAGE_PRESENT  0x001   /* software: pte contains a translation */
-#define _PAGE_HASHPTE  0x002   /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER     0x004   /* usermode access allowed */
-#define _PAGE_GUARDED  0x008   /* G: prohibit speculative access */
-#define _PAGE_COHERENT 0x010   /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x020   /* I: cache inhibit */
-#define _PAGE_WRITETHRU        0x040   /* W: cache write-through */
-#define _PAGE_DIRTY    0x080   /* C: page changed */
-#define _PAGE_ACCESSED 0x100   /* R: page referenced */
-#define _PAGE_RW       0x400   /* software: user write access allowed */
-#define _PAGE_SPECIAL  0x800   /* software: Special page */
-
-#ifdef CONFIG_PTE_64BIT
-/* We never clear the high word of the pte */
-#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
-#else
-#define _PTE_NONE_MASK _PAGE_HASHPTE
-#endif
-
-#define _PMD_PRESENT   0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD       (~PAGE_MASK)
-
-/* Hash table based platforms need atomic updates of the linux PTE */
-#define PTE_ATOMIC_UPDATES     1
-
-#endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_HASH32_H */
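
PTE_ATOMIC_UPDATES at the end of the deleted hash32 header exists because hash_page can set _PAGE_HASHPTE behind the kernel's back; a hedged C11 model of the resulting read-modify-write loop (the real code uses lwarx/stwcx.):

    #include <stdatomic.h>
    #include <assert.h>

    static unsigned long pte_update(_Atomic unsigned long *ptep,
                                    unsigned long clr, unsigned long set)
    {
            unsigned long old = atomic_load(ptep);

            /* retry if a concurrent hash insertion changed the PTE */
            while (!atomic_compare_exchange_weak(ptep, &old,
                                                 (old & ~clr) | set))
                    ;
            return old;
    }

    int main(void)
    {
            _Atomic unsigned long pte = 0x103;  /* PRESENT|HASHPTE|ACCESSED */

            assert(pte_update(&pte, 0x100, 0) == 0x103);  /* clear ACCESSED */
            assert(atomic_load(&pte) == 0x003);
            return 0;
    }
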
diff --git a/arch/powerpc/include/asm/pte-hash64-4k.h b/arch/powerpc/include/asm/pte-hash64-4k.h
deleted file mode 100644 (file)
index c134e80..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* To be included by pgtable-hash64.h only */
-
-/* PTE bits */
-#define _PAGE_HASHPTE  0x0400 /* software: pte has an associated HPTE */
-#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */
-#define _PAGE_GROUP_IX  0x7000 /* software: HPTE index within group */
-#define _PAGE_F_SECOND  _PAGE_SECONDARY
-#define _PAGE_F_GIX     _PAGE_GROUP_IX
-#define _PAGE_SPECIAL  0x10000 /* software: special page */
-
-/* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
-                        _PAGE_SECONDARY | _PAGE_GROUP_IX)
-
-/* shift to put page number into pte */
-#define PTE_RPN_SHIFT  (17)
-
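
The _PAGE_F_SECOND/_PAGE_F_GIX bits in the deleted 4K header line up with the generic __rpte_to_hidx() (pte >> 12) seen earlier in this diff; a hedged standalone extraction:

    #include <assert.h>

    #define PAGE_F_SECOND 0x8000   /* HPTE in secondary group */
    #define PAGE_F_GIX    0x7000   /* HPTE index within group */

    static unsigned long pte_hidx(unsigned long pte)
    {
            /* [secondary | 3-bit group index] as one 4-bit value */
            return (pte >> 12) & 0xf;
    }

    int main(void)
    {
            assert(pte_hidx(PAGE_F_SECOND | 0x5000) == 0xd);
            return 0;
    }
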
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
deleted file mode 100644 (file)
index 4f4ec2a..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/* To be included by pgtable-hash64.h only */
-
-/* Additional PTE bits (don't change without checking asm in hash_low.S) */
-#define _PAGE_SPECIAL  0x00000400 /* software: special page */
-#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */
-#define _PAGE_HPTE_SUB0        0x08000000 /* combo only: first sub page */
-#define _PAGE_COMBO    0x10000000 /* this is a combo 4k page */
-#define _PAGE_4K_PFN   0x20000000 /* PFN is for a single 4k page */
-
-/* For 64K pages, we don't have a separate _PAGE_HASHPTE bit. Instead,
- * we set that to be the whole sub-bits mask. The C code will only
- * test this, so a multi-bit mask will work. For combo pages, this is
- * equivalent since, effectively, the old _PAGE_HASHPTE was an OR of
- * all the sub bits. For real 64k pages, we now have the assembly set
- * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap
- * that mask. This is fine as long as the HIDX bits are never set on
- * a PTE that isn't hashed, which is the case today.
- *
- * A little nit is for the huge page C code, which does the hashing
- * in C: we need to provide which bit to use.
- */
-#define _PAGE_HASHPTE  _PAGE_HPTE_SUB
-
-/* Note the full page bits must be in the same location as for normal
- * 4k pages since the same assembly will be used to insert 64K pages
- * whether the kernel has CONFIG_PPC_64K_PAGES or not
- */
-#define _PAGE_F_SECOND  0x00008000 /* full page: hidx bits */
-#define _PAGE_F_GIX     0x00007000 /* full page: hidx bits */
-
-/* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO)
-
-/* Shift to put page number into pte.
- *
- * That gives us a max RPN of 34 bits, which means a max of 50 bits
- * of addressable physical space, or 46 bits for the special 4k PFNs.
- */
-#define PTE_RPN_SHIFT  (30)
-
-#ifndef __ASSEMBLY__
-
-/*
- * With 64K pages on hash table, we have a special PTE format that
- * uses a second "half" of the page table to encode sub-page information
- * in order to deal with 64K made of 4K HW pages. Thus we override the
- * generic accessors and iterators here
- */
-#define __real_pte __real_pte
-static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
-{
-       real_pte_t rpte;
-
-       rpte.pte = pte;
-       rpte.hidx = 0;
-       if (pte_val(pte) & _PAGE_COMBO) {
-               /*
-                * Make sure we order the hidx load against the _PAGE_COMBO
-                * check. The store side ordering is done in __hash_page_4K
-                */
-               smp_rmb();
-               rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
-       }
-       return rpte;
-}
-
-static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
-{
-       if ((pte_val(rpte.pte) & _PAGE_COMBO))
-               return (rpte.hidx >> (index<<2)) & 0xf;
-       return (pte_val(rpte.pte) >> 12) & 0xf;
-}
-
-#define __rpte_to_pte(r)       ((r).pte)
-#define __rpte_sub_valid(rpte, index) \
-       (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
-
-/* Trick: we set __end to va + 64k, which happens to work for
- * a 16M page as well, since we want only one iteration
- */
-#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)    \
-       do {                                                            \
-               unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));  \
-               unsigned __split = (psize == MMU_PAGE_4K ||             \
-                                   psize == MMU_PAGE_64K_AP);          \
-               shift = mmu_psize_defs[psize].shift;                    \
-               for (index = 0; vpn < __end; index++,                   \
-                            vpn += (1L << (shift - VPN_SHIFT))) {      \
-                       if (!__split || __rpte_sub_valid(rpte, index))  \
-                               do {
-
-#define pte_iterate_hashed_end() } while(0); } } while(0)
-
-#define pte_pagesize_index(mm, addr, pte)      \
-       (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
-
-#define remap_4k_pfn(vma, addr, pfn, prot)                             \
-       (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL :  \
-               remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,        \
-                       __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))
-
-#endif /* __ASSEMBLY__ */
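
For combo pages the second half of the PTE page holds sixteen 4-bit slots, one per 4K subpage; a hedged standalone model of the __rpte_to_hidx() nibble math above:

    #include <assert.h>

    static unsigned long rpte_to_hidx(unsigned long hidx_word,
                                      unsigned long index)
    {
            return (hidx_word >> (index << 2)) & 0xf;  /* one nibble each */
    }

    int main(void)
    {
            unsigned long hidx = 0xfedcba9876543210UL;  /* 16 packed slots */

            assert(rpte_to_hidx(hidx, 0) == 0x0);
            assert(rpte_to_hidx(hidx, 15) == 0xf);
            return 0;
    }
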
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
deleted file mode 100644 (file)
index ef612c1..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef _ASM_POWERPC_PTE_HASH64_H
-#define _ASM_POWERPC_PTE_HASH64_H
-#ifdef __KERNEL__
-
-/*
- * Common bits between 4K and 64K pages in a linux-style PTE.
- * These match the bits in the (hardware-defined) PowerPC PTE as closely
- * as possible. Additional bits may be defined in pgtable-hash64-*.h
- *
- * Note: We only support user read/write permissions. The supervisor always
- * has full read/write to pages above PAGE_OFFSET (pages below that
- * always use the user access permissions).
- *
- * We could create a separate kernel read-only mode if we used the 3 PP bit
- * combinations that newer processors provide, but we currently don't.
- */
-#define _PAGE_PRESENT          0x0001 /* software: pte contains a translation */
-#define _PAGE_USER             0x0002 /* matches one of the PP bits */
-#define _PAGE_BIT_SWAP_TYPE    2
-#define _PAGE_EXEC             0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED          0x0008
-/* We can derive Memory coherence from _PAGE_NO_CACHE */
-#define _PAGE_NO_CACHE         0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU                0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY            0x0080 /* C: page changed */
-#define _PAGE_ACCESSED         0x0100 /* R: page referenced */
-#define _PAGE_RW               0x0200 /* software: user write access allowed */
-#define _PAGE_BUSY             0x0800 /* software: PTE & hash are busy */
-
-/* No separate kernel read-only */
-#define _PAGE_KERNEL_RW                (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
-#define _PAGE_KERNEL_RO                 _PAGE_KERNEL_RW
-
-/* Strong Access Ordering */
-#define _PAGE_SAO              (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
-
-/* No page size encoding in the linux PTE */
-#define _PAGE_PSIZE            0
-
-/* PTEIDX nibble */
-#define _PTEIDX_SECONDARY      0x8
-#define _PTEIDX_GROUP_IX       0x7
-
-/* Hash table based platforms need atomic updates of the linux PTE */
-#define PTE_ATOMIC_UPDATES     1
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pte-hash64-64k.h>
-#else
-#include <asm/pte-hash64-4k.h>
-#endif
-
-#endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_HASH64_H */
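
The "(we invert)" note on _PAGE_EXEC in the header above reflects that the hardware HPTE carries a No-execute bit; a hedged illustration (the N-bit position here is illustrative, not the real HPTE layout):

    #include <assert.h>

    #define PAGE_EXEC 0x0004
    #define HPTE_N    0x0004   /* illustrative no-execute position */

    static unsigned long hpte_noexec(unsigned long linux_pte)
    {
            /* Linux EXEC set -> hardware N clear, and vice versa */
            return (linux_pte & PAGE_EXEC) ? 0 : HPTE_N;
    }

    int main(void)
    {
            assert(hpte_noexec(PAGE_EXEC) == 0);
            assert(hpte_noexec(0) == HPTE_N);
            return 0;
    }
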
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
deleted file mode 100644 (file)
index 32b9bfa..0000000
+++ /dev/null
@@ -1,740 +0,0 @@
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- *
- * Description:
- * QUICC Engine (QE) external definitions and structure.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#ifndef _ASM_POWERPC_QE_H
-#define _ASM_POWERPC_QE_H
-#ifdef __KERNEL__
-
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <asm/cpm.h>
-#include <asm/immap_qe.h>
-
-#define QE_NUM_OF_SNUM 256     /* There are 256 serial numbers in the QE */
-#define QE_NUM_OF_BRGS 16
-#define QE_NUM_OF_PORTS        1024
-
-/* Memory partitions
-*/
-#define MEM_PART_SYSTEM                0
-#define MEM_PART_SECONDARY     1
-#define MEM_PART_MURAM         2
-
-/* Clocks and BRGs */
-enum qe_clock {
-       QE_CLK_NONE = 0,
-       QE_BRG1,                /* Baud Rate Generator 1 */
-       QE_BRG2,                /* Baud Rate Generator 2 */
-       QE_BRG3,                /* Baud Rate Generator 3 */
-       QE_BRG4,                /* Baud Rate Generator 4 */
-       QE_BRG5,                /* Baud Rate Generator 5 */
-       QE_BRG6,                /* Baud Rate Generator 6 */
-       QE_BRG7,                /* Baud Rate Generator 7 */
-       QE_BRG8,                /* Baud Rate Generator 8 */
-       QE_BRG9,                /* Baud Rate Generator 9 */
-       QE_BRG10,               /* Baud Rate Generator 10 */
-       QE_BRG11,               /* Baud Rate Generator 11 */
-       QE_BRG12,               /* Baud Rate Generator 12 */
-       QE_BRG13,               /* Baud Rate Generator 13 */
-       QE_BRG14,               /* Baud Rate Generator 14 */
-       QE_BRG15,               /* Baud Rate Generator 15 */
-       QE_BRG16,               /* Baud Rate Generator 16 */
-       QE_CLK1,                /* Clock 1 */
-       QE_CLK2,                /* Clock 2 */
-       QE_CLK3,                /* Clock 3 */
-       QE_CLK4,                /* Clock 4 */
-       QE_CLK5,                /* Clock 5 */
-       QE_CLK6,                /* Clock 6 */
-       QE_CLK7,                /* Clock 7 */
-       QE_CLK8,                /* Clock 8 */
-       QE_CLK9,                /* Clock 9 */
-       QE_CLK10,               /* Clock 10 */
-       QE_CLK11,               /* Clock 11 */
-       QE_CLK12,               /* Clock 12 */
-       QE_CLK13,               /* Clock 13 */
-       QE_CLK14,               /* Clock 14 */
-       QE_CLK15,               /* Clock 15 */
-       QE_CLK16,               /* Clock 16 */
-       QE_CLK17,               /* Clock 17 */
-       QE_CLK18,               /* Clock 18 */
-       QE_CLK19,               /* Clock 19 */
-       QE_CLK20,               /* Clock 20 */
-       QE_CLK21,               /* Clock 21 */
-       QE_CLK22,               /* Clock 22 */
-       QE_CLK23,               /* Clock 23 */
-       QE_CLK24,               /* Clock 24 */
-       QE_CLK_DUMMY
-};
-
-static inline bool qe_clock_is_brg(enum qe_clock clk)
-{
-       return clk >= QE_BRG1 && clk <= QE_BRG16;
-}
-
-extern spinlock_t cmxgcr_lock;
-
-/* Export QE common operations */
-#ifdef CONFIG_QUICC_ENGINE
-extern void qe_reset(void);
-#else
-static inline void qe_reset(void) {}
-#endif
-
-/* QE PIO */
-#define QE_PIO_PINS 32
-
-struct qe_pio_regs {
-       __be32  cpodr;          /* Open drain register */
-       __be32  cpdata;         /* Data register */
-       __be32  cpdir1;         /* Direction register */
-       __be32  cpdir2;         /* Direction register */
-       __be32  cppar1;         /* Pin assignment register */
-       __be32  cppar2;         /* Pin assignment register */
-#ifdef CONFIG_PPC_85xx
-       u8      pad[8];
-#endif
-};
-
-#define QE_PIO_DIR_IN  2
-#define QE_PIO_DIR_OUT 1
-extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin,
-                               int dir, int open_drain, int assignment,
-                               int has_irq);
-#ifdef CONFIG_QUICC_ENGINE
-extern int par_io_init(struct device_node *np);
-extern int par_io_of_config(struct device_node *np);
-extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
-                            int assignment, int has_irq);
-extern int par_io_data_set(u8 port, u8 pin, u8 val);
-#else
-static inline int par_io_init(struct device_node *np) { return -ENOSYS; }
-static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; }
-static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
-               int assignment, int has_irq) { return -ENOSYS; }
-static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
-#endif /* CONFIG_QUICC_ENGINE */
-
-/*
- * Pin multiplexing functions.
- */
-struct qe_pin;
-#ifdef CONFIG_QE_GPIO
-extern struct qe_pin *qe_pin_request(struct device_node *np, int index);
-extern void qe_pin_free(struct qe_pin *qe_pin);
-extern void qe_pin_set_gpio(struct qe_pin *qe_pin);
-extern void qe_pin_set_dedicated(struct qe_pin *pin);
-#else
-static inline struct qe_pin *qe_pin_request(struct device_node *np, int index)
-{
-       return ERR_PTR(-ENOSYS);
-}
-static inline void qe_pin_free(struct qe_pin *qe_pin) {}
-static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {}
-static inline void qe_pin_set_dedicated(struct qe_pin *pin) {}
-#endif /* CONFIG_QE_GPIO */
-
-#ifdef CONFIG_QUICC_ENGINE
-int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
-#else
-static inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol,
-                              u32 cmd_input)
-{
-       return -ENOSYS;
-}
-#endif /* CONFIG_QUICC_ENGINE */
-
-/* QE internal API */
-enum qe_clock qe_clock_source(const char *source);
-unsigned int qe_get_brg_clk(void);
-int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier);
-int qe_get_snum(void);
-void qe_put_snum(u8 snum);
-unsigned int qe_get_num_of_risc(void);
-unsigned int qe_get_num_of_snums(void);
-
-static inline int qe_alive_during_sleep(void)
-{
-       /*
-        * MPC8568E reference manual says:
-        *
-        * "...power down sequence waits for all I/O interfaces to become idle.
-        *  In some applications this may happen eventually without actively
-        *  shutting down interfaces, but most likely, software will have to
-        *  take steps to shut down the eTSEC, QUICC Engine Block, and PCI
-        *  interfaces before issuing the command (either the write to the core
-        *  MSR[WE] as described above or writing to POWMGTCSR) to put the
-        *  device into sleep state."
-        *
-        * MPC8569E reference manual has a similar paragraph.
-        */
-#ifdef CONFIG_PPC_85xx
-       return 0;
-#else
-       return 1;
-#endif
-}
-
-/* we actually use cpm_muram implementation, define this for convenience */
-#define qe_muram_init cpm_muram_init
-#define qe_muram_alloc cpm_muram_alloc
-#define qe_muram_alloc_fixed cpm_muram_alloc_fixed
-#define qe_muram_free cpm_muram_free
-#define qe_muram_addr cpm_muram_addr
-#define qe_muram_offset cpm_muram_offset
-
-/* Structure that defines QE firmware binary files.
- *
- * See Documentation/powerpc/qe_firmware.txt for a description of these
- * fields.
- */
-struct qe_firmware {
-       struct qe_header {
-               __be32 length;  /* Length of the entire structure, in bytes */
-               u8 magic[3];    /* Set to { 'Q', 'E', 'F' } */
-               u8 version;     /* Version of this layout. First ver is '1' */
-       } header;
-       u8 id[62];      /* Null-terminated identifier string */
-       u8 split;       /* 0 = shared I-RAM, 1 = split I-RAM */
-       u8 count;       /* Number of microcode[] structures */
-       struct {
-               __be16 model;           /* The SOC model  */
-               u8 major;               /* The SOC revision major */
-               u8 minor;               /* The SOC revision minor */
-       } __attribute__ ((packed)) soc;
-       u8 padding[4];                  /* Reserved, for alignment */
-       __be64 extended_modes;          /* Extended modes */
-       __be32 vtraps[8];               /* Virtual trap addresses */
-       u8 reserved[4];                 /* Reserved, for future expansion */
-       struct qe_microcode {
-               u8 id[32];              /* Null-terminated identifier */
-               __be32 traps[16];       /* Trap addresses, 0 == ignore */
-               __be32 eccr;            /* The value for the ECCR register */
-               __be32 iram_offset;     /* Offset into I-RAM for the code */
-               __be32 count;           /* Number of 32-bit words of the code */
-               __be32 code_offset;     /* Offset of the actual microcode */
-               u8 major;               /* The microcode version major */
-               u8 minor;               /* The microcode version minor */
-               u8 revision;            /* The microcode version revision */
-               u8 padding;             /* Reserved, for alignment */
-               u8 reserved[4];         /* Reserved, for future expansion */
-       } __attribute__ ((packed)) microcode[1];
-       /* All microcode binaries should be located here */
-       /* CRC32 should be located here, after the microcode binaries */
-} __attribute__ ((packed));
-
-struct qe_firmware_info {
-       char id[64];            /* Firmware name */
-       u32 vtraps[8];          /* Virtual trap addresses */
-       u64 extended_modes;     /* Extended modes */
-};
-
-#ifdef CONFIG_QUICC_ENGINE
-/* Upload a firmware to the QE */
-int qe_upload_firmware(const struct qe_firmware *firmware);
-#else
-static inline int qe_upload_firmware(const struct qe_firmware *firmware)
-{
-       return -ENOSYS;
-}
-#endif /* CONFIG_QUICC_ENGINE */
-
-/* Obtain information on the uploaded firmware */
-struct qe_firmware_info *qe_get_firmware_info(void);
-
-/* QE USB */
-int qe_usb_clock_set(enum qe_clock clk, int rate);
-
-/* Buffer descriptors */
-struct qe_bd {
-       __be16 status;
-       __be16 length;
-       __be32 buf;
-} __attribute__ ((packed));
-
-#define BD_STATUS_MASK 0xffff0000
-#define BD_LENGTH_MASK 0x0000ffff
-
-/* Alignment */
-#define QE_INTR_TABLE_ALIGN    16      /* ??? */
-#define QE_ALIGNMENT_OF_BD     8
-#define QE_ALIGNMENT_OF_PRAM   64
-
-/* RISC allocation */
-#define QE_RISC_ALLOCATION_RISC1       0x1  /* RISC 1 */
-#define QE_RISC_ALLOCATION_RISC2       0x2  /* RISC 2 */
-#define QE_RISC_ALLOCATION_RISC3       0x4  /* RISC 3 */
-#define QE_RISC_ALLOCATION_RISC4       0x8  /* RISC 4 */
-#define QE_RISC_ALLOCATION_RISC1_AND_RISC2     (QE_RISC_ALLOCATION_RISC1 | \
-                                                QE_RISC_ALLOCATION_RISC2)
-#define QE_RISC_ALLOCATION_FOUR_RISCS  (QE_RISC_ALLOCATION_RISC1 | \
-                                        QE_RISC_ALLOCATION_RISC2 | \
-                                        QE_RISC_ALLOCATION_RISC3 | \
-                                        QE_RISC_ALLOCATION_RISC4)
-
-/* QE extended filtering Table Lookup Key Size */
-enum qe_fltr_tbl_lookup_key_size {
-       QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES
-               = 0x3f,         /* LookupKey parsed by the Generate LookupKey
-                                  CMD is truncated to 8 bytes */
-       QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES
-               = 0x5f,         /* LookupKey parsed by the Generate LookupKey
-                                  CMD is truncated to 16 bytes */
-};
-
-/* QE FLTR extended filtering Largest External Table Lookup Key Size */
-enum qe_fltr_largest_external_tbl_lookup_key_size {
-       QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE
-               = 0x0,/* not used */
-       QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES
-               = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES,        /* 8 bytes */
-       QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES
-               = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES,       /* 16 bytes */
-};
-
-/* structure representing QE parameter RAM */
-struct qe_timer_tables {
-       u16 tm_base;            /* QE timer table base adr */
-       u16 tm_ptr;             /* QE timer table pointer */
-       u16 r_tmr;              /* QE timer mode register */
-       u16 r_tmv;              /* QE timer valid register */
-       u32 tm_cmd;             /* QE timer cmd register */
-       u32 tm_cnt;             /* QE timer internal cnt */
-} __attribute__ ((packed));
-
-#define QE_FLTR_TAD_SIZE       8
-
-/* QE extended filtering Termination Action Descriptor (TAD) */
-struct qe_fltr_tad {
-       u8 serialized[QE_FLTR_TAD_SIZE];
-} __attribute__ ((packed));
-
-/* Communication Direction */
-enum comm_dir {
-       COMM_DIR_NONE = 0,
-       COMM_DIR_RX = 1,
-       COMM_DIR_TX = 2,
-       COMM_DIR_RX_AND_TX = 3
-};
-
-/* QE CMXUCR Registers.
- * There are two UCCs represented in each of the four CMXUCR registers.
- * These values are for the UCC in the LSBs
- */
-#define QE_CMXUCR_MII_ENET_MNG         0x00007000
-#define QE_CMXUCR_MII_ENET_MNG_SHIFT   12
-#define QE_CMXUCR_GRANT                        0x00008000
-#define QE_CMXUCR_TSA                  0x00004000
-#define QE_CMXUCR_BKPT                 0x00000100
-#define QE_CMXUCR_TX_CLK_SRC_MASK      0x0000000F
-
-/* QE CMXGCR Registers.
-*/
-#define QE_CMXGCR_MII_ENET_MNG         0x00007000
-#define QE_CMXGCR_MII_ENET_MNG_SHIFT   12
-#define QE_CMXGCR_USBCS                        0x0000000f
-#define QE_CMXGCR_USBCS_CLK3           0x1
-#define QE_CMXGCR_USBCS_CLK5           0x2
-#define QE_CMXGCR_USBCS_CLK7           0x3
-#define QE_CMXGCR_USBCS_CLK9           0x4
-#define QE_CMXGCR_USBCS_CLK13          0x5
-#define QE_CMXGCR_USBCS_CLK17          0x6
-#define QE_CMXGCR_USBCS_CLK19          0x7
-#define QE_CMXGCR_USBCS_CLK21          0x8
-#define QE_CMXGCR_USBCS_BRG9           0x9
-#define QE_CMXGCR_USBCS_BRG10          0xa
-
-/* QE CECR Commands.
-*/
-#define QE_CR_FLG                      0x00010000
-#define QE_RESET                       0x80000000
-#define QE_INIT_TX_RX                  0x00000000
-#define QE_INIT_RX                     0x00000001
-#define QE_INIT_TX                     0x00000002
-#define QE_ENTER_HUNT_MODE             0x00000003
-#define QE_STOP_TX                     0x00000004
-#define QE_GRACEFUL_STOP_TX            0x00000005
-#define QE_RESTART_TX                  0x00000006
-#define QE_CLOSE_RX_BD                 0x00000007
-#define QE_SWITCH_COMMAND              0x00000007
-#define QE_SET_GROUP_ADDRESS           0x00000008
-#define QE_START_IDMA                  0x00000009
-#define QE_MCC_STOP_RX                 0x00000009
-#define QE_ATM_TRANSMIT                        0x0000000a
-#define QE_HPAC_CLEAR_ALL              0x0000000b
-#define QE_GRACEFUL_STOP_RX            0x0000001a
-#define QE_RESTART_RX                  0x0000001b
-#define QE_HPAC_SET_PRIORITY           0x0000010b
-#define QE_HPAC_STOP_TX                        0x0000020b
-#define QE_HPAC_STOP_RX                        0x0000030b
-#define QE_HPAC_GRACEFUL_STOP_TX       0x0000040b
-#define QE_HPAC_GRACEFUL_STOP_RX       0x0000050b
-#define QE_HPAC_START_TX               0x0000060b
-#define QE_HPAC_START_RX               0x0000070b
-#define QE_USB_STOP_TX                 0x0000000a
-#define QE_USB_RESTART_TX              0x0000000c
-#define QE_QMC_STOP_TX                 0x0000000c
-#define QE_QMC_STOP_RX                 0x0000000d
-#define QE_SS7_SU_FIL_RESET            0x0000000e
-/* jonathbr added from here down for 83xx */
-#define QE_RESET_BCS                   0x0000000a
-#define QE_MCC_INIT_TX_RX_16           0x00000003
-#define QE_MCC_STOP_TX                 0x00000004
-#define QE_MCC_INIT_TX_1               0x00000005
-#define QE_MCC_INIT_RX_1               0x00000006
-#define QE_MCC_RESET                   0x00000007
-#define QE_SET_TIMER                   0x00000008
-#define QE_RANDOM_NUMBER               0x0000000c
-#define QE_ATM_MULTI_THREAD_INIT       0x00000011
-#define QE_ASSIGN_PAGE                 0x00000012
-#define QE_ADD_REMOVE_HASH_ENTRY       0x00000013
-#define QE_START_FLOW_CONTROL          0x00000014
-#define QE_STOP_FLOW_CONTROL           0x00000015
-#define QE_ASSIGN_PAGE_TO_DEVICE       0x00000016
-
-#define QE_ASSIGN_RISC                 0x00000010
-#define QE_CR_MCN_NORMAL_SHIFT         6
-#define QE_CR_MCN_USB_SHIFT            4
-#define QE_CR_MCN_RISC_ASSIGN_SHIFT    8
-#define QE_CR_SNUM_SHIFT               17
-
-/* QE CECR Sub Block - sub block of QE command.
-*/
-#define QE_CR_SUBBLOCK_INVALID         0x00000000
-#define QE_CR_SUBBLOCK_USB             0x03200000
-#define QE_CR_SUBBLOCK_UCCFAST1                0x02000000
-#define QE_CR_SUBBLOCK_UCCFAST2                0x02200000
-#define QE_CR_SUBBLOCK_UCCFAST3                0x02400000
-#define QE_CR_SUBBLOCK_UCCFAST4                0x02600000
-#define QE_CR_SUBBLOCK_UCCFAST5                0x02800000
-#define QE_CR_SUBBLOCK_UCCFAST6                0x02a00000
-#define QE_CR_SUBBLOCK_UCCFAST7                0x02c00000
-#define QE_CR_SUBBLOCK_UCCFAST8                0x02e00000
-#define QE_CR_SUBBLOCK_UCCSLOW1                0x00000000
-#define QE_CR_SUBBLOCK_UCCSLOW2                0x00200000
-#define QE_CR_SUBBLOCK_UCCSLOW3                0x00400000
-#define QE_CR_SUBBLOCK_UCCSLOW4                0x00600000
-#define QE_CR_SUBBLOCK_UCCSLOW5                0x00800000
-#define QE_CR_SUBBLOCK_UCCSLOW6                0x00a00000
-#define QE_CR_SUBBLOCK_UCCSLOW7                0x00c00000
-#define QE_CR_SUBBLOCK_UCCSLOW8                0x00e00000
-#define QE_CR_SUBBLOCK_MCC1            0x03800000
-#define QE_CR_SUBBLOCK_MCC2            0x03a00000
-#define QE_CR_SUBBLOCK_MCC3            0x03000000
-#define QE_CR_SUBBLOCK_IDMA1           0x02800000
-#define QE_CR_SUBBLOCK_IDMA2           0x02a00000
-#define QE_CR_SUBBLOCK_IDMA3           0x02c00000
-#define QE_CR_SUBBLOCK_IDMA4           0x02e00000
-#define QE_CR_SUBBLOCK_HPAC            0x01e00000
-#define QE_CR_SUBBLOCK_SPI1            0x01400000
-#define QE_CR_SUBBLOCK_SPI2            0x01600000
-#define QE_CR_SUBBLOCK_RAND            0x01c00000
-#define QE_CR_SUBBLOCK_TIMER           0x01e00000
-#define QE_CR_SUBBLOCK_GENERAL         0x03c00000
-
-/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command */
-#define QE_CR_PROTOCOL_UNSPECIFIED     0x00    /* For all other protocols */
-#define QE_CR_PROTOCOL_HDLC_TRANSPARENT        0x00
-#define QE_CR_PROTOCOL_QMC             0x02
-#define QE_CR_PROTOCOL_UART            0x04
-#define QE_CR_PROTOCOL_ATM_POS         0x0A
-#define QE_CR_PROTOCOL_ETHERNET                0x0C
-#define QE_CR_PROTOCOL_L2_SWITCH       0x0D
-
-/* BRG configuration register */
-#define QE_BRGC_ENABLE         0x00010000
-#define QE_BRGC_DIVISOR_SHIFT  1
-#define QE_BRGC_DIVISOR_MAX    0xFFF
-#define QE_BRGC_DIV16          1
-
-/* QE Timers registers */
-#define QE_GTCFR1_PCAS 0x80
-#define QE_GTCFR1_STP2 0x20
-#define QE_GTCFR1_RST2 0x10
-#define QE_GTCFR1_GM2  0x08
-#define QE_GTCFR1_GM1  0x04
-#define QE_GTCFR1_STP1 0x02
-#define QE_GTCFR1_RST1 0x01
-
-/* SDMA registers */
-#define QE_SDSR_BER1   0x02000000
-#define QE_SDSR_BER2   0x01000000
-
-#define QE_SDMR_GLB_1_MSK      0x80000000
-#define QE_SDMR_ADR_SEL                0x20000000
-#define QE_SDMR_BER1_MSK       0x02000000
-#define QE_SDMR_BER2_MSK       0x01000000
-#define QE_SDMR_EB1_MSK                0x00800000
-#define QE_SDMR_ER1_MSK                0x00080000
-#define QE_SDMR_ER2_MSK                0x00040000
-#define QE_SDMR_CEN_MASK       0x0000E000
-#define QE_SDMR_SBER_1         0x00000200
-#define QE_SDMR_SBER_2         0x00000200
-#define QE_SDMR_EB1_PR_MASK    0x000000C0
-#define QE_SDMR_ER1_PR         0x00000008
-
-#define QE_SDMR_CEN_SHIFT      13
-#define QE_SDMR_EB1_PR_SHIFT   6
-
-#define QE_SDTM_MSNUM_SHIFT    24
-
-#define QE_SDEBCR_BA_MASK      0x01FFFFFF
-
-/* Communication Processor */
-#define QE_CP_CERCR_MEE                0x8000  /* Multi-user RAM ECC enable */
-#define QE_CP_CERCR_IEE                0x4000  /* Instruction RAM ECC enable */
-#define QE_CP_CERCR_CIR                0x0800  /* Common instruction RAM */
-
-/* I-RAM */
-#define QE_IRAM_IADD_AIE       0x80000000      /* Auto Increment Enable */
-#define QE_IRAM_IADD_BADDR     0x00080000      /* Base Address */
-#define QE_IRAM_READY           0x80000000      /* Ready */
-
-/* UPC */
-#define UPGCR_PROTOCOL 0x80000000      /* protocol ul2 or pl2 */
-#define UPGCR_TMS      0x40000000      /* Transmit master/slave mode */
-#define UPGCR_RMS      0x20000000      /* Receive master/slave mode */
-#define UPGCR_ADDR     0x10000000      /* Master MPHY Addr multiplexing */
-#define UPGCR_DIAG     0x01000000      /* Diagnostic mode */
-
-/* UCC GUEMR register */
-#define UCC_GUEMR_MODE_MASK_RX 0x02
-#define UCC_GUEMR_MODE_FAST_RX 0x02
-#define UCC_GUEMR_MODE_SLOW_RX 0x00
-#define UCC_GUEMR_MODE_MASK_TX 0x01
-#define UCC_GUEMR_MODE_FAST_TX 0x01
-#define UCC_GUEMR_MODE_SLOW_TX 0x00
-#define UCC_GUEMR_MODE_MASK (UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX)
-#define UCC_GUEMR_SET_RESERVED3        0x10    /* Bit 3 in the guemr is reserved but
-                                          must be set 1 */
-
-/* structure representing UCC SLOW parameter RAM */
-struct ucc_slow_pram {
-       __be16 rbase;           /* RX BD base address */
-       __be16 tbase;           /* TX BD base address */
-       u8 rbmr;                /* RX bus mode register (same as CPM's RFCR) */
-       u8 tbmr;                /* TX bus mode register (same as CPM's TFCR) */
-       __be16 mrblr;           /* Rx buffer length */
-       __be32 rstate;          /* Rx internal state */
-       __be32 rptr;            /* Rx internal data pointer */
-       __be16 rbptr;           /* rb BD Pointer */
-       __be16 rcount;          /* Rx internal byte count */
-       __be32 rtemp;           /* Rx temp */
-       __be32 tstate;          /* Tx internal state */
-       __be32 tptr;            /* Tx internal data pointer */
-       __be16 tbptr;           /* Tx BD pointer */
-       __be16 tcount;          /* Tx byte count */
-       __be32 ttemp;           /* Tx temp */
-       __be32 rcrc;            /* temp receive CRC */
-       __be32 tcrc;            /* temp transmit CRC */
-} __attribute__ ((packed));
-
-/* General UCC SLOW Mode Register (GUMRH & GUMRL) */
-#define UCC_SLOW_GUMR_H_SAM_QMC                0x00000000
-#define UCC_SLOW_GUMR_H_SAM_SATM       0x00008000
-#define UCC_SLOW_GUMR_H_REVD           0x00002000
-#define UCC_SLOW_GUMR_H_TRX            0x00001000
-#define UCC_SLOW_GUMR_H_TTX            0x00000800
-#define UCC_SLOW_GUMR_H_CDP            0x00000400
-#define UCC_SLOW_GUMR_H_CTSP           0x00000200
-#define UCC_SLOW_GUMR_H_CDS            0x00000100
-#define UCC_SLOW_GUMR_H_CTSS           0x00000080
-#define UCC_SLOW_GUMR_H_TFL            0x00000040
-#define UCC_SLOW_GUMR_H_RFW            0x00000020
-#define UCC_SLOW_GUMR_H_TXSY           0x00000010
-#define UCC_SLOW_GUMR_H_4SYNC          0x00000004
-#define UCC_SLOW_GUMR_H_8SYNC          0x00000008
-#define UCC_SLOW_GUMR_H_16SYNC         0x0000000c
-#define UCC_SLOW_GUMR_H_RTSM           0x00000002
-#define UCC_SLOW_GUMR_H_RSYN           0x00000001
-
-#define UCC_SLOW_GUMR_L_TCI            0x10000000
-#define UCC_SLOW_GUMR_L_RINV           0x02000000
-#define UCC_SLOW_GUMR_L_TINV           0x01000000
-#define UCC_SLOW_GUMR_L_TEND           0x00040000
-#define UCC_SLOW_GUMR_L_TDCR_MASK      0x00030000
-#define UCC_SLOW_GUMR_L_TDCR_32                0x00030000
-#define UCC_SLOW_GUMR_L_TDCR_16                0x00020000
-#define UCC_SLOW_GUMR_L_TDCR_8         0x00010000
-#define UCC_SLOW_GUMR_L_TDCR_1         0x00000000
-#define UCC_SLOW_GUMR_L_RDCR_MASK      0x0000c000
-#define UCC_SLOW_GUMR_L_RDCR_32                0x0000c000
-#define UCC_SLOW_GUMR_L_RDCR_16                0x00008000
-#define UCC_SLOW_GUMR_L_RDCR_8         0x00004000
-#define UCC_SLOW_GUMR_L_RDCR_1         0x00000000
-#define UCC_SLOW_GUMR_L_RENC_NRZI      0x00000800
-#define UCC_SLOW_GUMR_L_RENC_NRZ       0x00000000
-#define UCC_SLOW_GUMR_L_TENC_NRZI      0x00000100
-#define UCC_SLOW_GUMR_L_TENC_NRZ       0x00000000
-#define UCC_SLOW_GUMR_L_DIAG_MASK      0x000000c0
-#define UCC_SLOW_GUMR_L_DIAG_LE                0x000000c0
-#define UCC_SLOW_GUMR_L_DIAG_ECHO      0x00000080
-#define UCC_SLOW_GUMR_L_DIAG_LOOP      0x00000040
-#define UCC_SLOW_GUMR_L_DIAG_NORM      0x00000000
-#define UCC_SLOW_GUMR_L_ENR            0x00000020
-#define UCC_SLOW_GUMR_L_ENT            0x00000010
-#define UCC_SLOW_GUMR_L_MODE_MASK      0x0000000F
-#define UCC_SLOW_GUMR_L_MODE_BISYNC    0x00000008
-#define UCC_SLOW_GUMR_L_MODE_AHDLC     0x00000006
-#define UCC_SLOW_GUMR_L_MODE_UART      0x00000004
-#define UCC_SLOW_GUMR_L_MODE_QMC       0x00000002
-
-/* General UCC FAST Mode Register */
-#define UCC_FAST_GUMR_TCI      0x20000000
-#define UCC_FAST_GUMR_TRX      0x10000000
-#define UCC_FAST_GUMR_TTX      0x08000000
-#define UCC_FAST_GUMR_CDP      0x04000000
-#define UCC_FAST_GUMR_CTSP     0x02000000
-#define UCC_FAST_GUMR_CDS      0x01000000
-#define UCC_FAST_GUMR_CTSS     0x00800000
-#define UCC_FAST_GUMR_TXSY     0x00020000
-#define UCC_FAST_GUMR_RSYN     0x00010000
-#define UCC_FAST_GUMR_RTSM     0x00002000
-#define UCC_FAST_GUMR_REVD     0x00000400
-#define UCC_FAST_GUMR_ENR      0x00000020
-#define UCC_FAST_GUMR_ENT      0x00000010
-
-/* UART Slow UCC Event Register (UCCE) */
-#define UCC_UART_UCCE_AB       0x0200
-#define UCC_UART_UCCE_IDLE     0x0100
-#define UCC_UART_UCCE_GRA      0x0080
-#define UCC_UART_UCCE_BRKE     0x0040
-#define UCC_UART_UCCE_BRKS     0x0020
-#define UCC_UART_UCCE_CCR      0x0008
-#define UCC_UART_UCCE_BSY      0x0004
-#define UCC_UART_UCCE_TX       0x0002
-#define UCC_UART_UCCE_RX       0x0001
-
-/* HDLC Slow UCC Event Register (UCCE) */
-#define UCC_HDLC_UCCE_GLR      0x1000
-#define UCC_HDLC_UCCE_GLT      0x0800
-#define UCC_HDLC_UCCE_IDLE     0x0100
-#define UCC_HDLC_UCCE_BRKE     0x0040
-#define UCC_HDLC_UCCE_BRKS     0x0020
-#define UCC_HDLC_UCCE_TXE      0x0010
-#define UCC_HDLC_UCCE_RXF      0x0008
-#define UCC_HDLC_UCCE_BSY      0x0004
-#define UCC_HDLC_UCCE_TXB      0x0002
-#define UCC_HDLC_UCCE_RXB      0x0001
-
-/* BISYNC Slow UCC Event Register (UCCE) */
-#define UCC_BISYNC_UCCE_GRA    0x0080
-#define UCC_BISYNC_UCCE_TXE    0x0010
-#define UCC_BISYNC_UCCE_RCH    0x0008
-#define UCC_BISYNC_UCCE_BSY    0x0004
-#define UCC_BISYNC_UCCE_TXB    0x0002
-#define UCC_BISYNC_UCCE_RXB    0x0001
-
-/* Gigabit Ethernet Fast UCC Event Register (UCCE) */
-#define UCC_GETH_UCCE_MPD       0x80000000
-#define UCC_GETH_UCCE_SCAR      0x40000000
-#define UCC_GETH_UCCE_GRA       0x20000000
-#define UCC_GETH_UCCE_CBPR      0x10000000
-#define UCC_GETH_UCCE_BSY       0x08000000
-#define UCC_GETH_UCCE_RXC       0x04000000
-#define UCC_GETH_UCCE_TXC       0x02000000
-#define UCC_GETH_UCCE_TXE       0x01000000
-#define UCC_GETH_UCCE_TXB7      0x00800000
-#define UCC_GETH_UCCE_TXB6      0x00400000
-#define UCC_GETH_UCCE_TXB5      0x00200000
-#define UCC_GETH_UCCE_TXB4      0x00100000
-#define UCC_GETH_UCCE_TXB3      0x00080000
-#define UCC_GETH_UCCE_TXB2      0x00040000
-#define UCC_GETH_UCCE_TXB1      0x00020000
-#define UCC_GETH_UCCE_TXB0      0x00010000
-#define UCC_GETH_UCCE_RXB7      0x00008000
-#define UCC_GETH_UCCE_RXB6      0x00004000
-#define UCC_GETH_UCCE_RXB5      0x00002000
-#define UCC_GETH_UCCE_RXB4      0x00001000
-#define UCC_GETH_UCCE_RXB3      0x00000800
-#define UCC_GETH_UCCE_RXB2      0x00000400
-#define UCC_GETH_UCCE_RXB1      0x00000200
-#define UCC_GETH_UCCE_RXB0      0x00000100
-#define UCC_GETH_UCCE_RXF7      0x00000080
-#define UCC_GETH_UCCE_RXF6      0x00000040
-#define UCC_GETH_UCCE_RXF5      0x00000020
-#define UCC_GETH_UCCE_RXF4      0x00000010
-#define UCC_GETH_UCCE_RXF3      0x00000008
-#define UCC_GETH_UCCE_RXF2      0x00000004
-#define UCC_GETH_UCCE_RXF1      0x00000002
-#define UCC_GETH_UCCE_RXF0      0x00000001
-
-/* UCC Protocol Specific Mode Register (UPSMR), when used for UART */
-#define UCC_UART_UPSMR_FLC             0x8000
-#define UCC_UART_UPSMR_SL              0x4000
-#define UCC_UART_UPSMR_CL_MASK         0x3000
-#define UCC_UART_UPSMR_CL_8            0x3000
-#define UCC_UART_UPSMR_CL_7            0x2000
-#define UCC_UART_UPSMR_CL_6            0x1000
-#define UCC_UART_UPSMR_CL_5            0x0000
-#define UCC_UART_UPSMR_UM_MASK         0x0c00
-#define UCC_UART_UPSMR_UM_NORMAL       0x0000
-#define UCC_UART_UPSMR_UM_MAN_MULTI    0x0400
-#define UCC_UART_UPSMR_UM_AUTO_MULTI   0x0c00
-#define UCC_UART_UPSMR_FRZ             0x0200
-#define UCC_UART_UPSMR_RZS             0x0100
-#define UCC_UART_UPSMR_SYN             0x0080
-#define UCC_UART_UPSMR_DRT             0x0040
-#define UCC_UART_UPSMR_PEN             0x0010
-#define UCC_UART_UPSMR_RPM_MASK                0x000c
-#define UCC_UART_UPSMR_RPM_ODD         0x0000
-#define UCC_UART_UPSMR_RPM_LOW         0x0004
-#define UCC_UART_UPSMR_RPM_EVEN                0x0008
-#define UCC_UART_UPSMR_RPM_HIGH                0x000C
-#define UCC_UART_UPSMR_TPM_MASK                0x0003
-#define UCC_UART_UPSMR_TPM_ODD         0x0000
-#define UCC_UART_UPSMR_TPM_LOW         0x0001
-#define UCC_UART_UPSMR_TPM_EVEN                0x0002
-#define UCC_UART_UPSMR_TPM_HIGH                0x0003
-
-/* UCC Protocol Specific Mode Register (UPSMR), when used for Ethernet */
-#define UCC_GETH_UPSMR_FTFE     0x80000000
-#define UCC_GETH_UPSMR_PTPE     0x40000000
-#define UCC_GETH_UPSMR_ECM      0x04000000
-#define UCC_GETH_UPSMR_HSE      0x02000000
-#define UCC_GETH_UPSMR_PRO      0x00400000
-#define UCC_GETH_UPSMR_CAP      0x00200000
-#define UCC_GETH_UPSMR_RSH      0x00100000
-#define UCC_GETH_UPSMR_RPM      0x00080000
-#define UCC_GETH_UPSMR_R10M     0x00040000
-#define UCC_GETH_UPSMR_RLPB     0x00020000
-#define UCC_GETH_UPSMR_TBIM     0x00010000
-#define UCC_GETH_UPSMR_RES1     0x00002000
-#define UCC_GETH_UPSMR_RMM      0x00001000
-#define UCC_GETH_UPSMR_CAM      0x00000400
-#define UCC_GETH_UPSMR_BRO      0x00000200
-#define UCC_GETH_UPSMR_SMM     0x00000080
-#define UCC_GETH_UPSMR_SGMM    0x00000020
-
-/* UCC Transmit On Demand Register (UTODR) */
-#define UCC_SLOW_TOD   0x8000
-#define UCC_FAST_TOD   0x8000
-
-/* UCC Bus Mode Register masks */
-/* Not to be confused with the Bundle Mode Register */
-#define UCC_BMR_GBL            0x20
-#define UCC_BMR_BO_BE          0x10
-#define UCC_BMR_CETM           0x04
-#define UCC_BMR_DTB            0x02
-#define UCC_BMR_BDB            0x01
-
-/* Function code masks */
-#define FC_GBL                         0x20
-#define FC_DTB_LCL                     0x02
-#define UCC_FAST_FUNCTION_CODE_GBL     0x20
-#define UCC_FAST_FUNCTION_CODE_DTB_LCL 0x02
-#define UCC_FAST_FUNCTION_CODE_BDB_LCL 0x01
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_QE_H */
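
struct qe_firmware above fixes the image layout that qe_upload_firmware() consumes: a header with a "QEF" magic and total length, `count` microcode descriptors, then the blobs and a trailing CRC32. A hedged sketch of the header sanity checks a caller might run before uploading; qe_fw_sane() is illustrative, not a kernel API:

	/* Illustrative only: validate an image against the documented layout. */
	static bool qe_fw_sane(const struct qe_firmware *fw, size_t size)
	{
		if (size < sizeof(struct qe_header))
			return false;
		if (fw->header.magic[0] != 'Q' || fw->header.magic[1] != 'E' ||
		    fw->header.magic[2] != 'F')
			return false;
		/* length covers the whole structure, including the trailing CRC */
		return be32_to_cpu(fw->header.length) <= size;
	}

The real qe_upload_firmware() performs similar validation, including the CRC, before touching I-RAM.
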
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
deleted file mode 100644
index 1e155ca..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- *
- * Description:
- * QE IC external definitions and structure.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#ifndef _ASM_POWERPC_QE_IC_H
-#define _ASM_POWERPC_QE_IC_H
-
-#include <linux/irq.h>
-
-struct device_node;
-struct qe_ic;
-
-#define NUM_OF_QE_IC_GROUPS    6
-
-/* Flags when we init the QE IC */
-#define QE_IC_SPREADMODE_GRP_W                 0x00000001
-#define QE_IC_SPREADMODE_GRP_X                 0x00000002
-#define QE_IC_SPREADMODE_GRP_Y                 0x00000004
-#define QE_IC_SPREADMODE_GRP_Z                 0x00000008
-#define QE_IC_SPREADMODE_GRP_RISCA             0x00000010
-#define QE_IC_SPREADMODE_GRP_RISCB             0x00000020
-
-#define QE_IC_LOW_SIGNAL                       0x00000100
-#define QE_IC_HIGH_SIGNAL                      0x00000200
-
-#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH      0x00001000
-#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH      0x00002000
-#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH      0x00004000
-#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH      0x00008000
-#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH      0x00010000
-#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH      0x00020000
-#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH      0x00040000
-#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH      0x00080000
-#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH  0x00100000
-#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH  0x00200000
-#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH  0x00400000
-#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH  0x00800000
-#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT          (12)
-
-/* QE interrupt sources groups */
-enum qe_ic_grp_id {
-       QE_IC_GRP_W = 0,        /* QE interrupt controller group W */
-       QE_IC_GRP_X,            /* QE interrupt controller group X */
-       QE_IC_GRP_Y,            /* QE interrupt controller group Y */
-       QE_IC_GRP_Z,            /* QE interrupt controller group Z */
-       QE_IC_GRP_RISCA,        /* QE interrupt controller RISC group A */
-       QE_IC_GRP_RISCB         /* QE interrupt controller RISC group B */
-};
-
-#ifdef CONFIG_QUICC_ENGINE
-void qe_ic_init(struct device_node *node, unsigned int flags,
-               void (*low_handler)(struct irq_desc *desc),
-               void (*high_handler)(struct irq_desc *desc));
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
-#else
-static inline void qe_ic_init(struct device_node *node, unsigned int flags,
-               void (*low_handler)(struct irq_desc *desc),
-               void (*high_handler)(struct irq_desc *desc))
-{}
-static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
-{ return 0; }
-static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
-{ return 0; }
-#endif /* CONFIG_QUICC_ENGINE */
-
-void qe_ic_set_highest_priority(unsigned int virq, int high);
-int qe_ic_set_priority(unsigned int virq, unsigned int priority);
-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
-
-static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
-{
-       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-       unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-
-       if (cascade_irq != NO_IRQ)
-               generic_handle_irq(cascade_irq);
-}
-
-static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
-{
-       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-       unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-
-       if (cascade_irq != NO_IRQ)
-               generic_handle_irq(cascade_irq);
-}
-
-static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
-{
-       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-       unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-       struct irq_chip *chip = irq_desc_get_chip(desc);
-
-       if (cascade_irq != NO_IRQ)
-               generic_handle_irq(cascade_irq);
-
-       chip->irq_eoi(&desc->irq_data);
-}
-
-static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
-{
-       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-       unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-       struct irq_chip *chip = irq_desc_get_chip(desc);
-
-       if (cascade_irq != NO_IRQ)
-               generic_handle_irq(cascade_irq);
-
-       chip->irq_eoi(&desc->irq_data);
-}
-
-static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
-{
-       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
-       unsigned int cascade_irq;
-       struct irq_chip *chip = irq_desc_get_chip(desc);
-
-       cascade_irq = qe_ic_get_high_irq(qe_ic);
-       if (cascade_irq == NO_IRQ)
-               cascade_irq = qe_ic_get_low_irq(qe_ic);
-
-       if (cascade_irq != NO_IRQ)
-               generic_handle_irq(cascade_irq);
-
-       chip->irq_eoi(&desc->irq_data);
-}
-
-#endif /* _ASM_POWERPC_QE_IC_H */
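
The cascade helpers above are intended to be handed to qe_ic_init() from platform setup code as the low/high handlers. A sketch of the typical wiring on an MPIC-based board (node lookup omitted; np would be the QE IC device tree node):

	/* Sketch: platform setup wiring the QE IC cascade handlers. */
	static void __init sketch_init_qe_ic(struct device_node *np)
	{
		qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
			   qe_ic_cascade_high_mpic);
	}
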
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 2220f7a..c4cb2ff 100644
 #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
                                     : : "r" (v) : "memory")
 #define mtmsr(v)       __mtmsrd((v), 0)
+#define __MTMSR                "mtmsrd"
 #else
 #define mtmsr(v)       asm volatile("mtmsr %0" : \
                                     : "r" ((unsigned long)(v)) \
                                     : "memory")
+#define __MTMSR                "mtmsr"
 #endif
 
+static inline void mtmsr_isync(unsigned long val)
+{
+       asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : :
+                       "r" (val), "i" (CPU_FTR_ARCH_206) : "memory");
+}
+
 #define mfspr(rn)      ({unsigned long rval; \
                        asm volatile("mfspr %0," __stringify(rn) \
                                : "=r" (rval)); rval;})
                                     : "r" ((unsigned long)(v)) \
                                     : "memory")
 
+extern void msr_check_and_set(unsigned long bits);
+extern bool strict_msr_control;
+extern void __msr_check_and_clear(unsigned long bits);
+static inline void msr_check_and_clear(unsigned long bits)
+{
+       if (strict_msr_control)
+               __msr_check_and_clear(bits);
+}
+
 static inline unsigned long mfvtb (void)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
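
msr_check_and_set()/msr_check_and_clear() are the new primitives behind the enable_kernel_*()/disable_kernel_*() helpers (see the switch_to.h hunk below): set turns a facility bit on in the MSR, and clear drops it again only when strict_msr_control is enabled, keeping the common path cheap. A minimal sketch of the pairing:

	/* Sketch: bracketing a kernel FP section with the new helpers. */
	static void sketch_kernel_fp_section(void)
	{
		msr_check_and_set(MSR_FP);	/* enable FP in the MSR */
		/* ... kernel code may use FP registers here ... */
		msr_check_and_clear(MSR_FP);	/* skipped at runtime unless
						   strict_msr_control is set */
	}
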
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index b77ef36..51400ba 100644
@@ -334,10 +334,11 @@ extern void (*rtas_flash_term_hook)(int);
 
 extern struct rtas_t rtas;
 
-extern void enter_rtas(unsigned long);
 extern int rtas_token(const char *service);
 extern int rtas_service_present(const char *service);
 extern int rtas_call(int token, int, int, int *, ...);
+void rtas_call_unlocked(struct rtas_args *args, int token, int nargs,
+                       int nret, ...);
 extern void rtas_restart(char *cmd);
 extern void rtas_power_off(void);
 extern void rtas_halt(void);
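
rtas_call_unlocked() is a new variant of rtas_call() for callers that must avoid the global RTAS lock and supply their own rtas_args. A hedged sketch; the token and input parameter are illustrative:

	/* Sketch: one input, one return; the status lands in args.rets[0]. */
	static int sketch_rtas_query(int token, u32 param)
	{
		struct rtas_args args;

		rtas_call_unlocked(&args, token, 1, 1, param);
		return be32_to_cpu(args.rets[0]);
	}
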
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 15cca17..5b268b6 100644
@@ -4,6 +4,8 @@
 #ifndef _ASM_POWERPC_SWITCH_TO_H
 #define _ASM_POWERPC_SWITCH_TO_H
 
+#include <asm/reg.h>
+
 struct thread_struct;
 struct task_struct;
 struct pt_regs;
@@ -12,74 +14,59 @@ extern struct task_struct *__switch_to(struct task_struct *,
        struct task_struct *);
 #define switch_to(prev, next, last)    ((last) = __switch_to((prev), (next)))
 
-struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);
-#ifdef CONFIG_PPC_BOOK3S_64
-static inline void save_early_sprs(struct thread_struct *prev)
-{
-       if (cpu_has_feature(CPU_FTR_ARCH_207S))
-               prev->tar = mfspr(SPRN_TAR);
-       if (cpu_has_feature(CPU_FTR_DSCR))
-               prev->dscr = mfspr(SPRN_DSCR);
-}
-#else
-static inline void save_early_sprs(struct thread_struct *prev) {}
-#endif
 
-extern void enable_kernel_fp(void);
-extern void enable_kernel_altivec(void);
-extern void enable_kernel_vsx(void);
-extern int emulate_altivec(struct pt_regs *);
-extern void __giveup_vsx(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void enable_kernel_spe(void);
-extern void giveup_spe(struct task_struct *);
-extern void load_up_spe(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
-#ifndef CONFIG_SMP
-extern void discard_lazy_cpu_state(void);
-#else
-static inline void discard_lazy_cpu_state(void)
-{
-}
-#endif
+extern int emulate_altivec(struct pt_regs *);
+
+extern void flush_all_to_thread(struct task_struct *);
+extern void giveup_all(struct task_struct *);
 
 #ifdef CONFIG_PPC_FPU
+extern void enable_kernel_fp(void);
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
+extern void __giveup_fpu(struct task_struct *);
+static inline void disable_kernel_fp(void)
+{
+       msr_check_and_clear(MSR_FP);
+}
 #else
 static inline void flush_fp_to_thread(struct task_struct *t) { }
-static inline void giveup_fpu(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_ALTIVEC
+extern void enable_kernel_altivec(void);
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
-extern void giveup_altivec_notask(void);
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-static inline void giveup_altivec(struct task_struct *t)
+extern void __giveup_altivec(struct task_struct *);
+static inline void disable_kernel_altivec(void)
 {
+       msr_check_and_clear(MSR_VEC);
 }
 #endif
 
 #ifdef CONFIG_VSX
+extern void enable_kernel_vsx(void);
 extern void flush_vsx_to_thread(struct task_struct *);
-#else
-static inline void flush_vsx_to_thread(struct task_struct *t)
+extern void giveup_vsx(struct task_struct *);
+extern void __giveup_vsx(struct task_struct *);
+static inline void disable_kernel_vsx(void)
 {
+       msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
 #endif
 
 #ifdef CONFIG_SPE
+extern void enable_kernel_spe(void);
 extern void flush_spe_to_thread(struct task_struct *);
-#else
-static inline void flush_spe_to_thread(struct task_struct *t)
+extern void giveup_spe(struct task_struct *);
+extern void __giveup_spe(struct task_struct *);
+static inline void disable_kernel_spe(void)
 {
+       msr_check_and_clear(MSR_SPE);
 }
 #endif
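
The reshuffle above pairs every enable_kernel_*() with a new disable_kernel_*() built on msr_check_and_clear(), so users of kernel FP/VMX/VSX/SPE now mark both ends of the section. A sketch of the resulting usage contract:

	/* Sketch: the new begin/end discipline for kernel FP use. */
	static void sketch_fp_user(void)
	{
		preempt_disable();		/* FP state is per-CPU while live */
		enable_kernel_fp();
		/* ... use FP/VSX registers for a bounded stretch ... */
		disable_kernel_fp();		/* new: explicitly end the section */
		preempt_enable();
	}
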
 
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index e682a71..c508686 100644
@@ -44,7 +44,7 @@ static inline void isync(void)
        MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
 #define PPC_ACQUIRE_BARRIER     "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
 #define PPC_RELEASE_BARRIER     stringify_in_c(LWSYNC) "\n"
-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
 #define PPC_ATOMIC_EXIT_BARRIER         "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
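
This small change is the "make value-returning atomics fully ordered" fix from the merge summary: lwsync orders everything except store->load, so with lwsync on entry a store before an atomic could be reordered past the load inside it. Using sync on both sides restores full ordering. A sketch of where the two barriers land once expanded into an atomic_add_return()-style sequence:

	/* Sketch: both barriers are now full syncs. */
	static int sketch_add_return(int a, atomic_t *v)
	{
		int t;

		__asm__ __volatile__(
	"	sync			\n"	/* PPC_ATOMIC_ENTRY_BARRIER (was lwsync) */
	"1:	lwarx	%0,0,%2		\n"	/* load-reserve the counter */
	"	add	%0,%1,%0	\n"
	"	stwcx.	%0,0,%2		\n"	/* store-conditional */
	"	bne-	1b		\n"	/* lost the reservation, retry */
	"	sync			\n"	/* PPC_ATOMIC_EXIT_BARRIER */
		: "=&r" (t)
		: "r" (a), "r" (&v->counter)
		: "cc", "memory");

		return t;
	}
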
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 10fc784..2d7109a 100644
@@ -27,7 +27,6 @@ extern struct clock_event_device decrementer_clockevent;
 
 struct rtc_time;
 extern void to_tm(int tim, struct rtc_time * tm);
-extern void GregorianDay(struct rtc_time *tm);
 extern void tick_broadcast_ipi_handler(void);
 
 extern void generic_calibrate_decr(void);
diff --git a/arch/powerpc/include/asm/ucc.h b/arch/powerpc/include/asm/ucc.h
deleted file mode 100644
index 6927ac2..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- *
- * Description:
- * Internal header file for UCC unit routines.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#ifndef __UCC_H__
-#define __UCC_H__
-
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-#define STATISTICS
-
-#define UCC_MAX_NUM    8
-
-/* Slow or fast type for UCCs.
-*/
-enum ucc_speed_type {
-       UCC_SPEED_TYPE_FAST = UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX,
-       UCC_SPEED_TYPE_SLOW = UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX
-};
-
-/* ucc_set_type
- * Sets UCC to slow or fast mode.
- *
- * ucc_num - (In) number of UCC (0-7).
- * speed   - (In) slow or fast mode for UCC.
- */
-int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed);
-
-int ucc_set_qe_mux_mii_mng(unsigned int ucc_num);
-
-int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
-       enum comm_dir mode);
-
-int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask);
-
-/* QE MUX clock routing for UCC
-*/
-static inline int ucc_set_qe_mux_grant(unsigned int ucc_num, int set)
-{
-       return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_GRANT);
-}
-
-static inline int ucc_set_qe_mux_tsa(unsigned int ucc_num, int set)
-{
-       return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_TSA);
-}
-
-static inline int ucc_set_qe_mux_bkpt(unsigned int ucc_num, int set)
-{
-       return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_BKPT);
-}
-
-#endif                         /* __UCC_H__ */
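
Per the parameter notes in this header, ucc_num is 0-based. A one-line usage sketch:

	/* Sketch: put UCC1 (ucc_num 0) into fast mode. */
	static int sketch_ucc1_fast(void)
	{
		return ucc_set_type(0, UCC_SPEED_TYPE_FAST);
	}
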
diff --git a/arch/powerpc/include/asm/ucc_fast.h b/arch/powerpc/include/asm/ucc_fast.h
deleted file mode 100644
index 72ea9ba..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Internal header file for UCC FAST unit routines.
- *
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#ifndef __UCC_FAST_H__
-#define __UCC_FAST_H__
-
-#include <linux/kernel.h>
-
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-#include <asm/ucc.h>
-
-/* Receive BD's status */
-#define R_E    0x80000000      /* buffer empty */
-#define R_W    0x20000000      /* wrap bit */
-#define R_I    0x10000000      /* interrupt on reception */
-#define R_L    0x08000000      /* last */
-#define R_F    0x04000000      /* first */
-
-/* transmit BD's status */
-#define T_R    0x80000000      /* ready bit */
-#define T_W    0x20000000      /* wrap bit */
-#define T_I    0x10000000      /* interrupt on completion */
-#define T_L    0x08000000      /* last */
-
-/* Rx Data buffer must be 4 bytes aligned in most cases */
-#define UCC_FAST_RX_ALIGN                      4
-#define UCC_FAST_MRBLR_ALIGNMENT               4
-#define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT      8
-
-/* Sizes */
-#define UCC_FAST_URFS_MIN_VAL                          0x88
-#define UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR        8
-
-/* ucc_fast_channel_protocol_mode - UCC FAST mode */
-enum ucc_fast_channel_protocol_mode {
-       UCC_FAST_PROTOCOL_MODE_HDLC = 0x00000000,
-       UCC_FAST_PROTOCOL_MODE_RESERVED01 = 0x00000001,
-       UCC_FAST_PROTOCOL_MODE_RESERVED_QMC = 0x00000002,
-       UCC_FAST_PROTOCOL_MODE_RESERVED02 = 0x00000003,
-       UCC_FAST_PROTOCOL_MODE_RESERVED_UART = 0x00000004,
-       UCC_FAST_PROTOCOL_MODE_RESERVED03 = 0x00000005,
-       UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_1 = 0x00000006,
-       UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_2 = 0x00000007,
-       UCC_FAST_PROTOCOL_MODE_RESERVED_BISYNC = 0x00000008,
-       UCC_FAST_PROTOCOL_MODE_RESERVED04 = 0x00000009,
-       UCC_FAST_PROTOCOL_MODE_ATM = 0x0000000A,
-       UCC_FAST_PROTOCOL_MODE_RESERVED05 = 0x0000000B,
-       UCC_FAST_PROTOCOL_MODE_ETHERNET = 0x0000000C,
-       UCC_FAST_PROTOCOL_MODE_RESERVED06 = 0x0000000D,
-       UCC_FAST_PROTOCOL_MODE_POS = 0x0000000E,
-       UCC_FAST_PROTOCOL_MODE_RESERVED07 = 0x0000000F
-};
-
-/* ucc_fast_transparent_txrx - UCC Fast Transparent TX & RX */
-enum ucc_fast_transparent_txrx {
-       UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL = 0x00000000,
-       UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_TRANSPARENT = 0x18000000
-};
-
-/* UCC fast diagnostic mode */
-enum ucc_fast_diag_mode {
-       UCC_FAST_DIAGNOSTIC_NORMAL = 0x0,
-       UCC_FAST_DIAGNOSTIC_LOCAL_LOOP_BACK = 0x40000000,
-       UCC_FAST_DIAGNOSTIC_AUTO_ECHO = 0x80000000,
-       UCC_FAST_DIAGNOSTIC_LOOP_BACK_AND_ECHO = 0xC0000000
-};
-
-/* UCC fast Sync length (transparent mode only) */
-enum ucc_fast_sync_len {
-       UCC_FAST_SYNC_LEN_NOT_USED = 0x0,
-       UCC_FAST_SYNC_LEN_AUTOMATIC = 0x00004000,
-       UCC_FAST_SYNC_LEN_8_BIT = 0x00008000,
-       UCC_FAST_SYNC_LEN_16_BIT = 0x0000C000
-};
-
-/* UCC fast RTS mode */
-enum ucc_fast_ready_to_send {
-       UCC_FAST_SEND_IDLES_BETWEEN_FRAMES = 0x00000000,
-       UCC_FAST_SEND_FLAGS_BETWEEN_FRAMES = 0x00002000
-};
-
-/* UCC fast receiver decoding mode */
-enum ucc_fast_rx_decoding_method {
-       UCC_FAST_RX_ENCODING_NRZ = 0x00000000,
-       UCC_FAST_RX_ENCODING_NRZI = 0x00000800,
-       UCC_FAST_RX_ENCODING_RESERVED0 = 0x00001000,
-       UCC_FAST_RX_ENCODING_RESERVED1 = 0x00001800
-};
-
-/* UCC fast transmitter encoding mode */
-enum ucc_fast_tx_encoding_method {
-       UCC_FAST_TX_ENCODING_NRZ = 0x00000000,
-       UCC_FAST_TX_ENCODING_NRZI = 0x00000100,
-       UCC_FAST_TX_ENCODING_RESERVED0 = 0x00000200,
-       UCC_FAST_TX_ENCODING_RESERVED1 = 0x00000300
-};
-
-/* UCC fast CRC length */
-enum ucc_fast_transparent_tcrc {
-       UCC_FAST_16_BIT_CRC = 0x00000000,
-       UCC_FAST_CRC_RESERVED0 = 0x00000040,
-       UCC_FAST_32_BIT_CRC = 0x00000080,
-       UCC_FAST_CRC_RESERVED1 = 0x000000C0
-};
-
-/* Fast UCC initialization structure */
-struct ucc_fast_info {
-       int ucc_num;
-       enum qe_clock rx_clock;
-       enum qe_clock tx_clock;
-       u32 regs;
-       int irq;
-       u32 uccm_mask;
-       int bd_mem_part;
-       int brkpt_support;
-       int grant_support;
-       int tsa;
-       int cdp;
-       int cds;
-       int ctsp;
-       int ctss;
-       int tci;
-       int txsy;
-       int rtsm;
-       int revd;
-       int rsyn;
-       u16 max_rx_buf_length;
-       u16 urfs;
-       u16 urfet;
-       u16 urfset;
-       u16 utfs;
-       u16 utfet;
-       u16 utftt;
-       u16 ufpt;
-       enum ucc_fast_channel_protocol_mode mode;
-       enum ucc_fast_transparent_txrx ttx_trx;
-       enum ucc_fast_tx_encoding_method tenc;
-       enum ucc_fast_rx_decoding_method renc;
-       enum ucc_fast_transparent_tcrc tcrc;
-       enum ucc_fast_sync_len synl;
-};
-
-struct ucc_fast_private {
-       struct ucc_fast_info *uf_info;
-       struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */
-       u32 __iomem *p_ucce;    /* a pointer to the event register in memory. */
-       u32 __iomem *p_uccm;    /* a pointer to the mask register in memory. */
-#ifdef CONFIG_UGETH_TX_ON_DEMAND
-       u16 __iomem *p_utodr;   /* pointer to the transmit on demand register */
-#endif
-       int enabled_tx;         /* Whether channel is enabled for Tx (ENT) */
-       int enabled_rx;         /* Whether channel is enabled for Rx (ENR) */
-       int stopped_tx;         /* Whether channel has been stopped for Tx
-                                  (STOP_TX, etc.) */
-       int stopped_rx;         /* Whether channel has been stopped for Rx */
-       u32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx
-                                                   virtual fifo */
-       u32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx
-                                                   virtual fifo */
-#ifdef STATISTICS
-       u32 tx_frames;          /* Transmitted frames counter. */
-       u32 rx_frames;          /* Received frames counter (only frames
-                                  passed to application). */
-       u32 tx_discarded;       /* Discarded tx frames counter (frames that
-                                  were discarded by the driver due to errors).
-                                  */
-       u32 rx_discarded;       /* Discarded rx frames counter (frames that
-                                  were discarded by the driver due to errors).
-                                  */
-#endif                         /* STATISTICS */
-       u16 mrblr;              /* maximum receive buffer length */
-};
-
-/* ucc_fast_init
- * Initializes Fast UCC according to user provided parameters.
- *
- * uf_info  - (In) pointer to the fast UCC info structure.
- * uccf_ret - (Out) pointer to the fast UCC structure.
- */
-int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret);
-
-/* ucc_fast_free
- * Frees all resources for fast UCC.
- *
- * uccf - (In) pointer to the fast UCC structure.
- */
-void ucc_fast_free(struct ucc_fast_private * uccf);
-
-/* ucc_fast_enable
- * Enables a fast UCC port.
- * This routine enables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccf - (In) pointer to the fast UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode);
-
-/* ucc_fast_disable
- * Disables a fast UCC port.
- * This routine disables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccf - (In) pointer to the fast UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode);
-
-/* ucc_fast_irq
- * Handles interrupts on fast UCC.
- * Called from the general interrupt routine to handle interrupts on fast UCC.
- *
- * uccf - (In) pointer to the fast UCC structure.
- */
-void ucc_fast_irq(struct ucc_fast_private * uccf);
-
-/* ucc_fast_transmit_on_demand
- * Immediately forces a poll of the transmitter for data to be sent.
- * Typically, the hardware performs a periodic poll for data that the
- * transmit routine has set up to be transmitted. In cases where
- * this polling cycle is not soon enough, this optional routine can
- * be invoked to force a poll right away, instead. Proper use for
- * each transmission for which this functionality is desired is to
- * call the transmit routine and then this routine right after.
- *
- * uccf - (In) pointer to the fast UCC structure.
- */
-void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf);
-
-u32 ucc_fast_get_qe_cr_subblock(int uccf_num);
-
-void ucc_fast_dump_regs(struct ucc_fast_private * uccf);
-
-#endif                         /* __UCC_FAST_H__ */
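
ucc_fast_transmit_on_demand() above is documented as "call the transmit routine, then this routine right after". A sketch of that ordering; sketch_queue_frame() is a made-up stand-in for the driver's Tx BD setup:

	/* Sketch: force an immediate Tx poll instead of waiting for the
	 * periodic one.
	 */
	static void sketch_xmit(struct ucc_fast_private *uccf, struct sk_buff *skb)
	{
		sketch_queue_frame(uccf, skb);		/* fill the BD, set T_R */
		ucc_fast_transmit_on_demand(uccf);
	}
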
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
deleted file mode 100644
index 233ef5f..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- *
- * Description:
- * Internal header file for UCC SLOW unit routines.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#ifndef __UCC_SLOW_H__
-#define __UCC_SLOW_H__
-
-#include <linux/kernel.h>
-
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-#include <asm/ucc.h>
-
-/* transmit BD's status */
-#define T_R    0x80000000      /* ready bit */
-#define T_PAD  0x40000000      /* add pads to short frames */
-#define T_W    0x20000000      /* wrap bit */
-#define T_I    0x10000000      /* interrupt on completion */
-#define T_L    0x08000000      /* last */
-
-#define T_A    0x04000000      /* Address - the data transmitted as address
-                                  chars */
-#define T_TC   0x04000000      /* transmit CRC */
-#define T_CM   0x02000000      /* continuous mode */
-#define T_DEF  0x02000000      /* collision on previous attempt to transmit */
-#define T_P    0x01000000      /* Preamble - send Preamble sequence before
-                                  data */
-#define T_HB   0x01000000      /* heartbeat */
-#define T_NS   0x00800000      /* No Stop */
-#define T_LC   0x00800000      /* late collision */
-#define T_RL   0x00400000      /* retransmission limit */
-#define T_UN   0x00020000      /* underrun */
-#define T_CT   0x00010000      /* CTS lost */
-#define T_CSL  0x00010000      /* carrier sense lost */
-#define T_RC   0x003c0000      /* retry count */
-
-/* Receive BD's status */
-#define R_E    0x80000000      /* buffer empty */
-#define R_W    0x20000000      /* wrap bit */
-#define R_I    0x10000000      /* interrupt on reception */
-#define R_L    0x08000000      /* last */
-#define R_C    0x08000000      /* the last byte in this buffer is a cntl
-                                  char */
-#define R_F    0x04000000      /* first */
-#define R_A    0x04000000      /* the first byte in this buffer is address
-                                  byte */
-#define R_CM   0x02000000      /* continuous mode */
-#define R_ID   0x01000000      /* buffer close on reception of idles */
-#define R_M    0x01000000      /* Frame received because of promiscuous
-                                  mode */
-#define R_AM   0x00800000      /* Address match */
-#define R_DE   0x00800000      /* Address match */
-#define R_LG   0x00200000      /* Frame length violation */
-#define R_BR   0x00200000      /* Break received */
-#define R_NO   0x00100000      /* Rx Non Octet Aligned Packet */
-#define R_FR   0x00100000      /* Framing Error (no stop bit) character
-                                  received */
-#define R_PR   0x00080000      /* Parity Error character received */
-#define R_AB   0x00080000      /* Frame Aborted */
-#define R_SH   0x00080000      /* frame is too short */
-#define R_CR   0x00040000      /* CRC Error */
-#define R_OV   0x00020000      /* Overrun */
-#define R_CD   0x00010000      /* CD lost */
-#define R_CL   0x00010000      /* this frame is closed because of a
-                                  collision */
-
-/* Rx data buffer must be 4-byte aligned in most cases. */
-#define UCC_SLOW_RX_ALIGN              4
-#define UCC_SLOW_MRBLR_ALIGNMENT       4
-#define UCC_SLOW_PRAM_SIZE             0x100
-#define ALIGNMENT_OF_UCC_SLOW_PRAM     64
-
-/* UCC Slow Channel Protocol Mode */
-enum ucc_slow_channel_protocol_mode {
-       UCC_SLOW_CHANNEL_PROTOCOL_MODE_QMC = 0x00000002,
-       UCC_SLOW_CHANNEL_PROTOCOL_MODE_UART = 0x00000004,
-       UCC_SLOW_CHANNEL_PROTOCOL_MODE_BISYNC = 0x00000008,
-};
-
-/* UCC Slow Transparent Transmit CRC (TCRC) */
-enum ucc_slow_transparent_tcrc {
-       /* 16-bit CCITT CRC (HDLC).  (X16 + X12 + X5 + 1) */
-       UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC16 = 0x00000000,
-       /* CRC16 (BISYNC).  (X16 + X15 + X2 + 1) */
-       UCC_SLOW_TRANSPARENT_TCRC_CRC16 = 0x00004000,
-       /* 32-bit CCITT CRC (Ethernet and HDLC) */
-       UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC32 = 0x00008000,
-};
-
-/* UCC Slow oversampling rate for transmitter (TDCR) */
-enum ucc_slow_tx_oversampling_rate {
-       /* 1x clock mode */
-       UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_1 = 0x00000000,
-       /* 8x clock mode */
-       UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_8 = 0x00010000,
-       /* 16x clock mode */
-       UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_16 = 0x00020000,
-       /* 32x clock mode */
-       UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_32 = 0x00030000,
-};
-
-/* UCC Slow Oversampling rate for receiver (RDCR) */
-enum ucc_slow_rx_oversampling_rate {
-       /* 1x clock mode */
-       UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_1 = 0x00000000,
-       /* 8x clock mode */
-       UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_8 = 0x00004000,
-       /* 16x clock mode */
-       UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_16 = 0x00008000,
-       /* 32x clock mode */
-       UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_32 = 0x0000c000,
-};
-
-/* UCC Slow Transmitter encoding method (TENC) */
-enum ucc_slow_tx_encoding_method {
-       UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZ = 0x00000000,
-       UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZI = 0x00000100
-};
-
-/* UCC Slow Receiver decoding method (RENC) */
-enum ucc_slow_rx_decoding_method {
-       UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZ = 0x00000000,
-       UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZI = 0x00000800
-};
-
-/* UCC Slow Diagnostic mode (DIAG) */
-enum ucc_slow_diag_mode {
-       UCC_SLOW_DIAG_MODE_NORMAL = 0x00000000,
-       UCC_SLOW_DIAG_MODE_LOOPBACK = 0x00000040,
-       UCC_SLOW_DIAG_MODE_ECHO = 0x00000080,
-       UCC_SLOW_DIAG_MODE_LOOPBACK_ECHO = 0x000000c0
-};
-
-struct ucc_slow_info {
-       int ucc_num;
-       int protocol;                   /* QE_CR_PROTOCOL_xxx */
-       enum qe_clock rx_clock;
-       enum qe_clock tx_clock;
-       phys_addr_t regs;
-       int irq;
-       u16 uccm_mask;
-       int data_mem_part;
-       int init_tx;
-       int init_rx;
-       u32 tx_bd_ring_len;
-       u32 rx_bd_ring_len;
-       int rx_interrupts;
-       int brkpt_support;
-       int grant_support;
-       int tsa;
-       int cdp;
-       int cds;
-       int ctsp;
-       int ctss;
-       int rinv;
-       int tinv;
-       int rtsm;
-       int rfw;
-       int tci;
-       int tend;
-       int tfl;
-       int txsy;
-       u16 max_rx_buf_length;
-       enum ucc_slow_transparent_tcrc tcrc;
-       enum ucc_slow_channel_protocol_mode mode;
-       enum ucc_slow_diag_mode diag;
-       enum ucc_slow_tx_oversampling_rate tdcr;
-       enum ucc_slow_rx_oversampling_rate rdcr;
-       enum ucc_slow_tx_encoding_method tenc;
-       enum ucc_slow_rx_decoding_method renc;
-};
-
-struct ucc_slow_private {
-       struct ucc_slow_info *us_info;
-       struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
-       struct ucc_slow_pram *us_pram;  /* a pointer to the parameter RAM */
-       u32 us_pram_offset;
-       int enabled_tx;         /* Whether channel is enabled for Tx (ENT) */
-       int enabled_rx;         /* Whether channel is enabled for Rx (ENR) */
-       int stopped_tx;         /* Whether channel has been stopped for Tx
-                                  (STOP_TX, etc.) */
-       int stopped_rx;         /* Whether channel has been stopped for Rx */
-       struct list_head confQ; /* frames passed to chip waiting for tx */
-       u32 first_tx_bd_mask;   /* mask is used in Tx routine to save status
-                                  and length for first BD in a frame */
-       u32 tx_base_offset;     /* first BD in Tx BD table offset (In MURAM) */
-       u32 rx_base_offset;     /* first BD in Rx BD table offset (In MURAM) */
-       struct qe_bd *confBd;   /* next BD for confirm after Tx */
-       struct qe_bd *tx_bd;    /* next BD for new Tx request */
-       struct qe_bd *rx_bd;    /* next BD to collect after Rx */
-       void *p_rx_frame;       /* accumulating receive frame */
-       u16 *p_ucce;            /* a pointer to the event register in memory.
-                                */
-       u16 *p_uccm;            /* a pointer to the mask register in memory */
-       u16 saved_uccm;         /* a saved mask for the RX Interrupt bits */
-#ifdef STATISTICS
-       u32 tx_frames;          /* Transmitted frames counters */
-       u32 rx_frames;          /* Received frames counters (only frames
-                                  passed to application) */
-       u32 rx_discarded;       /* Discarded frames counters (frames that
-                                  were discarded by the driver due to
-                                  errors) */
-#endif                         /* STATISTICS */
-};
-
-/* ucc_slow_init
- * Initializes Slow UCC according to provided parameters.
- *
- * us_info  - (In) pointer to the slow UCC info structure.
- * uccs_ret - (Out) pointer to the slow UCC structure.
- */
-int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret);
-
-/* ucc_slow_free
- * Frees all resources for slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_free(struct ucc_slow_private * uccs);
-
-/* ucc_slow_enable
- * Enables a slow UCC port.
- * This routine enables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccs - (In) pointer to the slow UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode);
-
-/* ucc_slow_disable
- * Disables a slow UCC port.
- * This routine disables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccs - (In) pointer to the slow UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode);
-
-/* ucc_slow_graceful_stop_tx
- * Smoothly stops transmission on a specified slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs);
-
-/* ucc_slow_stop_tx
- * Stops transmission on a specified slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_stop_tx(struct ucc_slow_private * uccs);
-
-/* ucc_slow_restart_tx
- * Restarts transmitting on a specified slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_restart_tx(struct ucc_slow_private *uccs);
-
-u32 ucc_slow_get_qe_cr_subblock(int uccs_num);
-
-#endif                         /* __UCC_SLOW_H__ */
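
The T_* and R_* masks above describe the 32-bit status/length word at the head of each QE buffer descriptor. A minimal sketch of how a driver consumes them, assuming a flattened BD layout and the usual powerpc MMIO accessors (the struct and function names here are illustrative, not part of the header):

/* Illustrative only: poll a Tx BD and, once the QE has released it
 * (T_R clear), hand it back with ready/interrupt/last set. */
struct qe_bd_like {
	u32 status;	/* T_* / R_* bits live in this word */
	u32 buf;	/* physical buffer address */
};

static int submit_tx(struct qe_bd_like __iomem *bd)
{
	u32 status = in_be32(&bd->status);

	if (status & T_R)
		return -EBUSY;	/* BD still owned by the QE */

	out_be32(&bd->status, status | T_R | T_I | T_L);
	return 0;
}
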
index 4b6b8ac..6a5ace5 100644 (file)
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          379
+#define NR_syscalls            379
 
 #define __NR__exit __NR_exit
-#define NR_syscalls    __NR_syscalls
 
 #ifndef __ASSEMBLY__
 
index b73a819..1afe90a 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/unistd.h>
 #include <linux/time.h>
 
-#define SYSCALL_MAP_SIZE      ((__NR_syscalls + 31) / 32)
+#define SYSCALL_MAP_SIZE      ((NR_syscalls + 31) / 32)
 
 /*
  * So here is the ppc64 backward compatible version
index 4368604..8dde199 100644 (file)
@@ -43,5 +43,7 @@
 #define PPC_FEATURE2_TAR               0x04000000
 #define PPC_FEATURE2_VEC_CRYPTO                0x02000000
 #define PPC_FEATURE2_HTM_NOSC          0x01000000
+#define PPC_FEATURE2_ARCH_3_00         0x00800000 /* ISA 3.00 */
+#define PPC_FEATURE2_HAS_IEEE128       0x00400000 /* VSX IEEE Binary Float 128-bit */
 
 #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
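
Both new bits surface in userspace through the AT_HWCAP2 auxiliary vector, so a program can probe for them with glibc's getauxval(); a minimal, self-contained sketch:

#include <stdio.h>
#include <sys/auxv.h>

/* Mirror the new uapi defines above. */
#define PPC_FEATURE2_ARCH_3_00   0x00800000
#define PPC_FEATURE2_HAS_IEEE128 0x00400000

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("ISA 3.00:        %s\n",
	       (hwcap2 & PPC_FEATURE2_ARCH_3_00) ? "yes" : "no");
	printf("IEEE 128-bit FP: %s\n",
	       (hwcap2 & PPC_FEATURE2_HAS_IEEE128) ? "yes" : "no");
	return 0;
}
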
index 59dad11..c2d21d1 100644 (file)
@@ -295,6 +295,8 @@ do {                                                                        \
 #define R_PPC64_TLSLD          108
 #define R_PPC64_TOCSAVE                109
 
+#define R_PPC64_ENTRY          118
+
 #define R_PPC64_REL16          249
 #define R_PPC64_REL16_LO       250
 #define R_PPC64_REL16_HI       251
index 86150fb..8e7cb8e 100644 (file)
@@ -960,6 +960,7 @@ int fix_alignment(struct pt_regs *regs)
                        preempt_disable();
                        enable_kernel_fp();
                        cvt_df(&data.dd, (float *)&data.x32.low32);
+                       disable_kernel_fp();
                        preempt_enable();
 #else
                        return 0;
@@ -1000,6 +1001,7 @@ int fix_alignment(struct pt_regs *regs)
                preempt_disable();
                enable_kernel_fp();
                cvt_fd((float *)&data.x32.low32, &data.dd);
+               disable_kernel_fp();
                preempt_enable();
 #else
                return 0;
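
Both hunks pair the existing enable_kernel_fp() with the new disable_kernel_fp(), so MSR_FP is dropped as soon as the in-kernel FP use ends. The shape of the pattern, as a kernel-context fragment rather than a standalone program:

	preempt_disable();	/* facility state is per-CPU */
	enable_kernel_fp();	/* sets MSR_FP (and MSR_VSX where present) */
	cvt_df(&data.dd, (float *)&data.x32.low32);	/* ... FP work ... */
	disable_kernel_fp();	/* new: give the facility back immediately */
	preempt_enable();
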
index 221d584..07cebc3 100644 (file)
@@ -185,14 +185,16 @@ int main(void)
        DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
        DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
        DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
-       DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_PPC_BOOK3S
+       DEFINE(PACACONTEXTID, offsetof(struct paca_struct, mm_ctx_id));
 #ifdef CONFIG_PPC_MM_SLICES
        DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
-                                           context.low_slices_psize));
+                                           mm_ctx_low_slices_psize));
        DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
-                                           context.high_slices_psize));
+                                           mm_ctx_high_slices_psize));
        DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
 #endif /* CONFIG_PPC_MM_SLICES */
+#endif
 
 #ifdef CONFIG_PPC_BOOK3E
        DEFINE(PACAPGD, offsetof(struct paca_struct, pgd));
@@ -222,7 +224,7 @@ int main(void)
 #ifdef CONFIG_PPC_MM_SLICES
        DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
 #else
-       DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+       DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, mm_ctx_sllp));
 #endif /* CONFIG_PPC_MM_SLICES */
        DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
        DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
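
asm-offsets.c exists purely to turn offsetof() expressions like these into assembler-visible constants, so low-level code can address paca fields without knowing the C layout. The mechanism in miniature, with a stand-in struct (the real paca_struct layout is not reproduced here):

#include <stddef.h>
#include <stdio.h>

struct paca_like {		/* stand-in, not the real paca_struct */
	long pad[4];
	unsigned long mm_ctx_id;
};

int main(void)
{
	/* The build turns this into "#define PACACONTEXTID <n>" so
	 * assembly can write "ld rX, PACACONTEXTID(r13)". */
	printf("#define PACACONTEXTID %zu\n",
	       offsetof(struct paca_like, mm_ctx_id));
	return 0;
}
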
index a94f155..0d525ce 100644 (file)
@@ -223,7 +223,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
        beq-    1f
        ACCOUNT_CPU_USER_EXIT(r11, r12)
-       HMT_MEDIUM_LOW_HAS_PPR
+
+BEGIN_FTR_SECTION
+       HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
        ld      r13,GPR13(r1)   /* only restore r13 if returning to usermode */
 1:     ld      r2,GPR2(r1)
        ld      r1,GPR1(r1)
@@ -312,7 +316,13 @@ syscall_exit_work:
        subi    r12,r12,TI_FLAGS
 
 4:     /* Anything else left to do? */
-       SET_DEFAULT_THREAD_PPR(r3, r10)         /* Set thread.ppr = 3 */
+BEGIN_FTR_SECTION
+       lis     r3,INIT_PPR@highest     /* Set thread.ppr = 3 */
+       ld      r10,PACACURRENT(r13)
+       sldi    r3,r3,32        /* bits 11-13 are used for ppr */
+       std     r3,TASKTHREADPPR(r10)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
        beq     ret_from_except_lite
 
@@ -452,43 +462,11 @@ _GLOBAL(_switch)
        /* r3-r13 are caller saved -- Cort */
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
-       mflr    r20             /* Return to switch caller */
-       mfmsr   r22
-       li      r0, MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-       oris    r0,r0,MSR_VSX@h /* Disable VSX */
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-       oris    r0,r0,MSR_VEC@h /* Disable altivec */
-       mfspr   r24,SPRN_VRSAVE /* save vrsave register value */
-       std     r24,THREAD_VRSAVE(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-       and.    r0,r0,r22
-       beq+    1f
-       andc    r22,r22,r0
-       MTMSRD(r22)
-       isync
-1:     std     r20,_NIP(r1)
+       std     r0,_NIP(r1)     /* Return to switch caller */
        mfcr    r23
        std     r23,_CCR(r1)
        std     r1,KSP(r3)      /* Set old stack pointer */
 
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
-       /* Event based branch registers */
-       mfspr   r0, SPRN_BESCR
-       std     r0, THREAD_BESCR(r3)
-       mfspr   r0, SPRN_EBBHR
-       std     r0, THREAD_EBBHR(r3)
-       mfspr   r0, SPRN_EBBRR
-       std     r0, THREAD_EBBRR(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
 #ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
@@ -576,47 +554,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        mr      r1,r8           /* start using new stack pointer */
        std     r7,PACAKSAVE(r13)
 
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
-       /* Event based branch registers */
-       ld      r0, THREAD_BESCR(r4)
-       mtspr   SPRN_BESCR, r0
-       ld      r0, THREAD_EBBHR(r4)
-       mtspr   SPRN_EBBHR, r0
-       ld      r0, THREAD_EBBRR(r4)
-       mtspr   SPRN_EBBRR, r0
-
-       ld      r0,THREAD_TAR(r4)
-       mtspr   SPRN_TAR,r0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-       ld      r0,THREAD_VRSAVE(r4)
-       mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_PPC64
-BEGIN_FTR_SECTION
-       lwz     r6,THREAD_DSCR_INHERIT(r4)
-       ld      r0,THREAD_DSCR(r4)
-       cmpwi   r6,0
-       bne     1f
-       ld      r0,PACA_DSCR_DEFAULT(r13)
-1:
-BEGIN_FTR_SECTION_NESTED(70)
-       mfspr   r8, SPRN_FSCR
-       rldimi  r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
-       mtspr   SPRN_FSCR, r8
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
-       cmpd    r0,r25
-       beq     2f
-       mtspr   SPRN_DSCR,r0
-2:
-END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
-#endif
-
        ld      r6,_CCR(r1)
        mtcrf   0xFF,r6
 
index 0a0399c..7716ceb 100644 (file)
@@ -96,7 +96,6 @@ __start_interrupts:
 
        .globl system_reset_pSeries;
 system_reset_pSeries:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
@@ -164,7 +163,6 @@ machine_check_pSeries_1:
         * some code path might still want to branch into the original
         * vector
         */
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)               /* save r13 */
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
@@ -199,7 +197,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
        . = 0x300
        .globl data_access_pSeries
 data_access_pSeries:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
                                 KVMTEST, 0x300)
@@ -207,7 +204,6 @@ data_access_pSeries:
        . = 0x380
        .globl data_access_slb_pSeries
 data_access_slb_pSeries:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXSLB)
        EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
@@ -234,15 +230,14 @@ data_access_slb_pSeries:
        bctr
 #endif
 
-       STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
+       STD_EXCEPTION_PSERIES(0x400, instruction_access)
 
        . = 0x480
        .globl instruction_access_slb_pSeries
 instruction_access_slb_pSeries:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXSLB)
-       EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
+       EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
        std     r3,PACA_EXSLB+EX_R3(r13)
        mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
 #ifdef __DISABLED__
@@ -269,25 +264,24 @@ instruction_access_slb_pSeries:
        .globl hardware_interrupt_hv;
 hardware_interrupt_pSeries:
 hardware_interrupt_hv:
-       HMT_MEDIUM_PPR_DISCARD
        BEGIN_FTR_SECTION
                _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
                                            EXC_HV, SOFTEN_TEST_HV)
                KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
        FTR_SECTION_ELSE
                _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
-                                           EXC_STD, SOFTEN_TEST_HV_201)
+                                           EXC_STD, SOFTEN_TEST_PR)
                KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
        ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 
-       STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+       STD_EXCEPTION_PSERIES(0x600, alignment)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
 
-       STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
+       STD_EXCEPTION_PSERIES(0x700, program_check)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
 
-       STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
+       STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
 
        . = 0x900
        .globl decrementer_pSeries
@@ -297,10 +291,10 @@ decrementer_pSeries:
        STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
 
        MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
 
-       STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
+       STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
 
        . = 0xc00
        .globl  system_call_pSeries
@@ -331,8 +325,8 @@ system_call_pSeries:
        SYSCALL_PSERIES_3
        KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
 
-       STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
+       STD_EXCEPTION_PSERIES(0xd00, single_step)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
 
        /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
         * out of line to handle them
@@ -407,13 +401,12 @@ hv_facility_unavailable_trampoline:
        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
 #endif /* CONFIG_CBE_RAS */
 
-       STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
-       KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+       STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+       KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
 
        . = 0x1500
        .global denorm_exception_hv
 denorm_exception_hv:
-       HMT_MEDIUM_PPR_DISCARD
        mtspr   SPRN_SPRG_HSCRATCH0,r13
        EXCEPTION_PROLOG_0(PACA_EXGEN)
        EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
@@ -435,8 +428,8 @@ denorm_exception_hv:
        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
 #endif /* CONFIG_CBE_RAS */
 
-       STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
+       STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
 
 #ifdef CONFIG_CBE_RAS
        STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
@@ -527,7 +520,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 machine_check_pSeries:
        .globl machine_check_fwnmi
 machine_check_fwnmi:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)               /* save r13 */
        EXCEPTION_PROLOG_0(PACA_EXMC)
 machine_check_pSeries_0:
@@ -536,9 +528,9 @@ machine_check_pSeries_0:
        KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
        KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
-       KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
+       KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
 
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -621,13 +613,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 
        /* moved from 0xf00 */
        STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
        STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
        STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
        STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
        STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
 
@@ -711,7 +703,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
        .globl system_reset_fwnmi
       .align 7
 system_reset_fwnmi:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)               /* save r13 */
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
                                 NOTEST, 0x100)
@@ -1556,29 +1547,19 @@ do_hash_page:
        lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
        andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
        bne     77f                     /* then don't call hash_page now */
-       /*
-        * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-        * accessing a userspace segment (even from the kernel). We assume
-        * kernel addresses always have the high bit set.
-        */
-       rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
-       rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
-       orc     r0,r12,r0               /* MSR_PR | ~high_bit */
-       rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
-       ori     r4,r4,1                 /* add _PAGE_PRESENT */
-       rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */
 
        /*
         * r3 contains the faulting address
-        * r4 contains the required access permissions
+        * r4 contains the MSR

         * r5 contains the trap number
         * r6 contains dsisr
         *
         * at return r3 = 0 for success, 1 for page fault, negative for error
         */
+        mr     r4,r12
        ld      r6,_DSISR(r1)
-       bl      hash_page               /* build HPTE if possible */
-       cmpdi   r3,0                    /* see if hash_page succeeded */
+       bl      __hash_page             /* build HPTE if possible */
+        cmpdi  r3,0                    /* see if __hash_page succeeded */
 
        /* Success */
        beq     fast_exc_return_irq     /* Return from exception on success */
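
The rewritten hunk stops deriving _PAGE_USER-style access bits in assembly; do_hash_page now forwards the raw MSR in r4 and lets C code work the permissions out. The register comments translate to roughly this prototype (assumed here, not quoted from the headers):

/* Assumed C-side signature matching the register comments above:
 *   r3 = ea, r4 = msr, r5 = trap number, r6 = dsisr.
 * Returns 0 on success, 1 to take the page-fault path, negative on
 * error. */
int __hash_page(unsigned long ea, unsigned long msr,
		unsigned long trap, unsigned long dsisr);
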
index 9ad236e..2117eac 100644 (file)
@@ -73,29 +73,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        MTFSF_L(fr0)
        REST_32FPVSRS(0, R4, R7)
 
-       /* FP/VSX off again */
-       MTMSRD(r6)
-       SYNC
-
        blr
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-/*
- * Enable use of the FPU, and VSX if possible, for the caller.
- */
-_GLOBAL(fp_enable)
-       mfmsr   r3
-       ori     r3,r3,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-       oris    r3,r3,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-       SYNC
-       MTMSRD(r3)
-       isync                   /* (not necessary for arch 2.02 and later) */
-       blr
-
 /*
  * Load state from memory into FP registers including FPSCR.
  * Assumes the caller has enabled FP in the MSR.
@@ -136,31 +116,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        SYNC
        MTMSRD(r5)                      /* enable use of fpu now */
        isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
-       LOAD_REG_ADDRBASE(r3, last_task_used_math)
-       toreal(r3)
-       PPC_LL  r4,ADDROFF(last_task_used_math)(r3)
-       PPC_LCMPI       0,r4,0
-       beq     1f
-       toreal(r4)
-       addi    r4,r4,THREAD            /* want last_task_used_math->thread */
-       addi    r10,r4,THREAD_FPSTATE
-       SAVE_32FPVSRS(0, R5, R10)
-       mffs    fr0
-       stfd    fr0,FPSTATE_FPSCR(r10)
-       PPC_LL  r5,PT_REGS(r4)
-       toreal(r5)
-       PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       li      r10,MSR_FP|MSR_FE0|MSR_FE1
-       andc    r4,r4,r10               /* disable FP for previous task */
-       PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
        /* enable use of FP after return */
 #ifdef CONFIG_PPC32
        mfspr   r5,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
@@ -179,36 +134,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        lfd     fr0,FPSTATE_FPSCR(r10)
        MTFSF_L(fr0)
        REST_32FPVSRS(0, R4, R10)
-#ifndef CONFIG_SMP
-       subi    r4,r5,THREAD
-       fromreal(r4)
-       PPC_STL r4,ADDROFF(last_task_used_math)(r3)
-#endif /* CONFIG_SMP */
        /* restore registers and return */
        /* we haven't used ctr or xer or lr */
        blr
 
 /*
- * giveup_fpu(tsk)
+ * __giveup_fpu(tsk)
  * Disable FP for the task given as the argument,
  * and save the floating-point registers in its thread_struct.
  * Enables the FPU for use in the kernel on return.
  */
-_GLOBAL(giveup_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-       oris    r5,r5,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-       SYNC_601
-       ISYNC_601
-       MTMSRD(r5)                      /* enable use of fpu now */
-       SYNC_601
-       isync
-       PPC_LCMPI       0,r3,0
-       beqlr-                          /* if no previous owner, done */
+_GLOBAL(__giveup_fpu)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r6,THREAD_FPSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
@@ -230,11 +166,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        andc    r4,r4,r3                /* disable FP for previous task */
        PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-       li      r5,0
-       LOAD_REG_ADDRBASE(r4,last_task_used_math)
-       PPC_STL r5,ADDROFF(last_task_used_math)(r4)
-#endif /* CONFIG_SMP */
        blr
 
 /*
index fffd1f9..f705171 100644 (file)
@@ -857,29 +857,6 @@ _GLOBAL(load_up_spe)
        oris    r5,r5,MSR_SPE@h
        mtmsr   r5                      /* enable use of SPE now */
        isync
-/*
- * For SMP, we don't do lazy SPE switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_spe in switch_to.
- */
-#ifndef CONFIG_SMP
-       lis     r3,last_task_used_spe@ha
-       lwz     r4,last_task_used_spe@l(r3)
-       cmpi    0,r4,0
-       beq     1f
-       addi    r4,r4,THREAD    /* want THREAD of last_task_used_spe */
-       SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
-       evxor   evr10, evr10, evr10     /* clear out evr10 */
-       evmwumiaa evr10, evr10, evr10   /* evr10 <- ACC = 0 * 0 + ACC */
-       li      r5,THREAD_ACC
-       evstddx evr10, r4, r5           /* save off accumulator */
-       lwz     r5,PT_REGS(r4)
-       lwz     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r10,MSR_SPE@h
-       andc    r4,r4,r10       /* disable SPE for previous task */
-       stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* !CONFIG_SMP */
        /* enable use of SPE after return */
        oris    r9,r9,MSR_SPE@h
        mfspr   r5,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
@@ -889,10 +866,6 @@ _GLOBAL(load_up_spe)
        evlddx  evr4,r10,r5
        evmra   evr4,evr4
        REST_32EVRS(0,r10,r5,THREAD_EVR0)
-#ifndef CONFIG_SMP
-       subi    r4,r5,THREAD
-       stw     r4,last_task_used_spe@l(r3)
-#endif /* !CONFIG_SMP */
        blr
 
 /*
@@ -1011,16 +984,10 @@ _GLOBAL(__setup_ehv_ivors)
 
 #ifdef CONFIG_SPE
 /*
- * extern void giveup_spe(struct task_struct *prev)
+ * extern void __giveup_spe(struct task_struct *prev)
  *
  */
-_GLOBAL(giveup_spe)
-       mfmsr   r5
-       oris    r5,r5,MSR_SPE@h
-       mtmsr   r5                      /* enable use of SPE now */
-       isync
-       cmpi    0,r3,0
-       beqlr-                          /* if no previous owner, done */
+_GLOBAL(__giveup_spe)
        addi    r3,r3,THREAD            /* want THREAD of task */
        lwz     r5,PT_REGS(r3)
        cmpi    0,r5,0
@@ -1035,11 +1002,6 @@ _GLOBAL(giveup_spe)
        andc    r4,r4,r3                /* disable SPE for previous task */
        stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-       li      r5,0
-       lis     r4,last_task_used_spe@ha
-       stw     r5,last_task_used_spe@l(r4)
-#endif /* !CONFIG_SMP */
        blr
 #endif /* CONFIG_SPE */
 
index 112ccf4..cf4fb54 100644 (file)
@@ -89,13 +89,6 @@ _GLOBAL(power7_powersave_common)
        std     r0,_LINK(r1)
        std     r0,_NIP(r1)
 
-#ifndef CONFIG_SMP
-       /* Make sure FPU, VSX etc... are flushed as we may lose
-        * state when going to nap mode
-        */
-       bl      discard_lazy_cpu_state
-#endif /* CONFIG_SMP */
-
        /* Hard disable interrupts */
        mfmsr   r9
        rldicl  r9,r9,48,1
index ed3ab50..be8edd6 100644 (file)
@@ -743,6 +743,8 @@ relocate_new_kernel:
        /* Check for 47x cores */
        mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
+       cmplwi  cr0,r3,PVR_476FPE@h
+       beq     setup_map_47x
        cmplwi  cr0,r3,PVR_476@h
        beq     setup_map_47x
        cmplwi  cr0,r3,PVR_476_ISS@h
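
The check keys on the top 16 bits of the processor version register. The same test in C, with the PVR_476FPE value taken from asm/reg.h (treat it as an assumption here) and the other 47x PVRs elided:

#include <stdint.h>
#include <stdio.h>

#define PVR_476FPE 0x7ff50000u	/* assumed value, from asm/reg.h */

/* Mirrors: mfspr r3,SPRN_PVR ; srwi r3,r3,16 ; cmplwi cr0,r3,PVR_476FPE@h */
static int pvr_is_476fpe(uint32_t pvr)
{
	return (pvr >> 16) == (PVR_476FPE >> 16);
}

int main(void)
{
	printf("%d\n", pvr_is_476fpe(0x7ff50200));	/* 1: revision bits ignored */
	return 0;
}
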
index 6838451..59663af 100644 (file)
@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                         */
                        break;
 
+               case R_PPC64_ENTRY:
+                       /*
+                        * Optimize ELFv2 large code model entry point if
+                        * the TOC is within 2GB range of current location.
+                        */
+                       value = my_r2(sechdrs, me) - (unsigned long)location;
+                       if (value + 0x80008000 > 0xffffffff)
+                               break;
+                       /*
+                        * Check for the large code model prolog sequence:
+                        *      ld r2, ...(r12)
+                        *      add r2, r2, r12
+                        */
+                       if ((((uint32_t *)location)[0] & ~0xfffc)
+                           != 0xe84c0000)
+                               break;
+                       if (((uint32_t *)location)[1] != 0x7c426214)
+                               break;
+                       /*
+                        * If found, replace it with:
+                        *      addis r2, r12, (.TOC.-func)@ha
+                        *      addi r2, r12, (.TOC.-func)@l
+                        */
+                       ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
+                       ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
+                       break;
+
                case R_PPC64_REL16_HA:
                        /* Subtract location pointer */
                        value -= (unsigned long)location;
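
The patched-in addis/addi pair encodes the TOC displacement with the standard @ha/@l split, where @ha is biased by 0x8000 so it composes with the sign-extended @l half; the earlier "value + 0x80008000 > 0xffffffff" test is exactly the condition under which that split cannot reach. A self-contained sketch using the same shape as the kernel's PPC_LO()/PPC_HI()/PPC_HA() helpers:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's PPC_LO()/PPC_HI()/PPC_HA() helpers. */
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)

int main(void)
{
	uint32_t value = 0x12348765;	/* hypothetical .TOC.-func displacement */

	/* addis r2,r12,value@ha ; addi r2,r12,value@l */
	uint32_t insn0 = 0x3c4c0000 + PPC_HA(value);
	uint32_t insn1 = 0x38420000 + PPC_LO(value);

	printf("%08x %08x\n", insn0, insn1);	/* 3c4c1235 38428765 */
	return 0;
}
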
index 202963e..41e1607 100644 (file)
@@ -19,13 +19,11 @@ EXPORT_SYMBOL(_mcount);
 #endif
 
 #ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(giveup_fpu);
 EXPORT_SYMBOL(load_fp_state);
 EXPORT_SYMBOL(store_fp_state);
 #endif
 
 #ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(load_vr_state);
 EXPORT_SYMBOL(store_vr_state);
 #endif
@@ -34,10 +32,6 @@ EXPORT_SYMBOL(store_vr_state);
 EXPORT_SYMBOL_GPL(__giveup_vsx);
 #endif
 
-#ifdef CONFIG_SPE
-EXPORT_SYMBOL(giveup_spe);
-#endif
-
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
index 646bf4d..dccc87e 100644 (file)
 
 extern unsigned long _get_SP(void);
 
-#ifndef CONFIG_SMP
-struct task_struct *last_task_used_math = NULL;
-struct task_struct *last_task_used_altivec = NULL;
-struct task_struct *last_task_used_vsx = NULL;
-struct task_struct *last_task_used_spe = NULL;
-#endif
-
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+static void check_if_tm_restore_required(struct task_struct *tsk)
 {
        /*
         * If we are saving the current thread's registers, and the
@@ -89,34 +82,67 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk)
                tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
                set_thread_flag(TIF_RESTORE_TM);
        }
+}
+#else
+static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+bool strict_msr_control;
+EXPORT_SYMBOL(strict_msr_control);
+
+static int __init enable_strict_msr_control(char *str)
+{
+       strict_msr_control = true;
+       pr_info("Enabling strict facility control\n");
 
-       giveup_fpu(tsk);
+       return 0;
 }
+early_param("ppc_strict_facility_enable", enable_strict_msr_control);
 
-void giveup_altivec_maybe_transactional(struct task_struct *tsk)
+void msr_check_and_set(unsigned long bits)
 {
-       /*
-        * If we are saving the current thread's registers, and the
-        * thread is in a transactional state, set the TIF_RESTORE_TM
-        * bit so that we know to restore the registers before
-        * returning to userspace.
-        */
-       if (tsk == current && tsk->thread.regs &&
-           MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-           !test_thread_flag(TIF_RESTORE_TM)) {
-               tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
-               set_thread_flag(TIF_RESTORE_TM);
-       }
+       unsigned long oldmsr = mfmsr();
+       unsigned long newmsr;
 
-       giveup_altivec(tsk);
+       newmsr = oldmsr | bits;
+
+#ifdef CONFIG_VSX
+       if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+               newmsr |= MSR_VSX;
+#endif
+
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
 }
 
-#else
-#define giveup_fpu_maybe_transactional(tsk)    giveup_fpu(tsk)
-#define giveup_altivec_maybe_transactional(tsk)        giveup_altivec(tsk)
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+void __msr_check_and_clear(unsigned long bits)
+{
+       unsigned long oldmsr = mfmsr();
+       unsigned long newmsr;
+
+       newmsr = oldmsr & ~bits;
+
+#ifdef CONFIG_VSX
+       if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+               newmsr &= ~MSR_VSX;
+#endif
+
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+}
+EXPORT_SYMBOL(__msr_check_and_clear);
 
 #ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+       check_if_tm_restore_required(tsk);
+
+       msr_check_and_set(MSR_FP);
+       __giveup_fpu(tsk);
+       msr_check_and_clear(MSR_FP);
+}
+EXPORT_SYMBOL(giveup_fpu);
+
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
@@ -134,52 +160,56 @@ void flush_fp_to_thread(struct task_struct *tsk)
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
-#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
-                        * the FP register state on context switch on SMP,
+                        * the FP register state on context switch,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
-#endif
-                       giveup_fpu_maybe_transactional(tsk);
+                       giveup_fpu(tsk);
                }
                preempt_enable();
        }
 }
 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
-#endif /* CONFIG_PPC_FPU */
 
 void enable_kernel_fp(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-               giveup_fpu_maybe_transactional(current);
-       else
-               giveup_fpu(NULL);       /* just enables FP for kernel */
-#else
-       giveup_fpu_maybe_transactional(last_task_used_math);
-#endif /* CONFIG_SMP */
+       msr_check_and_set(MSR_FP);
+
+       if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
+               check_if_tm_restore_required(current);
+               __giveup_fpu(current);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_fp);
+#endif /* CONFIG_PPC_FPU */
 
 #ifdef CONFIG_ALTIVEC
+void giveup_altivec(struct task_struct *tsk)
+{
+       check_if_tm_restore_required(tsk);
+
+       msr_check_and_set(MSR_VEC);
+       __giveup_altivec(tsk);
+       msr_check_and_clear(MSR_VEC);
+}
+EXPORT_SYMBOL(giveup_altivec);
+
 void enable_kernel_altivec(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
-               giveup_altivec_maybe_transactional(current);
-       else
-               giveup_altivec_notask();
-#else
-       giveup_altivec_maybe_transactional(last_task_used_altivec);
-#endif /* CONFIG_SMP */
+       msr_check_and_set(MSR_VEC);
+
+       if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
+               check_if_tm_restore_required(current);
+               __giveup_altivec(current);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
 
@@ -192,10 +222,8 @@ void flush_altivec_to_thread(struct task_struct *tsk)
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
-#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
-#endif
-                       giveup_altivec_maybe_transactional(tsk);
+                       giveup_altivec(tsk);
                }
                preempt_enable();
        }
@@ -204,37 +232,43 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-void enable_kernel_vsx(void)
+void giveup_vsx(struct task_struct *tsk)
 {
-       WARN_ON(preemptible());
+       check_if_tm_restore_required(tsk);
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
-               giveup_vsx(current);
-       else
-               giveup_vsx(NULL);       /* just enable vsx for kernel - force */
-#else
-       giveup_vsx(last_task_used_vsx);
-#endif /* CONFIG_SMP */
+       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+       if (tsk->thread.regs->msr & MSR_FP)
+               __giveup_fpu(tsk);
+       if (tsk->thread.regs->msr & MSR_VEC)
+               __giveup_altivec(tsk);
+       __giveup_vsx(tsk);
+       msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-EXPORT_SYMBOL(enable_kernel_vsx);
+EXPORT_SYMBOL(giveup_vsx);
 
-void giveup_vsx(struct task_struct *tsk)
+void enable_kernel_vsx(void)
 {
-       giveup_fpu_maybe_transactional(tsk);
-       giveup_altivec_maybe_transactional(tsk);
-       __giveup_vsx(tsk);
+       WARN_ON(preemptible());
+
+       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+
+       if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+               check_if_tm_restore_required(current);
+               if (current->thread.regs->msr & MSR_FP)
+                       __giveup_fpu(current);
+               if (current->thread.regs->msr & MSR_VEC)
+                       __giveup_altivec(current);
+               __giveup_vsx(current);
+       }
 }
-EXPORT_SYMBOL(giveup_vsx);
+EXPORT_SYMBOL(enable_kernel_vsx);
 
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
-#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
-#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
@@ -244,19 +278,26 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
+void giveup_spe(struct task_struct *tsk)
+{
+       check_if_tm_restore_required(tsk);
+
+       msr_check_and_set(MSR_SPE);
+       __giveup_spe(tsk);
+       msr_check_and_clear(MSR_SPE);
+}
+EXPORT_SYMBOL(giveup_spe);
 
 void enable_kernel_spe(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
-               giveup_spe(current);
-       else
-               giveup_spe(NULL);       /* just enable SPE for kernel - force */
-#else
-       giveup_spe(last_task_used_spe);
-#endif /* __SMP __ */
+       msr_check_and_set(MSR_SPE);
+
+       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
+               check_if_tm_restore_required(current);
+               __giveup_spe(current);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_spe);
 
@@ -265,9 +306,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
-#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
-#endif
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
@@ -276,31 +315,81 @@ void flush_spe_to_thread(struct task_struct *tsk)
 }
 #endif /* CONFIG_SPE */
 
-#ifndef CONFIG_SMP
-/*
- * If we are doing lazy switching of CPU state (FP, altivec or SPE),
- * and the current task has some state, discard it.
- */
-void discard_lazy_cpu_state(void)
+static unsigned long msr_all_available;
+
+static int __init init_msr_all_available(void)
 {
-       preempt_disable();
-       if (last_task_used_math == current)
-               last_task_used_math = NULL;
+#ifdef CONFIG_PPC_FPU
+       msr_all_available |= MSR_FP;
+#endif
 #ifdef CONFIG_ALTIVEC
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               msr_all_available |= MSR_VEC;
+#endif
 #ifdef CONFIG_VSX
-       if (last_task_used_vsx == current)
-               last_task_used_vsx = NULL;
-#endif /* CONFIG_VSX */
+       if (cpu_has_feature(CPU_FTR_VSX))
+               msr_all_available |= MSR_VSX;
+#endif
 #ifdef CONFIG_SPE
-       if (last_task_used_spe == current)
-               last_task_used_spe = NULL;
+       if (cpu_has_feature(CPU_FTR_SPE))
+               msr_all_available |= MSR_SPE;
 #endif
-       preempt_enable();
+
+       return 0;
+}
+early_initcall(init_msr_all_available);
+
+void giveup_all(struct task_struct *tsk)
+{
+       unsigned long usermsr;
+
+       if (!tsk->thread.regs)
+               return;
+
+       usermsr = tsk->thread.regs->msr;
+
+       if ((usermsr & msr_all_available) == 0)
+               return;
+
+       msr_check_and_set(msr_all_available);
+
+#ifdef CONFIG_PPC_FPU
+       if (usermsr & MSR_FP)
+               __giveup_fpu(tsk);
+#endif
+#ifdef CONFIG_ALTIVEC
+       if (usermsr & MSR_VEC)
+               __giveup_altivec(tsk);
+#endif
+#ifdef CONFIG_VSX
+       if (usermsr & MSR_VSX)
+               __giveup_vsx(tsk);
+#endif
+#ifdef CONFIG_SPE
+       if (usermsr & MSR_SPE)
+               __giveup_spe(tsk);
+#endif
+
+       msr_check_and_clear(msr_all_available);
+}
+EXPORT_SYMBOL(giveup_all);
+
+void flush_all_to_thread(struct task_struct *tsk)
+{
+       if (tsk->thread.regs) {
+               preempt_disable();
+               BUG_ON(tsk != current);
+               giveup_all(tsk);
+
+#ifdef CONFIG_SPE
+               if (tsk->thread.regs->msr & MSR_SPE)
+                       tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
+#endif
+
+               preempt_enable();
+       }
 }
-#endif /* CONFIG_SMP */
+EXPORT_SYMBOL(flush_all_to_thread);
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 void do_send_trap(struct pt_regs *regs, unsigned long address,
@@ -744,13 +833,15 @@ void restore_tm_state(struct pt_regs *regs)
        msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
        msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
        if (msr_diff & MSR_FP) {
-               fp_enable();
+               msr_check_and_set(MSR_FP);
                load_fp_state(&current->thread.fp_state);
+               msr_check_and_clear(MSR_FP);
                regs->msr |= current->thread.fpexc_mode;
        }
        if (msr_diff & MSR_VEC) {
-               vec_enable();
+               msr_check_and_set(MSR_VEC);
                load_vr_state(&current->thread.vr_state);
+               msr_check_and_clear(MSR_VEC);
        }
        regs->msr |= msr_diff;
 }
@@ -760,112 +851,87 @@ void restore_tm_state(struct pt_regs *regs)
 #define __switch_to_tm(prev)
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-struct task_struct *__switch_to(struct task_struct *prev,
-       struct task_struct *new)
+static inline void save_sprs(struct thread_struct *t)
 {
-       struct thread_struct *new_thread, *old_thread;
-       struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
-       struct ppc64_tlb_batch *batch;
+#ifdef CONFIG_ALTIVEC
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               t->vrsave = mfspr(SPRN_VRSAVE);
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       if (cpu_has_feature(CPU_FTR_DSCR))
+               t->dscr = mfspr(SPRN_DSCR);
 
-       WARN_ON(!irqs_disabled());
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               t->bescr = mfspr(SPRN_BESCR);
+               t->ebbhr = mfspr(SPRN_EBBHR);
+               t->ebbrr = mfspr(SPRN_EBBRR);
 
-       /* Back up the TAR and DSCR across context switches.
-        * Note that the TAR is not available for use in the kernel.  (To
-        * provide this, the TAR should be backed up/restored on exception
-        * entry/exit instead, and be in pt_regs.  FIXME, this should be in
-        * pt_regs anyway (for debug).)
-        * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
-        * these will change them.
-        */
-       save_early_sprs(&prev->thread);
+               t->fscr = mfspr(SPRN_FSCR);
 
-       __switch_to_tm(prev);
+               /*
+                * Note that the TAR is not available for use in the kernel.
+                * (To provide this, the TAR should be backed up/restored on
+                * exception entry/exit instead, and be in pt_regs.  FIXME,
+                * this should be in pt_regs anyway (for debug).)
+                */
+               t->tar = mfspr(SPRN_TAR);
+       }
+#endif
+}
 
-#ifdef CONFIG_SMP
-       /* avoid complexity of lazy save/restore of fpu
-        * by just saving it every time we switch out if
-        * this task used the fpu during the last quantum.
-        *
-        * If it tries to use the fpu again, it'll trap and
-        * reload its fp regs.  So we don't have to do a restore
-        * every switch, just a save.
-        *  -- Cort
-        */
-       if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
-               giveup_fpu(prev);
+static inline void restore_sprs(struct thread_struct *old_thread,
+                               struct thread_struct *new_thread)
+{
 #ifdef CONFIG_ALTIVEC
-       /*
-        * If the previous thread used altivec in the last quantum
-        * (thus changing altivec regs) then save them.
-        * We used to check the VRSAVE register but not all apps
-        * set it, so we don't rely on it now (and in fact we need
-        * to save & restore VSCR even if VRSAVE == 0).  -- paulus
-        *
-        * On SMP we always save/restore altivec regs just to avoid the
-        * complexity of changing processors.
-        *  -- Cort
-        */
-       if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
-               giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-       if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
-               /* VMX and FPU registers are already saved here */
-               __giveup_vsx(prev);
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-       /*
-        * If the previous thread used spe in the last quantum
-        * (thus changing spe regs) then save them.
-        *
-        * On SMP we always save/restore spe regs just to avoid the
-        * complexity of changing processors.
-        */
-       if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
-               giveup_spe(prev);
-#endif /* CONFIG_SPE */
+       if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+           old_thread->vrsave != new_thread->vrsave)
+               mtspr(SPRN_VRSAVE, new_thread->vrsave);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       if (cpu_has_feature(CPU_FTR_DSCR)) {
+               u64 dscr = get_paca()->dscr_default;
+               u64 fscr = old_thread->fscr & ~FSCR_DSCR;
 
-#else  /* CONFIG_SMP */
-#ifdef CONFIG_ALTIVEC
-       /* Avoid the trap.  On SMP this never happens since
-        * we don't set last_task_used_altivec -- Cort
-        */
-       if (new->thread.regs && last_task_used_altivec == new)
-               new->thread.regs->msr |= MSR_VEC;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-       if (new->thread.regs && last_task_used_vsx == new)
-               new->thread.regs->msr |= MSR_VSX;
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-       /* Avoid the trap.  On SMP this never happens since
-        * we don't set last_task_used_spe
-        */
-       if (new->thread.regs && last_task_used_spe == new)
-               new->thread.regs->msr |= MSR_SPE;
-#endif /* CONFIG_SPE */
+               if (new_thread->dscr_inherit) {
+                       dscr = new_thread->dscr;
+                       fscr |= FSCR_DSCR;
+               }
 
-#endif /* CONFIG_SMP */
+               if (old_thread->dscr != dscr)
+                       mtspr(SPRN_DSCR, dscr);
 
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-       switch_booke_debug_regs(&new->thread.debug);
-#else
-/*
- * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
- * schedule DABR
- */
-#ifndef CONFIG_HAVE_HW_BREAKPOINT
-       if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
-               __set_breakpoint(&new->thread.hw_brk);
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+               if (old_thread->fscr != fscr)
+                       mtspr(SPRN_FSCR, fscr);
+       }
+
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               if (old_thread->bescr != new_thread->bescr)
+                       mtspr(SPRN_BESCR, new_thread->bescr);
+               if (old_thread->ebbhr != new_thread->ebbhr)
+                       mtspr(SPRN_EBBHR, new_thread->ebbhr);
+               if (old_thread->ebbrr != new_thread->ebbrr)
+                       mtspr(SPRN_EBBRR, new_thread->ebbrr);
+
+               if (old_thread->tar != new_thread->tar)
+                       mtspr(SPRN_TAR, new_thread->tar);
+       }
 #endif
+}
 
+struct task_struct *__switch_to(struct task_struct *prev,
+       struct task_struct *new)
+{
+       struct thread_struct *new_thread, *old_thread;
+       struct task_struct *last;
+#ifdef CONFIG_PPC_BOOK3S_64
+       struct ppc64_tlb_batch *batch;
+#endif
 
        new_thread = &new->thread;
        old_thread = &current->thread;
 
+       WARN_ON(!irqs_disabled());
+
 #ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
@@ -890,6 +956,30 @@ struct task_struct *__switch_to(struct task_struct *prev,
        }
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+       switch_booke_debug_regs(&new->thread.debug);
+#else
+/*
+ * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
+ * schedule DABR
+ */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
+       if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
+               __set_breakpoint(&new->thread.hw_brk);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif
+
+       /*
+        * We need to save SPRs before treclaim/trecheckpoint as these will
+        * change a number of them.
+        */
+       save_sprs(&prev->thread);
+
+       __switch_to_tm(prev);
+
+       /* Save FPU, Altivec, VSX and SPE state */
+       giveup_all(prev);
+
        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
@@ -899,6 +989,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
        tm_recheckpoint_new_task(new);
 
+       /*
+        * Call restore_sprs() before calling _switch(). If we move it after
+        * _switch() then we miss out on calling it for new tasks. The reason
+        * for this is we manually create a stack frame for new tasks that
+        * directly returns through ret_from_fork() or
+        * ret_from_kernel_thread(). See copy_thread() for details.
+        */
+       restore_sprs(old_thread, new_thread);
+
        last = _switch(old_thread, new_thread);
 
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -952,10 +1051,12 @@ static void show_instructions(struct pt_regs *regs)
        printk("\n");
 }
 
-static struct regbit {
+struct regbit {
        unsigned long bit;
        const char *name;
-} msr_bits[] = {
+};
+
+static struct regbit msr_bits[] = {
 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
@@ -985,16 +1086,49 @@ static struct regbit {
        {0,             NULL}
 };
 
-static void printbits(unsigned long val, struct regbit *bits)
+static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
 {
-       const char *sep = "";
+       const char *s = "";
 
-       printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
-                       printk("%s%s", sep, bits->name);
-                       sep = ",";
+                       printk("%s%s", s, bits->name);
+                       s = sep;
                }
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static struct regbit msr_tm_bits[] = {
+       {MSR_TS_T,      "T"},
+       {MSR_TS_S,      "S"},
+       {MSR_TM,        "E"},
+       {0,             NULL}
+};
+
+static void print_tm_bits(unsigned long val)
+{
+/*
+ * This only prints something if at least one of the TM bit is set.
+ * This only prints something if at least one of the TM bits is set.
+ *   E: Enabled                (bit 32)
+ *   S: Suspended      (bit 33)
+ *   T: Transactional  (bit 34)
+ */
+       if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
+               printk(",TM[");
+               print_bits(val, msr_tm_bits, "");
+               printk("]");
+       }
+}
+#else
+static void print_tm_bits(unsigned long val) {}
+#endif
+
+static void print_msr_bits(unsigned long val)
+{
+       printk("<");
+       print_bits(val, msr_bits, ",");
+       print_tm_bits(val);
        printk(">");
 }
 
@@ -1019,7 +1153,7 @@ void show_regs(struct pt_regs * regs)
        printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
-       printbits(regs->msr, msr_bits);
+       print_msr_bits(regs->msr);
        printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
        trap = TRAP(regs);
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
@@ -1061,13 +1195,10 @@ void show_regs(struct pt_regs * regs)
 
 void exit_thread(void)
 {
-       discard_lazy_cpu_state();
 }
 
 void flush_thread(void)
 {
-       discard_lazy_cpu_state();
-
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
 #else /* CONFIG_HAVE_HW_BREAKPOINT */
@@ -1086,10 +1217,7 @@ release_thread(struct task_struct *t)
  */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-       flush_fp_to_thread(src);
-       flush_altivec_to_thread(src);
-       flush_vsx_to_thread(src);
-       flush_spe_to_thread(src);
+       flush_all_to_thread(src);
        /*
         * Flush TM state out so we can copy it.  __switch_to_tm() does this
         * flush but it removes the checkpointed state from the current CPU and
@@ -1212,7 +1340,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #ifdef CONFIG_PPC64 
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                p->thread.dscr_inherit = current->thread.dscr_inherit;
-               p->thread.dscr = current->thread.dscr;
+               p->thread.dscr = mfspr(SPRN_DSCR);
        }
        if (cpu_has_feature(CPU_FTR_HAS_PPR))
                p->thread.ppr = INIT_PPR;
@@ -1305,7 +1433,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
                regs->msr = MSR_USER32;
        }
 #endif
-       discard_lazy_cpu_state();
 #ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
 #endif
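
The process.c changes above split the old printbits() into print_bits(), print_tm_bits() and print_msr_bits(), so the transactional-memory state can be appended inside the same angle brackets as the ordinary MSR bits. A standalone sketch of the resulting pattern (the bit assignments below are invented for illustration; the kernel version uses printk() and the real MSR_* masks):

    #include <stdio.h>

    struct regbit {
            unsigned long bit;
            const char *name;
    };

    /* Made-up bit positions, for illustration only. */
    static struct regbit msr_bits[] = {
            { 1UL << 15, "EE" },
            { 1UL << 14, "PR" },
            { 0,         NULL },
    };

    static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
    {
            const char *s = "";

            for (; bits->bit; ++bits)
                    if (val & bits->bit) {
                            printf("%s%s", s, bits->name);
                            s = sep;
                    }
    }

    int main(void)
    {
            printf("<");
            print_bits((1UL << 15) | (1UL << 14), msr_bits, ",");
            printf(">\n");          /* prints "<EE,PR>" */
            return 0;
    }

With a transaction live, the kernel version appends something like ",TM[SE]" inside the same brackets before the closing '>'.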
index 92dea8d..da51925 100644 (file)
@@ -389,6 +389,7 @@ static void __init prom_printf(const char *format, ...)
                        break;
                }
        }
+       va_end(args);
 }
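
The one-line prom_printf() fix above adds the va_end() that C requires for every va_start() before the function returns. The pairing, shown in a minimal printf-style wrapper (a hypothetical helper, not the actual prom_printf() body):

    #include <stdarg.h>
    #include <stdio.h>

    static void log_fmt(const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            vprintf(fmt, args);
            va_end(args);           /* must pair with the va_start() above */
    }

    int main(void)
    {
            log_fmt("answer=%d\n", 42);
            return 0;
    }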
 
 
index 737c0d0..30a03c0 100644 (file)
@@ -60,6 +60,7 @@ struct pt_regs_offset {
 #define STR(s) #s                      /* convert to string */
 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
 #define GPR_OFFSET_NAME(num)   \
+       {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
        {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
 #define REG_OFFSET_END {.name = NULL, .offset = 0}
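
With the extra initializer, each GPR is now reachable in the pt_regs_offset table under both the short "rN" alias and the long "gprN" name. A compilable sketch of the expansion, using a stand-in struct in place of the kernel's struct pt_regs:

    #include <stddef.h>
    #include <stdio.h>

    struct regs { unsigned long gpr[32]; };     /* stand-in for pt_regs */

    struct off { const char *name; size_t offset; };

    #define STR(s) #s
    #define GPR_OFFSET_NAME(num) \
            { .name = STR(r##num),   .offset = offsetof(struct regs, gpr[num]) }, \
            { .name = STR(gpr##num), .offset = offsetof(struct regs, gpr[num]) }

    static struct off table[] = { GPR_OFFSET_NAME(3) };

    int main(void)
    {
            /* both aliases resolve to the same offset */
            printf("%s=%zu %s=%zu\n", table[0].name, table[0].offset,
                   table[1].name, table[1].offset);
            return 0;
    }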
 
index 5a753fa..28736ff 100644 (file)
@@ -44,6 +44,9 @@
 #include <asm/mmu.h>
 #include <asm/topology.h>
 
+/* This is here deliberately so it's only used in this file */
+void enter_rtas(unsigned long);
+
 struct rtas_t rtas = {
        .lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
@@ -93,21 +96,13 @@ static void unlock_rtas(unsigned long flags)
  */
 static void call_rtas_display_status(unsigned char c)
 {
-       struct rtas_args *args = &rtas.args;
        unsigned long s;
 
        if (!rtas.base)
                return;
-       s = lock_rtas();
-
-       args->token = cpu_to_be32(10);
-       args->nargs = cpu_to_be32(1);
-       args->nret  = cpu_to_be32(1);
-       args->rets  = &(args->args[1]);
-       args->args[0] = cpu_to_be32(c);
-
-       enter_rtas(__pa(args));
 
+       s = lock_rtas();
+       rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
        unlock_rtas(s);
 }
 
@@ -418,6 +413,36 @@ static char *__fetch_rtas_last_error(char *altbuf)
 #define get_errorlog_buffer()          NULL
 #endif
 
+
+static void
+va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
+                     va_list list)
+{
+       int i;
+
+       args->token = cpu_to_be32(token);
+       args->nargs = cpu_to_be32(nargs);
+       args->nret  = cpu_to_be32(nret);
+       args->rets  = &(args->args[nargs]);
+
+       for (i = 0; i < nargs; ++i)
+               args->args[i] = cpu_to_be32(va_arg(list, __u32));
+
+       for (i = 0; i < nret; ++i)
+               args->rets[i] = 0;
+
+       enter_rtas(__pa(args));
+}
+
+void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
+{
+       va_list list;
+
+       va_start(list, nret);
+       va_rtas_call_unlocked(args, token, nargs, nret, list);
+       va_end(list);
+}
+
 int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 {
        va_list list;
@@ -431,22 +456,14 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
                return -1;
 
        s = lock_rtas();
+
+       /* We use the global rtas args buffer */
        rtas_args = &rtas.args;
 
-       rtas_args->token = cpu_to_be32(token);
-       rtas_args->nargs = cpu_to_be32(nargs);
-       rtas_args->nret  = cpu_to_be32(nret);
-       rtas_args->rets  = &(rtas_args->args[nargs]);
        va_start(list, outputs);
-       for (i = 0; i < nargs; ++i)
-               rtas_args->args[i] = cpu_to_be32(va_arg(list, __u32));
+       va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
        va_end(list);
 
-       for (i = 0; i < nret; ++i)
-               rtas_args->rets[i] = 0;
-
-       enter_rtas(__pa(rtas_args));
-
        /* A -1 return code indicates that the last command couldn't
           be completed due to a hardware error. */
        if (be32_to_cpu(rtas_args->rets[0]) == -1)
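
rtas_call_unlocked() factors the argument marshalling out of rtas_call() so that callers which cannot take rtas.lock, or which own a private buffer, can reuse it. A user-space sketch of what va_rtas_call_unlocked() does with the buffer, leaving out the cpu_to_be32() conversions and the enter_rtas() trap of the real code:

    #include <stdarg.h>
    #include <stdio.h>

    /* Simplified stand-in for struct rtas_args (the real one uses __be32). */
    struct rtas_args {
            unsigned int token, nargs, nret;
            unsigned int args[16];
            unsigned int *rets;
    };

    static void marshal(struct rtas_args *a, int token, int nargs, int nret, ...)
    {
            va_list list;
            int i;

            a->token = token;
            a->nargs = nargs;
            a->nret  = nret;
            a->rets  = &a->args[nargs];     /* rets alias the tail of args[] */

            va_start(list, nret);
            for (i = 0; i < nargs; ++i)
                    a->args[i] = va_arg(list, unsigned int);
            va_end(list);

            for (i = 0; i < nret; ++i)
                    a->rets[i] = 0;
    }

    int main(void)
    {
            struct rtas_args a;

            marshal(&a, 10, 1, 1, 0x41u);   /* display-character, arg 'A' */
            printf("token=%u arg0=%x ret0=%u\n", a.token, a.args[0], a.rets[0]);
            return 0;
    }

The key invariant is that rets simply points at the tail of args[], starting at index nargs, which is why callers pass a buffer large enough for both.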
index ef7c24e..b6aa378 100644 (file)
@@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
         * contains valid data
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                msr |= MSR_VSX;
@@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs,
         * contains valid data
         */
        if (current->thread.used_vsr) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                if (msr & MSR_VSX) {
@@ -687,15 +687,6 @@ static long restore_user_regs(struct pt_regs *regs,
        if (sig)
                regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
-       /*
-        * Do this before updating the thread state in
-        * current->thread.fpr/vr/evr.  That way, if we get preempted
-        * and another task grabs the FPU/Altivec/SPE, it won't be
-        * tempted to save the current CPU state into the thread_struct
-        * and corrupt what we are writing there.
-        */
-       discard_lazy_cpu_state();
-
 #ifdef CONFIG_ALTIVEC
        /*
         * Force the process to reload the altivec registers from
@@ -798,15 +789,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
        /* Restore the previous little-endian mode */
        regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
-       /*
-        * Do this before updating the thread state in
-        * current->thread.fpr/vr/evr.  That way, if we get preempted
-        * and another task grabs the FPU/Altivec/SPE, it won't be
-        * tempted to save the current CPU state into the thread_struct
-        * and corrupt what we are writing there.
-        */
-       discard_lazy_cpu_state();
-
 #ifdef CONFIG_ALTIVEC
        regs->msr &= ~MSR_VEC;
        if (msr & MSR_VEC) {
index c676ece..2552079 100644 (file)
@@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
         * VMX data.
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                err |= copy_vsx_to_user(v_regs, current);
                /* set MSR_VSX in the MSR value in the frame to
@@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
         * VMX data.
         */
        if (current->thread.used_vsr) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                tm_v_regs += ELF_NVRREG;
 
@@ -349,15 +349,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
        if (set != NULL)
                err |=  __get_user(set->sig[0], &sc->oldmask);
 
-       /*
-        * Do this before updating the thread state in
-        * current->thread.fpr/vr.  That way, if we get preempted
-        * and another task grabs the FPU/Altivec, it won't be
-        * tempted to save the current CPU state into the thread_struct
-        * and corrupt what we are writing there.
-        */
-       discard_lazy_cpu_state();
-
        /*
         * Force reload of FP/VEC.
         * This has to be done before copying stuff into current->thread.fpr/vr
@@ -468,15 +459,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
        err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
        err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
 
-       /*
-        * Do this before updating the thread state in
-        * current->thread.fpr/vr.  That way, if we get preempted
-        * and another task grabs the FPU/Altivec, it won't be
-        * tempted to save the current CPU state into the thread_struct
-        * and corrupt what we are writing there.
-        */
-       discard_lazy_cpu_state();
-
        /*
         * Force reload of FP/VEC.
         * This has to be done before copying stuff into current->thread.fpr/vr
index ea43a34..4f24606 100644 (file)
@@ -61,3 +61,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        save_context_stack(trace, tsk->thread.ksp, tsk, 0);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void
+save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+       save_context_stack(trace, regs->gpr[1], current, 0);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
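
save_stack_trace_regs() walks the stack from GPR1 in the supplied regs rather than from the task's saved kernel stack pointer, which is what kprobe-based tracers need when they interrupt a running task. A sketch of a hypothetical in-kernel caller (the capture() name and entries[] sizing are made up; this is kernel context, not standalone code):

    static unsigned long entries[32];

    static void capture(struct pt_regs *regs)
    {
            struct stack_trace trace = {
                    .entries     = entries,
                    .max_entries = ARRAY_SIZE(entries),
                    .skip        = 0,
            };

            save_stack_trace_regs(regs, &trace);
            /* trace.nr_entries frames are now in entries[] */
    }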
index eae33e1..6669b17 100644 (file)
@@ -20,9 +20,7 @@ void save_processor_state(void)
         * flush out all the special registers so we don't need
         * to save them in the snapshot
         */
-       flush_fp_to_thread(current);
-       flush_altivec_to_thread(current);
-       flush_spe_to_thread(current);
+       flush_all_to_thread(current);
 
 #ifdef CONFIG_PPC64
        hard_irq_disable();
index 2384129..55323a6 100644 (file)
@@ -57,4 +57,4 @@
 
 START_TABLE
 #include <asm/systbl.h>
-END_TABLE __NR_syscalls
+END_TABLE NR_syscalls
index 19415e7..31b6e7c 100644 (file)
@@ -16,7 +16,7 @@ awk   'BEGIN { num = -1; }    # Ignore the beginning of the file
        /^START_TABLE/ { num = 0; next; }
        /^END_TABLE/ {
                if (num != $2) {
-                       printf "__NR_syscalls (%s) is not one more than the last syscall (%s)\n",
+                       printf "NR_syscalls (%s) is not one more than the last syscall (%s)\n",
                                $2, num - 1;
                        exit(1);
                }
index 1be1092..81b0900 100644 (file)
@@ -1002,38 +1002,6 @@ static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
 };
 
-/*
- * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
- */
-void GregorianDay(struct rtc_time * tm)
-{
-       int leapsToDate;
-       int lastYear;
-       int day;
-       int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
-
-       lastYear = tm->tm_year - 1;
-
-       /*
-        * Number of leap corrections to apply up to end of last year
-        */
-       leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
-
-       /*
-        * This year is a leap year if it is divisible by 4 except when it is
-        * divisible by 100 unless it is divisible by 400
-        *
-        * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
-        */
-       day = tm->tm_mon > 2 && leapyear(tm->tm_year);
-
-       day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
-                  tm->tm_mday;
-
-       tm->tm_wday = day % 7;
-}
-EXPORT_SYMBOL_GPL(GregorianDay);
-
 void to_tm(int tim, struct rtc_time * tm)
 {
        register int    i;
@@ -1064,9 +1032,9 @@ void to_tm(int tim, struct rtc_time * tm)
        tm->tm_mday = day + 1;
 
        /*
-        * Determine the day of week
+        * No-one uses the day of the week.
         */
-       GregorianDay(tm);
+       tm->tm_wday = -1;
 }
 EXPORT_SYMBOL(to_tm);
 
index 37de90f..b6becc7 100644 (file)
@@ -1313,13 +1313,6 @@ void nonrecoverable_exception(struct pt_regs *regs)
        die("nonrecoverable exception", regs, SIGKILL);
 }
 
-void trace_syscall(struct pt_regs *regs)
-{
-       printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
-              current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
-              regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
-}
-
 void kernel_fp_unavailable_exception(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
index b457bfa..def1b8b 100644 (file)
@@ -671,7 +671,7 @@ static void __init vdso_setup_syscall_map(void)
        extern unsigned long sys_ni_syscall;
 
 
-       for (i = 0; i < __NR_syscalls; i++) {
+       for (i = 0; i < NR_syscalls; i++) {
 #ifdef CONFIG_PPC64
                if (sys_call_table[i*2] != sys_ni_syscall)
                        vdso_data->syscall_map_64[i >> 5] |=
index 59cf5f4..3745113 100644 (file)
@@ -61,7 +61,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
        addi    r3,r3,CFG_SYSCALL_MAP32
        cmpli   cr0,r4,0
        beqlr
-       li      r0,__NR_syscalls
+       li      r0,NR_syscalls
        stw     r0,0(r4)
        crclr   cr0*4+so
        blr
index 2f01c4a..184a6ba 100644 (file)
@@ -62,7 +62,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
        cmpli   cr0,r4,0
        crclr   cr0*4+so
        beqlr
-       li      r0,__NR_syscalls
+       li      r0,NR_syscalls
        stw     r0,0(r4)
        blr
   .cfi_endproc
index f5c80d5..162d0f7 100644 (file)
@@ -29,23 +29,9 @@ _GLOBAL(do_load_up_transact_altivec)
        addi    r10,r3,THREAD_TRANSACT_VRSTATE
        REST_32VRS(0,r4,r10)
 
-       /* Disable VEC again. */
-       MTMSRD(r6)
-       isync
-
        blr
 #endif
 
-/*
- * Enable use of VMX/Altivec for the caller.
- */
-_GLOBAL(vec_enable)
-       mfmsr   r3
-       oris    r3,r3,MSR_VEC@h
-       MTMSRD(r3)
-       isync
-       blr
-
 /*
  * Load state from memory into VMX registers including VSCR.
  * Assumes the caller has enabled VMX in the MSR.
@@ -84,39 +70,6 @@ _GLOBAL(load_up_altivec)
        MTMSRD(r5)                      /* enable use of AltiVec now */
        isync
 
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-       LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
-       toreal(r3)
-       PPC_LL  r4,ADDROFF(last_task_used_altivec)(r3)
-       PPC_LCMPI       0,r4,0
-       beq     1f
-
-       /* Save VMX state to last_task_used_altivec's THREAD struct */
-       toreal(r4)
-       addi    r4,r4,THREAD
-       addi    r6,r4,THREAD_VRSTATE
-       SAVE_32VRS(0,r5,r6)
-       mfvscr  v0
-       li      r10,VRSTATE_VSCR
-       stvx    v0,r10,r6
-       /* Disable VMX for last_task_used_altivec */
-       PPC_LL  r5,PT_REGS(r4)
-       toreal(r5)
-       PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r10,MSR_VEC@h
-       andc    r4,r4,r10
-       PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-
        /* Hack: if we get an altivec unavailable trap with VRSAVE
         * set to all zeros, we assume this is a broken application
         * that fails to set it properly, and thus we switch it to
@@ -145,39 +98,15 @@ _GLOBAL(load_up_altivec)
        lvx     v0,r10,r6
        mtvscr  v0
        REST_32VRS(0,r4,r6)
-#ifndef CONFIG_SMP
-       /* Update last_task_used_altivec to 'current' */
-       subi    r4,r5,THREAD            /* Back to 'current' */
-       fromreal(r4)
-       PPC_STL r4,ADDROFF(last_task_used_altivec)(r3)
-#endif /* CONFIG_SMP */
        /* restore registers and return */
        blr
 
-_GLOBAL(giveup_altivec_notask)
-       mfmsr   r3
-       andis.  r4,r3,MSR_VEC@h
-       bnelr                           /* Already enabled? */
-       oris    r3,r3,MSR_VEC@h
-       SYNC
-       MTMSRD(r3)                      /* enable use of VMX now */
-       isync
-       blr
-
 /*
- * giveup_altivec(tsk)
+ * __giveup_altivec(tsk)
  * Disable VMX for the task given as the argument,
  * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
  */
-_GLOBAL(giveup_altivec)
-       mfmsr   r5
-       oris    r5,r5,MSR_VEC@h
-       SYNC
-       MTMSRD(r5)                      /* enable use of VMX now */
-       isync
-       PPC_LCMPI       0,r3,0
-       beqlr                           /* if no previous owner, done */
+_GLOBAL(__giveup_altivec)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r7,THREAD_VRSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
@@ -203,11 +132,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
        andc    r4,r4,r3                /* disable FP for previous task */
        PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-       li      r5,0
-       LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
-       PPC_STL r5,ADDROFF(last_task_used_altivec)(r4)
-#endif /* CONFIG_SMP */
        blr
 
 #ifdef CONFIG_VSX
@@ -230,20 +154,6 @@ _GLOBAL(load_up_vsx)
        andis.  r5,r12,MSR_VEC@h
        beql+   load_up_altivec         /* skip if already loaded */
 
-#ifndef CONFIG_SMP
-       ld      r3,last_task_used_vsx@got(r2)
-       ld      r4,0(r3)
-       cmpdi   0,r4,0
-       beq     1f
-       /* Disable VSX for last_task_used_vsx */
-       addi    r4,r4,THREAD
-       ld      r5,PT_REGS(r4)
-       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r6,MSR_VSX@h
-       andc    r6,r4,r6
-       std     r6,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
        ld      r4,PACACURRENT(r13)
        addi    r4,r4,THREAD            /* Get THREAD */
        li      r6,1
@@ -251,27 +161,14 @@ _GLOBAL(load_up_vsx)
        /* enable use of VSX after return */
        oris    r12,r12,MSR_VSX@h
        std     r12,_MSR(r1)
-#ifndef CONFIG_SMP
-       /* Update last_task_used_vsx to 'current' */
-       ld      r4,PACACURRENT(r13)
-       std     r4,0(r3)
-#endif /* CONFIG_SMP */
        b       fast_exception_return
 
 /*
  * __giveup_vsx(tsk)
  * Disable VSX for the task given as the argument.
  * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
  */
 _GLOBAL(__giveup_vsx)
-       mfmsr   r5
-       oris    r5,r5,MSR_VSX@h
-       mtmsrd  r5                      /* enable use of VSX now */
-       isync
-
-       cmpdi   0,r3,0
-       beqlr-                          /* if no previous owner, done */
        addi    r3,r3,THREAD            /* want THREAD of task */
        ld      r5,PT_REGS(r3)
        cmpdi   0,r5,0
@@ -281,11 +178,6 @@ _GLOBAL(__giveup_vsx)
        andc    r4,r4,r3                /* disable VSX for previous task */
        std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-       li      r5,0
-       ld      r4,last_task_used_vsx@got(r2)
-       std     r5,0(r4)
-#endif /* CONFIG_SMP */
        blr
 
 #endif /* CONFIG_VSX */
index 6b35269..cff207b 100644 (file)
@@ -2700,9 +2700,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        goto out;
        }
 
-       flush_fp_to_thread(current);
-       flush_altivec_to_thread(current);
-       flush_vsx_to_thread(current);
+       flush_all_to_thread(current);
+
        vcpu->arch.wqp = &vcpu->arch.vcore->wq;
        vcpu->arch.pgdir = current->mm->pgd;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
index a759d9a..eab96cf 100644 (file)
@@ -1265,6 +1265,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
        if (rcomp)
                kvmppc_set_cr(vcpu, cr);
 
+       disable_kernel_fp();
        preempt_enable();
 
        return emulated;
index 70fb08d..95bceca 100644 (file)
@@ -751,6 +751,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               disable_kernel_fp();
                t->fp_save_area = &vcpu->arch.fp;
                preempt_enable();
        }
@@ -760,6 +761,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
+               disable_kernel_altivec();
                t->vr_save_area = &vcpu->arch.vr;
                preempt_enable();
 #endif
@@ -788,6 +790,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               disable_kernel_fp();
                preempt_enable();
        }
 #ifdef CONFIG_ALTIVEC
@@ -795,6 +798,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
+               disable_kernel_altivec();
                preempt_enable();
        }
 #endif
@@ -1486,21 +1490,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                goto out;
        /* interrupts now hard-disabled */
 
-       /* Save FPU state in thread_struct */
-       if (current->thread.regs->msr & MSR_FP)
-               giveup_fpu(current);
-
-#ifdef CONFIG_ALTIVEC
-       /* Save Altivec state in thread_struct */
-       if (current->thread.regs->msr & MSR_VEC)
-               giveup_altivec(current);
-#endif
-
-#ifdef CONFIG_VSX
-       /* Save VSX state in thread_struct */
-       if (current->thread.regs->msr & MSR_VSX)
-               __giveup_vsx(current);
-#endif
+       /* Save FPU, Altivec and VSX state */
+       giveup_all(current);
 
        /* Preload FPU if it's enabled */
        if (kvmppc_get_msr(vcpu) & MSR_FP)
index fd58751..778ef86 100644 (file)
@@ -98,6 +98,7 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
+       disable_kernel_spe();
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
 }
@@ -107,6 +108,7 @@ static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
+       disable_kernel_spe();
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
 }
@@ -141,6 +143,7 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
        if (!(current->thread.regs->msr & MSR_FP)) {
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               disable_kernel_fp();
                current->thread.fp_save_area = &vcpu->arch.fp;
                current->thread.regs->msr |= MSR_FP;
        }
@@ -182,6 +185,7 @@ static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
                if (!(current->thread.regs->msr & MSR_VEC)) {
                        enable_kernel_altivec();
                        load_vr_state(&vcpu->arch.vr);
+                       disable_kernel_altivec();
                        current->thread.vr_save_area = &vcpu->arch.vr;
                        current->thread.regs->msr |= MSR_VEC;
                }
index ac93a3b..b27e030 100644 (file)
@@ -46,6 +46,7 @@ int enter_vmx_usercopy(void)
  */
 int exit_vmx_usercopy(void)
 {
+       disable_kernel_altivec();
        pagefault_enable();
        preempt_enable();
        return 0;
@@ -70,6 +71,7 @@ int enter_vmx_copy(void)
  */
 void *exit_vmx_copy(void *dest)
 {
+       disable_kernel_altivec();
        preempt_enable();
        return dest;
 }
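
The disable_kernel_altivec()/disable_kernel_fp()/disable_kernel_spe() calls added throughout these hunks all enforce one discipline: with the lazy-state tracking removed, a kernel user of a vector unit must release it before re-enabling preemption. A sketch of the shape every converted site now has (kernel context, not standalone):

    static void vmx_region(void)
    {
            preempt_disable();
            enable_kernel_altivec();
            /* ... VMX instructions operate here ... */
            disable_kernel_altivec();       /* new: release the unit */
            preempt_enable();
    }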
index e905f7c..07f49f1 100644 (file)
@@ -74,6 +74,7 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
                v2 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_2);
@@ -102,6 +103,7 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
                v3 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_3);
@@ -135,6 +137,7 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
                v4 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_4);
@@ -172,6 +175,7 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
                v5 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_5);
index 5810967..31a5d42 100644 (file)
@@ -110,10 +110,10 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
                unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE;
 
                pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
-               pmd_val(*pmdp++) = val;
-               pmd_val(*pmdp++) = val;
-               pmd_val(*pmdp++) = val;
-               pmd_val(*pmdp++) = val;
+               *pmdp++ = __pmd(val);
+               *pmdp++ = __pmd(val);
+               *pmdp++ = __pmd(val);
+               *pmdp++ = __pmd(val);
 
                v += LARGE_PAGE_SIZE_16M;
                p += LARGE_PAGE_SIZE_16M;
@@ -125,7 +125,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
                unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE;
 
                pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
-               pmd_val(*pmdp) = val;
+               *pmdp = __pmd(val);
 
                v += LARGE_PAGE_SIZE_4M;
                p += LARGE_PAGE_SIZE_4M;
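
Rewriting the stores as *pmdp = __pmd(val) removes uses of pmd_val() as an lvalue, which is what lets pmd_t stay an opaque wrapper and pmd_val() become a plain accessor. One plausible shape of that accessor pair, reduced to standalone C (the real kernel definitions vary by config):

    #include <stdio.h>

    typedef struct { unsigned long pmd; } pmd_t;    /* opaque wrapper type */

    #define pmd_val(x)      ((x).pmd)               /* read-only accessor */
    #define __pmd(x)        ((pmd_t){ (x) })        /* explicit constructor */

    int main(void)
    {
            pmd_t entry = __pmd(0x1000UL | 0x2UL);  /* writes go through __pmd() */
            printf("%lx\n", pmd_val(entry));        /* reads through pmd_val() */
            return 0;
    }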
index 3eb73a3..1ffeda8 100644 (file)
@@ -14,10 +14,13 @@ obj-$(CONFIG_PPC_MMU_NOHASH)        += mmu_context_nohash.o tlb_nohash.o \
 obj-$(CONFIG_PPC_BOOK3E)       += tlb_low_$(CONFIG_WORD_SIZE)e.o
 hash64-$(CONFIG_PPC_NATIVE)    := hash_native_64.o
 obj-$(CONFIG_PPC_STD_MMU_64)   += hash_utils_64.o slb_low.o slb.o $(hash64-y)
-obj-$(CONFIG_PPC_STD_MMU_32)   += ppc_mmu_32.o
-obj-$(CONFIG_PPC_STD_MMU)      += hash_low_$(CONFIG_WORD_SIZE).o \
-                                  tlb_hash$(CONFIG_WORD_SIZE).o \
+obj-$(CONFIG_PPC_STD_MMU_32)   += ppc_mmu_32.o hash_low_32.o
+obj-$(CONFIG_PPC_STD_MMU)      += tlb_hash$(CONFIG_WORD_SIZE).o \
                                   mmu_context_hash$(CONFIG_WORD_SIZE).o
+ifeq ($(CONFIG_PPC_STD_MMU_64),y)
+obj-$(CONFIG_PPC_4K_PAGES)     += hash64_4k.o
+obj-$(CONFIG_PPC_64K_PAGES)    += hash64_64k.o
+endif
 obj-$(CONFIG_PPC_ICSWX)                += icswx.o
 obj-$(CONFIG_PPC_ICSWX_PID)    += icswx_pid.o
 obj-$(CONFIG_40x)              += 40x_mmu.o
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
new file mode 100644 (file)
index 0000000..e7c0454
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright IBM Corporation, 2015
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/machdep.h>
+#include <asm/mmu.h>
+
+int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
+                  pte_t *ptep, unsigned long trap, unsigned long flags,
+                  int ssize, int subpg_prot)
+{
+       unsigned long hpte_group;
+       unsigned long rflags, pa;
+       unsigned long old_pte, new_pte;
+       unsigned long vpn, hash, slot;
+       unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;
+
+       /*
+        * Atomically mark the linux PTE busy and dirty
+        */
+       do {
+               pte_t pte = READ_ONCE(*ptep);
+
+               old_pte = pte_val(pte);
+               /* If PTE busy, retry the access */
+               if (unlikely(old_pte & _PAGE_BUSY))
+                       return 0;
+               /* If PTE permissions don't match, take page fault */
+               if (unlikely(access & ~old_pte))
+                       return 1;
+               /*
+                * Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access.
+                */
+               new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE;
+               if (access & _PAGE_RW)
+                       new_pte |= _PAGE_DIRTY;
+       } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
+                                         old_pte, new_pte));
+       /*
+        * PP bits are filled in by htab_convert_pte_flags(): _PAGE_USER is
+        * already PP bit 0x2, so only 0x1 is added for a read-only user page
+        */
+       rflags = htab_convert_pte_flags(new_pte);
+
+       if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+           !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+               rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+
+       vpn  = hpt_vpn(ea, vsid, ssize);
+       if (unlikely(old_pte & _PAGE_HASHPTE)) {
+               /*
+                * There MIGHT be an HPTE for this pte
+                */
+               hash = hpt_hash(vpn, shift, ssize);
+               if (old_pte & _PAGE_F_SECOND)
+                       hash = ~hash;
+               slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+               slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT;
+
+               if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K,
+                                        MMU_PAGE_4K, ssize, flags) == -1)
+                       old_pte &= ~_PAGE_HPTEFLAGS;
+       }
+
+       if (likely(!(old_pte & _PAGE_HASHPTE))) {
+
+               pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+               hash = hpt_hash(vpn, shift, ssize);
+
+repeat:
+               hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+               /* Insert into the hash table, primary slot */
+               slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+                                 MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+               /*
+                * Primary is full, try the secondary
+                */
+               if (unlikely(slot == -1)) {
+                       hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+                       slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+                                                 rflags, HPTE_V_SECONDARY,
+                                                 MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+                       if (slot == -1) {
+                               if (mftb() & 0x1)
+                                       hpte_group = ((hash & htab_hash_mask) *
+                                                     HPTES_PER_GROUP) & ~0x7UL;
+                               ppc_md.hpte_remove(hpte_group);
+                               /*
+                                * FIXME!! Should we retry the group we just removed from?
+                                */
+                               goto repeat;
+                       }
+               }
+               /*
+                * Hypervisor failure. Restore the old pte and return -1,
+                * similar to the other __hash_page_* variants.
+                */
+               if (unlikely(slot == -2)) {
+                       *ptep = __pte(old_pte);
+                       hash_failure_debug(ea, access, vsid, trap, ssize,
+                                          MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
+                       return -1;
+               }
+               new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+               new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX);
+       }
+       *ptep = __pte(new_pte & ~_PAGE_BUSY);
+       return 0;
+}
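
The cmpxchg loop at the top of __hash_page_4K() is effectively a lock: _PAGE_BUSY is the lock bit, taken with the compare-and-swap and released by the final store of new_pte. A standalone sketch of that protocol using GCC atomics in place of __cmpxchg_u64() (bit values invented for illustration):

    #include <stdio.h>

    #define PAGE_BUSY       (1UL << 0)      /* the "lock" bit */
    #define PAGE_ACCESSED   (1UL << 1)

    static unsigned long pte = 0x100;       /* some pre-existing PTE bits */

    int main(void)
    {
            unsigned long old, new;

            do {
                    old = __atomic_load_n(&pte, __ATOMIC_RELAXED);
                    if (old & PAGE_BUSY)    /* someone else holds it: bail */
                            return 0;
                    new = old | PAGE_BUSY | PAGE_ACCESSED;
            } while (!__atomic_compare_exchange_n(&pte, &old, new, 0,
                                                  __ATOMIC_ACQUIRE,
                                                  __ATOMIC_RELAXED));

            /* ... update the hash table while holding the busy bit ... */

            /* the releasing store clears _PAGE_BUSY, as the kernel code does */
            __atomic_store_n(&pte, new & ~PAGE_BUSY, __ATOMIC_RELEASE);
            printf("pte=%lx\n", pte);
            return 0;
    }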
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
new file mode 100644 (file)
index 0000000..0762c1e
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * Copyright IBM Corporation, 2015
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/machdep.h>
+#include <asm/mmu.h>
+/*
+ * index ranges from 0 to 15
+ */
+bool __rpte_sub_valid(real_pte_t rpte, unsigned long index)
+{
+       unsigned long g_idx;
+       unsigned long ptev = pte_val(rpte.pte);
+
+       g_idx = (ptev & _PAGE_COMBO_VALID) >> _PAGE_F_GIX_SHIFT;
+       index = index >> 2;
+       if (g_idx & (0x1 << index))
+               return true;
+       else
+               return false;
+}
+/*
+ * index ranges from 0 to 15
+ */
+static unsigned long mark_subptegroup_valid(unsigned long ptev, unsigned long index)
+{
+       unsigned long g_idx;
+
+       if (!(ptev & _PAGE_COMBO))
+               return ptev;
+       index = index >> 2;
+       g_idx = 0x1 << index;
+
+       return ptev | (g_idx << _PAGE_F_GIX_SHIFT);
+}
+
+int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
+                  pte_t *ptep, unsigned long trap, unsigned long flags,
+                  int ssize, int subpg_prot)
+{
+       real_pte_t rpte;
+       unsigned long *hidxp;
+       unsigned long hpte_group;
+       unsigned int subpg_index;
+       unsigned long rflags, pa, hidx;
+       unsigned long old_pte, new_pte, subpg_pte;
+       unsigned long vpn, hash, slot;
+       unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;
+
+       /*
+        * atomically mark the linux large page PTE busy and dirty
+        */
+       do {
+               pte_t pte = READ_ONCE(*ptep);
+
+               old_pte = pte_val(pte);
+               /* If PTE busy, retry the access */
+               if (unlikely(old_pte & _PAGE_BUSY))
+                       return 0;
+               /* If PTE permissions don't match, take page fault */
+               if (unlikely(access & ~old_pte))
+                       return 1;
+               /*
+                * Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access. Since this is a 4K insert of a 64K page,
+                * also add _PAGE_COMBO.
+                */
+               new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO;
+               if (access & _PAGE_RW)
+                       new_pte |= _PAGE_DIRTY;
+       } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
+                                         old_pte, new_pte));
+       /*
+        * Handle the subpage protection bits
+        */
+       subpg_pte = new_pte & ~subpg_prot;
+       rflags = htab_convert_pte_flags(subpg_pte);
+
+       if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+           !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+
+               /*
+                * No CPU has hugepages but lacks no-execute, so we
+                * don't need to worry about that case.
+                */
+               rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+       }
+
+       subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
+       vpn  = hpt_vpn(ea, vsid, ssize);
+       rpte = __real_pte(__pte(old_pte), ptep);
+       /*
+        * None of the sub 4K pages has been hashed yet
+        */
+       if (!(old_pte & _PAGE_HASHPTE))
+               goto htab_insert_hpte;
+       /*
+        * Check if the pte was already inserted into the hash table
+        * as a 64k HW page, and invalidate the 64k HPTE if so.
+        */
+       if (!(old_pte & _PAGE_COMBO)) {
+               flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
+               old_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
+               goto htab_insert_hpte;
+       }
+       /*
+        * Check for sub page valid and update
+        */
+       if (__rpte_sub_valid(rpte, subpg_index)) {
+               int ret;
+
+               hash = hpt_hash(vpn, shift, ssize);
+               hidx = __rpte_to_hidx(rpte, subpg_index);
+               if (hidx & _PTEIDX_SECONDARY)
+                       hash = ~hash;
+               slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+               slot += hidx & _PTEIDX_GROUP_IX;
+
+               ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
+                                          MMU_PAGE_4K, MMU_PAGE_4K,
+                                          ssize, flags);
+               /*
+                * If we failed, it's typically because the HPTE wasn't
+                * really here, so we try an insertion.
+                */
+               if (ret == -1)
+                       goto htab_insert_hpte;
+
+               *ptep = __pte(new_pte & ~_PAGE_BUSY);
+               return 0;
+       }
+
+htab_insert_hpte:
+       /*
+        * handle _PAGE_4K_PFN case
+        */
+       if (old_pte & _PAGE_4K_PFN) {
+               /*
+                * All the sub 4K pages have the same
+                * physical address.
+                */
+               pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT;
+       } else {
+               pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+               pa += (subpg_index << shift);
+       }
+       hash = hpt_hash(vpn, shift, ssize);
+repeat:
+       hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+       /* Insert into the hash table, primary slot */
+       slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+                                 MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+       /*
+        * Primary is full, try the secondary
+        */
+       if (unlikely(slot == -1)) {
+               hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+               slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+                                         rflags, HPTE_V_SECONDARY,
+                                         MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+               if (slot == -1) {
+                       if (mftb() & 0x1)
+                               hpte_group = ((hash & htab_hash_mask) *
+                                             HPTES_PER_GROUP) & ~0x7UL;
+                       ppc_md.hpte_remove(hpte_group);
+                       /*
+                        * FIXME!! Should we retry the group we just removed from?
+                        */
+                       goto repeat;
+               }
+       }
+       /*
+        * Hypervisor failure. Restore the old pte and return -1,
+        * similar to the other __hash_page_* variants.
+        */
+       if (unlikely(slot == -2)) {
+               *ptep = __pte(old_pte);
+               hash_failure_debug(ea, access, vsid, trap, ssize,
+                                  MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
+               return -1;
+       }
+       /*
+        * Insert the slot number & secondary bit in the PTE second half,
+        * clear _PAGE_BUSY and set the appropriate HPTE slot bit.
+        * Since we have _PAGE_BUSY set on ptep, we can be sure
+        * nobody is updating hidx.
+        */
+       hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+       rpte.hidx &= ~(0xfUL << (subpg_index << 2));
+       *hidxp = rpte.hidx  | (slot << (subpg_index << 2));
+       new_pte = mark_subptegroup_valid(new_pte, subpg_index);
+       new_pte |=  _PAGE_HASHPTE;
+       /*
+        * See __real_pte() for details on the matching smp_rmb()
+        */
+       smp_wmb();
+       *ptep = __pte(new_pte & ~_PAGE_BUSY);
+       return 0;
+}
+
+int __hash_page_64K(unsigned long ea, unsigned long access,
+                   unsigned long vsid, pte_t *ptep, unsigned long trap,
+                   unsigned long flags, int ssize)
+{
+
+       unsigned long hpte_group;
+       unsigned long rflags, pa;
+       unsigned long old_pte, new_pte;
+       unsigned long vpn, hash, slot;
+       unsigned long shift = mmu_psize_defs[MMU_PAGE_64K].shift;
+
+       /*
+        * atomically mark the linux large page PTE busy and dirty
+        */
+       do {
+               pte_t pte = READ_ONCE(*ptep);
+
+               old_pte = pte_val(pte);
+               /* If PTE busy, retry the access */
+               if (unlikely(old_pte & _PAGE_BUSY))
+                       return 0;
+               /* If PTE permissions don't match, take page fault */
+               if (unlikely(access & ~old_pte))
+                       return 1;
+               /*
+                * Check if the PTE has the cache-inhibit bit set.
+                * If so, bail out and refault as a 4K page.
+                */
+               if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) &&
+                   unlikely(old_pte & _PAGE_NO_CACHE))
+                       return 0;
+               /*
+                * Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access.
+                */
+               new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
+               if (access & _PAGE_RW)
+                       new_pte |= _PAGE_DIRTY;
+       } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
+                                         old_pte, new_pte));
+
+       rflags = htab_convert_pte_flags(new_pte);
+
+       if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+           !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+               rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+
+       vpn  = hpt_vpn(ea, vsid, ssize);
+       if (unlikely(old_pte & _PAGE_HASHPTE)) {
+               /*
+                * There MIGHT be an HPTE for this pte
+                */
+               hash = hpt_hash(vpn, shift, ssize);
+               if (old_pte & _PAGE_F_SECOND)
+                       hash = ~hash;
+               slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+               slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT;
+
+               if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
+                                        MMU_PAGE_64K, ssize, flags) == -1)
+                       old_pte &= ~_PAGE_HPTEFLAGS;
+       }
+
+       if (likely(!(old_pte & _PAGE_HASHPTE))) {
+
+               pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+               hash = hpt_hash(vpn, shift, ssize);
+
+repeat:
+               hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+               /* Insert into the hash table, primary slot */
+               slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+                                 MMU_PAGE_64K, MMU_PAGE_64K, ssize);
+               /*
+                * Primary is full, try the secondary
+                */
+               if (unlikely(slot == -1)) {
+                       hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+                       slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+                                                 rflags, HPTE_V_SECONDARY,
+                                                 MMU_PAGE_64K, MMU_PAGE_64K, ssize);
+                       if (slot == -1) {
+                               if (mftb() & 0x1)
+                                       hpte_group = ((hash & htab_hash_mask) *
+                                                     HPTES_PER_GROUP) & ~0x7UL;
+                               ppc_md.hpte_remove(hpte_group);
+                               /*
+                                * FIXME!! Should we retry the group we just removed from?
+                                */
+                               goto repeat;
+                       }
+               }
+               /*
+                * Hypervisor failure. Restore the old pte and return -1,
+                * similar to the other __hash_page_* variants.
+                */
+               if (unlikely(slot == -2)) {
+                       *ptep = __pte(old_pte);
+                       hash_failure_debug(ea, access, vsid, trap, ssize,
+                                          MMU_PAGE_64K, MMU_PAGE_64K, old_pte);
+                       return -1;
+               }
+               new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+               new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX);
+       }
+       *ptep = __pte(new_pte & ~_PAGE_BUSY);
+       return 0;
+}
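
In the 4K-subpage case above, each of the 16 subpages of a 64K Linux page records its hash-table slot as a 4-bit hidx field packed into the second half of the PTE; __hash_page_4K() clears and refills the field for one subpage, and __rpte_to_hidx() reads it back. The packing arithmetic, reduced to standalone C with invented values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long hidx = 0;         /* word holding 16 x 4-bit fields */
            unsigned int subpg_index = 5;   /* which 4K subpage of the 64K page */
            unsigned int slot = 0xa;        /* group index plus secondary bit */

            /* clear, then set, the 4-bit field for this subpage */
            hidx &= ~(0xfUL << (subpg_index << 2));
            hidx |= (unsigned long)slot << (subpg_index << 2);

            /* read it back, as __rpte_to_hidx() does */
            printf("slot=%lx\n", (hidx >> (subpg_index << 2)) & 0xfUL);
            return 0;
    }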
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
deleted file mode 100644 (file)
index 3b49e32..0000000
+++ /dev/null
@@ -1,1003 +0,0 @@
-/*
- * ppc64 MMU hashtable management routines
- *
- * (c) Copyright IBM Corp. 2003, 2005
- *
- * Maintained by: Benjamin Herrenschmidt
- *                <benh@kernel.crashing.org>
- *
- * This file is covered by the GNU Public Licence v2 as
- * described in the kernel's COPYING file.
- */
-
-#include <asm/reg.h>
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/cputable.h>
-
-       .text
-
-/*
- * Stackframe:
- *             
- *         +-> Back chain                      (SP + 256)
- *         |   General register save area      (SP + 112)
- *         |   Parameter save area             (SP + 48)
- *         |   TOC save area                   (SP + 40)
- *         |   link editor doubleword          (SP + 32)
- *         |   compiler doubleword             (SP + 24)
- *         |   LR save area                    (SP + 16)
- *         |   CR save area                    (SP + 8)
- * SP ---> +-- Back chain                      (SP + 0)
- */
-
-#ifndef CONFIG_PPC_64K_PAGES
-
-/*****************************************************************************
- *                                                                           *
- *           4K SW & 4K HW pages implementation                              *
- *                                                                           *
- *****************************************************************************/
-
-
-/*
- * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *              pte_t *ptep, unsigned long trap, unsigned long flags,
- *              int ssize)
- *
- * Adds a 4K page to the hash table in a segment of 4K pages only
- */
-
-_GLOBAL(__hash_page_4K)
-       mflr    r0
-       std     r0,16(r1)
-       stdu    r1,-STACKFRAMESIZE(r1)
-       /* Save all params that we need after a function call */
-       std     r6,STK_PARAM(R6)(r1)
-       std     r8,STK_PARAM(R8)(r1)
-       std     r9,STK_PARAM(R9)(r1)
-       
-       /* Save non-volatile registers.
-        * r31 will hold "old PTE"
-        * r30 is "new PTE"
-        * r29 is vpn
-        * r28 is a hash value
-        * r27 is hashtab mask (maybe dynamic patched instead ?)
-        */
-       std     r27,STK_REG(R27)(r1)
-       std     r28,STK_REG(R28)(r1)
-       std     r29,STK_REG(R29)(r1)
-       std     r30,STK_REG(R30)(r1)
-       std     r31,STK_REG(R31)(r1)
-       
-       /* Step 1:
-        *
-        * Check permissions, atomically mark the linux PTE busy
-        * and hashed.
-        */ 
-1:
-       ldarx   r31,0,r6
-       /* Check access rights (access & ~(pte_val(*ptep))) */
-       andc.   r0,r4,r31
-       bne-    htab_wrong_access
-       /* Check if PTE is busy */
-       andi.   r0,r31,_PAGE_BUSY
-       /* If so, just bail out and refault if needed. Someone else
-        * is changing this PTE anyway and might hash it.
-        */
-       bne-    htab_bail_ok
-
-       /* Prepare new PTE value (turn access RW into DIRTY, then
-        * add BUSY,HASHPTE and ACCESSED)
-        */
-       rlwinm  r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
-       or      r30,r30,r31
-       ori     r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
-       /* Write the linux PTE atomically (setting busy) */
-       stdcx.  r30,0,r6
-       bne-    1b
-       isync
-
-       /* Step 2:
-        *
-        * Insert/Update the HPTE in the hash table. At this point,
-        * r4 (access) is re-useable, we use it for the new HPTE flags
-        */
-
-BEGIN_FTR_SECTION
-       cmpdi   r9,0                    /* check segment size */
-       bne     3f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-       /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT - VPN_SHIFT
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
-       or      r29,r28,r29
-       /*
-        * Calculate hash value for primary slot and store it in r28
-        * r3 = va, r5 = vsid
-        * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
-        */
-       rldicl  r0,r3,64-12,48
-       xor     r28,r5,r0               /* hash */
-       b       4f
-
-3:     /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT_1T - VPN_SHIFT
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
-       or      r29,r28,r29
-
-       /*
-        * calculate hash value for primary slot and
-        * store it in r28 for 1T segment
-        * r3 = va, r5 = vsid
-        */
-       sldi    r28,r5,25               /* vsid << 25 */
-       /* r0 =  (va >> 12) & ((1ul << (40 - 12)) -1) */
-       rldicl  r0,r3,64-12,36
-       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
-       xor     r28,r28,r0              /* hash */
-
-       /* Convert linux PTE bits into HW equivalents */
-4:     andi.   r3,r30,0x1fe            /* Get basic set of flags */
-       xori    r3,r3,HPTE_R_N          /* _PAGE_EXEC -> NOEXEC */
-       rlwinm  r0,r30,32-9+1,30,30     /* _PAGE_RW -> _PAGE_USER (r0) */
-       rlwinm  r4,r30,32-7+1,30,30     /* _PAGE_DIRTY -> _PAGE_USER (r4) */
-       and     r0,r0,r4                /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
-       andc    r0,r30,r0               /* r0 = pte & ~r0 */
-       rlwimi  r3,r0,32-1,31,31        /* Insert result into PP lsb */
-       /*
-        * Always add "C" bit for perf. Memory coherence is always enabled
-        */
-       ori     r3,r3,HPTE_R_C | HPTE_R_M
-
-       /* We eventually do the icache sync here (maybe inline that
-        * code rather than call a C function...) 
-        */
-BEGIN_FTR_SECTION
-       mr      r4,r30
-       mr      r5,r7
-       bl      hash_page_do_lazy_icache
-END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
-
-       /* At this point, r3 contains new PP bits, save them in
-        * place of "access" in the param area (sic)
-        */
-       std     r3,STK_PARAM(R4)(r1)
-
-       /* Get htab_hash_mask */
-       ld      r4,htab_hash_mask@got(2)
-       ld      r27,0(r4)       /* htab_hash_mask -> r27 */
-
-       /* Check if we may already be in the hashtable, in this case, we
-        * go to out-of-line code to try to modify the HPTE
-        */
-       andi.   r0,r31,_PAGE_HASHPTE
-       bne     htab_modify_pte
-
-htab_insert_pte:
-       /* Clear hpte bits in new pte (we also clear BUSY btw) and
-        * add _PAGE_HASHPTE
-        */
-       lis     r0,_PAGE_HPTEFLAGS@h
-       ori     r0,r0,_PAGE_HPTEFLAGS@l
-       andc    r30,r30,r0
-       ori     r30,r30,_PAGE_HASHPTE
-
-       /* physical address r5 */
-       rldicl  r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
-       sldi    r5,r5,PAGE_SHIFT
-
-       /* Calculate primary group hash */
-       and     r0,r28,r27
-       rldicr  r3,r0,3,63-3            /* r3 = (hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,0                    /* !bolted, !secondary */
-       li      r8,MMU_PAGE_4K          /* page size */
-       li      r9,MMU_PAGE_4K          /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl htab_call_hpte_insert1
-htab_call_hpte_insert1:
-       bl      .                       /* Patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge     htab_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    htab_pte_insert_failure
-
-       /* Now try secondary slot */
-       
-       /* physical address r5 */
-       rldicl  r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
-       sldi    r5,r5,PAGE_SHIFT
-
-       /* Calculate secondary group hash */
-       andc    r0,r27,r28
-       rldicr  r3,r0,3,63-3    /* r0 = (~hash & mask) << 3 */
-       
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,HPTE_V_SECONDARY     /* !bolted, secondary */
-       li      r8,MMU_PAGE_4K          /* page size */
-       li      r9,MMU_PAGE_4K          /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl htab_call_hpte_insert2
-htab_call_hpte_insert2:
-       bl      .                       /* Patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge+    htab_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    htab_pte_insert_failure
-
-       /* Both are full, we need to evict something */
-       mftb    r0
-       /* Pick a random group based on TB */
-       andi.   r0,r0,1
-       mr      r5,r28
-       bne     2f
-       not     r5,r5
-2:     and     r0,r5,r27
-       rldicr  r3,r0,3,63-3    /* r0 = (hash & mask) << 3 */   
-       /* Call ppc_md.hpte_remove */
-.globl htab_call_hpte_remove
-htab_call_hpte_remove:
-       bl      .                       /* Patched by htab_finish_init() */
-
-       /* Try all again */
-       b       htab_insert_pte 
-
-htab_bail_ok:
-       li      r3,0
-       b       htab_bail
-
-htab_pte_insert_ok:
-       /* Insert slot number & secondary bit in PTE */
-       rldimi  r30,r3,12,63-15
-               
-       /* Write out the PTE with a normal write
-        * (maybe add eieio may be good still ?)
-        */
-htab_write_out_pte:
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r30,0(r6)
-       li      r3, 0
-htab_bail:
-       ld      r27,STK_REG(R27)(r1)
-       ld      r28,STK_REG(R28)(r1)
-       ld      r29,STK_REG(R29)(r1)
-       ld      r30,STK_REG(R30)(r1)
-       ld      r31,STK_REG(R31)(r1)
-       addi    r1,r1,STACKFRAMESIZE
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
-
-htab_modify_pte:
-       /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
-       mr      r4,r3
-       rlwinm  r3,r31,32-12,29,31
-
-       /* Secondary group ? if yes, get a inverted hash value */
-       mr      r5,r28
-       andi.   r0,r31,_PAGE_SECONDARY
-       beq     1f
-       not     r5,r5
-1:
-       /* Calculate proper slot value for ppc_md.hpte_updatepp */
-       and     r0,r5,r27
-       rldicr  r0,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-       add     r3,r0,r3        /* add slot idx */
-
-       /* Call ppc_md.hpte_updatepp */
-       mr      r5,r29                  /* vpn */
-       li      r6,MMU_PAGE_4K          /* base page size */
-       li      r7,MMU_PAGE_4K          /* actual page size */
-       ld      r8,STK_PARAM(R9)(r1)    /* segment size */
-       ld      r9,STK_PARAM(R8)(r1)    /* get "flags" param */
-.globl htab_call_hpte_updatepp
-htab_call_hpte_updatepp:
-       bl      .                       /* Patched by htab_finish_init() */
-
-       /* if we failed because typically the HPTE wasn't really here
-        * we try an insertion. 
-        */
-       cmpdi   0,r3,-1
-       beq-    htab_insert_pte
-
-       /* Clear the BUSY bit and Write out the PTE */
-       li      r0,_PAGE_BUSY
-       andc    r30,r30,r0
-       b       htab_write_out_pte
-
-htab_wrong_access:
-       /* Bail out clearing reservation */
-       stdcx.  r31,0,r6
-       li      r3,1
-       b       htab_bail
-
-htab_pte_insert_failure:
-       /* Bail out restoring old PTE */
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r31,0(r6)
-       li      r3,-1
-       b       htab_bail
-
-
-#else /* CONFIG_PPC_64K_PAGES */
-
-
-/*****************************************************************************
- *                                                                           *
- *           64K SW & 4K or 64K HW in a 4K segment pages implementation      *
- *                                                                           *
- *****************************************************************************/
-
-/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *              pte_t *ptep, unsigned long trap, unsigned local flags,
- *              int ssize, int subpg_prot)
- */
-
-/*
- * For now, we do NOT implement Admixed pages
- */
-_GLOBAL(__hash_page_4K)
-       mflr    r0
-       std     r0,16(r1)
-       stdu    r1,-STACKFRAMESIZE(r1)
-       /* Save all params that we need after a function call */
-       std     r6,STK_PARAM(R6)(r1)
-       std     r8,STK_PARAM(R8)(r1)
-       std     r9,STK_PARAM(R9)(r1)
-
-       /* Save non-volatile registers.
-        * r31 will hold "old PTE"
-        * r30 is "new PTE"
-        * r29 is vpn
-        * r28 is a hash value
-        * r27 is hashtab mask (maybe dynamic patched instead ?)
-        * r26 is the hidx mask
-        * r25 is the index in combo page
-        */
-       std     r25,STK_REG(R25)(r1)
-       std     r26,STK_REG(R26)(r1)
-       std     r27,STK_REG(R27)(r1)
-       std     r28,STK_REG(R28)(r1)
-       std     r29,STK_REG(R29)(r1)
-       std     r30,STK_REG(R30)(r1)
-       std     r31,STK_REG(R31)(r1)
-
-       /* Step 1:
-        *
-        * Check permissions, atomically mark the linux PTE busy
-        * and hashed.
-        */
-1:
-       ldarx   r31,0,r6
-       /* Check access rights (access & ~(pte_val(*ptep))) */
-       andc.   r0,r4,r31
-       bne-    htab_wrong_access
-       /* Check if PTE is busy */
-       andi.   r0,r31,_PAGE_BUSY
-       /* If so, just bail out and refault if needed. Someone else
-        * is changing this PTE anyway and might hash it.
-        */
-       bne-    htab_bail_ok
-       /* Prepare new PTE value (turn access RW into DIRTY, then
-        * add BUSY and ACCESSED)
-        */
-       rlwinm  r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
-       or      r30,r30,r31
-       ori     r30,r30,_PAGE_BUSY | _PAGE_ACCESSED
-       oris    r30,r30,_PAGE_COMBO@h
-       /* Write the linux PTE atomically (setting busy) */
-       stdcx.  r30,0,r6
-       bne-    1b
-       isync
-
-       /* Step 2:
-        *
-        * Insert/Update the HPTE in the hash table. At this point,
-        * r4 (access) is reusable; we use it for the new HPTE flags
-        */
-
-       /* Load the hidx index */
-       rldicl  r25,r3,64-12,60
-
-BEGIN_FTR_SECTION
-       cmpdi   r9,0                    /* check segment size */
-       bne     3f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-       /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT - VPN_SHIFT
-       /*
-        * clrldi r3,r3,64 - SID_SHIFT -->  ea & 0xfffffff
-        * srdi  r28,r3,VPN_SHIFT
-        */
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
-       or      r29,r28,r29
-       /*
-        * Calculate hash value for primary slot and store it in r28
-        * r3 = va, r5 = vsid
-        * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
-        */
-       rldicl  r0,r3,64-12,48
-       xor     r28,r5,r0               /* hash */
-       b       4f
-
-3:     /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT_1T - VPN_SHIFT
-       /*
-        * clrldi r3,r3,64 - SID_SHIFT_1T -->  ea & 0xffffffffff
-        * srdi r28,r3,VPN_SHIFT
-        */
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
-       or      r29,r28,r29
-
-       /*
-        * Calculate hash value for primary slot and
-        * store it in r28  for 1T segment
-        * r3 = va, r5 = vsid
-        */
-       sldi    r28,r5,25               /* vsid << 25 */
-       /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
-       rldicl  r0,r3,64-12,36
-       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
-       xor     r28,r28,r0              /* hash */
-
-       /* Convert linux PTE bits into HW equivalents */
-4:
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-       andc    r10,r30,r10
-       andi.   r3,r10,0x1fe            /* Get basic set of flags */
-       rlwinm  r0,r10,32-9+1,30,30     /* _PAGE_RW -> _PAGE_USER (r0) */
-#else
-       andi.   r3,r30,0x1fe            /* Get basic set of flags */
-       rlwinm  r0,r30,32-9+1,30,30     /* _PAGE_RW -> _PAGE_USER (r0) */
-#endif
-       xori    r3,r3,HPTE_R_N          /* _PAGE_EXEC -> NOEXEC */
-       rlwinm  r4,r30,32-7+1,30,30     /* _PAGE_DIRTY -> _PAGE_USER (r4) */
-       and     r0,r0,r4                /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
-       andc    r0,r3,r0                /* r0 = pte & ~r0 */
-       rlwimi  r3,r0,32-1,31,31        /* Insert result into PP lsb */
-       /*
-        * Always add "C" bit for perf. Memory coherence is always enabled
-        */
-       ori     r3,r3,HPTE_R_C | HPTE_R_M
-
-       /* We eventually do the icache sync here (maybe inline that
-        * code rather than call a C function...)
-        */
-BEGIN_FTR_SECTION
-       mr      r4,r30
-       mr      r5,r7
-       bl      hash_page_do_lazy_icache
-END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
-
-       /* At this point, r3 contains new PP bits, save them in
-        * place of "access" in the param area (sic)
-        */
-       std     r3,STK_PARAM(R4)(r1)
-
-       /* Get htab_hash_mask */
-       ld      r4,htab_hash_mask@got(2)
-       ld      r27,0(r4)       /* htab_hash_mask -> r27 */
-
-       /* Check if we may already be in the hashtable, in this case, we
-        * go to out-of-line code to try to modify the HPTE. We look for
-        * the bit at (1 >> (index + 32))
-        */
-       rldicl. r0,r31,64-12,48
-       li      r26,0                   /* Default hidx */
-       beq     htab_insert_pte
-
-       /*
-        * Check if the pte was already inserted into the hash table
-        * as a 64k HW page, and invalidate the 64k HPTE if so.
-        */
-       andis.  r0,r31,_PAGE_COMBO@h
-       beq     htab_inval_old_hpte
-
-       ld      r6,STK_PARAM(R6)(r1)
-       ori     r26,r6,PTE_PAGE_HIDX_OFFSET /* Load the hidx mask. */
-       ld      r26,0(r26)
-       addi    r5,r25,36               /* Check actual HPTE_SUB bit, this */
-       rldcr.  r0,r31,r5,0             /* must match pgtable.h definition */
-       bne     htab_modify_pte
-
-htab_insert_pte:
-       /* real page number in r5, PTE RPN value + index */
-       andis.  r0,r31,_PAGE_4K_PFN@h
-       srdi    r5,r31,PTE_RPN_SHIFT
-       bne-    htab_special_pfn
-       sldi    r5,r5,PAGE_FACTOR
-       add     r5,r5,r25
-htab_special_pfn:
-       sldi    r5,r5,HW_PAGE_SHIFT
-
-       /* Calculate primary group hash */
-       and     r0,r28,r27
-       rldicr  r3,r0,3,63-3            /* r0 = (hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,0                    /* !bolted, !secondary */
-       li      r8,MMU_PAGE_4K          /* page size */
-       li      r9,MMU_PAGE_4K          /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl htab_call_hpte_insert1
-htab_call_hpte_insert1:
-       bl      .                       /* patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge     htab_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    htab_pte_insert_failure
-
-       /* Now try secondary slot */
-
-       /* real page number in r5, PTE RPN value + index */
-       andis.  r0,r31,_PAGE_4K_PFN@h
-       srdi    r5,r31,PTE_RPN_SHIFT
-       bne-    3f
-       sldi    r5,r5,PAGE_FACTOR
-       add     r5,r5,r25
-3:     sldi    r5,r5,HW_PAGE_SHIFT
-
-       /* Calculate secondary group hash */
-       andc    r0,r27,r28
-       rldicr  r3,r0,3,63-3            /* r0 = (~hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,HPTE_V_SECONDARY     /* !bolted, secondary */
-       li      r8,MMU_PAGE_4K          /* page size */
-       li      r9,MMU_PAGE_4K          /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl htab_call_hpte_insert2
-htab_call_hpte_insert2:
-       bl      .                       /* patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge+    htab_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    htab_pte_insert_failure
-
-       /* Both are full, we need to evict something */
-       mftb    r0
-       /* Pick a random group based on TB */
-       andi.   r0,r0,1
-       mr      r5,r28
-       bne     2f
-       not     r5,r5
-2:     and     r0,r5,r27
-       rldicr  r3,r0,3,63-3            /* r0 = (hash & mask) << 3 */
-       /* Call ppc_md.hpte_remove */
-.globl htab_call_hpte_remove
-htab_call_hpte_remove:
-       bl      .                       /* patched by htab_finish_init() */
-
-       /* Try all again */
-       b       htab_insert_pte
-
-       /*
-        * Call out to C code to invalidate an 64k HW HPTE that is
-        * useless now that the segment has been switched to 4k pages.
-        */
-htab_inval_old_hpte:
-       mr      r3,r29                  /* vpn */
-       mr      r4,r31                  /* PTE.pte */
-       li      r5,0                    /* PTE.hidx */
-       li      r6,MMU_PAGE_64K         /* psize */
-       ld      r7,STK_PARAM(R9)(r1)    /* ssize */
-       ld      r8,STK_PARAM(R8)(r1)    /* flags */
-       bl      flush_hash_page
-       /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
-       lis     r0,_PAGE_HPTE_SUB@h
-       ori     r0,r0,_PAGE_HPTE_SUB@l
-       andc    r30,r30,r0
-       b       htab_insert_pte
-       
-htab_bail_ok:
-       li      r3,0
-       b       htab_bail
-
-htab_pte_insert_ok:
-       /* Insert slot number & secondary bit in PTE second half,
-        * clear _PAGE_BUSY and set appropriate HPTE slot bit
-        */
-       ld      r6,STK_PARAM(R6)(r1)
-       li      r0,_PAGE_BUSY
-       andc    r30,r30,r0
-       /* HPTE SUB bit */
-       li      r0,1
-       subfic  r5,r25,27               /* Must match bit position in */
-       sld     r0,r0,r5                /* pgtable.h */
-       or      r30,r30,r0
-       /* hindx */
-       sldi    r5,r25,2
-       sld     r3,r3,r5
-       li      r4,0xf
-       sld     r4,r4,r5
-       andc    r26,r26,r4
-       or      r26,r26,r3
-       ori     r5,r6,PTE_PAGE_HIDX_OFFSET
-       std     r26,0(r5)
-       lwsync
-       std     r30,0(r6)
-       li      r3, 0
-htab_bail:
-       ld      r25,STK_REG(R25)(r1)
-       ld      r26,STK_REG(R26)(r1)
-       ld      r27,STK_REG(R27)(r1)
-       ld      r28,STK_REG(R28)(r1)
-       ld      r29,STK_REG(R29)(r1)
-       ld      r30,STK_REG(R30)(r1)
-       ld      r31,STK_REG(R31)(r1)
-       addi    r1,r1,STACKFRAMESIZE
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
-
-htab_modify_pte:
-       /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
-       mr      r4,r3
-       sldi    r5,r25,2
-       srd     r3,r26,r5
-
-       /* Secondary group? If yes, get an inverted hash value */
-       mr      r5,r28
-       andi.   r0,r3,0x8 /* page secondary ? */
-       beq     1f
-       not     r5,r5
-1:     andi.   r3,r3,0x7 /* extract idx alone */
-
-       /* Calculate proper slot value for ppc_md.hpte_updatepp */
-       and     r0,r5,r27
-       rldicr  r0,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-       add     r3,r0,r3        /* add slot idx */
-
-       /* Call ppc_md.hpte_updatepp */
-       mr      r5,r29                  /* vpn */
-       li      r6,MMU_PAGE_4K          /* base page size */
-       li      r7,MMU_PAGE_4K          /* actual page size */
-       ld      r8,STK_PARAM(R9)(r1)    /* segment size */
-       ld      r9,STK_PARAM(R8)(r1)    /* get "flags" param */
-.globl htab_call_hpte_updatepp
-htab_call_hpte_updatepp:
-       bl      .                       /* patched by htab_finish_init() */
-
-       /* if we failed because typically the HPTE wasn't really here
-        * we try an insertion.
-        */
-       cmpdi   0,r3,-1
-       beq-    htab_insert_pte
-
-       /* Clear the BUSY bit and Write out the PTE */
-       li      r0,_PAGE_BUSY
-       andc    r30,r30,r0
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r30,0(r6)
-       li      r3,0
-       b       htab_bail
-
-htab_wrong_access:
-       /* Bail out clearing reservation */
-       stdcx.  r31,0,r6
-       li      r3,1
-       b       htab_bail
-
-htab_pte_insert_failure:
-       /* Bail out restoring old PTE */
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r31,0(r6)
-       li      r3,-1
-       b       htab_bail
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-#ifdef CONFIG_PPC_64K_PAGES
-
-/*****************************************************************************
- *                                                                           *
- *           64K SW & 64K HW in a 64K segment pages implementation           *
- *                                                                           *
- *****************************************************************************/
-
-_GLOBAL(__hash_page_64K)
-       mflr    r0
-       std     r0,16(r1)
-       stdu    r1,-STACKFRAMESIZE(r1)
-       /* Save all params that we need after a function call */
-       std     r6,STK_PARAM(R6)(r1)
-       std     r8,STK_PARAM(R8)(r1)
-       std     r9,STK_PARAM(R9)(r1)
-
-       /* Save non-volatile registers.
-        * r31 will hold "old PTE"
-        * r30 is "new PTE"
-        * r29 is vpn
-        * r28 is a hash value
-        * r27 is hashtab mask (maybe dynamic patched instead ?)
-        */
-       std     r27,STK_REG(R27)(r1)
-       std     r28,STK_REG(R28)(r1)
-       std     r29,STK_REG(R29)(r1)
-       std     r30,STK_REG(R30)(r1)
-       std     r31,STK_REG(R31)(r1)
-
-       /* Step 1:
-        *
-        * Check permissions, atomically mark the linux PTE busy
-        * and hashed.
-        */
-1:
-       ldarx   r31,0,r6
-       /* Check access rights (access & ~(pte_val(*ptep))) */
-       andc.   r0,r4,r31
-       bne-    ht64_wrong_access
-       /* Check if PTE is busy */
-       andi.   r0,r31,_PAGE_BUSY
-       /* If so, just bail out and refault if needed. Someone else
-        * is changing this PTE anyway and might hash it.
-        */
-       bne-    ht64_bail_ok
-BEGIN_FTR_SECTION
-       /* Check if PTE has the cache-inhibit bit set */
-       andi.   r0,r31,_PAGE_NO_CACHE
-       /* If so, bail out and refault as a 4k page */
-       bne-    ht64_bail_ok
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE)
-       /* Prepare new PTE value (turn access RW into DIRTY, then
-        * add BUSY and ACCESSED)
-        */
-       rlwinm  r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
-       or      r30,r30,r31
-       ori     r30,r30,_PAGE_BUSY | _PAGE_ACCESSED
-       /* Write the linux PTE atomically (setting busy) */
-       stdcx.  r30,0,r6
-       bne-    1b
-       isync
-
-       /* Step 2:
-        *
-        * Insert/Update the HPTE in the hash table. At this point,
-        * r4 (access) is reusable; we use it for the new HPTE flags
-        */
-
-BEGIN_FTR_SECTION
-       cmpdi   r9,0                    /* check segment size */
-       bne     3f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-       /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT - VPN_SHIFT
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
-       or      r29,r28,r29
-
-       /* Calculate hash value for primary slot and store it in r28
-        * r3 = va, r5 = vsid
-        * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
-        */
-       rldicl  r0,r3,64-16,52
-       xor     r28,r5,r0               /* hash */
-       b       4f
-
-3:     /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT_1T - VPN_SHIFT
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
-       or      r29,r28,r29
-       /*
-        * calculate hash value for primary slot and
-        * store it in r28 for 1T segment
-        * r3 = va, r5 = vsid
-        */
-       sldi    r28,r5,25               /* vsid << 25 */
-       /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
-       rldicl  r0,r3,64-16,40
-       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
-       xor     r28,r28,r0              /* hash */
-
-       /* Convert linux PTE bits into HW equivalents */
-4:     andi.   r3,r30,0x1fe            /* Get basic set of flags */
-       xori    r3,r3,HPTE_R_N          /* _PAGE_EXEC -> NOEXEC */
-       rlwinm  r0,r30,32-9+1,30,30     /* _PAGE_RW -> _PAGE_USER (r0) */
-       rlwinm  r4,r30,32-7+1,30,30     /* _PAGE_DIRTY -> _PAGE_USER (r4) */
-       and     r0,r0,r4                /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
-       andc    r0,r30,r0               /* r0 = pte & ~r0 */
-       rlwimi  r3,r0,32-1,31,31        /* Insert result into PP lsb */
-       /*
-        * Always add "C" bit for perf. Memory coherence is always enabled
-        */
-       ori     r3,r3,HPTE_R_C | HPTE_R_M
-
-       /* We eventually do the icache sync here (maybe inline that
-        * code rather than call a C function...)
-        */
-BEGIN_FTR_SECTION
-       mr      r4,r30
-       mr      r5,r7
-       bl      hash_page_do_lazy_icache
-END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
-
-       /* At this point, r3 contains new PP bits, save them in
-        * place of "access" in the param area (sic)
-        */
-       std     r3,STK_PARAM(R4)(r1)
-
-       /* Get htab_hash_mask */
-       ld      r4,htab_hash_mask@got(2)
-       ld      r27,0(r4)       /* htab_hash_mask -> r27 */
-
-       /* Check if we may already be in the hashtable, in this case, we
-        * go to out-of-line code to try to modify the HPTE
-        */
-       rldicl. r0,r31,64-12,48
-       bne     ht64_modify_pte
-
-ht64_insert_pte:
-       /* Clear hpte bits in new pte (we also clear BUSY btw) and
-        * add _PAGE_HPTE_SUB0
-        */
-       lis     r0,_PAGE_HPTEFLAGS@h
-       ori     r0,r0,_PAGE_HPTEFLAGS@l
-       andc    r30,r30,r0
-#ifdef CONFIG_PPC_64K_PAGES
-       oris    r30,r30,_PAGE_HPTE_SUB0@h
-#else
-       ori     r30,r30,_PAGE_HASHPTE
-#endif
-       /* Physical address in r5 */
-       rldicl  r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
-       sldi    r5,r5,PAGE_SHIFT
-
-       /* Calculate primary group hash */
-       and     r0,r28,r27
-       rldicr  r3,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,0                    /* !bolted, !secondary */
-       li      r8,MMU_PAGE_64K
-       li      r9,MMU_PAGE_64K         /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl ht64_call_hpte_insert1
-ht64_call_hpte_insert1:
-       bl      .                       /* patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge     ht64_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    ht64_pte_insert_failure
-
-       /* Now try secondary slot */
-
-       /* Physical address in r5 */
-       rldicl  r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
-       sldi    r5,r5,PAGE_SHIFT
-
-       /* Calculate secondary group hash */
-       andc    r0,r27,r28
-       rldicr  r3,r0,3,63-3    /* r0 = (~hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,HPTE_V_SECONDARY     /* !bolted, secondary */
-       li      r8,MMU_PAGE_64K
-       li      r9,MMU_PAGE_64K         /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl ht64_call_hpte_insert2
-ht64_call_hpte_insert2:
-       bl      .                       /* patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge+    ht64_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    ht64_pte_insert_failure
-
-       /* Both are full, we need to evict something */
-       mftb    r0
-       /* Pick a random group based on TB */
-       andi.   r0,r0,1
-       mr      r5,r28
-       bne     2f
-       not     r5,r5
-2:     and     r0,r5,r27
-       rldicr  r3,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-       /* Call ppc_md.hpte_remove */
-.globl ht64_call_hpte_remove
-ht64_call_hpte_remove:
-       bl      .                       /* patched by htab_finish_init() */
-
-       /* Try all again */
-       b       ht64_insert_pte
-
-ht64_bail_ok:
-       li      r3,0
-       b       ht64_bail
-
-ht64_pte_insert_ok:
-       /* Insert slot number & secondary bit in PTE */
-       rldimi  r30,r3,12,63-15
-
-       /* Write out the PTE with a normal write
-        * (maybe adding an eieio would still be good?)
-        */
-ht64_write_out_pte:
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r30,0(r6)
-       li      r3, 0
-ht64_bail:
-       ld      r27,STK_REG(R27)(r1)
-       ld      r28,STK_REG(R28)(r1)
-       ld      r29,STK_REG(R29)(r1)
-       ld      r30,STK_REG(R30)(r1)
-       ld      r31,STK_REG(R31)(r1)
-       addi    r1,r1,STACKFRAMESIZE
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
-
-ht64_modify_pte:
-       /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
-       mr      r4,r3
-       rlwinm  r3,r31,32-12,29,31
-
-       /* Secondary group? If yes, get an inverted hash value */
-       mr      r5,r28
-       andi.   r0,r31,_PAGE_F_SECOND
-       beq     1f
-       not     r5,r5
-1:
-       /* Calculate proper slot value for ppc_md.hpte_updatepp */
-       and     r0,r5,r27
-       rldicr  r0,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-       add     r3,r0,r3        /* add slot idx */
-
-       /* Call ppc_md.hpte_updatepp */
-       mr      r5,r29                  /* vpn */
-       li      r6,MMU_PAGE_64K         /* base page size */
-       li      r7,MMU_PAGE_64K         /* actual page size */
-       ld      r8,STK_PARAM(R9)(r1)    /* segment size */
-       ld      r9,STK_PARAM(R8)(r1)    /* get "flags" param */
-.globl ht64_call_hpte_updatepp
-ht64_call_hpte_updatepp:
-       bl      .                       /* patched by htab_finish_init() */
-
-       /* if we failed because typically the HPTE wasn't really here
-        * we try an insertion.
-        */
-       cmpdi   0,r3,-1
-       beq-    ht64_insert_pte
-
-       /* Clear the BUSY bit and Write out the PTE */
-       li      r0,_PAGE_BUSY
-       andc    r30,r30,r0
-       b       ht64_write_out_pte
-
-ht64_wrong_access:
-       /* Bail out clearing reservation */
-       stdcx.  r31,0,r6
-       li      r3,1
-       b       ht64_bail
-
-ht64_pte_insert_failure:
-       /* Bail out restoring old PTE */
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r31,0(r6)
-       li      r3,-1
-       b       ht64_bail
-
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-
-/*****************************************************************************
- *                                                                           *
- *           Huge pages implementation is in hugetlbpage.c                   *
- *                                                                           *
- *****************************************************************************/
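The deleted assembly above computes the hashed page table group index with rldicl/xor sequences. As a reading aid, here is a minimal standalone C sketch of that hash under assumed shift values (4K base pages, 256MB and 1T segments); it is an illustration, not the kernel's implementation:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12   /* assumed: 4K base pages */
#define SID_SHIFT    28   /* assumed: 256MB segments */
#define SID_SHIFT_1T 40   /* assumed: 1T segments */

/* Primary hash for a 256MB segment: vsid ^ (page index within the segment) */
static uint64_t hpt_hash_256m(uint64_t vsid, uint64_t ea)
{
        return vsid ^ ((ea >> PAGE_SHIFT) & ((1UL << (SID_SHIFT - PAGE_SHIFT)) - 1));
}

/* 1T variant folds the VSID into itself first: vsid ^ (vsid << 25) ^ index */
static uint64_t hpt_hash_1t(uint64_t vsid, uint64_t ea)
{
        return vsid ^ (vsid << 25) ^
               ((ea >> PAGE_SHIFT) & ((1UL << (SID_SHIFT_1T - PAGE_SHIFT)) - 1));
}

int main(void)
{
        uint64_t hash = hpt_hash_256m(0x12345, 0x10007000);

        /* The secondary group is the complemented hash; masking against
         * htab_hash_mask and the <<3 (8 slots per group) follow, as in
         * the rldicr instructions above. */
        printf("primary 0x%llx secondary 0x%llx 1T 0x%llx\n",
               (unsigned long long)hash,
               (unsigned long long)~hash,
               (unsigned long long)hpt_hash_1t(0x12345, 0x10007000));
        return 0;
}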
index c8822af..8eaac81 100644
@@ -429,6 +429,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
@@ -482,6 +483,15 @@ static void native_hugepage_invalidate(unsigned long vsid,
        }
        local_irq_restore(flags);
 }
+#else
+static void native_hugepage_invalidate(unsigned long vsid,
+                                      unsigned long addr,
+                                      unsigned char *hpte_slot_array,
+                                      int psize, int ssize, int local)
+{
+       WARN(1, "%s called without THP support\n", __func__);
+}
+#endif
 
 static inline int __hpte_actual_psize(unsigned int lp, int psize)
 {
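The hunk above gates the real native_hugepage_invalidate() behind CONFIG_TRANSPARENT_HUGEPAGE and keeps a warning stub otherwise, so every caller still compiles and links. A generic sketch of that compile-time stub pattern (names here are illustrative, not the kernel's):

#include <stdio.h>

/* With HAVE_THP defined the real routine is built; without it, a stub
 * that only warns keeps the symbol available to callers. */
#ifdef HAVE_THP
static void hugepage_invalidate(void)
{
        /* real invalidation work would go here */
}
#else
static void hugepage_invalidate(void)
{
        fprintf(stderr, "%s called without THP support\n", __func__);
}
#endif

int main(void)
{
        hugepage_invalidate();
        return 0;
}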
index 7f9616f..ba59d59 100644
@@ -159,24 +159,41 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        },
 };
 
-static unsigned long htab_convert_pte_flags(unsigned long pteflags)
+unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
-       unsigned long rflags = pteflags & 0x1fa;
+       unsigned long rflags = 0;
 
        /* _PAGE_EXEC -> NOEXEC */
        if ((pteflags & _PAGE_EXEC) == 0)
                rflags |= HPTE_R_N;
-
-       /* PP bits. PAGE_USER is already PP bit 0x2, so we only
-        * need to add in 0x1 if it's a read-only user page
+       /*
+        * PP bits:
+        * Linux uses SLB key 0 for the kernel and 1 for user space.
+        * Kernel areas are mapped with PP bits 00, and there is no
+        * kernel read-only mapping (_PAGE_KERNEL_RO). User areas are
+        * mapped with PP 0x2, and read-only user areas with 0x3.
         */
-       if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
-                                        (pteflags & _PAGE_DIRTY)))
-               rflags |= 1;
+       if (pteflags & _PAGE_USER) {
+               rflags |= 0x2;
+               if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
+                       rflags |= 0x1;
+       }
        /*
         * Always add "C" bit for perf. Memory coherence is always enabled
         */
-       return rflags | HPTE_R_C | HPTE_R_M;
+       rflags |=  HPTE_R_C | HPTE_R_M;
+       /*
+        * Add in WIG bits
+        */
+       if (pteflags & _PAGE_WRITETHRU)
+               rflags |= HPTE_R_W;
+       if (pteflags & _PAGE_NO_CACHE)
+               rflags |= HPTE_R_I;
+       if (pteflags & _PAGE_GUARDED)
+               rflags |= HPTE_R_G;
+
+       return rflags;
 }
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
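To make the PP-bit logic above concrete, this standalone sketch mirrors the new htab_convert_pte_flags() with made-up flag values; the real constants live in the powerpc headers, so everything here is an assumption for demonstration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's _PAGE_* and HPTE_R_* flags. */
#define PAGE_EXEC   0x004
#define PAGE_RW     0x008
#define PAGE_USER   0x010
#define PAGE_DIRTY  0x080
#define HPTE_R_N    0x400
#define HPTE_R_C    0x080
#define HPTE_R_M    0x010

static unsigned long convert_pte_flags(unsigned long pteflags)
{
        unsigned long rflags = 0;

        if ((pteflags & PAGE_EXEC) == 0)
                rflags |= HPTE_R_N;          /* no-execute is inverted */
        if (pteflags & PAGE_USER) {
                rflags |= 0x2;               /* user mapping: PP = 10 */
                if (!((pteflags & PAGE_RW) && (pteflags & PAGE_DIRTY)))
                        rflags |= 0x1;       /* not writable+dirty: PP = 11 */
        }
        return rflags | HPTE_R_C | HPTE_R_M; /* changed bit + coherence */
}

int main(void)
{
        printf("user RO page  -> rflags 0x%lx\n", convert_pte_flags(PAGE_USER));
        printf("user RW dirty -> rflags 0x%lx\n",
               convert_pte_flags(PAGE_USER | PAGE_RW | PAGE_DIRTY));
        return 0;
}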
@@ -629,46 +646,6 @@ int remove_section_mapping(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-extern u32 htab_call_hpte_insert1[];
-extern u32 htab_call_hpte_insert2[];
-extern u32 htab_call_hpte_remove[];
-extern u32 htab_call_hpte_updatepp[];
-extern u32 ht64_call_hpte_insert1[];
-extern u32 ht64_call_hpte_insert2[];
-extern u32 ht64_call_hpte_remove[];
-extern u32 ht64_call_hpte_updatepp[];
-
-static void __init htab_finish_init(void)
-{
-#ifdef CONFIG_PPC_64K_PAGES
-       patch_branch(ht64_call_hpte_insert1,
-               ppc_function_entry(ppc_md.hpte_insert),
-               BRANCH_SET_LINK);
-       patch_branch(ht64_call_hpte_insert2,
-               ppc_function_entry(ppc_md.hpte_insert),
-               BRANCH_SET_LINK);
-       patch_branch(ht64_call_hpte_remove,
-               ppc_function_entry(ppc_md.hpte_remove),
-               BRANCH_SET_LINK);
-       patch_branch(ht64_call_hpte_updatepp,
-               ppc_function_entry(ppc_md.hpte_updatepp),
-               BRANCH_SET_LINK);
-#endif /* CONFIG_PPC_64K_PAGES */
-
-       patch_branch(htab_call_hpte_insert1,
-               ppc_function_entry(ppc_md.hpte_insert),
-               BRANCH_SET_LINK);
-       patch_branch(htab_call_hpte_insert2,
-               ppc_function_entry(ppc_md.hpte_insert),
-               BRANCH_SET_LINK);
-       patch_branch(htab_call_hpte_remove,
-               ppc_function_entry(ppc_md.hpte_remove),
-               BRANCH_SET_LINK);
-       patch_branch(htab_call_hpte_updatepp,
-               ppc_function_entry(ppc_md.hpte_updatepp),
-               BRANCH_SET_LINK);
-}
-
 static void __init htab_initialize(void)
 {
        unsigned long table;
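With htab_finish_init() gone, nothing patches `bl .` placeholders at boot any more; the hash path now reaches the platform HPTE routines as ordinary indirect calls. A minimal sketch of that dispatch style, with invented stand-in names:

#include <stdio.h>

/* Hedged sketch: calling through a machine-description function pointer,
 * which is what replaces the boot-time branch patching removed above. */
struct machdep_calls {
        long (*hpte_insert)(unsigned long group, unsigned long vpn);
};

static long native_hpte_insert(unsigned long group, unsigned long vpn)
{
        return 0; /* stand-in for the real hash table insert */
}

static struct machdep_calls ppc_md = { .hpte_insert = native_hpte_insert };

int main(void)
{
        long slot = ppc_md.hpte_insert(0x100, 0xdead);
        printf("slot %ld\n", slot);
        return 0;
}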
@@ -815,7 +792,6 @@ static void __init htab_initialize(void)
                                         mmu_linear_psize, mmu_kernel_ssize));
        }
 
-       htab_finish_init();
 
        DBG(" <- htab_initialize()\n");
 }
@@ -877,11 +853,11 @@ static unsigned int get_paca_psize(unsigned long addr)
        unsigned long index, mask_index;
 
        if (addr < SLICE_LOW_TOP) {
-               lpsizes = get_paca()->context.low_slices_psize;
+               lpsizes = get_paca()->mm_ctx_low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
                return (lpsizes >> (index * 4)) & 0xF;
        }
-       hpsizes = get_paca()->context.high_slices_psize;
+       hpsizes = get_paca()->mm_ctx_high_slices_psize;
        index = GET_HIGH_SLICE_INDEX(addr);
        mask_index = index & 0x1;
        return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
@@ -890,7 +866,7 @@ static unsigned int get_paca_psize(unsigned long addr)
 #else
 unsigned int get_paca_psize(unsigned long addr)
 {
-       return get_paca()->context.user_psize;
+       return get_paca()->mm_ctx_user_psize;
 }
 #endif
 
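The paca reads above unpack a page-size index stored four bits per slice; the sketch below mirrors the high-slices byte-array path with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Slice i's page-size index lives in nibble i of the packed array. */
static unsigned int slice_psize(const uint8_t *psizes, unsigned long index)
{
        unsigned long mask_index = index & 0x1;

        return (psizes[index >> 1] >> (mask_index * 4)) & 0xF;
}

int main(void)
{
        uint8_t high[2] = { 0x54, 0x21 };   /* slices 0..3 -> 4, 5, 1, 2 */
        unsigned long i;

        for (i = 0; i < 4; i++)
                printf("slice %lu -> psize %u\n", i, slice_psize(high, i));
        return 0;
}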
@@ -906,7 +882,8 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
        slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
        copro_flush_all_slbs(mm);
        if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
-               get_paca()->context = mm->context;
+
+               copy_mm_to_paca(&mm->context);
                slb_flush_and_rebolt();
        }
 }
@@ -973,7 +950,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
 {
        if (user_region) {
                if (psize != get_paca_psize(ea)) {
-                       get_paca()->context = mm->context;
+                       copy_mm_to_paca(&mm->context);
                        slb_flush_and_rebolt();
                }
        } else if (get_paca()->vmalloc_sllp !=
@@ -1148,9 +1125,10 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                }
        }
 
+#endif /* CONFIG_PPC_64K_PAGES */
+
        if (current->mm == mm)
                check_paca_psize(ea, mm, psize, user_region);
-#endif /* CONFIG_PPC_64K_PAGES */
 
 #ifdef CONFIG_PPC_64K_PAGES
        if (psize == MMU_PAGE_64K)
@@ -1203,6 +1181,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
 }
 EXPORT_SYMBOL_GPL(hash_page);
 
+int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
+               unsigned long dsisr)
+{
+       unsigned long access = _PAGE_PRESENT;
+       unsigned long flags = 0;
+       struct mm_struct *mm = current->mm;
+
+       if (REGION_ID(ea) == VMALLOC_REGION_ID)
+               mm = &init_mm;
+
+       if (dsisr & DSISR_NOHPTE)
+               flags |= HPTE_NOHPTE_UPDATE;
+
+       if (dsisr & DSISR_ISSTORE)
+               access |= _PAGE_RW;
+       /*
+        * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+        * accessing a userspace segment (even from the kernel). We assume
+        * kernel addresses always have the high bit set.
+        */
+       if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
+               access |= _PAGE_USER;
+
+       if (trap == 0x400)
+               access |= _PAGE_EXEC;
+
+       return hash_page_mm(mm, ea, access, trap, flags);
+}
+
 void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
 {
index 4d87122..baf1301 100644
@@ -54,18 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                        new_pmd |= _PAGE_DIRTY;
        } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
                                          old_pmd, new_pmd));
-       /*
-        * PP bits. _PAGE_USER is already PP bit 0x2, so we only
-        * need to add in 0x1 if it's a read-only user page
-        */
-       rflags = new_pmd & _PAGE_USER;
-       if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
-                                          (new_pmd & _PAGE_DIRTY)))
-               rflags |= 0x1;
-       /*
-        * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-        */
-       rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
+       rflags = htab_convert_pte_flags(new_pmd);
 
 #if 0
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
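The surrounding function updates the PMD with the lock-free retry loop shown at the top of the hunk (`do { ... } while (old_pmd != __cmpxchg_u64(...))`). A portable C11 sketch of that compare-and-swap pattern, with assumed bit values:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BUSY     0x1   /* illustrative bit values */
#define PAGE_ACCESSED 0x2

/* Retry until a non-busy old value is observed, then atomically install
 * old | BUSY | ACCESSED, mirroring the kernel's cmpxchg loop. */
static uint64_t mark_busy(_Atomic uint64_t *pte)
{
        uint64_t old, new;

        do {
                old = atomic_load(pte);
                if (old & PAGE_BUSY)
                        return old;          /* someone else is updating it */
                new = old | PAGE_BUSY | PAGE_ACCESSED;
        } while (!atomic_compare_exchange_weak(pte, &old, new));
        return old;
}

int main(void)
{
        _Atomic uint64_t pte = 0x1000;

        printf("old PTE 0x%llx, now 0x%llx\n",
               (unsigned long long)mark_busy(&pte),
               (unsigned long long)atomic_load(&pte));
        return 0;
}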
@@ -82,7 +71,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
         */
        shift = mmu_psize_defs[psize].shift;
        index = (ea & ~HPAGE_PMD_MASK) >> shift;
-       BUG_ON(index >= 4096);
+       BUG_ON(index >= PTE_FRAG_SIZE);
 
        vpn = hpt_vpn(ea, vsid, ssize);
        hpte_slot_array = get_hpte_slot_array(pmdp);
@@ -131,13 +120,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
                new_pmd |= _PAGE_HASHPTE;
 
-               /* Add in WIMG bits */
-               rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
-                                     _PAGE_GUARDED));
-               /*
-                * enable the memory coherence always
-                */
-               rflags |= HPTE_R_M;
 repeat:
                hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
 
index ba47aaf..7e6d088 100644
@@ -51,6 +51,48 @@ static inline int mmu_get_tsize(int psize)
        return mmu_psize_defs[psize].enc;
 }
 
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC64)
+#include <asm/paca.h>
+
+static inline void book3e_tlb_lock(void)
+{
+       struct paca_struct *paca = get_paca();
+       unsigned long tmp;
+       int token = smp_processor_id() + 1;
+
+       asm volatile("1: lbarx %0, 0, %1;"
+                    "cmpwi %0, 0;"
+                    "bne 2f;"
+                    "stbcx. %2, 0, %1;"
+                    "bne 1b;"
+                    "b 3f;"
+                    "2: lbzx %0, 0, %1;"
+                    "cmpwi %0, 0;"
+                    "bne 2b;"
+                    "b 1b;"
+                    "3:"
+                    : "=&r" (tmp)
+                    : "r" (&paca->tcd_ptr->lock), "r" (token)
+                    : "memory");
+}
+
+static inline void book3e_tlb_unlock(void)
+{
+       struct paca_struct *paca = get_paca();
+
+       isync();
+       paca->tcd_ptr->lock = 0;
+}
+#else
+static inline void book3e_tlb_lock(void)
+{
+}
+
+static inline void book3e_tlb_unlock(void)
+{
+}
+#endif
+
 static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
 {
        int found = 0;
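The inline asm above is a byte-sized spinlock built from lbarx/stbcx., using the CPU id plus one as the lock token. A rough C11 atomics analogue, for illustration only:

#include <stdatomic.h>
#include <stdio.h>

/* Spin on a plain load while the lock is held (the inner lbzx loop),
 * then try to claim it with an atomic exchange attempt. */
static void tlb_lock(atomic_uchar *lock, unsigned char token)
{
        unsigned char expected;

        for (;;) {
                expected = 0;
                if (atomic_compare_exchange_weak_explicit(lock, &expected,
                                token, memory_order_acquire,
                                memory_order_relaxed))
                        return;
                while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
                        ;  /* wait for the holder to drop the lock */
        }
}

static void tlb_unlock(atomic_uchar *lock)
{
        /* isync + plain store in the original; a release store here */
        atomic_store_explicit(lock, 0, memory_order_release);
}

int main(void)
{
        atomic_uchar lock = 0;

        tlb_lock(&lock, 1 /* cpu id + 1 */);
        tlb_unlock(&lock);
        printf("lock value %u\n", (unsigned)atomic_load(&lock));
        return 0;
}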
@@ -109,7 +151,10 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
         */
        local_irq_save(flags);
 
+       book3e_tlb_lock();
+
        if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
+               book3e_tlb_unlock();
                local_irq_restore(flags);
                return;
        }
@@ -141,6 +186,7 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
 
        asm volatile ("tlbwe");
 
+       book3e_tlb_unlock();
        local_irq_restore(flags);
 }
 
index d94b1af..e2138c7 100644
@@ -59,10 +59,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                        new_pte |= _PAGE_DIRTY;
        } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                         old_pte, new_pte));
+       rflags = htab_convert_pte_flags(new_pte);
 
-       rflags = 0x2 | (!(new_pte & _PAGE_RW));
-       /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
-       rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
        sz = ((1UL) << shift);
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                /* No CPU has hugepages but lacks no execute, so we
@@ -91,18 +89,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
 
                /* clear HPTE slot information in new PTE */
-#ifdef CONFIG_PPC_64K_PAGES
-               new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
-#else
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
-#endif
-               /* Add in WIMG bits */
-               rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
-                                     _PAGE_COHERENT | _PAGE_GUARDED));
-               /*
-                * enable the memory coherence always
-                */
-               rflags |= HPTE_R_M;
 
                slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
                                             mmu_psize, ssize);
@@ -127,3 +114,21 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        *ptep = __pte(new_pte & ~_PAGE_BUSY);
        return 0;
 }
+
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM)
+/*
+ * This enables us to catch the wrong page directory format
+ * Moved here so that we can use WARN() in the call.
+ */
+int hugepd_ok(hugepd_t hpd)
+{
+       bool is_hugepd;
+
+       /*
+        * We should not find this format in page directory, warn otherwise.
+        */
+       is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+       WARN(is_hugepd, "Found wrong page directory format\n");
+       return 0;
+}
+#endif
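The check moved here is a sanity test: with 64K base pages this configuration should never see a hugepd-format directory entry. A standalone sketch of the bit test, with an assumed mask value:

#include <stdint.h>
#include <stdio.h>

#define HUGEPD_SHIFT_MASK 0x3f  /* illustrative; see the kernel headers */

/* An entry with the low two bits clear but a non-zero shift field would
 * be a hugepd pointer, which this configuration must never produce. */
static int looks_like_hugepd(uint64_t pd)
{
        return ((pd & 0x3) == 0x0) && ((pd & HUGEPD_SHIFT_MASK) != 0);
}

int main(void)
{
        printf("%d %d\n", looks_like_hugepd(0x10), looks_like_hugepd(0x3));
        return 0;
}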
index 9833fee..61b8b7c 100644
@@ -53,78 +53,6 @@ static unsigned nr_gpages;
 
 #define hugepd_none(hpd)       ((hpd).pd == 0)
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/*
- * At this point we do the placement change only for BOOK3S 64. This would
- * possibly work on other subarchs.
- */
-
-/*
- * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
- * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
- *
- * Defined in such a way that we can optimize away code block at build time
- * if CONFIG_HUGETLB_PAGE=n.
- */
-int pmd_huge(pmd_t pmd)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return ((pmd_val(pmd) & 0x3) != 0x0);
-}
-
-int pud_huge(pud_t pud)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return ((pud_val(pud) & 0x3) != 0x0);
-}
-
-int pgd_huge(pgd_t pgd)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return ((pgd_val(pgd) & 0x3) != 0x0);
-}
-
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM)
-/*
- * This enables us to catch the wrong page directory format
- * Moved here so that we can use WARN() in the call.
- */
-int hugepd_ok(hugepd_t hpd)
-{
-       bool is_hugepd;
-
-       /*
-        * We should not find this format in page directory, warn otherwise.
-        */
-       is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
-       WARN(is_hugepd, "Found wrong page directory format\n");
-       return 0;
-}
-#endif
-
-#else
-int pmd_huge(pmd_t pmd)
-{
-       return 0;
-}
-
-int pud_huge(pud_t pud)
-{
-       return 0;
-}
-
-int pgd_huge(pgd_t pgd)
-{
-       return 0;
-}
-#endif
-
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
        /* Only called for hugetlbfs pages, hence can ignore THP */
@@ -966,8 +894,8 @@ void flush_dcache_icache_hugepage(struct page *page)
  * We have 4 cases for pgds and pmds:
  * (1) invalid (all zeroes)
  * (2) pointer to next table, as normal; bottom 6 bits == 0
- * (3) leaf pte for huge page, bottom two bits != 00
- * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ * (3) leaf pte for huge page _PAGE_PTE set
+ * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
  *
  * So long as we atomically load page table pointers we are safe against teardown,
 * we can follow the address down to the page and take a ref on it.
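The four cases in the updated comment can be read as a small classifier; a sketch with assumed bit layouts (the _PAGE_PTE position and the hugepd size field here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_PTE          (1UL << 62)  /* assumed position of _PAGE_PTE */
#define HUGEPD_SHIFT_MASK 0x3f         /* assumed size-field mask */

enum pd_kind { PD_INVALID, PD_TABLE, PD_LEAF_HUGE, PD_HUGEPD };

static enum pd_kind classify(uint64_t pd)
{
        if (pd == 0)
                return PD_INVALID;       /* (1) all zeroes */
        if (pd & PAGE_PTE)
                return PD_LEAF_HUGE;     /* (3) leaf pte for a huge page */
        if (pd & HUGEPD_SHIFT_MASK)
                return PD_HUGEPD;        /* (4) hugepd pointer with size bits */
        return PD_TABLE;                 /* (2) pointer to the next table */
}

int main(void)
{
        printf("%d %d\n", classify(0), classify(PAGE_PTE | 0x1000));
        return 0;
}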
index d747dd7..379a6a9 100644
@@ -87,11 +87,7 @@ static void pgd_ctor(void *addr)
 
 static void pmd_ctor(void *addr)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       memset(addr, 0, PMD_TABLE_SIZE * 2);
-#else
        memset(addr, 0, PMD_TABLE_SIZE);
-#endif
 }
 
 struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
index 83dfcb5..83dfd79 100644
@@ -179,6 +179,10 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
         */
        VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) ==
                (_PAGE_PRESENT | _PAGE_USER));
+       /*
+        * Add the pte bit when trying to set a pte
+        */
+       pte = __pte(pte_val(pte) | _PAGE_PTE);
 
        /* Note: mm->context.id might not yet have been assigned as
         * this context might not have been activated yet when this
index e92cb21..ea6bc31 100644
@@ -359,7 +359,7 @@ struct page *pud_page(pud_t pud)
 struct page *pmd_page(pmd_t pmd)
 {
        if (pmd_trans_huge(pmd) || pmd_huge(pmd))
-               return pfn_to_page(pmd_pfn(pmd));
+               return pte_page(pmd_pte(pmd));
        return virt_to_page(pmd_page_vaddr(pmd));
 }
 
@@ -625,7 +625,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
        "1:     ldarx   %0,0,%3\n\
                andi.   %1,%0,%6\n\
                bne-    1b \n\
-               ori     %1,%0,%4 \n\
+               oris    %1,%0,%4@h \n\
                stdcx.  %1,0,%3 \n\
                bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
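The one-character fix above matters because ori ORs an immediate into bits 0-15 of the register while oris targets bits 16-31; the flag being set through operand %4 now lives in the upper halfword, so the @h form is required. In C terms:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t reg = 0;
        uint16_t imm = 0x8000;

        uint64_t with_ori  = reg | imm;                    /* ori : bits 0..15  */
        uint64_t with_oris = reg | ((uint64_t)imm << 16);  /* oris: bits 16..31 */

        printf("ori  -> 0x%llx\noris -> 0x%llx\n",
               (unsigned long long)with_ori, (unsigned long long)with_oris);
        return 0;
}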
@@ -759,22 +759,15 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
 {
-       pmd_val(pmd) |= pgprot_val(pgprot);
-       return pmd;
+       return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
 }
 
 pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
 {
-       pmd_t pmd;
-       /*
-        * For a valid pte, we would have _PAGE_PRESENT always
-        * set. We use this to check THP page at pmd level.
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       pmd_val(pmd) = pfn << PTE_RPN_SHIFT;
-       pmd_val(pmd) |= _PAGE_THP_HUGE;
-       pmd = pmd_set_protbits(pmd, pgprot);
-       return pmd;
+       unsigned long pmdv;
+
+       pmdv = pfn << PTE_RPN_SHIFT;
+       return pmd_set_protbits(__pmd(pmdv), pgprot);
 }
 
 pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
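The rewrite above stops using pmd_val() as an assignable lvalue and instead builds fresh values through __pmd(), which keeps working if pmd_t becomes a strict wrapper struct. A simplified sketch of that accessor pattern (types reduced for illustration):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pmd; } pmd_t;        /* opaque wrapper type */

static inline uint64_t pmd_val(pmd_t p) { return p.pmd; }
static inline pmd_t __pmd(uint64_t v)   { pmd_t p = { v }; return p; }

/* Build a new value instead of writing through pmd_val() as an lvalue. */
static pmd_t pmd_set_protbits(pmd_t pmd, uint64_t prot)
{
        return __pmd(pmd_val(pmd) | prot);
}

int main(void)
{
        pmd_t pmd = pmd_set_protbits(__pmd(0x1000), 0x3);

        printf("0x%llx\n", (unsigned long long)pmd_val(pmd));
        return 0;
}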
@@ -784,10 +777,11 @@ pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
 
 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
+       unsigned long pmdv;
 
-       pmd_val(pmd) &= _HPAGE_CHG_MASK;
-       pmd = pmd_set_protbits(pmd, newprot);
-       return pmd;
+       pmdv = pmd_val(pmd);
+       pmdv &= _HPAGE_CHG_MASK;
+       return pmd_set_protbits(__pmd(pmdv), newprot);
 }
 
 /*
index 515730e..825b687 100644
@@ -228,7 +228,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
                asm volatile("slbie %0" : : "r" (slbie_data));
 
        get_paca()->slb_cache_ptr = 0;
-       get_paca()->context = mm->context;
+       copy_mm_to_paca(&mm->context);
 
        /*
         * preload some userspace segments into the SLB.
index 0f432a7..42954f0 100644
@@ -185,8 +185,7 @@ static void slice_flush_segments(void *parm)
        if (mm != current->active_mm)
                return;
 
-       /* update the paca copy of the context struct */
-       get_paca()->context = current->active_mm->context;
+       copy_mm_to_paca(&current->active_mm->context);
 
        local_irq_save(flags);
        slb_flush_and_rebolt();
index bf4c447..4bc6bbb 100644
@@ -37,8 +37,8 @@
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include "mpc83xx.h"
 
@@ -136,8 +136,6 @@ static void __init mpc83xx_km_setup_arch(void)
        mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-
        np = of_find_node_by_name(NULL, "par_io");
        if (np != NULL) {
                par_io_init(np);
index ef9d01a..7e923ca 100644
@@ -17,7 +17,7 @@
 #include <asm/io.h>
 #include <asm/hw_irq.h>
 #include <asm/ipic.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe_ic.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
index 8d76220..a973b2a 100644
@@ -36,8 +36,8 @@
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include "mpc83xx.h"
 
@@ -74,8 +74,6 @@ static void __init mpc832x_sys_setup_arch(void)
        mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-
        if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
                par_io_init(np);
                of_node_put(np);
index eff5baa..ea2b87d 100644
@@ -25,8 +25,8 @@
 #include <asm/time.h>
 #include <asm/ipic.h>
 #include <asm/udbg.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
@@ -203,8 +203,6 @@ static void __init mpc832x_rdb_setup_arch(void)
        mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-
        if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
                par_io_init(np);
                of_node_put(np);
index 1a26d2f..dd70b85 100644
@@ -44,8 +44,8 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 #include <sysdev/simple_gpio.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include "mpc83xx.h"
 
@@ -82,8 +82,6 @@ static void __init mpc836x_mds_setup_arch(void)
        mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-
        if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
                par_io_init(np);
                of_node_put(np);
index b63b42d..4cd7153 100644
@@ -20,8 +20,8 @@
 #include <asm/time.h>
 #include <asm/ipic.h>
 #include <asm/udbg.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
@@ -35,9 +35,6 @@ static void __init mpc836x_rdk_setup_arch(void)
                ppc_md.progress("mpc836x_rdk_setup_arch()", 0);
 
        mpc83xx_setup_pci();
-#ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-#endif
 }
 
 /*
index f0927e5..dcfafd6 100644
@@ -17,6 +17,7 @@
 #include <linux/pci.h>
 #include <asm/mpic.h>
 #include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
 #include <asm/udbg.h>
 
 #include "mpc85xx.h"
@@ -46,10 +47,12 @@ static void __init bsc913x_qds_setup_arch(void)
        mpc85xx_smp_init();
 #endif
 
+       fsl_pci_assign_primary();
+
        pr_info("bsc913x board from Freescale Semiconductor\n");
 }
 
-machine_device_initcall(bsc9132_qds, mpc85xx_common_publish_devices);
+machine_arch_initcall(bsc9132_qds, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
@@ -67,6 +70,9 @@ define_machine(bsc9132_qds) {
        .probe                  = bsc9132_qds_probe,
        .setup_arch             = bsc913x_qds_setup_arch,
        .init_IRQ               = bsc913x_qds_pic_init,
+#ifdef CONFIG_PCI
+       .pcibios_fixup_bus      = fsl_pcibios_fixup_bus,
+#endif
        .get_irq                = mpic_get_irq,
        .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
index 23791de..949f22c 100644
@@ -9,7 +9,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <sysdev/cpm2_pic.h>
 
 #include "mpc85xx.h"
@@ -105,7 +105,6 @@ void __init mpc85xx_qe_init(void)
                return;
        }
 
-       qe_reset();
        of_node_put(np);
 
 }
index 46d05c9..a2b0bc8 100644
@@ -27,7 +27,7 @@
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <asm/ehv_pic.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include <linux/of_platform.h>
 #include <sysdev/fsl_soc.h>
index 7d12a19..de72a5f 100644
 
 #include "mpc85xx.h"
 
-#ifdef CONFIG_PCI
-static int mpc85xx_exclude_device(struct pci_controller *hose,
-                                  u_char bus, u_char devfn)
-{
-       if (bus == 0 && PCI_SLOT(devfn) == 0)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       else
-               return PCIBIOS_SUCCESSFUL;
-}
-#endif /* CONFIG_PCI */
-
 static void __init mpc85xx_ads_pic_init(void)
 {
        struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
@@ -145,10 +134,6 @@ static void __init mpc85xx_ads_setup_arch(void)
        init_ioports();
 #endif
 
-#ifdef CONFIG_PCI
-       ppc_md.pci_exclude_device = mpc85xx_exclude_device;
-#endif
-
        fsl_pci_assign_primary();
 }
 
index f0be439..f61cbe2 100644
@@ -48,8 +48,8 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 #include <sysdev/simple_gpio.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 #include <asm/mpic.h>
 #include <asm/swiotlb.h>
 #include "smp.h"
index 50dcc00..3f4dad1 100644
@@ -26,8 +26,8 @@
 #include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index 892e613..71bc255 100644
@@ -22,8 +22,8 @@
 #include <asm/pci-bridge.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index b7f9c40..46a3533 100644
@@ -272,17 +272,6 @@ config TAU_AVERAGE
 
          If in doubt, say N here.
 
-config QUICC_ENGINE
-       bool "Freescale QUICC Engine (QE) Support"
-       depends on FSL_SOC && PPC32
-       select PPC_LIB_RHEAP
-       select CRC32
-       help
-         The QUICC Engine (QE) is a new generation of communications
-         coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
-         Selecting this option means that you wish to build a kernel
-         for a machine with a QE coprocessor.
-
 config QE_GPIO
        bool "QE GPIO support"
        depends on QUICC_ENGINE
@@ -295,7 +284,6 @@ config CPM2
        bool "Enable support for the CPM2 (Communications Processor Module)"
        depends on (FSL_SOC_BOOKE && PPC32) || 8260
        select CPM
-       select PPC_LIB_RHEAP
        select PPC_PCI_CHOICE
        select ARCH_REQUIRE_GPIOLIB
        help
@@ -325,6 +313,7 @@ config FSL_ULI1575
 
 config CPM
        bool
+       select GENERIC_ALLOCATOR
 
 config OF_RTC
        bool
index 429fc59..d9088f0 100644
@@ -33,11 +33,6 @@ config PPC_IBM_CELL_BLADE
        select PPC_UDBG_16550
        select UDBG_RTAS_CONSOLE
 
-config PPC_CELL_QPACE
-       bool "IBM Cell - QPACE"
-       depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN
-       select PPC_CELL_COMMON
-
 config AXON_MSI
        bool
        depends on PPC_IBM_CELL_BLADE && PCI_MSI
index 34699bd..0046430 100644
@@ -11,7 +11,6 @@ obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON)        += cbe_powerbutton.o
 
 ifeq ($(CONFIG_SMP),y)
 obj-$(CONFIG_PPC_CELL_NATIVE)          += smp.o
-obj-$(CONFIG_PPC_CELL_QPACE)           += smp.o
 endif
 
 # needed only when building loadable spufs.ko
@@ -26,6 +25,3 @@ obj-$(CONFIG_SPU_BASE)                        += spu_callbacks.o spu_base.o \
                                           spufs/
 
 obj-$(CONFIG_AXON_MSI)                 += axon_msi.o
-
-# qpace setup
-obj-$(CONFIG_PPC_CELL_QPACE)           += qpace_setup.o
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
deleted file mode 100644
index d328140..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- *  linux/arch/powerpc/platforms/cell/qpace_setup.c
- *
- *  Copyright (C) 1995  Linus Torvalds
- *  Adapted from 'alpha' version by Gary Thomas
- *  Modified by Cort Dougan (cort@cs.nmt.edu)
- *  Modified by PPC64 Team, IBM Corp
- *  Modified by Cell Team, IBM Deutschland Entwicklung GmbH
- *  Modified by Benjamin Krill <ben@codiert.org>, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/console.h>
-#include <linux/of_platform.h>
-
-#include <asm/mmu.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/kexec.h>
-#include <asm/pgtable.h>
-#include <asm/prom.h>
-#include <asm/rtas.h>
-#include <asm/dma.h>
-#include <asm/machdep.h>
-#include <asm/time.h>
-#include <asm/cputable.h>
-#include <asm/irq.h>
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-#include <asm/udbg.h>
-#include <asm/cell-regs.h>
-
-#include "interrupt.h"
-#include "pervasive.h"
-#include "ras.h"
-
-static void qpace_show_cpuinfo(struct seq_file *m)
-{
-       struct device_node *root;
-       const char *model = "";
-
-       root = of_find_node_by_path("/");
-       if (root)
-               model = of_get_property(root, "model", NULL);
-       seq_printf(m, "machine\t\t: CHRP %s\n", model);
-       of_node_put(root);
-}
-
-static void qpace_progress(char *s, unsigned short hex)
-{
-       printk("*** %04x : %s\n", hex, s ? s : "");
-}
-
-static const struct of_device_id qpace_bus_ids[] __initconst = {
-       { .type = "soc", },
-       { .compatible = "soc", },
-       { .type = "spider", },
-       { .type = "axon", },
-       { .type = "plb5", },
-       { .type = "plb4", },
-       { .type = "opb", },
-       { .type = "ebc", },
-       {},
-};
-
-static int __init qpace_publish_devices(void)
-{
-       int node;
-
-       /* Publish OF platform devices for southbridge IOs */
-       of_platform_bus_probe(NULL, qpace_bus_ids, NULL);
-
-       /* There is no device for the MIC memory controller, thus we create
-        * a platform device for it to attach the EDAC driver to.
-        */
-       for_each_online_node(node) {
-               if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL)
-                       continue;
-               platform_device_register_simple("cbe-mic", node, NULL, 0);
-       }
-
-       return 0;
-}
-machine_subsys_initcall(qpace, qpace_publish_devices);
-
-static void __init qpace_setup_arch(void)
-{
-#ifdef CONFIG_SPU_BASE
-       spu_priv1_ops = &spu_priv1_mmio_ops;
-       spu_management_ops = &spu_management_of_ops;
-#endif
-
-       cbe_regs_init();
-
-#ifdef CONFIG_CBE_RAS
-       cbe_ras_init();
-#endif
-
-#ifdef CONFIG_SMP
-       smp_init_cell();
-#endif
-
-       /* init to some ~sane value until calibrate_delay() runs */
-       loops_per_jiffy = 50000000;
-
-       cbe_pervasive_init();
-#ifdef CONFIG_DUMMY_CONSOLE
-       conswitchp = &dummy_con;
-#endif
-}
-
-static int __init qpace_probe(void)
-{
-       unsigned long root = of_get_flat_dt_root();
-
-       if (!of_flat_dt_is_compatible(root, "IBM,QPACE"))
-               return 0;
-
-       hpte_init_native();
-       pm_power_off = rtas_power_off;
-
-       return 1;
-}
-
-define_machine(qpace) {
-       .name                   = "QPACE",
-       .probe                  = qpace_probe,
-       .setup_arch             = qpace_setup_arch,
-       .show_cpuinfo           = qpace_show_cpuinfo,
-       .restart                = rtas_restart,
-       .halt                   = rtas_halt,
-       .get_boot_time          = rtas_get_boot_time,
-       .get_rtc_time           = rtas_get_rtc_time,
-       .set_rtc_time           = rtas_set_rtc_time,
-       .calibrate_decr         = generic_calibrate_decr,
-       .progress               = qpace_progress,
-       .init_IRQ               = iic_init_IRQ,
-};
index 4ddf769..9f79004 100644
@@ -326,7 +326,7 @@ static int spu_process_callback(struct spu_context *ctx)
        spu_ret = -ENOSYS;
        npc += 4;
 
-       if (s.nr_ret < __NR_syscalls) {
+       if (s.nr_ret < NR_syscalls) {
                spu_release(ctx);
                /* do actual system call from here */
                spu_ret = spu_sys_callback(&s);
index b4a369d..81799d7 100644
@@ -77,7 +77,7 @@ void maple_get_rtc_time(struct rtc_time *tm)
        if ((tm->tm_year + 1900) < 1970)
                tm->tm_year += 100;
 
-       GregorianDay(tm);
+       tm->tm_wday = -1;
 }
 
 int maple_set_rtc_time(struct rtc_time *tm)
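Setting tm_wday to -1 reports the weekday as unknown instead of trusting the removed, broken GregorianDay(). For reference, a standard standalone weekday computation (Sakamoto's method) looks like this; it is not the kernel's code:

#include <stdio.h>

/* Sakamoto's algorithm: returns 0 (Sunday) .. 6 (Saturday). */
static int day_of_week(int y, int m, int d)
{
        static const int t[] = { 0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4 };

        y -= m < 3;
        return (y + y / 4 - y / 100 + y / 400 + t[m - 1] + d) % 7;
}

int main(void)
{
        printf("2016-01-15 falls on weekday %d\n", day_of_week(2016, 1, 15));
        return 0;
}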
index 76f5013..c3c9bbb 100644
@@ -84,6 +84,7 @@ static void __init bootx_printf(const char *format, ...)
                        break;
                }
        }
+       va_end(args);
 }
 #else /* CONFIG_BOOTX_TEXT */
 static void __init bootx_printf(const char *format, ...) {}
index 6f4f8b0..9815463 100644 (file)
@@ -258,13 +258,14 @@ static unsigned int pmac_pic_get_irq(void)
 #ifdef CONFIG_XMON
 static struct irqaction xmon_action = {
        .handler        = xmon_irq,
-       .flags          = 0,
+       .flags          = IRQF_NO_THREAD,
        .name           = "NMI - XMON"
 };
 #endif
 
 static struct irqaction gatwick_cascade_action = {
        .handler        = gatwick_action,
+       .flags          = IRQF_NO_THREAD,
        .name           = "cascade",
 };
 
index 1c8cdb6..f1516b5 100644 (file)
@@ -2,9 +2,10 @@ obj-y                  += setup.o opal-wrappers.o opal.o opal-async.o idle.o
 obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
 obj-y                  += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
 obj-y                  += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o
+obj-y                  += opal-kmsg.o
 
 obj-$(CONFIG_SMP)      += smp.o subcore.o subcore-asm.o
-obj-$(CONFIG_PCI)      += pci.o pci-p5ioc2.o pci-ioda.o
+obj-$(CONFIG_PCI)      += pci.o pci-p5ioc2.o pci-ioda.o npu-dma.o
 obj-$(CONFIG_EEH)      += eeh-powernv.o
 obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
 obj-$(CONFIG_MEMORY_FAILURE)   += opal-memory-errors.o
index e1c9072..5f152b9 100644 (file)
@@ -48,8 +48,8 @@ static int pnv_eeh_init(void)
        struct pci_controller *hose;
        struct pnv_phb *phb;
 
-       if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
-               pr_warn("%s: OPALv3 is required !\n",
+       if (!firmware_has_feature(FW_FEATURE_OPAL)) {
+               pr_warn("%s: OPAL is required !\n",
                        __func__);
                return -EINVAL;
        }
index 59d735d..15bfbcd 100644 (file)
@@ -242,7 +242,7 @@ static int __init pnv_init_idle_states(void)
        if (cpuidle_disable != IDLE_NO_OVERRIDE)
                goto out;
 
-       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+       if (!firmware_has_feature(FW_FEATURE_OPAL))
                goto out;
 
        power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
new file mode 100644 (file)
index 0000000..e85aa90
--- /dev/null
@@ -0,0 +1,348 @@
+/*
+ * This file implements the DMA operations for NVLink devices. The NPU
+ * devices all point to the same iommu table as the parent PCI device.
+ *
+ * Copyright Alistair Popple, IBM Corporation 2015.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/memblock.h>
+
+#include <asm/iommu.h>
+#include <asm/pnv-pci.h>
+#include <asm/msi_bitmap.h>
+#include <asm/opal.h>
+
+#include "powernv.h"
+#include "pci.h"
+
+/*
+ * Other types of TCE cache invalidation are not functional in the
+ * hardware.
+ */
+#define TCE_KILL_INVAL_ALL PPC_BIT(0)
+
+static struct pci_dev *get_pci_dev(struct device_node *dn)
+{
+       return PCI_DN(dn)->pcidev;
+}
+
+/* Given an NPU device, get the associated PCI device. */
+struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
+{
+       struct device_node *dn;
+       struct pci_dev *gpdev;
+
+       /* Get associated PCI device */
+       dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
+       if (!dn)
+               return NULL;
+
+       gpdev = get_pci_dev(dn);
+       of_node_put(dn);
+
+       return gpdev;
+}
+EXPORT_SYMBOL(pnv_pci_get_gpu_dev);
+
+/* Given the real PCI device, get a linked NPU device. */
+struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
+{
+       struct device_node *dn;
+       struct pci_dev *npdev;
+
+       /* Get associated PCI device */
+       dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
+       if (!dn)
+               return NULL;
+
+       npdev = get_pci_dev(dn);
+       of_node_put(dn);
+
+       return npdev;
+}
+EXPORT_SYMBOL(pnv_pci_get_npu_dev);
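
As an illustrative sketch (not part of the patch), the two lookups above are inverses of each other. Assuming a caller that already holds the GPU's struct pci_dev in a variable gpdev:

	int i;
	struct pci_dev *npdev;

	/* of_parse_phandle() returns NULL once the index runs past the
	 * "ibm,npu" phandle list, so this visits every linked NPU device. */
	for (i = 0; (npdev = pnv_pci_get_npu_dev(gpdev, i)) != NULL; i++)
		WARN_ON(pnv_pci_get_gpu_dev(npdev) != gpdev);
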
+
+#define NPU_DMA_OP_UNSUPPORTED()                                       \
+       dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
+               __func__)
+
+static void *dma_npu_alloc(struct device *dev, size_t size,
+                          dma_addr_t *dma_handle, gfp_t flag,
+                          struct dma_attrs *attrs)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return NULL;
+}
+
+static void dma_npu_free(struct device *dev, size_t size,
+                        void *vaddr, dma_addr_t dma_handle,
+                        struct dma_attrs *attrs)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+}
+
+static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
+                                  unsigned long offset, size_t size,
+                                  enum dma_data_direction direction,
+                                  struct dma_attrs *attrs)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return 0;
+}
+
+static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
+                         int nelems, enum dma_data_direction direction,
+                         struct dma_attrs *attrs)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return 0;
+}
+
+static int dma_npu_dma_supported(struct device *dev, u64 mask)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return 0;
+}
+
+static u64 dma_npu_get_required_mask(struct device *dev)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return 0;
+}
+
+struct dma_map_ops dma_npu_ops = {
+       .map_page               = dma_npu_map_page,
+       .map_sg                 = dma_npu_map_sg,
+       .alloc                  = dma_npu_alloc,
+       .free                   = dma_npu_free,
+       .dma_supported          = dma_npu_dma_supported,
+       .get_required_mask      = dma_npu_get_required_mask,
+};
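
A note on the design: these dma_map_ops are deliberate stubs, since DMA for an NVLink device is configured through the PE of the linked GPU instead. A hypothetical caller (illustrative only, not from the patch; npdev and page are assumed variables) that tried to map through the NPU device, e.g.

	/* Hypothetical driver call, for illustration only. */
	dma_addr_t handle = dma_map_page(&npdev->dev, page, 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

would log "dma_npu_map_page operation unsupported for NVLink devices" once per device via dev_err_once() and get back 0.
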
+
+/*
+ * Returns the PE associated with the PCI device of the given
+ * NPU. Also returns the linked pci device in *gpdev if gpdev != NULL.
+ */
+static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
+                                                 struct pci_dev **gpdev)
+{
+       struct pnv_phb *phb;
+       struct pci_controller *hose;
+       struct pci_dev *pdev;
+       struct pnv_ioda_pe *pe;
+       struct pci_dn *pdn;
+
+       if (npe->flags & PNV_IODA_PE_PEER) {
+               pe = npe->peers[0];
+               pdev = pe->pdev;
+       } else {
+               pdev = pnv_pci_get_gpu_dev(npe->pdev);
+               if (!pdev)
+                       return NULL;
+
+               pdn = pci_get_pdn(pdev);
+               if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+                       return NULL;
+
+               hose = pci_bus_to_host(pdev->bus);
+               phb = hose->private_data;
+               pe = &phb->ioda.pe_array[pdn->pe_number];
+       }
+
+       if (gpdev)
+               *gpdev = pdev;
+
+       return pe;
+}
+
+void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe)
+{
+       struct pnv_phb *phb = npe->phb;
+
+       if (WARN_ON(phb->type != PNV_PHB_NPU ||
+                   !phb->ioda.tce_inval_reg ||
+                   !(npe->flags & PNV_IODA_PE_DEV)))
+               return;
+
+       mb(); /* Ensure previous TCE table stores are visible */
+       __raw_writeq(cpu_to_be64(TCE_KILL_INVAL_ALL),
+               phb->ioda.tce_inval_reg);
+}
+
+void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe,
+                               struct iommu_table *tbl,
+                               unsigned long index,
+                               unsigned long npages,
+                               bool rm)
+{
+       struct pnv_phb *phb = npe->phb;
+
+       /* We can only invalidate the whole cache on NPU */
+       unsigned long val = TCE_KILL_INVAL_ALL;
+
+       if (WARN_ON(phb->type != PNV_PHB_NPU ||
+                   !phb->ioda.tce_inval_reg ||
+                   !(npe->flags & PNV_IODA_PE_DEV)))
+               return;
+
+       mb(); /* Ensure previous TCE table stores are visible */
+       if (rm)
+               __raw_rm_writeq(cpu_to_be64(val),
+                 (__be64 __iomem *) phb->ioda.tce_inval_reg_phys);
+       else
+               __raw_writeq(cpu_to_be64(val),
+                       phb->ioda.tce_inval_reg);
+}
+
+void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe)
+{
+       struct pnv_ioda_pe *gpe;
+       struct pci_dev *gpdev;
+       int i, avail = -1;
+
+       if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
+               return;
+
+       gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
+       if (!gpe)
+               return;
+
+       for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+               /* Nothing to do if the PE is already connected. */
+               if (gpe->peers[i] == npe)
+                       return;
+
+               if (!gpe->peers[i])
+                       avail = i;
+       }
+
+       if (WARN_ON(avail < 0))
+               return;
+
+       gpe->peers[avail] = npe;
+       gpe->flags |= PNV_IODA_PE_PEER;
+
+       /*
+        * We assume that the NPU devices only have a single peer PE
+        * (the GPU PCIe device PE).
+        */
+       npe->peers[0] = gpe;
+       npe->flags |= PNV_IODA_PE_PEER;
+}
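
To state the resulting invariant plainly: once pnv_npu_init_dma_pe() returns, the GPU PE's peers[] array contains the NPU PE, npe->peers[0] points back at the GPU PE, and both PEs carry PNV_IODA_PE_PEER, which is the flag the TCE invalidation paths later in this patch key off.
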
+
+/*
+ * For the NPU we want to point the TCE table at the same table as the
+ * real PCI device.
+ */
+static void pnv_npu_disable_bypass(struct pnv_ioda_pe *npe)
+{
+       struct pnv_phb *phb = npe->phb;
+       struct pci_dev *gpdev;
+       struct pnv_ioda_pe *gpe;
+       void *addr;
+       unsigned int size;
+       int64_t rc;
+
+       /*
+        * Find the associated PCI devices and get the dma window
+        * information from there.
+        */
+       if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
+               return;
+
+       gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
+       if (!gpe)
+               return;
+
+       addr = (void *)gpe->table_group.tables[0]->it_base;
+       size = gpe->table_group.tables[0]->it_size << 3;
+       rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
+                                       npe->pe_number, 1, __pa(addr),
+                                       size, 0x1000);
+       if (rc != OPAL_SUCCESS)
+               pr_warn("%s: Error %lld setting DMA window on PHB#%d-PE#%d\n",
+                       __func__, rc, phb->hose->global_number, npe->pe_number);
+
+       /*
+        * We don't initialise npu_pe->tce32_table as we always use
+        * dma_npu_ops which are nops.
+        */
+       set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
+}
+
+/*
+ * Enable/disable bypass mode on the NPU. The NPU only supports one
+ * window per link, so bypass needs to be explicitly enabled or
+ * disabled. Unlike on a PHB3, bypass and non-bypass modes can't be
+ * active at the same time.
+ */
+int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enable)
+{
+       struct pnv_phb *phb = npe->phb;
+       int64_t rc = 0;
+
+       if (phb->type != PNV_PHB_NPU || !npe->pdev)
+               return -EINVAL;
+
+       if (enable) {
+               /* Enable the bypass window */
+               phys_addr_t top = memblock_end_of_DRAM();
+
+               npe->tce_bypass_base = 0;
+               top = roundup_pow_of_two(top);
+               dev_info(&npe->pdev->dev, "Enabling bypass for PE %d\n",
+                        npe->pe_number);
+               rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
+                                       npe->pe_number, npe->pe_number,
+                                       npe->tce_bypass_base, top);
+       } else {
+               /*
+                * Disable the bypass window by replacing it with the
+                * TCE32 window.
+                */
+               pnv_npu_disable_bypass(npe);
+       }
+
+       return rc;
+}
+
+int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
+{
+       struct pci_controller *hose = pci_bus_to_host(npdev->bus);
+       struct pnv_phb *phb = hose->private_data;
+       struct pci_dn *pdn = pci_get_pdn(npdev);
+       struct pnv_ioda_pe *npe, *gpe;
+       struct pci_dev *gpdev;
+       uint64_t top;
+       bool bypass = false;
+
+       if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+               return -ENXIO;
+
+       /* We only do bypass if it's enabled on the linked device */
+       npe = &phb->ioda.pe_array[pdn->pe_number];
+       gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
+       if (!gpe)
+               return -ENODEV;
+
+       if (gpe->tce_bypass_enabled) {
+               top = gpe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+               bypass = (dma_mask >= top);
+       }
+
+       if (bypass)
+               dev_info(&npdev->dev, "Using 64-bit DMA iommu bypass\n");
+       else
+               dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
+
+       pnv_npu_dma_set_bypass(npe, bypass);
+       *npdev->dev.dma_mask = dma_mask;
+
+       return 0;
+}
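
To make the bypass decision in pnv_npu_dma_set_mask() concrete (an illustrative calculation, not from the patch): assuming tce_bypass_base == 0 and 16GiB of contiguous RAM starting at 0, memblock_end_of_DRAM() returns 0x400000000, so top == 0x3ffffffff. A device mask of DMA_BIT_MASK(64) satisfies dma_mask >= top and selects the 64-bit bypass window, while DMA_BIT_MASK(32) == 0xffffffff does not, leaving the device on the 32-bit IOMMU path.
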
diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
new file mode 100644 (file)
index 0000000..6f1214d
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * kmsg dumper that ensures the OPAL console fully flushes panic messages
+ *
+ * Author: Russell Currey <ruscur@russell.cc>
+ *
+ * Copyright 2015 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kmsg_dump.h>
+
+#include <asm/opal.h>
+#include <asm/opal-api.h>
+
+/*
+ * Console output is controlled by OPAL firmware.  The kernel regularly calls
+ * OPAL_POLL_EVENTS, which flushes some console output.  In a panic state,
+ * however, the kernel no longer calls OPAL_POLL_EVENTS and the panic message
+ * may not be completely printed.  This function does not actually dump the
+ * message, it just ensures that OPAL completely flushes the console buffer.
+ */
+static void force_opal_console_flush(struct kmsg_dumper *dumper,
+                                    enum kmsg_dump_reason reason)
+{
+       int i;
+       int64_t ret;
+
+       /*
+        * Outside of a panic context the pollers will continue to run,
+        * so we don't need to do any special flushing.
+        */
+       if (reason != KMSG_DUMP_PANIC)
+               return;
+
+       if (opal_check_token(OPAL_CONSOLE_FLUSH)) {
+               ret = opal_console_flush(0);
+
+               if (ret == OPAL_UNSUPPORTED || ret == OPAL_PARAMETER)
+                       return;
+
+               /* Incrementally flush until there's nothing left */
+               while (opal_console_flush(0) != OPAL_SUCCESS);
+       } else {
+               /*
+                * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
+                * the console can still be flushed by calling the polling
+                * function enough times to flush the buffer.  We don't know
+                * how much output still needs to be flushed, but we can be
+                * generous since the kernel is in panic and doesn't need
+                * to do much else.
+                */
+               printk(KERN_NOTICE "opal: OPAL_CONSOLE_FLUSH missing.\n");
+               for (i = 0; i < 1024; i++) {
+                       opal_poll_events(NULL);
+               }
+       }
+}
+
+static struct kmsg_dumper opal_kmsg_dumper = {
+       .dump = force_opal_console_flush
+};
+
+void __init opal_kmsg_init(void)
+{
+       int rc;
+
+       /* Add our dumper to the list */
+       rc = kmsg_dump_register(&opal_kmsg_dumper);
+       if (rc != 0)
+               pr_err("opal: kmsg_dump_register failed; returned %d\n", rc);
+}
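
For context: panic() invokes kmsg_dump(KMSG_DUMP_PANIC), which walks every dumper registered through kmsg_dump_register() and thereby reaches force_opal_console_flush() above. The only wiring this file needs is the opal_kmsg_init() call added to opal_init() further down in this patch.
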
index 4ece8e4..e315e70 100644 (file)
@@ -434,7 +434,6 @@ static const struct of_device_id opal_prd_match[] = {
 static struct platform_driver opal_prd_driver = {
        .driver = {
                .name           = "opal-prd",
-               .owner          = THIS_MODULE,
                .of_match_table = opal_prd_match,
        },
        .probe  = opal_prd_probe,
index 37dbee1..f886886 100644 (file)
@@ -31,8 +31,7 @@ static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
        tm->tm_hour     = bcd2bin((h_m_s_ms >> 56) & 0xff);
        tm->tm_min      = bcd2bin((h_m_s_ms >> 48) & 0xff);
        tm->tm_sec      = bcd2bin((h_m_s_ms >> 40) & 0xff);
-
-        GregorianDay(tm);
+       tm->tm_wday     = -1;
 }
 
 unsigned long __init opal_get_boot_time(void)
@@ -51,7 +50,7 @@ unsigned long __init opal_get_boot_time(void)
                rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
                if (rc == OPAL_BUSY_EVENT)
                        opal_poll_events(NULL);
-               else
+               else if (rc == OPAL_BUSY)
                        mdelay(10);
        }
        if (rc != OPAL_SUCCESS)
index b7a464f..e45b88a 100644 (file)
@@ -301,3 +301,4 @@ OPAL_CALL(opal_flash_erase,                 OPAL_FLASH_ERASE);
 OPAL_CALL(opal_prd_msg,                                OPAL_PRD_MSG);
 OPAL_CALL(opal_leds_get_ind,                   OPAL_LEDS_GET_INDICATOR);
 OPAL_CALL(opal_leds_set_ind,                   OPAL_LEDS_SET_INDICATOR);
+OPAL_CALL(opal_console_flush,                  OPAL_CONSOLE_FLUSH);
index 7634d1c..d0ac535 100644 (file)
@@ -126,7 +126,7 @@ static const struct scom_controller opal_scom_controller = {
 
 static int opal_xscom_init(void)
 {
-       if (firmware_has_feature(FW_FEATURE_OPALv3))
+       if (firmware_has_feature(FW_FEATURE_OPAL))
                scom_init(&opal_scom_controller);
        return 0;
 }
index 57cffb8..4e0da5a 100644 (file)
@@ -98,16 +98,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
        pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
                 opal.size, sizep, runtimesz);
 
-       powerpc_firmware_features |= FW_FEATURE_OPAL;
        if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
-               powerpc_firmware_features |= FW_FEATURE_OPALv2;
-               powerpc_firmware_features |= FW_FEATURE_OPALv3;
-               pr_info("OPAL V3 detected !\n");
-       } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
-               powerpc_firmware_features |= FW_FEATURE_OPALv2;
-               pr_info("OPAL V2 detected !\n");
+               powerpc_firmware_features |= FW_FEATURE_OPAL;
+               pr_info("OPAL detected !\n");
        } else {
-               pr_info("OPAL V1 detected !\n");
+               panic("OPAL != V3 detected, no longer supported.\n");
        }
 
        /* Reinit all cores with the right endian */
@@ -352,17 +347,15 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
         * enough room and be done with it
         */
        spin_lock_irqsave(&opal_write_lock, flags);
-       if (firmware_has_feature(FW_FEATURE_OPALv2)) {
-               rc = opal_console_write_buffer_space(vtermno, &olen);
-               len = be64_to_cpu(olen);
-               if (rc || len < total_len) {
-                       spin_unlock_irqrestore(&opal_write_lock, flags);
-                       /* Closed -> drop characters */
-                       if (rc)
-                               return total_len;
-                       opal_poll_events(NULL);
-                       return -EAGAIN;
-               }
+       rc = opal_console_write_buffer_space(vtermno, &olen);
+       len = be64_to_cpu(olen);
+       if (rc || len < total_len) {
+               spin_unlock_irqrestore(&opal_write_lock, flags);
+               /* Closed -> drop characters */
+               if (rc)
+                       return total_len;
+               opal_poll_events(NULL);
+               return -EAGAIN;
        }
 
        /* We still try to handle partial completions, though they
@@ -555,7 +548,7 @@ bool opal_mce_check_early_recovery(struct pt_regs *regs)
                goto out;
 
        if ((regs->nip >= opal.base) &&
-                       (regs->nip <= (opal.base + opal.size)))
+                       (regs->nip < (opal.base + opal.size)))
                recover_addr = find_recovery_address(regs->nip);
 
        /*
@@ -696,10 +689,7 @@ static int __init opal_init(void)
        }
 
        /* Register OPAL consoles if any ports */
-       if (firmware_has_feature(FW_FEATURE_OPALv2))
-               consoles = of_find_node_by_path("/ibm,opal/consoles");
-       else
-               consoles = of_node_get(opal_node);
+       consoles = of_find_node_by_path("/ibm,opal/consoles");
        if (consoles) {
                for_each_child_of_node(consoles, np) {
                        if (strcmp(np->name, "serial"))
@@ -758,6 +748,9 @@ static int __init opal_init(void)
        opal_pdev_init(opal_node, "ibm,opal-flash");
        opal_pdev_init(opal_node, "ibm,opal-prd");
 
+       /* Initialise OPAL kmsg dumper for flushing console on panic */
+       opal_kmsg_init();
+
        return 0;
 }
 machine_subsys_initcall(powernv, opal_init);
index 414fd1a..573ae19 100644 (file)
@@ -116,16 +116,6 @@ static int __init iommu_setup(char *str)
 }
 early_param("iommu", iommu_setup);
 
-/*
- * stdcix is only supposed to be used in hypervisor real mode as per
- * the architecture spec
- */
-static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
-{
-       __asm__ __volatile__("stdcix %0,0,%1"
-               : : "r" (val), "r" (paddr) : "memory");
-}
-
 static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
 {
        return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
@@ -344,7 +334,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
                return;
        }
 
-       if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
+       if (!firmware_has_feature(FW_FEATURE_OPAL)) {
                pr_info("  Firmware too old to support M64 window\n");
                return;
        }
@@ -357,6 +347,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
        }
 
        res = &hose->mem_resources[1];
+       res->name = dn->full_name;
        res->start = of_translate_address(dn, r + 2);
        res->end = res->start + of_read_number(r + 4, 2) - 1;
        res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
@@ -780,8 +771,12 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
                return -ENXIO;
        }
 
-       /* Configure PELTV */
-       pnv_ioda_set_peltv(phb, pe, true);
+       /*
+        * Configure PELTV. NPUs don't have a PELTV table so skip
+        * configuration on them.
+        */
+       if (phb->type != PNV_PHB_NPU)
+               pnv_ioda_set_peltv(phb, pe, true);
 
        /* Setup reverse map */
        for (rid = pe->rid; rid < rid_end; rid++)
@@ -924,7 +919,6 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
 }
 #endif /* CONFIG_PCI_IOV */
 
-#if 0
 static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 {
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -941,11 +935,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
        if (pdn->pe_number != IODA_INVALID_PE)
                return NULL;
 
-       /* PE#0 has been pre-set */
-       if (dev->bus->number == 0)
-               pe_num = 0;
-       else
-               pe_num = pnv_ioda_alloc_pe(phb);
+       pe_num = pnv_ioda_alloc_pe(phb);
        if (pe_num == IODA_INVALID_PE) {
                pr_warning("%s: Not enough PE# available, disabling device\n",
                           pci_name(dev));
@@ -963,6 +953,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
        pci_dev_get(dev);
        pdn->pcidev = dev;
        pdn->pe_number = pe_num;
+       pe->flags = PNV_IODA_PE_DEV;
        pe->pdev = dev;
        pe->pbus = NULL;
        pe->tce32_seg = -1;
@@ -993,7 +984,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 
        return pe;
 }
-#endif /* Useful for SRIOV case */
 
 static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
 {
@@ -1007,6 +997,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
                                pci_name(dev));
                        continue;
                }
+               pdn->pcidev = dev;
                pdn->pe_number = pe->pe_number;
                pe->dma_weight += pnv_ioda_dma_weight(dev);
                if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -1083,6 +1074,77 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
        pnv_ioda_link_pe_by_weight(phb, pe);
 }
 
+static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
+{
+       int pe_num, found_pe = false, rc;
+       long rid;
+       struct pnv_ioda_pe *pe;
+       struct pci_dev *gpu_pdev;
+       struct pci_dn *npu_pdn;
+       struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
+       struct pnv_phb *phb = hose->private_data;
+
+       /*
+        * Due to a hardware erratum, PE#0 on the NPU is reserved for
+        * error handling. This means we only have three PEs remaining
+        * which need to be assigned to four links, implying some
+        * links must share PEs.
+        *
+        * To achieve this we assign PEs such that NPUs linking the
+        * same GPU get assigned the same PE.
+        */
+       gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
+       for (pe_num = 0; pe_num < phb->ioda.total_pe; pe_num++) {
+               pe = &phb->ioda.pe_array[pe_num];
+               if (!pe->pdev)
+                       continue;
+
+               if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
+                       /*
+                        * This device has the same peer GPU so should
+                        * be assigned the same PE as the existing
+                        * peer NPU.
+                        */
+                       dev_info(&npu_pdev->dev,
+                               "Associating to existing PE %d\n", pe_num);
+                       pci_dev_get(npu_pdev);
+                       npu_pdn = pci_get_pdn(npu_pdev);
+                       rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
+                       npu_pdn->pcidev = npu_pdev;
+                       npu_pdn->pe_number = pe_num;
+                       pe->dma_weight += pnv_ioda_dma_weight(npu_pdev);
+                       phb->ioda.pe_rmap[rid] = pe->pe_number;
+
+                       /* Map the PE to this link */
+                       rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
+                                       OpalPciBusAll,
+                                       OPAL_COMPARE_RID_DEVICE_NUMBER,
+                                       OPAL_COMPARE_RID_FUNCTION_NUMBER,
+                                       OPAL_MAP_PE);
+                       WARN_ON(rc != OPAL_SUCCESS);
+                       found_pe = true;
+                       break;
+               }
+       }
+
+       if (!found_pe)
+               /*
+                * Could not find an existing PE so allocate a new
+                * one.
+                */
+               return pnv_ioda_setup_dev_PE(npu_pdev);
+       else
+               return pe;
+}
+
+static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
+{
+       struct pci_dev *pdev;
+
+       list_for_each_entry(pdev, &bus->devices, bus_list)
+               pnv_ioda_setup_npu_PE(pdev);
+}
+
 static void pnv_ioda_setup_PEs(struct pci_bus *bus)
 {
        struct pci_dev *dev;
@@ -1119,7 +1181,17 @@ static void pnv_pci_ioda_setup_PEs(void)
                if (phb->reserve_m64_pe)
                        phb->reserve_m64_pe(hose->bus, NULL, true);
 
-               pnv_ioda_setup_PEs(hose->bus);
+               /*
+                * On NPU PHB, we expect separate PEs for individual PCI
+                * functions. PCI bus dependent PEs are required for the
+                * remaining types of PHBs.
+                */
+               if (phb->type == PNV_PHB_NPU) {
+                       /* PE#0 is needed for error reporting */
+                       pnv_ioda_reserve_pe(phb, 0);
+                       pnv_ioda_setup_npu_PEs(hose->bus);
+               } else
+                       pnv_ioda_setup_PEs(hose->bus);
        }
 }
 
@@ -1578,6 +1650,8 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
        struct pnv_ioda_pe *pe;
        uint64_t top;
        bool bypass = false;
+       struct pci_dev *linked_npu_dev;
+       int i;
 
        if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
                return -ENODEV;
@@ -1596,6 +1670,18 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
                set_dma_ops(&pdev->dev, &dma_iommu_ops);
        }
        *pdev->dev.dma_mask = dma_mask;
+
+       /* Update peer npu devices */
+       if (pe->flags & PNV_IODA_PE_PEER)
+               for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+                       if (!pe->peers[i])
+                               continue;
+
+                       linked_npu_dev = pe->peers[i]->pdev;
+                       if (dma_get_mask(&linked_npu_dev->dev) != dma_mask)
+                               dma_set_mask(&linked_npu_dev->dev, dma_mask);
+               }
+
        return 0;
 }
 
@@ -1740,12 +1826,23 @@ static inline void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_ioda_pe *pe)
        /* 01xb - invalidate TCEs that match the specified PE# */
        unsigned long val = (0x4ull << 60) | (pe->pe_number & 0xFF);
        struct pnv_phb *phb = pe->phb;
+       struct pnv_ioda_pe *npe;
+       int i;
 
        if (!phb->ioda.tce_inval_reg)
                return;
 
        mb(); /* Ensure above stores are visible */
        __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
+
+       if (pe->flags & PNV_IODA_PE_PEER)
+               for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+                       npe = pe->peers[i];
+                       if (!npe || npe->phb->type != PNV_PHB_NPU)
+                               continue;
+
+                       pnv_npu_tce_invalidate_entire(npe);
+               }
 }
 
 static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
@@ -1780,15 +1877,28 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
        struct iommu_table_group_link *tgl;
 
        list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+               struct pnv_ioda_pe *npe;
                struct pnv_ioda_pe *pe = container_of(tgl->table_group,
                                struct pnv_ioda_pe, table_group);
                __be64 __iomem *invalidate = rm ?
                        (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
                        pe->phb->ioda.tce_inval_reg;
+               int i;
 
                pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
                        invalidate, tbl->it_page_shift,
                        index, npages);
+
+               if (pe->flags & PNV_IODA_PE_PEER)
+                       /* Invalidate PEs using the same TCE table */
+                       for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+                               npe = pe->peers[i];
+                               if (!npe || npe->phb->type != PNV_PHB_NPU)
+                                       continue;
+
+                               pnv_npu_tce_invalidate(npe, tbl, index,
+                                                       npages, rm);
+                       }
        }
 }
 
@@ -2436,10 +2546,17 @@ static void pnv_ioda_setup_dma(struct pnv_phb *phb)
                        pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
                                pe->dma_weight, segs);
                        pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
-               } else {
+               } else if (phb->type == PNV_PHB_IODA2) {
                        pe_info(pe, "Assign DMA32 space\n");
                        segs = 0;
                        pnv_pci_ioda2_setup_dma_pe(phb, pe);
+               } else if (phb->type == PNV_PHB_NPU) {
+                       /*
+                        * We initialise the DMA space for an NPU PHB
+                        * after setup of the PHB is complete as we
+                        * point the NPU TVT to the same location
+                        * as the PHB3 TVT.
+                        */
                }
 
                remaining -= segs;
@@ -2881,6 +2998,11 @@ static void pnv_pci_ioda_setup_seg(void)
 
        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                phb = hose->private_data;
+
+               /* NPU PHB does not support IO or MMIO segmentation */
+               if (phb->type == PNV_PHB_NPU)
+                       continue;
+
                list_for_each_entry(pe, &phb->ioda.pe_list, list) {
                        pnv_ioda_setup_pe_seg(hose, pe);
                }
@@ -2920,6 +3042,27 @@ static void pnv_pci_ioda_create_dbgfs(void)
 #endif /* CONFIG_DEBUG_FS */
 }
 
+static void pnv_npu_ioda_fixup(void)
+{
+       bool enable_bypass;
+       struct pci_controller *hose, *tmp;
+       struct pnv_phb *phb;
+       struct pnv_ioda_pe *pe;
+
+       list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+               phb = hose->private_data;
+               if (phb->type != PNV_PHB_NPU)
+                       continue;
+
+               list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
+                       enable_bypass = dma_get_mask(&pe->pdev->dev) ==
+                               DMA_BIT_MASK(64);
+                       pnv_npu_init_dma_pe(pe);
+                       pnv_npu_dma_set_bypass(pe, enable_bypass);
+               }
+       }
+}
+
 static void pnv_pci_ioda_fixup(void)
 {
        pnv_pci_ioda_setup_PEs();
@@ -2932,6 +3075,9 @@ static void pnv_pci_ioda_fixup(void)
        eeh_init();
        eeh_addr_cache_build();
 #endif
+
+       /* Link NPU IODA tables to their PCI devices. */
+       pnv_npu_ioda_fixup();
 }
 
 /*
@@ -3046,6 +3192,19 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
        .shutdown = pnv_pci_ioda_shutdown,
 };
 
+static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
+       .dma_dev_setup = pnv_pci_dma_dev_setup,
+#ifdef CONFIG_PCI_MSI
+       .setup_msi_irqs = pnv_setup_msi_irqs,
+       .teardown_msi_irqs = pnv_teardown_msi_irqs,
+#endif
+       .enable_device_hook = pnv_pci_enable_device_hook,
+       .window_alignment = pnv_pci_window_alignment,
+       .reset_secondary_bus = pnv_pci_reset_secondary_bus,
+       .dma_set_mask = pnv_npu_dma_set_mask,
+       .shutdown = pnv_pci_ioda_shutdown,
+};
+
 static void __init pnv_pci_init_ioda_phb(struct device_node *np,
                                         u64 hub_id, int ioda_type)
 {
@@ -3101,6 +3260,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
                phb->model = PNV_PHB_MODEL_P7IOC;
        else if (of_device_is_compatible(np, "ibm,power8-pciex"))
                phb->model = PNV_PHB_MODEL_PHB3;
+       else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
+               phb->model = PNV_PHB_MODEL_NPU;
        else
                phb->model = PNV_PHB_MODEL_UNKNOWN;
 
@@ -3201,7 +3362,11 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
         * the child P2P bridges) can form individual PE.
         */
        ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
-       hose->controller_ops = pnv_pci_ioda_controller_ops;
+
+       if (phb->type == PNV_PHB_NPU)
+               hose->controller_ops = pnv_npu_ioda_controller_ops;
+       else
+               hose->controller_ops = pnv_pci_ioda_controller_ops;
 
 #ifdef CONFIG_PCI_IOV
        ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
@@ -3236,6 +3401,11 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np)
        pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
 }
 
+void __init pnv_pci_init_npu_phb(struct device_node *np)
+{
+       pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU);
+}
+
 void __init pnv_pci_init_ioda_hub(struct device_node *np)
 {
        struct device_node *phbn;
index f2dd772..2f55c86 100644 (file)
@@ -1,8 +1,6 @@
 /*
  * Support PCI/PCIe on PowerNV platforms
  *
- * Currently supports only P5IOC2
- *
  * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
  *
  * This program is free software; you can redistribute it and/or
@@ -807,6 +805,10 @@ void __init pnv_pci_init(void)
        for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
                pnv_pci_init_ioda2_phb(np);
 
+       /* Look for NPU PHBs */
+       for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
+               pnv_pci_init_npu_phb(np);
+
        /* Setup the linkage between OF nodes and PHBs */
        pci_devs_phb_init();
 
index c8ff50e..7f56313 100644 (file)
@@ -7,6 +7,7 @@ enum pnv_phb_type {
        PNV_PHB_P5IOC2  = 0,
        PNV_PHB_IODA1   = 1,
        PNV_PHB_IODA2   = 2,
+       PNV_PHB_NPU     = 3,
 };
 
 /* Precise PHB model for error management */
@@ -15,6 +16,7 @@ enum pnv_phb_model {
        PNV_PHB_MODEL_P5IOC2,
        PNV_PHB_MODEL_P7IOC,
        PNV_PHB_MODEL_PHB3,
+       PNV_PHB_MODEL_NPU,
 };
 
 #define PNV_PCI_DIAG_BUF_SIZE  8192
@@ -24,6 +26,7 @@ enum pnv_phb_model {
 #define PNV_IODA_PE_MASTER     (1 << 3)        /* Master PE in compound case   */
 #define PNV_IODA_PE_SLAVE      (1 << 4)        /* Slave PE in compound case    */
 #define PNV_IODA_PE_VF         (1 << 5)        /* PE for one VF                */
+#define PNV_IODA_PE_PEER       (1 << 6)        /* PE has peers                 */
 
 /* Data associated with a PE, including IOMMU tracking etc.. */
 struct pnv_phb;
@@ -31,6 +34,9 @@ struct pnv_ioda_pe {
        unsigned long           flags;
        struct pnv_phb          *phb;
 
+#define PNV_IODA_MAX_PEER_PES  8
+       struct pnv_ioda_pe      *peers[PNV_IODA_MAX_PEER_PES];
+
        /* A PE can be associated with a single device or an
         * entire bus (& children). In the former case, pdev
         * is populated, in the later case, pbus is.
@@ -229,6 +235,7 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
 extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
 extern void pnv_pci_init_ioda_hub(struct device_node *np);
 extern void pnv_pci_init_ioda2_phb(struct device_node *np);
+extern void pnv_pci_init_npu_phb(struct device_node *np);
 extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
                                        __be64 *startp, __be64 *endp, bool rm);
 extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
@@ -238,4 +245,16 @@ extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
 extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
 extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
 
+/* NVLink functions */
+extern void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe);
+extern void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe,
+                                      struct iommu_table *tbl,
+                                      unsigned long index,
+                                      unsigned long npages,
+                                      bool rm);
+extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe);
+extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe);
+extern int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enabled);
+extern int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask);
+
 #endif /* __POWERNV_PCI_H */
index a9a8fa3..1acb0c7 100644 (file)
@@ -90,12 +90,8 @@ static void pnv_show_cpuinfo(struct seq_file *m)
        if (root)
                model = of_get_property(root, "model", NULL);
        seq_printf(m, "machine\t\t: PowerNV %s\n", model);
-       if (firmware_has_feature(FW_FEATURE_OPALv3))
-               seq_printf(m, "firmware\t: OPAL v3\n");
-       else if (firmware_has_feature(FW_FEATURE_OPALv2))
-               seq_printf(m, "firmware\t: OPAL v2\n");
-       else if (firmware_has_feature(FW_FEATURE_OPAL))
-               seq_printf(m, "firmware\t: OPAL v1\n");
+       if (firmware_has_feature(FW_FEATURE_OPAL))
+               seq_printf(m, "firmware\t: OPAL\n");
        else
                seq_printf(m, "firmware\t: BML\n");
        of_node_put(root);
@@ -224,9 +220,9 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
        xics_kexec_teardown_cpu(secondary);
 
-       /* On OPAL v3, we return all CPUs to firmware */
+       /* On OPAL, we return all CPUs to firmware */
 
-       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+       if (!firmware_has_feature(FW_FEATURE_OPAL))
                return;
 
        if (secondary) {
index ca26483..ad7b1a3 100644 (file)
@@ -61,14 +61,15 @@ static int pnv_smp_kick_cpu(int nr)
        unsigned long start_here =
                        __pa(ppc_function_entry(generic_secondary_smp_init));
        long rc;
+       uint8_t status;
 
        BUG_ON(nr < 0 || nr >= NR_CPUS);
 
        /*
-        * If we already started or OPALv2 is not supported, we just
+        * If we already started or OPAL is not supported, we just
         * kick the CPU via the PACA
         */
-       if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
+       if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
                goto kick;
 
        /*
@@ -77,55 +78,42 @@ static int pnv_smp_kick_cpu(int nr)
         * first time. OPAL v3 allows us to query OPAL to know if it
         * has the CPUs, so we do that
         */
-       if (firmware_has_feature(FW_FEATURE_OPALv3)) {
-               uint8_t status;
-
-               rc = opal_query_cpu_status(pcpu, &status);
-               if (rc != OPAL_SUCCESS) {
-                       pr_warn("OPAL Error %ld querying CPU %d state\n",
-                               rc, nr);
-                       return -ENODEV;
-               }
+       rc = opal_query_cpu_status(pcpu, &status);
+       if (rc != OPAL_SUCCESS) {
+               pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
+               return -ENODEV;
+       }
 
-               /*
-                * Already started, just kick it, probably coming from
-                * kexec and spinning
-                */
-               if (status == OPAL_THREAD_STARTED)
-                       goto kick;
+       /*
+        * Already started, just kick it, probably coming from
+        * kexec and spinning
+        */
+       if (status == OPAL_THREAD_STARTED)
+               goto kick;
 
-               /*
-                * Available/inactive, let's kick it
-                */
-               if (status == OPAL_THREAD_INACTIVE) {
-                       pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
-                                nr, pcpu);
-                       rc = opal_start_cpu(pcpu, start_here);
-                       if (rc != OPAL_SUCCESS) {
-                               pr_warn("OPAL Error %ld starting CPU %d\n",
-                                       rc, nr);
-                               return -ENODEV;
-                       }
-               } else {
-                       /*
-                        * An unavailable CPU (or any other unknown status)
-                        * shouldn't be started. It should also
-                        * not be in the possible map but currently it can
-                        * happen
-                        */
-                       pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
-                                " (status %d)...\n", nr, pcpu, status);
+       /*
+        * Available/inactive, let's kick it
+        */
+       if (status == OPAL_THREAD_INACTIVE) {
+               pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
+               rc = opal_start_cpu(pcpu, start_here);
+               if (rc != OPAL_SUCCESS) {
+                       pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
                        return -ENODEV;
                }
        } else {
                /*
-                * On OPAL v2, we just kick it and hope for the best,
-                * we must not test the error from opal_start_cpu() or
-                * we would fail to get CPUs from kexec.
+                * An unavailable CPU (or any other unknown status)
+                * shouldn't be started. It should also
+                * not be in the possible map but currently it can
+                * happen
                 */
-               opal_start_cpu(pcpu, start_here);
+               pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
+                        " (status %d)...\n", nr, pcpu, status);
+               return -ENODEV;
        }
- kick:
+
+kick:
        return smp_generic_kick_cpu(nr);
 }
 
index f244dcb..2b93ae8 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/of.h>
 
 #include "of_helpers.h"
-#include "offline_states.h"
 #include "pseries.h"
 
 #include <asm/prom.h>
@@ -338,185 +337,6 @@ int dlpar_release_drc(u32 drc_index)
        return 0;
 }
 
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-
-static int dlpar_online_cpu(struct device_node *dn)
-{
-       int rc = 0;
-       unsigned int cpu;
-       int len, nthreads, i;
-       const __be32 *intserv;
-       u32 thread;
-
-       intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
-       if (!intserv)
-               return -EINVAL;
-
-       nthreads = len / sizeof(u32);
-
-       cpu_maps_update_begin();
-       for (i = 0; i < nthreads; i++) {
-               thread = be32_to_cpu(intserv[i]);
-               for_each_present_cpu(cpu) {
-                       if (get_hard_smp_processor_id(cpu) != thread)
-                               continue;
-                       BUG_ON(get_cpu_current_state(cpu)
-                                       != CPU_STATE_OFFLINE);
-                       cpu_maps_update_done();
-                       rc = device_online(get_cpu_device(cpu));
-                       if (rc)
-                               goto out;
-                       cpu_maps_update_begin();
-
-                       break;
-               }
-               if (cpu == num_possible_cpus())
-                       printk(KERN_WARNING "Could not find cpu to online "
-                              "with physical id 0x%x\n", thread);
-       }
-       cpu_maps_update_done();
-
-out:
-       return rc;
-
-}
-
-static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
-{
-       struct device_node *dn, *parent;
-       u32 drc_index;
-       int rc;
-
-       rc = kstrtou32(buf, 0, &drc_index);
-       if (rc)
-               return -EINVAL;
-
-       rc = dlpar_acquire_drc(drc_index);
-       if (rc)
-               return -EINVAL;
-
-       parent = of_find_node_by_path("/cpus");
-       if (!parent)
-               return -ENODEV;
-
-       dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
-       of_node_put(parent);
-       if (!dn) {
-               dlpar_release_drc(drc_index);
-               return -EINVAL;
-       }
-
-       rc = dlpar_attach_node(dn);
-       if (rc) {
-               dlpar_release_drc(drc_index);
-               dlpar_free_cc_nodes(dn);
-               return rc;
-       }
-
-       rc = dlpar_online_cpu(dn);
-       if (rc)
-               return rc;
-
-       return count;
-}
-
-static int dlpar_offline_cpu(struct device_node *dn)
-{
-       int rc = 0;
-       unsigned int cpu;
-       int len, nthreads, i;
-       const __be32 *intserv;
-       u32 thread;
-
-       intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
-       if (!intserv)
-               return -EINVAL;
-
-       nthreads = len / sizeof(u32);
-
-       cpu_maps_update_begin();
-       for (i = 0; i < nthreads; i++) {
-               thread = be32_to_cpu(intserv[i]);
-               for_each_present_cpu(cpu) {
-                       if (get_hard_smp_processor_id(cpu) != thread)
-                               continue;
-
-                       if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
-                               break;
-
-                       if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
-                               set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
-                               cpu_maps_update_done();
-                               rc = device_offline(get_cpu_device(cpu));
-                               if (rc)
-                                       goto out;
-                               cpu_maps_update_begin();
-                               break;
-
-                       }
-
-                       /*
-                        * The cpu is in CPU_STATE_INACTIVE.
-                        * Upgrade it's state to CPU_STATE_OFFLINE.
-                        */
-                       set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
-                       BUG_ON(plpar_hcall_norets(H_PROD, thread)
-                                                               != H_SUCCESS);
-                       __cpu_die(cpu);
-                       break;
-               }
-               if (cpu == num_possible_cpus())
-                       printk(KERN_WARNING "Could not find cpu to offline "
-                              "with physical id 0x%x\n", thread);
-       }
-       cpu_maps_update_done();
-
-out:
-       return rc;
-
-}
-
-static ssize_t dlpar_cpu_release(const char *buf, size_t count)
-{
-       struct device_node *dn;
-       u32 drc_index;
-       int rc;
-
-       dn = of_find_node_by_path(buf);
-       if (!dn)
-               return -EINVAL;
-
-       rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
-       if (rc) {
-               of_node_put(dn);
-               return -EINVAL;
-       }
-
-       rc = dlpar_offline_cpu(dn);
-       if (rc) {
-               of_node_put(dn);
-               return -EINVAL;
-       }
-
-       rc = dlpar_release_drc(drc_index);
-       if (rc) {
-               of_node_put(dn);
-               return rc;
-       }
-
-       rc = dlpar_detach_node(dn);
-       if (rc) {
-               dlpar_acquire_drc(drc_index);
-               return rc;
-       }
-
-       of_node_put(dn);
-
-       return count;
-}
-
-#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
-
 static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
 {
        int rc;
@@ -536,6 +356,9 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
        case PSERIES_HP_ELOG_RESOURCE_MEM:
                rc = dlpar_memory(hp_elog);
                break;
+       case PSERIES_HP_ELOG_RESOURCE_CPU:
+               rc = dlpar_cpu(hp_elog);
+               break;
        default:
                pr_warn_ratelimited("Invalid resource (%d) specified\n",
                                    hp_elog->resource);
@@ -565,6 +388,9 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
        if (!strncmp(arg, "memory", 6)) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
                arg += strlen("memory ");
+       } else if (!strncmp(arg, "cpu", 3)) {
+               hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
+               arg += strlen("cpu ");
        } else {
                pr_err("Invalid resource specified: \"%s\"\n", buf);
                rc = -EINVAL;
@@ -624,16 +450,7 @@ static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
 
 static int __init pseries_dlpar_init(void)
 {
-       int rc;
-
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-       ppc_md.cpu_probe = dlpar_cpu_probe;
-       ppc_md.cpu_release = dlpar_cpu_release;
-#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
-
-       rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
-
-       return rc;
+       return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
 }
 machine_device_initcall(pseries, pseries_dlpar_init);
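
With the CPU case wired into handle_dlpar_errorlog() and dlpar_store() above, CPU hotplug requests now arrive through the same /sys/kernel/dlpar file already used for memory: writing a string such as "cpu add index 0x10000004" would take the new path (the exact form of the remainder of the command is an assumption here; it is parsed by dlpar_cpu(), which these hunks do not show).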
 
index 6247544..32274f7 100644 (file)
  *      2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt)     "pseries-hotplug-cpu: " fmt
+
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/sched.h>       /* for idle_task_exit */
 #include <linux/cpu.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/firmware.h>
@@ -32,6 +35,7 @@
 #include <asm/xics.h>
 #include <asm/plpar_wrappers.h>
 
+#include "pseries.h"
 #include "offline_states.h"
 
 /* This version can't take the spinlock, because it never returns */
@@ -88,13 +92,7 @@ void set_default_offline_state(int cpu)
 
 static void rtas_stop_self(void)
 {
-       static struct rtas_args args = {
-               .nargs = 0,
-               .nret = cpu_to_be32(1),
-               .rets = &args.args[0],
-       };
-
-       args.token = cpu_to_be32(rtas_stop_self_token);
+       static struct rtas_args args;
 
        local_irq_disable();
 
@@ -102,7 +100,8 @@ static void rtas_stop_self(void)
 
        printk("cpu %u (hwid %u) Ready to die...\n",
               smp_processor_id(), hard_smp_processor_id());
-       enter_rtas(__pa(&args));
+
+       rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
 
        panic("Alas, I survived.\n");
 }
@@ -339,6 +338,536 @@ static void pseries_remove_processor(struct device_node *np)
        cpu_maps_update_done();
 }
 
+static int dlpar_online_cpu(struct device_node *dn)
+{
+       int rc = 0;
+       unsigned int cpu;
+       int len, nthreads, i;
+       const __be32 *intserv;
+       u32 thread;
+
+       intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
+       if (!intserv)
+               return -EINVAL;
+
+       nthreads = len / sizeof(u32);
+
+       cpu_maps_update_begin();
+       for (i = 0; i < nthreads; i++) {
+               thread = be32_to_cpu(intserv[i]);
+               for_each_present_cpu(cpu) {
+                       if (get_hard_smp_processor_id(cpu) != thread)
+                               continue;
+                       BUG_ON(get_cpu_current_state(cpu)
+                                       != CPU_STATE_OFFLINE);
+                       cpu_maps_update_done();
+                       rc = device_online(get_cpu_device(cpu));
+                       if (rc)
+                               goto out;
+                       cpu_maps_update_begin();
+
+                       break;
+               }
+               if (cpu == num_possible_cpus())
+                       printk(KERN_WARNING "Could not find cpu to online "
+                              "with physical id 0x%x\n", thread);
+       }
+       cpu_maps_update_done();
+
+out:
+       return rc;
+
+}
+
+static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
+{
+       struct device_node *child = NULL;
+       u32 my_drc_index;
+       bool found;
+       int rc;
+
+       /* Assume cpu doesn't exist */
+       found = false;
+
+       for_each_child_of_node(parent, child) {
+               rc = of_property_read_u32(child, "ibm,my-drc-index",
+                                         &my_drc_index);
+               if (rc)
+                       continue;
+
+               if (my_drc_index == drc_index) {
+                       of_node_put(child);
+                       found = true;
+                       break;
+               }
+       }
+
+       return found;
+}
+
+static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
+{
+       bool found = false;
+       int rc, index;
+
+       index = 0;
+       while (!found) {
+               u32 drc;
+
+               rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
+                                               index++, &drc);
+               if (rc)
+                       break;
+
+               if (drc == drc_index)
+                       found = true;
+       }
+
+       return found;
+}
+
+static ssize_t dlpar_cpu_add(u32 drc_index)
+{
+       struct device_node *dn, *parent;
+       int rc, saved_rc;
+
+       pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);
+
+       parent = of_find_node_by_path("/cpus");
+       if (!parent) {
+               pr_warn("Failed to find CPU root node \"/cpus\"\n");
+               return -ENODEV;
+       }
+
+       if (dlpar_cpu_exists(parent, drc_index)) {
+               of_node_put(parent);
+               pr_warn("CPU with drc index %x already exists\n", drc_index);
+               return -EINVAL;
+       }
+
+       if (!valid_cpu_drc_index(parent, drc_index)) {
+               of_node_put(parent);
+               pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
+               return -EINVAL;
+       }
+
+       rc = dlpar_acquire_drc(drc_index);
+       if (rc) {
+               pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
+                       rc, drc_index);
+               of_node_put(parent);
+               return -EINVAL;
+       }
+
+       dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
+       of_node_put(parent);
+       if (!dn) {
+               pr_warn("Failed call to configure-connector, drc index: %x\n",
+                       drc_index);
+               dlpar_release_drc(drc_index);
+               return -EINVAL;
+       }
+
+       rc = dlpar_attach_node(dn);
+       if (rc) {
+               saved_rc = rc;
+               pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
+                       dn->name, rc, drc_index);
+
+               rc = dlpar_release_drc(drc_index);
+               if (!rc)
+                       dlpar_free_cc_nodes(dn);
+
+               return saved_rc;
+       }
+
+       rc = dlpar_online_cpu(dn);
+       if (rc) {
+               saved_rc = rc;
+               pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n",
+                       dn->name, rc, drc_index);
+
+               rc = dlpar_detach_node(dn);
+               if (!rc)
+                       dlpar_release_drc(drc_index);
+
+               return saved_rc;
+       }
+
+       pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name,
+                drc_index);
+       return rc;
+}
+
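
For readability, a summary of how dlpar_cpu_add() above unwinds on
failure (illustrative recap, not part of the patch):

        /*
         * configure-connector fails -> release the DRC
         * attach fails              -> release the DRC, then free the
         *                              configure-connector nodes
         * online fails              -> detach the node, then release
         *                              the DRC
         * The first error is kept in saved_rc and returned to the caller.
         */
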
+static int dlpar_offline_cpu(struct device_node *dn)
+{
+       int rc = 0;
+       unsigned int cpu;
+       int len, nthreads, i;
+       const __be32 *intserv;
+       u32 thread;
+
+       intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
+       if (!intserv)
+               return -EINVAL;
+
+       nthreads = len / sizeof(u32);
+
+       cpu_maps_update_begin();
+       for (i = 0; i < nthreads; i++) {
+               thread = be32_to_cpu(intserv[i]);
+               for_each_present_cpu(cpu) {
+                       if (get_hard_smp_processor_id(cpu) != thread)
+                               continue;
+
+                       if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
+                               break;
+
+                       if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
+                               set_preferred_offline_state(cpu,
+                                                           CPU_STATE_OFFLINE);
+                               cpu_maps_update_done();
+                               rc = device_offline(get_cpu_device(cpu));
+                               if (rc)
+                                       goto out;
+                               cpu_maps_update_begin();
+                               break;
+
+                       }
+
+                       /*
+                        * The cpu is in CPU_STATE_INACTIVE.
+                        * Upgrade its state to CPU_STATE_OFFLINE.
+                        */
+                       set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
+                       BUG_ON(plpar_hcall_norets(H_PROD, thread)
+                                                               != H_SUCCESS);
+                       __cpu_die(cpu);
+                       break;
+               }
+               if (cpu == num_possible_cpus())
+                       printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread);
+       }
+       cpu_maps_update_done();
+
+out:
+       return rc;
+
+}
+
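
A per-thread summary of dlpar_offline_cpu() above (recap only):

        /*
         * CPU_STATE_OFFLINE  -> already offline, nothing to do
         * CPU_STATE_ONLINE   -> set the preferred offline state, then
         *                       device_offline()
         * CPU_STATE_INACTIVE -> upgrade the preferred state to OFFLINE,
         *                       wake the ceded thread with H_PROD and
         *                       let __cpu_die() reap it
         */
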
+static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
+{
+       int rc;
+
+       pr_debug("Attempting to remove CPU %s, drc index: %x\n",
+                dn->name, drc_index);
+
+       rc = dlpar_offline_cpu(dn);
+       if (rc) {
+               pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc);
+               return -EINVAL;
+       }
+
+       rc = dlpar_release_drc(drc_index);
+       if (rc) {
+               pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
+                       drc_index, dn->name, rc);
+               dlpar_online_cpu(dn);
+               return rc;
+       }
+
+       rc = dlpar_detach_node(dn);
+       if (rc) {
+               int saved_rc = rc;
+
+               pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);
+
+               rc = dlpar_acquire_drc(drc_index);
+               if (!rc)
+                       dlpar_online_cpu(dn);
+
+               return saved_rc;
+       }
+
+       pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
+       return 0;
+}
+
+static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
+{
+       struct device_node *dn;
+       u32 my_index;
+       int rc;
+
+       for_each_node_by_type(dn, "cpu") {
+               rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
+               if (rc)
+                       continue;
+
+               if (my_index == drc_index)
+                       break;
+       }
+
+       return dn;
+}
+
+static int dlpar_cpu_remove_by_index(u32 drc_index)
+{
+       struct device_node *dn;
+       int rc;
+
+       dn = cpu_drc_index_to_dn(drc_index);
+       if (!dn) {
+               pr_warn("Cannot find CPU (drc index %x) to remove\n",
+                       drc_index);
+               return -ENODEV;
+       }
+
+       rc = dlpar_cpu_remove(dn, drc_index);
+       of_node_put(dn);
+       return rc;
+}
+
+static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
+{
+       struct device_node *dn;
+       int cpus_found = 0;
+       int rc;
+
+       /* We want to find cpus_to_remove + 1 CPUs to ensure we do not
+        * remove the last CPU.
+        */
+       for_each_node_by_type(dn, "cpu") {
+               cpus_found++;
+
+               if (cpus_found > cpus_to_remove) {
+                       of_node_put(dn);
+                       break;
+               }
+
+               /* Note that cpus_found is always 1 ahead of the index
+                * into the cpu_drcs array, so we use cpus_found - 1
+                */
+               rc = of_property_read_u32(dn, "ibm,my-drc-index",
+                                         &cpu_drcs[cpus_found - 1]);
+               if (rc) {
+                       pr_warn("Error occurred getting drc-index for %s\n",
+                               dn->name);
+                       of_node_put(dn);
+                       return -1;
+               }
+       }
+
+       if (cpus_found < cpus_to_remove) {
+               pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
+                       cpus_found, cpus_to_remove);
+       } else if (cpus_found == cpus_to_remove) {
+               pr_warn("Cannot remove all CPUs\n");
+       }
+
+       return cpus_found;
+}
+
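
A concrete reading of the "cpus_to_remove + 1" rule above, with a
hypothetical four-CPU system:

        /*
         * cpus_to_remove = 2 -> loop stops once cpus_found reaches 3,
         *                       removal proceeds
         * cpus_to_remove = 4 -> cpus_found ends at 4, "Cannot remove
         *                       all CPUs"
         * cpus_to_remove = 6 -> cpus_found ends at 4, "Failed to find
         *                       enough CPUs"
         * The caller treats any cpus_found <= cpus_to_remove as failure.
         */
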
+static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
+{
+       u32 *cpu_drcs;
+       int cpus_found;
+       int cpus_removed = 0;
+       int i, rc;
+
+       pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);
+
+       cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
+       if (!cpu_drcs)
+               return -EINVAL;
+
+       cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
+       if (cpus_found <= cpus_to_remove) {
+               kfree(cpu_drcs);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < cpus_to_remove; i++) {
+               rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
+               if (rc)
+                       break;
+
+               cpus_removed++;
+       }
+
+       if (cpus_removed != cpus_to_remove) {
+               pr_warn("CPU hot-remove failed, adding back removed CPUs\n");
+
+               for (i = 0; i < cpus_removed; i++)
+                       dlpar_cpu_add(cpu_drcs[i]);
+
+               rc = -EINVAL;
+       } else {
+               rc = 0;
+       }
+
+       kfree(cpu_drcs);
+       return rc;
+}
+
+static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
+{
+       struct device_node *parent;
+       int cpus_found = 0;
+       int index, rc;
+
+       parent = of_find_node_by_path("/cpus");
+       if (!parent) {
+               pr_warn("Could not find CPU root node in device tree\n");
+               kfree(cpu_drcs);
+               return -1;
+       }
+
+       /* Search the ibm,drc-indexes array for possible CPU drcs to
+        * add. Note that the format of the ibm,drc-indexes array is
+        * the number of entries in the array followed by the array
+        * of drc values, so we start looking at index = 1.
+        */
+       index = 1;
+       while (cpus_found < cpus_to_add) {
+               u32 drc;
+
+               rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
+                                               index++, &drc);
+               if (rc)
+                       break;
+
+               if (dlpar_cpu_exists(parent, drc))
+                       continue;
+
+               cpu_drcs[cpus_found++] = drc;
+       }
+
+       of_node_put(parent);
+       return cpus_found;
+}
+
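
The ibm,drc-indexes layout described in the comment above, shown as a
standalone sketch. The helper name and the property values are
illustrative only, not from the patch:

        /* Hypothetical helper: cell 0 of ibm,drc-indexes is the entry
         * count, e.g. <4 0x10000000 0x10000001 0x10000002 0x10000003>,
         * so real DRC indexes occupy cells 1..count.
         */
        static void walk_cpu_drcs(struct device_node *parent)
        {
                u32 count, drc, i;

                if (of_property_read_u32_index(parent, "ibm,drc-indexes",
                                               0, &count))
                        return;

                for (i = 1; i <= count; i++) {
                        if (of_property_read_u32_index(parent,
                                        "ibm,drc-indexes", i, &drc))
                                break;
                        pr_debug("drc index[%u] = %x\n", i - 1, drc);
                }
        }
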
+static int dlpar_cpu_add_by_count(u32 cpus_to_add)
+{
+       u32 *cpu_drcs;
+       int cpus_added = 0;
+       int cpus_found;
+       int i, rc;
+
+       pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);
+
+       cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
+       if (!cpu_drcs)
+               return -EINVAL;
+
+       cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
+       if (cpus_found < cpus_to_add) {
+               pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
+                       cpus_found, cpus_to_add);
+               kfree(cpu_drcs);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < cpus_to_add; i++) {
+               rc = dlpar_cpu_add(cpu_drcs[i]);
+               if (rc)
+                       break;
+
+               cpus_added++;
+       }
+
+       if (cpus_added < cpus_to_add) {
+               pr_warn("CPU hot-add failed, removing any added CPUs\n");
+
+               for (i = 0; i < cpus_added; i++)
+                       dlpar_cpu_remove_by_index(cpu_drcs[i]);
+
+               rc = -EINVAL;
+       } else {
+               rc = 0;
+       }
+
+       kfree(cpu_drcs);
+       return rc;
+}
+
+int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
+{
+       u32 count, drc_index;
+       int rc;
+
+       count = hp_elog->_drc_u.drc_count;
+       drc_index = hp_elog->_drc_u.drc_index;
+
+       lock_device_hotplug();
+
+       switch (hp_elog->action) {
+       case PSERIES_HP_ELOG_ACTION_REMOVE:
+               if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+                       rc = dlpar_cpu_remove_by_count(count);
+               else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+                       rc = dlpar_cpu_remove_by_index(drc_index);
+               else
+                       rc = -EINVAL;
+               break;
+       case PSERIES_HP_ELOG_ACTION_ADD:
+               if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+                       rc = dlpar_cpu_add_by_count(count);
+               else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+                       rc = dlpar_cpu_add(drc_index);
+               else
+                       rc = -EINVAL;
+               break;
+       default:
+               pr_err("Invalid action (%d) specified\n", hp_elog->action);
+               rc = -EINVAL;
+               break;
+       }
+
+       unlock_device_hotplug();
+       return rc;
+}
+
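
For reference, a hypothetical caller of dlpar_cpu() requesting a
count-based removal. The initializer mirrors the fields the dispatcher
above reads; the constant names are assumed from asm/rtas.h and
endianness conversions are elided for brevity:

        struct pseries_hp_errorlog hp_elog = {
                .resource = PSERIES_HP_ELOG_RESOURCE_CPU,
                .action   = PSERIES_HP_ELOG_ACTION_REMOVE,
                .id_type  = PSERIES_HP_ELOG_ID_DRC_COUNT,
                ._drc_u   = { .drc_count = 2 },
        };
        int rc = dlpar_cpu(&hp_elog);
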
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+
+static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
+{
+       u32 drc_index;
+       int rc;
+
+       rc = kstrtou32(buf, 0, &drc_index);
+       if (rc)
+               return -EINVAL;
+
+       rc = dlpar_cpu_add(drc_index);
+
+       return rc ? rc : count;
+}
+
+static ssize_t dlpar_cpu_release(const char *buf, size_t count)
+{
+       struct device_node *dn;
+       u32 drc_index;
+       int rc;
+
+       dn = of_find_node_by_path(buf);
+       if (!dn)
+               return -EINVAL;
+
+       rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
+       if (rc) {
+               of_node_put(dn);
+               return -EINVAL;
+       }
+
+       rc = dlpar_cpu_remove(dn, drc_index);
+       of_node_put(dn);
+
+       return rc ? rc : count;
+}
+
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
 static int pseries_smp_notifier(struct notifier_block *nb,
                                unsigned long action, void *data)
 {
@@ -385,6 +914,11 @@ static int __init pseries_cpu_hotplug_init(void)
        int cpu;
        int qcss_tok;
 
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+       ppc_md.cpu_probe = dlpar_cpu_probe;
+       ppc_md.cpu_release = dlpar_cpu_release;
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
        for_each_node_by_name(np, "interrupt-controller") {
                typep = of_get_property(np, "compatible", NULL);
                if (strstr(typep, "open-pic")) {
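
With CONFIG_ARCH_CPU_PROBE_RELEASE enabled, the ppc_md.cpu_probe and
ppc_md.cpu_release hooks wired up above back the generic
/sys/devices/system/cpu/probe and /sys/devices/system/cpu/release
files, so a DLPAR add can be driven from userspace with, for example,
"echo 0x10000020 > /sys/devices/system/cpu/probe" (the drc index value
here is made up), while release takes a device tree path, as
dlpar_cpu_release() above shows.
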
index b7a67e3..477290a 100644
@@ -315,48 +315,48 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
        return 0;
 }
 
-static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
+static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
 {
-       unsigned long dword0;
-       unsigned long lpar_rc;
-       unsigned long dummy_word1;
-       unsigned long flags;
+       long lpar_rc;
+       unsigned long i, j;
+       struct {
+               unsigned long pteh;
+               unsigned long ptel;
+       } ptes[4];
 
-       /* Read 1 pte at a time                        */
-       /* Do not need RPN to logical page translation */
-       /* No cross CEC PFT access                     */
-       flags = 0;
+       for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
 
-       lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);
+               lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
+               if (lpar_rc != H_SUCCESS)
+                       continue;
 
-       BUG_ON(lpar_rc != H_SUCCESS);
+               for (j = 0; j < 4; j++) {
+                       if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
+                           (ptes[j].pteh & HPTE_V_VALID))
+                               return i + j;
+               }
+       }
 
-       return dword0;
+       return -1;
 }
 
 static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
 {
-       unsigned long hash;
-       unsigned long i;
        long slot;
-       unsigned long want_v, hpte_v;
+       unsigned long hash;
+       unsigned long want_v;
+       unsigned long hpte_group;
 
        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);
 
        /* Bolted entries are always in the primary group */
-       slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-       for (i = 0; i < HPTES_PER_GROUP; i++) {
-               hpte_v = pSeries_lpar_hpte_getword0(slot);
-
-               if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
-                       /* HPTE matches */
-                       return slot;
-               ++slot;
-       }
-
-       return -1;
-} 
+       hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+       slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
+       if (slot < 0)
+               return -1;
+       return hpte_group + slot;
+}
 
 static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
                                             unsigned long ea,
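
The rework above batches HPTE reads: instead of eight single-PTE H_READ
hcalls per hash group, __pSeries_lpar_hpte_find() issues two 4-entry
reads (recap only, not part of the patch):

        /*
         * Cost per group with HPTES_PER_GROUP == 8:
         *   old: 8 x plpar_pte_read()   -> 8 hcalls
         *   new: 2 x plpar_pte_read_4() -> 2 hcalls, four pteh/ptel
         *                                  pairs returned per call
         * The helper returns the slot offset within the group; the
         * caller forms the global slot as hpte_group + slot.
         */
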
@@ -396,6 +396,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
        BUG_ON(lpar_rc != H_SUCCESS);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
  * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
  * to make sure that we avoid bouncing the hypervisor tlbie lock.
@@ -494,6 +495,15 @@ static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
                __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
                                                   index, psize, ssize);
 }
+#else
+static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
+                                            unsigned long addr,
+                                            unsigned char *hpte_slot_array,
+                                            int psize, int ssize, int local)
+{
+       WARN(1, "%s called without THP support\n", __func__);
+}
+#endif
 
 static void pSeries_lpar_hpte_removebolted(unsigned long ea,
                                           int psize, int ssize)
index 8411c27..7aa83f0 100644
@@ -73,6 +73,15 @@ static inline int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
 }
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+int dlpar_cpu(struct pseries_hp_errorlog *hp_elog);
+#else
+static inline int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
 /* PCI root bridge prepare function override for pseries */
 struct pci_host_bridge;
 int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
index 3b6647e..9a3e27b 100644
@@ -40,6 +40,9 @@ static int ras_check_exception_token;
 #define EPOW_SENSOR_TOKEN      9
 #define EPOW_SENSOR_INDEX      0
 
+/* EPOW events counter variable */
+static int num_epow_events;
+
 static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
@@ -82,32 +85,30 @@ static void handle_system_shutdown(char event_modifier)
 {
        switch (event_modifier) {
        case EPOW_SHUTDOWN_NORMAL:
-               pr_emerg("Firmware initiated power off");
+               pr_emerg("Power off requested\n");
                orderly_poweroff(true);
                break;
 
        case EPOW_SHUTDOWN_ON_UPS:
-               pr_emerg("Loss of power reported by firmware, system is "
-                       "running on UPS/battery");
-               pr_emerg("Check RTAS error log for details");
+               pr_emerg("Loss of system power detected. System is running on"
+                        " UPS/battery. Check RTAS error log for details\n");
                orderly_poweroff(true);
                break;
 
        case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
-               pr_emerg("Loss of system critical functions reported by "
-                       "firmware");
-               pr_emerg("Check RTAS error log for details");
+               pr_emerg("Loss of system critical functions detected. Check"
+                        " RTAS error log for details\n");
                orderly_poweroff(true);
                break;
 
        case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH:
-               pr_emerg("Ambient temperature too high reported by firmware");
-               pr_emerg("Check RTAS error log for details");
+               pr_emerg("High ambient temperature detected. Check RTAS"
+                        " error log for details\n");
                orderly_poweroff(true);
                break;
 
        default:
-               pr_err("Unknown power/cooling shutdown event (modifier %d)",
+               pr_err("Unknown power/cooling shutdown event (modifier = %d)\n",
                        event_modifier);
        }
 }
@@ -145,17 +146,20 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log)
 
        switch (action_code) {
        case EPOW_RESET:
-               pr_err("Non critical power or cooling issue cleared");
+               if (num_epow_events) {
+                       pr_info("Non-critical power/cooling issue cleared\n");
+                       num_epow_events--;
+               }
                break;
 
        case EPOW_WARN_COOLING:
-               pr_err("Non critical cooling issue reported by firmware");
-               pr_err("Check RTAS error log for details");
+               pr_info("Non-critical cooling issue detected. Check RTAS error"
+                       " log for details\n");
                break;
 
        case EPOW_WARN_POWER:
-               pr_err("Non critical power issue reported by firmware");
-               pr_err("Check RTAS error log for details");
+               pr_info("Non-critical power issue detected. Check RTAS error"
+                       " log for details\n");
                break;
 
        case EPOW_SYSTEM_SHUTDOWN:
@@ -163,23 +167,27 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log)
                break;
 
        case EPOW_SYSTEM_HALT:
-               pr_emerg("Firmware initiated power off");
+               pr_emerg("Critical power/cooling issue detected. Check RTAS"
+                        " error log for details. Powering off.\n");
                orderly_poweroff(true);
                break;
 
        case EPOW_MAIN_ENCLOSURE:
        case EPOW_POWER_OFF:
-               pr_emerg("Critical power/cooling issue reported by firmware");
-               pr_emerg("Check RTAS error log for details");
-               pr_emerg("Immediate power off");
+               pr_emerg("System about to lose power. Check RTAS error log"
+                        " for details. Powering off immediately.\n");
                emergency_sync();
                kernel_power_off();
                break;
 
        default:
-               pr_err("Unknown power/cooling event (action code %d)",
+               pr_err("Unknown power/cooling event (action code = %d)\n",
                        action_code);
        }
+
+       /* Increment the EPOW events counter */
+       if (action_code != EPOW_RESET)
+               num_epow_events++;
 }
 
 /* Handle environmental and power warning (EPOW) interrupts. */
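
An illustrative event sequence for the num_epow_events counter added
above (hypothetical ordering):

        /*
         * EPOW_WARN_COOLING -> logged, num_epow_events = 1
         * EPOW_WARN_POWER   -> logged, num_epow_events = 2
         * EPOW_RESET        -> "cleared" logged, num_epow_events = 1
         * EPOW_RESET        -> "cleared" logged, num_epow_events = 0
         * EPOW_RESET        -> suppressed, counter stays at 0
         */
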
@@ -249,13 +257,12 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
        log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
 
        if (fatal) {
-               pr_emerg("Fatal hardware error reported by firmware");
-               pr_emerg("Check RTAS error log for details");
-               pr_emerg("Immediate power off");
+               pr_emerg("Fatal hardware error detected. Check RTAS error"
+                        " log for details. Powering off immediately\n");
                emergency_sync();
                kernel_power_off();
        } else {
-               pr_err("Recoverable hardware error reported by firmware");
+               pr_err("Recoverable hardware error detected\n");
        }
 
        spin_unlock(&ras_log_buf_lock);
index 5b492a6..bd6bd72 100644
@@ -26,7 +26,6 @@ obj-$(CONFIG_FSL_85XX_CACHE_SRAM)     += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
 obj-$(CONFIG_SIMPLE_GPIO)      += simple_gpio.o
 obj-$(CONFIG_FSL_RIO)          += fsl_rio.o fsl_rmu.o
 obj-$(CONFIG_TSI108_BRIDGE)    += tsi108_pci.o tsi108_dev.o
-obj-$(CONFIG_QUICC_ENGINE)     += qe_lib/
 mv64x60-$(CONFIG_PCI)          += mv64x60_pci.o
 obj-$(CONFIG_MV64X60)          += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \
                                   mv64x60_udbg.o
index 7a399b4..c713b34 100644
@@ -313,6 +313,7 @@ static const struct of_device_id axon_ram_device_id[] = {
        },
        {}
 };
+MODULE_DEVICE_TABLE(of, axon_ram_device_id);
 
 static struct platform_driver axon_ram_driver = {
        .probe          = axon_ram_probe,
index e00a5ee..9d32465 100644
@@ -27,8 +27,8 @@
 
 #include <asm/udbg.h>
 #include <asm/io.h>
-#include <asm/rheap.h>
 #include <asm/cpm.h>
+#include <soc/fsl/qe/qe.h>
 
 #include <mm/mmu_decl.h>
 
@@ -65,162 +65,6 @@ void __init udbg_init_cpm(void)
 }
 #endif
 
-static spinlock_t cpm_muram_lock;
-static rh_block_t cpm_boot_muram_rh_block[16];
-static rh_info_t cpm_muram_info;
-static u8 __iomem *muram_vbase;
-static phys_addr_t muram_pbase;
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS      4
-
-int cpm_muram_init(void)
-{
-       struct device_node *np;
-       struct resource r;
-       u32 zero[OF_MAX_ADDR_CELLS] = {};
-       resource_size_t max = 0;
-       int i = 0;
-       int ret = 0;
-
-       if (muram_pbase)
-               return 0;
-
-       spin_lock_init(&cpm_muram_lock);
-       /* initialize the info header */
-       rh_init(&cpm_muram_info, 1,
-               sizeof(cpm_boot_muram_rh_block) /
-               sizeof(cpm_boot_muram_rh_block[0]),
-               cpm_boot_muram_rh_block);
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
-       if (!np) {
-               /* try legacy bindings */
-               np = of_find_node_by_name(NULL, "data-only");
-               if (!np) {
-                       printk(KERN_ERR "Cannot find CPM muram data node");
-                       ret = -ENODEV;
-                       goto out;
-               }
-       }
-
-       muram_pbase = of_translate_address(np, zero);
-       if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
-               printk(KERN_ERR "Cannot translate zero through CPM muram node");
-               ret = -ENODEV;
-               goto out;
-       }
-
-       while (of_address_to_resource(np, i++, &r) == 0) {
-               if (r.end > max)
-                       max = r.end;
-
-               rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
-                                resource_size(&r));
-       }
-
-       muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
-       if (!muram_vbase) {
-               printk(KERN_ERR "Cannot map CPM muram");
-               ret = -ENOMEM;
-       }
-
-out:
-       of_node_put(np);
-       return ret;
-}
-
-/**
- * cpm_muram_alloc - allocate the requested size worth of multi-user ram
- * @size: number of bytes to allocate
- * @align: requested alignment, in bytes
- *
- * This function returns an offset into the muram area.
- * Use cpm_dpram_addr() to get the virtual address of the area.
- * Use cpm_muram_free() to free the allocation.
- */
-unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
-{
-       unsigned long start;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cpm_muram_lock, flags);
-       cpm_muram_info.alignment = align;
-       start = rh_alloc(&cpm_muram_info, size, "commproc");
-       if (!IS_ERR_VALUE(start))
-               memset_io(cpm_muram_addr(start), 0, size);
-       spin_unlock_irqrestore(&cpm_muram_lock, flags);
-
-       return start;
-}
-EXPORT_SYMBOL(cpm_muram_alloc);
-
-/**
- * cpm_muram_free - free a chunk of multi-user ram
- * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
- */
-int cpm_muram_free(unsigned long offset)
-{
-       int ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cpm_muram_lock, flags);
-       ret = rh_free(&cpm_muram_info, offset);
-       spin_unlock_irqrestore(&cpm_muram_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(cpm_muram_free);
-
-/**
- * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
- * @offset: the offset into the muram area to reserve
- * @size: the number of bytes to reserve
- *
- * This function returns "start" on success, -ENOMEM on failure.
- * Use cpm_dpram_addr() to get the virtual address of the area.
- * Use cpm_muram_free() to free the allocation.
- */
-unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
-{
-       unsigned long start;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cpm_muram_lock, flags);
-       cpm_muram_info.alignment = 1;
-       start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc");
-       spin_unlock_irqrestore(&cpm_muram_lock, flags);
-
-       return start;
-}
-EXPORT_SYMBOL(cpm_muram_alloc_fixed);
-
-/**
- * cpm_muram_addr - turn a muram offset into a virtual address
- * @offset: muram offset to convert
- */
-void __iomem *cpm_muram_addr(unsigned long offset)
-{
-       return muram_vbase + offset;
-}
-EXPORT_SYMBOL(cpm_muram_addr);
-
-unsigned long cpm_muram_offset(void __iomem *addr)
-{
-       return addr - (void __iomem *)muram_vbase;
-}
-EXPORT_SYMBOL(cpm_muram_offset);
-
-/**
- * cpm_muram_dma - turn a muram virtual address into a DMA address
- * @offset: virtual address from cpm_muram_addr() to convert
- */
-dma_addr_t cpm_muram_dma(void __iomem *addr)
-{
-       return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
-}
-EXPORT_SYMBOL(cpm_muram_dma);
-
 #if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)
 
 struct cpm2_ioports {
index 38138cf..47f7810 100644
@@ -243,8 +243,6 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
        if (status & LTESR_CS)
                dev_err(ctrl->dev, "Chip select error: "
                        "LTESR 0x%08X\n", status);
-       if (status & LTESR_UPM)
-               ;
        if (status & LTESR_FCT) {
                dev_err(ctrl->dev, "FCM command time-out: "
                        "LTESR 0x%08X\n", status);
index a1ac80b..c69e88e 100644
@@ -218,6 +218,19 @@ static void setup_pci_atmu(struct pci_controller *hose)
         */
        setup_inbound = !is_kdump();
 
+       if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
+               /*
+                * BSC9132 Rev1.0 has an issue where all the PEX inbound
+                * windows have implemented the default target value as 0xf
+                * for CCSR space. In all Freescale legacy devices the target
+                * of 0xf is reserved for local memory space. 9132 Rev1.0
+                * now has local memory space mapped to target 0x0 instead of
+                * 0xf. Hence add a workaround to remove the target 0xf
+                * defined for memory space from the inbound window attributes.
+                */
+               piwar &= ~PIWAR_TGI_LOCAL;
+       }
+
        if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
                if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
                        win_idx = 2;
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
deleted file mode 100644
index 3c25199..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# QE Communication options
-#
-
-config UCC_SLOW
-       bool
-       default y if SERIAL_QE
-       help
-         This option provides qe_lib support to UCC slow
-         protocols: UART, BISYNC, QMC
-
-config UCC_FAST
-       bool
-       default y if UCC_GETH
-       help
-         This option provides qe_lib support to UCC fast
-         protocols: HDLC, Ethernet, ATM, transparent
-
-config UCC
-       bool
-       default y if UCC_FAST || UCC_SLOW
-
-config QE_USB
-       bool
-       default y if USB_FSL_QE
-       help
-         QE USB Controller support
diff --git a/arch/powerpc/sysdev/qe_lib/Makefile b/arch/powerpc/sysdev/qe_lib/Makefile
deleted file mode 100644
index f1855c1..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for the linux ppc-specific parts of QE
-#
-obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_ic.o qe_io.o
-
-obj-$(CONFIG_UCC)      += ucc.o
-obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
-obj-$(CONFIG_UCC_FAST) += ucc_fast.o
-obj-$(CONFIG_QE_USB)   += usb.o
-obj-$(CONFIG_QE_GPIO)  += gpio.o
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c
deleted file mode 100644
index 521e67a..0000000
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * QUICC Engine GPIOs
- *
- * Copyright (c) MontaVista Software, Inc. 2008.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <asm/qe.h>
-
-struct qe_gpio_chip {
-       struct of_mm_gpio_chip mm_gc;
-       spinlock_t lock;
-
-       unsigned long pin_flags[QE_PIO_PINS];
-#define QE_PIN_REQUESTED 0
-
-       /* shadowed data register to clear/set bits safely */
-       u32 cpdata;
-
-       /* saved_regs used to restore dedicated functions */
-       struct qe_pio_regs saved_regs;
-};
-
-static inline struct qe_gpio_chip *
-to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc)
-{
-       return container_of(mm_gc, struct qe_gpio_chip, mm_gc);
-}
-
-static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
-{
-       struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
-       struct qe_pio_regs __iomem *regs = mm_gc->regs;
-
-       qe_gc->cpdata = in_be32(&regs->cpdata);
-       qe_gc->saved_regs.cpdata = qe_gc->cpdata;
-       qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
-       qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
-       qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
-       qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
-       qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
-}
-
-static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
-       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-       struct qe_pio_regs __iomem *regs = mm_gc->regs;
-       u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
-
-       return in_be32(&regs->cpdata) & pin_mask;
-}
-
-static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
-       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-       struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
-       struct qe_pio_regs __iomem *regs = mm_gc->regs;
-       unsigned long flags;
-       u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
-
-       spin_lock_irqsave(&qe_gc->lock, flags);
-
-       if (val)
-               qe_gc->cpdata |= pin_mask;
-       else
-               qe_gc->cpdata &= ~pin_mask;
-
-       out_be32(&regs->cpdata, qe_gc->cpdata);
-
-       spin_unlock_irqrestore(&qe_gc->lock, flags);
-}
-
-static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
-       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-       struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
-       unsigned long flags;
-
-       spin_lock_irqsave(&qe_gc->lock, flags);
-
-       __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
-
-       spin_unlock_irqrestore(&qe_gc->lock, flags);
-
-       return 0;
-}
-
-static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
-       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-       struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
-       unsigned long flags;
-
-       qe_gpio_set(gc, gpio, val);
-
-       spin_lock_irqsave(&qe_gc->lock, flags);
-
-       __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
-
-       spin_unlock_irqrestore(&qe_gc->lock, flags);
-
-       return 0;
-}
-
-struct qe_pin {
-       /*
-        * The qe_gpio_chip name is unfortunate, we should change that to
-        * something like qe_pio_controller. Someday.
-        */
-       struct qe_gpio_chip *controller;
-       int num;
-};
-
-/**
- * qe_pin_request - Request a QE pin
- * @np:                device node to get a pin from
- * @index:     index of a pin in the device tree
- * Context:    non-atomic
- *
- * This function return qe_pin so that you could use it with the rest of
- * the QE Pin Multiplexing API.
- */
-struct qe_pin *qe_pin_request(struct device_node *np, int index)
-{
-       struct qe_pin *qe_pin;
-       struct gpio_chip *gc;
-       struct of_mm_gpio_chip *mm_gc;
-       struct qe_gpio_chip *qe_gc;
-       int err;
-       unsigned long flags;
-
-       qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
-       if (!qe_pin) {
-               pr_debug("%s: can't allocate memory\n", __func__);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       err = of_get_gpio(np, index);
-       if (err < 0)
-               goto err0;
-       gc = gpio_to_chip(err);
-       if (WARN_ON(!gc))
-               goto err0;
-
-       if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
-               pr_debug("%s: tried to get a non-qe pin\n", __func__);
-               err = -EINVAL;
-               goto err0;
-       }
-
-       mm_gc = to_of_mm_gpio_chip(gc);
-       qe_gc = to_qe_gpio_chip(mm_gc);
-
-       spin_lock_irqsave(&qe_gc->lock, flags);
-
-       err -= gc->base;
-       if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
-               qe_pin->controller = qe_gc;
-               qe_pin->num = err;
-               err = 0;
-       } else {
-               err = -EBUSY;
-       }
-
-       spin_unlock_irqrestore(&qe_gc->lock, flags);
-
-       if (!err)
-               return qe_pin;
-err0:
-       kfree(qe_pin);
-       pr_debug("%s failed with status %d\n", __func__, err);
-       return ERR_PTR(err);
-}
-EXPORT_SYMBOL(qe_pin_request);
-
-/**
- * qe_pin_free - Free a pin
- * @qe_pin:    pointer to the qe_pin structure
- * Context:    any
- *
- * This function frees the qe_pin structure and makes a pin available
- * for further qe_pin_request() calls.
- */
-void qe_pin_free(struct qe_pin *qe_pin)
-{
-       struct qe_gpio_chip *qe_gc = qe_pin->controller;
-       unsigned long flags;
-       const int pin = qe_pin->num;
-
-       spin_lock_irqsave(&qe_gc->lock, flags);
-       test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
-       spin_unlock_irqrestore(&qe_gc->lock, flags);
-
-       kfree(qe_pin);
-}
-EXPORT_SYMBOL(qe_pin_free);
-
-/**
- * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
- * @qe_pin:    pointer to the qe_pin structure
- * Context:    any
- *
- * This function resets a pin to a dedicated peripheral function that
- * has been set up by the firmware.
- */
-void qe_pin_set_dedicated(struct qe_pin *qe_pin)
-{
-       struct qe_gpio_chip *qe_gc = qe_pin->controller;
-       struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
-       struct qe_pio_regs *sregs = &qe_gc->saved_regs;
-       int pin = qe_pin->num;
-       u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
-       u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
-       bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qe_gc->lock, flags);
-
-       if (second_reg) {
-               clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
-               clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
-       } else {
-               clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
-               clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
-       }
-
-       if (sregs->cpdata & mask1)
-               qe_gc->cpdata |= mask1;
-       else
-               qe_gc->cpdata &= ~mask1;
-
-       out_be32(&regs->cpdata, qe_gc->cpdata);
-       clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
-
-       spin_unlock_irqrestore(&qe_gc->lock, flags);
-}
-EXPORT_SYMBOL(qe_pin_set_dedicated);
-
-/**
- * qe_pin_set_gpio - Set a pin to the GPIO mode
- * @qe_pin:    pointer to the qe_pin structure
- * Context:    any
- *
- * This function sets a pin to the GPIO mode.
- */
-void qe_pin_set_gpio(struct qe_pin *qe_pin)
-{
-       struct qe_gpio_chip *qe_gc = qe_pin->controller;
-       struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qe_gc->lock, flags);
-
-       /* Let's make it input by default, GPIO API is able to change that. */
-       __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
-
-       spin_unlock_irqrestore(&qe_gc->lock, flags);
-}
-EXPORT_SYMBOL(qe_pin_set_gpio);
-
-static int __init qe_add_gpiochips(void)
-{
-       struct device_node *np;
-
-       for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
-               int ret;
-               struct qe_gpio_chip *qe_gc;
-               struct of_mm_gpio_chip *mm_gc;
-               struct gpio_chip *gc;
-
-               qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
-               if (!qe_gc) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-
-               spin_lock_init(&qe_gc->lock);
-
-               mm_gc = &qe_gc->mm_gc;
-               gc = &mm_gc->gc;
-
-               mm_gc->save_regs = qe_gpio_save_regs;
-               gc->ngpio = QE_PIO_PINS;
-               gc->direction_input = qe_gpio_dir_in;
-               gc->direction_output = qe_gpio_dir_out;
-               gc->get = qe_gpio_get;
-               gc->set = qe_gpio_set;
-
-               ret = of_mm_gpiochip_add(np, mm_gc);
-               if (ret)
-                       goto err;
-               continue;
-err:
-               pr_err("%s: registration failed with status %d\n",
-                      np->full_name, ret);
-               kfree(qe_gc);
-               /* try others anyway */
-       }
-       return 0;
-}
-arch_initcall(qe_add_gpiochips);
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
deleted file mode 100644
index c2518cd..0000000
+++ /dev/null
@@ -1,706 +0,0 @@
-/*
- * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
- *
- * Description:
- * General Purpose functions for the global management of the
- * QUICC Engine (QE).
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/crc32.h>
-#include <linux/mod_devicetable.h>
-#include <linux/of_platform.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-#include <asm/prom.h>
-#include <asm/rheap.h>
-
-static void qe_snums_init(void);
-static int qe_sdma_init(void);
-
-static DEFINE_SPINLOCK(qe_lock);
-DEFINE_SPINLOCK(cmxgcr_lock);
-EXPORT_SYMBOL(cmxgcr_lock);
-
-/* QE snum state */
-enum qe_snum_state {
-       QE_SNUM_STATE_USED,
-       QE_SNUM_STATE_FREE
-};
-
-/* QE snum */
-struct qe_snum {
-       u8 num;
-       enum qe_snum_state state;
-};
-
-/* We allocate this here because it is used almost exclusively for
- * the communication processor devices.
- */
-struct qe_immap __iomem *qe_immr;
-EXPORT_SYMBOL(qe_immr);
-
-static struct qe_snum snums[QE_NUM_OF_SNUM];   /* Dynamically allocated SNUMs */
-static unsigned int qe_num_of_snum;
-
-static phys_addr_t qebase = -1;
-
-phys_addr_t get_qe_base(void)
-{
-       struct device_node *qe;
-       int size;
-       const u32 *prop;
-
-       if (qebase != -1)
-               return qebase;
-
-       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
-       if (!qe) {
-               qe = of_find_node_by_type(NULL, "qe");
-               if (!qe)
-                       return qebase;
-       }
-
-       prop = of_get_property(qe, "reg", &size);
-       if (prop && size >= sizeof(*prop))
-               qebase = of_translate_address(qe, prop);
-       of_node_put(qe);
-
-       return qebase;
-}
-
-EXPORT_SYMBOL(get_qe_base);
-
-void qe_reset(void)
-{
-       if (qe_immr == NULL)
-               qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
-
-       qe_snums_init();
-
-       qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
-                    QE_CR_PROTOCOL_UNSPECIFIED, 0);
-
-       /* Reclaim the MURAM memory for our use. */
-       qe_muram_init();
-
-       if (qe_sdma_init())
-               panic("sdma init failed!");
-}
-
-int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
-{
-       unsigned long flags;
-       u8 mcn_shift = 0, dev_shift = 0;
-       u32 ret;
-
-       spin_lock_irqsave(&qe_lock, flags);
-       if (cmd == QE_RESET) {
-               out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
-       } else {
-               if (cmd == QE_ASSIGN_PAGE) {
-                       /* Here device is the SNUM, not sub-block */
-                       dev_shift = QE_CR_SNUM_SHIFT;
-               } else if (cmd == QE_ASSIGN_RISC) {
-                       /* Here device is the SNUM, and mcnProtocol is
-                        * e_QeCmdRiscAssignment value */
-                       dev_shift = QE_CR_SNUM_SHIFT;
-                       mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
-               } else {
-                       if (device == QE_CR_SUBBLOCK_USB)
-                               mcn_shift = QE_CR_MCN_USB_SHIFT;
-                       else
-                               mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
-               }
-
-               out_be32(&qe_immr->cp.cecdr, cmd_input);
-               out_be32(&qe_immr->cp.cecr,
-                        (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
-                         mcn_protocol << mcn_shift));
-       }
-
-       /* wait for the QE_CR_FLG to clear */
-       ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
-                          100, 0);
-       /* On timeout (e.g. failure), the expression will be false (ret == 0),
-          otherwise it will be true (ret == 1). */
-       spin_unlock_irqrestore(&qe_lock, flags);
-
-       return ret == 1;
-}
-EXPORT_SYMBOL(qe_issue_cmd);
-
-/* Set a baud rate generator. This needs lots of work. There are
- * 16 BRGs, which can be connected to the QE channels or output
- * as clocks. The BRGs are in two different block of internal
- * memory mapped space.
- * The BRG clock is the QE clock divided by 2.
- * It was set up long ago during the initial boot phase and is
- * is given to us.
- * Baud rate clocks are zero-based in the driver code (as that maps
- * to port numbers). Documentation uses 1-based numbering.
- */
-static unsigned int brg_clk = 0;
-
-unsigned int qe_get_brg_clk(void)
-{
-       struct device_node *qe;
-       int size;
-       const u32 *prop;
-
-       if (brg_clk)
-               return brg_clk;
-
-       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
-       if (!qe) {
-               qe = of_find_node_by_type(NULL, "qe");
-               if (!qe)
-                       return brg_clk;
-       }
-
-       prop = of_get_property(qe, "brg-frequency", &size);
-       if (prop && size == sizeof(*prop))
-               brg_clk = *prop;
-
-       of_node_put(qe);
-
-       return brg_clk;
-}
-EXPORT_SYMBOL(qe_get_brg_clk);
-
-/* Program the BRG to the given sampling rate and multiplier
- *
- * @brg: the BRG, QE_BRG1 - QE_BRG16
- * @rate: the desired sampling rate
- * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
- * GUMR_L[TDCR].  E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
- * then 'multiplier' should be 8.
- */
-int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
-{
-       u32 divisor, tempval;
-       u32 div16 = 0;
-
-       if ((brg < QE_BRG1) || (brg > QE_BRG16))
-               return -EINVAL;
-
-       divisor = qe_get_brg_clk() / (rate * multiplier);
-
-       if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
-               div16 = QE_BRGC_DIV16;
-               divisor /= 16;
-       }
-
-       /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
-          that the BRG divisor must be even if you're not using divide-by-16
-          mode. */
-       if (!div16 && (divisor & 1) && (divisor > 3))
-               divisor++;
-
-       tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
-               QE_BRGC_ENABLE | div16;
-
-       out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
-
-       return 0;
-}
-EXPORT_SYMBOL(qe_setbrg);
-
-/* Convert a string to a QE clock source enum
- *
- * This function takes a string, typically from a property in the device
- * tree, and returns the corresponding "enum qe_clock" value.
-*/
-enum qe_clock qe_clock_source(const char *source)
-{
-       unsigned int i;
-
-       if (strcasecmp(source, "none") == 0)
-               return QE_CLK_NONE;
-
-       if (strncasecmp(source, "brg", 3) == 0) {
-               i = simple_strtoul(source + 3, NULL, 10);
-               if ((i >= 1) && (i <= 16))
-                       return (QE_BRG1 - 1) + i;
-               else
-                       return QE_CLK_DUMMY;
-       }
-
-       if (strncasecmp(source, "clk", 3) == 0) {
-               i = simple_strtoul(source + 3, NULL, 10);
-               if ((i >= 1) && (i <= 24))
-                       return (QE_CLK1 - 1) + i;
-               else
-                       return QE_CLK_DUMMY;
-       }
-
-       return QE_CLK_DUMMY;
-}
-EXPORT_SYMBOL(qe_clock_source);
-
-/* Initialize SNUMs (thread serial numbers) according to
- * QE Module Control chapter, SNUM table
- */
-static void qe_snums_init(void)
-{
-       int i;
-       static const u8 snum_init_76[] = {
-               0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
-               0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
-               0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
-               0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
-               0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
-               0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
-               0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
-               0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
-               0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
-               0xF4, 0xF5, 0xFC, 0xFD,
-       };
-       static const u8 snum_init_46[] = {
-               0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
-               0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
-               0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
-               0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
-               0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
-               0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
-       };
-       static const u8 *snum_init;
-
-       qe_num_of_snum = qe_get_num_of_snums();
-
-       if (qe_num_of_snum == 76)
-               snum_init = snum_init_76;
-       else
-               snum_init = snum_init_46;
-
-       for (i = 0; i < qe_num_of_snum; i++) {
-               snums[i].num = snum_init[i];
-               snums[i].state = QE_SNUM_STATE_FREE;
-       }
-}
-
-int qe_get_snum(void)
-{
-       unsigned long flags;
-       int snum = -EBUSY;
-       int i;
-
-       spin_lock_irqsave(&qe_lock, flags);
-       for (i = 0; i < qe_num_of_snum; i++) {
-               if (snums[i].state == QE_SNUM_STATE_FREE) {
-                       snums[i].state = QE_SNUM_STATE_USED;
-                       snum = snums[i].num;
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&qe_lock, flags);
-
-       return snum;
-}
-EXPORT_SYMBOL(qe_get_snum);
-
-void qe_put_snum(u8 snum)
-{
-       int i;
-
-       for (i = 0; i < qe_num_of_snum; i++) {
-               if (snums[i].num == snum) {
-                       snums[i].state = QE_SNUM_STATE_FREE;
-                       break;
-               }
-       }
-}
-EXPORT_SYMBOL(qe_put_snum);
-
-static int qe_sdma_init(void)
-{
-       struct sdma __iomem *sdma = &qe_immr->sdma;
-       static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
-
-       if (!sdma)
-               return -ENODEV;
-
-       /* allocate 2 internal temporary buffers (512 bytes size each) for
-        * the SDMA */
-       if (IS_ERR_VALUE(sdma_buf_offset)) {
-               sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
-               if (IS_ERR_VALUE(sdma_buf_offset))
-                       return -ENOMEM;
-       }
-
-       out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
-       out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
-                                       (0x1 << QE_SDMR_CEN_SHIFT)));
-
-       return 0;
-}
-
-/* The maximum number of RISCs we support */
-#define MAX_QE_RISC     4
-
-/* Firmware information stored here for qe_get_firmware_info() */
-static struct qe_firmware_info qe_firmware_info;
-
-/*
- * Set to 1 if QE firmware has been uploaded, and therefore
- * qe_firmware_info contains valid data.
- */
-static int qe_firmware_uploaded;
-
-/*
- * Upload a QE microcode
- *
- * This function is a worker function for qe_upload_firmware().  It does
- * the actual uploading of the microcode.
- */
-static void qe_upload_microcode(const void *base,
-       const struct qe_microcode *ucode)
-{
-       const __be32 *code = base + be32_to_cpu(ucode->code_offset);
-       unsigned int i;
-
-       if (ucode->major || ucode->minor || ucode->revision)
-               printk(KERN_INFO "qe-firmware: "
-                       "uploading microcode '%s' version %u.%u.%u\n",
-                       ucode->id, ucode->major, ucode->minor, ucode->revision);
-       else
-               printk(KERN_INFO "qe-firmware: "
-                       "uploading microcode '%s'\n", ucode->id);
-
-       /* Use auto-increment */
-       out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
-               QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
-
-       for (i = 0; i < be32_to_cpu(ucode->count); i++)
-               out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
-       
-       /* Set I-RAM Ready Register */
-       out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
-}
-
-/*
- * Upload a microcode to the I-RAM at a specific address.
- *
- * See Documentation/powerpc/qe_firmware.txt for information on QE microcode
- * uploading.
- *
- * Currently, only version 1 is supported, so the 'version' field must be
- * set to 1.
- *
- * The SOC model and revision are not validated, they are only displayed for
- * informational purposes.
- *
- * 'calc_size' is the calculated size, in bytes, of the firmware structure and
- * all of the microcode structures, minus the CRC.
- *
- * 'length' is the size that the structure says it is, including the CRC.
- */
-int qe_upload_firmware(const struct qe_firmware *firmware)
-{
-       unsigned int i;
-       unsigned int j;
-       u32 crc;
-       size_t calc_size = sizeof(struct qe_firmware);
-       size_t length;
-       const struct qe_header *hdr;
-
-       if (!firmware) {
-               printk(KERN_ERR "qe-firmware: invalid pointer\n");
-               return -EINVAL;
-       }
-
-       hdr = &firmware->header;
-       length = be32_to_cpu(hdr->length);
-
-       /* Check the magic */
-       if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
-           (hdr->magic[2] != 'F')) {
-               printk(KERN_ERR "qe-firmware: not a microcode\n");
-               return -EPERM;
-       }
-
-       /* Check the version */
-       if (hdr->version != 1) {
-               printk(KERN_ERR "qe-firmware: unsupported version\n");
-               return -EPERM;
-       }
-
-       /* Validate some of the fields */
-       if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
-               printk(KERN_ERR "qe-firmware: invalid data\n");
-               return -EINVAL;
-       }
-
-       /* Validate the length and check if there's a CRC */
-       calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
-
-       for (i = 0; i < firmware->count; i++)
-               /*
-                * For situations where the second RISC uses the same microcode
-                * as the first, the 'code_offset' and 'count' fields will be
-                * zero, so it's okay to add those.
-                */
-               calc_size += sizeof(__be32) *
-                       be32_to_cpu(firmware->microcode[i].count);
-
-       /* Validate the length */
-       if (length != calc_size + sizeof(__be32)) {
-               printk(KERN_ERR "qe-firmware: invalid length\n");
-               return -EPERM;
-       }
-
-       /* Validate the CRC */
-       crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
-       if (crc != crc32(0, firmware, calc_size)) {
-               printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
-               return -EIO;
-       }
-
-       /*
-        * If the microcode calls for it, split the I-RAM.
-        */
-       if (!firmware->split)
-               setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
-
-       if (firmware->soc.model)
-               printk(KERN_INFO
-                       "qe-firmware: firmware '%s' for %u V%u.%u\n",
-                       firmware->id, be16_to_cpu(firmware->soc.model),
-                       firmware->soc.major, firmware->soc.minor);
-       else
-               printk(KERN_INFO "qe-firmware: firmware '%s'\n",
-                       firmware->id);
-
-       /*
-        * The QE only supports one microcode per RISC, so clear out all the
-        * saved microcode information and put in the new.
-        */
-       memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
-       strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
-       qe_firmware_info.extended_modes = firmware->extended_modes;
-       memcpy(qe_firmware_info.vtraps, firmware->vtraps,
-               sizeof(firmware->vtraps));
-
-       /* Loop through each microcode. */
-       for (i = 0; i < firmware->count; i++) {
-               const struct qe_microcode *ucode = &firmware->microcode[i];
-
-               /* Upload a microcode if it's present */
-               if (ucode->code_offset)
-                       qe_upload_microcode(firmware, ucode);
-
-               /* Program the traps for this processor */
-               for (j = 0; j < 16; j++) {
-                       u32 trap = be32_to_cpu(ucode->traps[j]);
-
-                       if (trap)
-                               out_be32(&qe_immr->rsp[i].tibcr[j], trap);
-               }
-
-               /* Enable traps */
-               out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
-       }
-
-       qe_firmware_uploaded = 1;
-
-       return 0;
-}
-EXPORT_SYMBOL(qe_upload_firmware);
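
The length check above reduces to a small invariant: the image is the
fixed header, 'count' microcode descriptors, the payload words, and a
trailing big-endian CRC32 computed over everything before it. A minimal
sketch of that arithmetic, assuming the qe_firmware/qe_microcode layouts
from the QE headers (qe_fw_check_size itself is a hypothetical helper):

static int qe_fw_check_size(const struct qe_firmware *fw, size_t length)
{
	size_t calc = sizeof(struct qe_firmware) +
		      (fw->count - 1) * sizeof(struct qe_microcode);
	unsigned int i;

	/* shared entries carry count == 0, so adding them is a no-op */
	for (i = 0; i < fw->count; i++)
		calc += sizeof(__be32) * be32_to_cpu(fw->microcode[i].count);

	/* the header's length must cover the payload plus the CRC word */
	return (length == calc + sizeof(__be32)) ? 0 : -EPERM;
}
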
-
-/*
- * Get info on the currently-loaded firmware
- *
- * This function also checks the device tree to see if the boot loader has
- * uploaded a firmware already.
- */
-struct qe_firmware_info *qe_get_firmware_info(void)
-{
-       static int initialized;
-       struct property *prop;
-       struct device_node *qe;
-       struct device_node *fw = NULL;
-       const char *sprop;
-       unsigned int i;
-
-       /*
-        * If we haven't checked yet, and a driver hasn't uploaded firmware
-        * yet, then check the device tree for information.
-        */
-       if (qe_firmware_uploaded)
-               return &qe_firmware_info;
-
-       if (initialized)
-               return NULL;
-
-       initialized = 1;
-
-       /*
-        * Newer device trees have an "fsl,qe" compatible property for the QE
-        * node, but we still need to support older device trees.
-        */
-       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
-       if (!qe) {
-               qe = of_find_node_by_type(NULL, "qe");
-               if (!qe)
-                       return NULL;
-       }
-
-       /* Find the 'firmware' child node */
-       for_each_child_of_node(qe, fw) {
-               if (strcmp(fw->name, "firmware") == 0)
-                       break;
-       }
-
-       of_node_put(qe);
-
-       /* Did we find the 'firmware' node? */
-       if (!fw)
-               return NULL;
-
-       qe_firmware_uploaded = 1;
-
-       /* Copy the data into qe_firmware_info */
-       sprop = of_get_property(fw, "id", NULL);
-       if (sprop)
-               strlcpy(qe_firmware_info.id, sprop,
-                       sizeof(qe_firmware_info.id));
-
-       prop = of_find_property(fw, "extended-modes", NULL);
-       if (prop && (prop->length == sizeof(u64))) {
-               const u64 *iprop = prop->value;
-
-               qe_firmware_info.extended_modes = *iprop;
-       }
-
-       prop = of_find_property(fw, "virtual-traps", NULL);
-       if (prop && (prop->length == 32)) {
-               const u32 *iprop = prop->value;
-
-               for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
-                       qe_firmware_info.vtraps[i] = iprop[i];
-       }
-
-       of_node_put(fw);
-
-       return &qe_firmware_info;
-}
-EXPORT_SYMBOL(qe_get_firmware_info);
-
-unsigned int qe_get_num_of_risc(void)
-{
-       struct device_node *qe;
-       int size;
-       unsigned int num_of_risc = 0;
-       const u32 *prop;
-
-       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
-       if (!qe) {
-               /* Older device trees did not have an "fsl,qe"
-                * compatible property, so we need to look for
-                * the QE node by device_type.
-                */
-               qe = of_find_node_by_type(NULL, "qe");
-               if (!qe)
-                       return num_of_risc;
-       }
-
-       prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
-       if (prop && size == sizeof(*prop))
-               num_of_risc = *prop;
-
-       of_node_put(qe);
-
-       return num_of_risc;
-}
-EXPORT_SYMBOL(qe_get_num_of_risc);
-
-unsigned int qe_get_num_of_snums(void)
-{
-       struct device_node *qe;
-       int size;
-       unsigned int num_of_snums;
-       const u32 *prop;
-
-       num_of_snums = 28; /* The default number of SNUMs for threads is 28 */
-       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
-       if (!qe) {
-               /* Older device trees did not have an "fsl,qe"
-                * compatible property, so we need to look for
-                * the QE node by device_type.
-                */
-               qe = of_find_node_by_type(NULL, "qe");
-               if (!qe)
-                       return num_of_snums;
-       }
-
-       prop = of_get_property(qe, "fsl,qe-num-snums", &size);
-       if (prop && size == sizeof(*prop)) {
-               num_of_snums = *prop;
-               if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
-                       /* No QE ever has fewer than 28 SNUMs */
-                       pr_err("QE: number of snums is invalid\n");
-                       of_node_put(qe);
-                       return -EINVAL;
-               }
-       }
-
-       of_node_put(qe);
-
-       return num_of_snums;
-}
-EXPORT_SYMBOL(qe_get_num_of_snums);
-
-#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
-static int qe_resume(struct platform_device *ofdev)
-{
-       if (!qe_alive_during_sleep())
-               qe_reset();
-       return 0;
-}
-
-static int qe_probe(struct platform_device *ofdev)
-{
-       return 0;
-}
-
-static const struct of_device_id qe_ids[] = {
-       { .compatible = "fsl,qe", },
-       { },
-};
-
-static struct platform_driver qe_driver = {
-       .driver = {
-               .name = "fsl-qe",
-               .of_match_table = qe_ids,
-       },
-       .probe = qe_probe,
-       .resume = qe_resume,
-};
-
-static int __init qe_drv_init(void)
-{
-       return platform_driver_register(&qe_driver);
-}
-device_initcall(qe_drv_init);
-#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
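
qe_get_firmware_info(), qe_get_num_of_risc() and qe_get_num_of_snums()
above all share one device-tree lookup idiom: try the "fsl,qe"
compatible first, then fall back to the legacy device_type for older
trees. A minimal sketch of that idiom (qe_find_node is a hypothetical
name; the of_* calls are the standard kernel OF API):

static struct device_node *qe_find_node(void)
{
	struct device_node *qe;

	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
	if (!qe)	/* legacy trees only carry device_type = "qe" */
		qe = of_find_node_by_type(NULL, "qe");

	return qe;	/* caller must of_node_put() a non-NULL result */
}
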
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
deleted file mode 100644 (file)
index ef36f16..0000000
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * arch/powerpc/sysdev/qe_lib/qe_ic.c
- *
- * Copyright (C) 2006 Freescale Semiconductor, Inc.  All rights reserved.
- *
- * Author: Li Yang <leoli@freescale.com>
- * Based on code from Shlomi Gridish <gridish@freescale.com>
- *
- * QUICC ENGINE Interrupt Controller
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/reboot.h>
-#include <linux/slab.h>
-#include <linux/stddef.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/qe_ic.h>
-
-#include "qe_ic.h"
-
-static DEFINE_RAW_SPINLOCK(qe_ic_lock);
-
-static struct qe_ic_info qe_ic_info[] = {
-       [1] = {
-              .mask = 0x00008000,
-              .mask_reg = QEIC_CIMR,
-              .pri_code = 0,
-              .pri_reg = QEIC_CIPWCC,
-              },
-       [2] = {
-              .mask = 0x00004000,
-              .mask_reg = QEIC_CIMR,
-              .pri_code = 1,
-              .pri_reg = QEIC_CIPWCC,
-              },
-       [3] = {
-              .mask = 0x00002000,
-              .mask_reg = QEIC_CIMR,
-              .pri_code = 2,
-              .pri_reg = QEIC_CIPWCC,
-              },
-       [10] = {
-               .mask = 0x00000040,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 1,
-               .pri_reg = QEIC_CIPZCC,
-               },
-       [11] = {
-               .mask = 0x00000020,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 2,
-               .pri_reg = QEIC_CIPZCC,
-               },
-       [12] = {
-               .mask = 0x00000010,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 3,
-               .pri_reg = QEIC_CIPZCC,
-               },
-       [13] = {
-               .mask = 0x00000008,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 4,
-               .pri_reg = QEIC_CIPZCC,
-               },
-       [14] = {
-               .mask = 0x00000004,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 5,
-               .pri_reg = QEIC_CIPZCC,
-               },
-       [15] = {
-               .mask = 0x00000002,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 6,
-               .pri_reg = QEIC_CIPZCC,
-               },
-       [20] = {
-               .mask = 0x10000000,
-               .mask_reg = QEIC_CRIMR,
-               .pri_code = 3,
-               .pri_reg = QEIC_CIPRTA,
-               },
-       [25] = {
-               .mask = 0x00800000,
-               .mask_reg = QEIC_CRIMR,
-               .pri_code = 0,
-               .pri_reg = QEIC_CIPRTB,
-               },
-       [26] = {
-               .mask = 0x00400000,
-               .mask_reg = QEIC_CRIMR,
-               .pri_code = 1,
-               .pri_reg = QEIC_CIPRTB,
-               },
-       [27] = {
-               .mask = 0x00200000,
-               .mask_reg = QEIC_CRIMR,
-               .pri_code = 2,
-               .pri_reg = QEIC_CIPRTB,
-               },
-       [28] = {
-               .mask = 0x00100000,
-               .mask_reg = QEIC_CRIMR,
-               .pri_code = 3,
-               .pri_reg = QEIC_CIPRTB,
-               },
-       [32] = {
-               .mask = 0x80000000,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 0,
-               .pri_reg = QEIC_CIPXCC,
-               },
-       [33] = {
-               .mask = 0x40000000,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 1,
-               .pri_reg = QEIC_CIPXCC,
-               },
-       [34] = {
-               .mask = 0x20000000,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 2,
-               .pri_reg = QEIC_CIPXCC,
-               },
-       [35] = {
-               .mask = 0x10000000,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 3,
-               .pri_reg = QEIC_CIPXCC,
-               },
-       [36] = {
-               .mask = 0x08000000,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 4,
-               .pri_reg = QEIC_CIPXCC,
-               },
-       [40] = {
-               .mask = 0x00800000,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 0,
-               .pri_reg = QEIC_CIPYCC,
-               },
-       [41] = {
-               .mask = 0x00400000,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 1,
-               .pri_reg = QEIC_CIPYCC,
-               },
-       [42] = {
-               .mask = 0x00200000,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 2,
-               .pri_reg = QEIC_CIPYCC,
-               },
-       [43] = {
-               .mask = 0x00100000,
-               .mask_reg = QEIC_CIMR,
-               .pri_code = 3,
-               .pri_reg = QEIC_CIPYCC,
-               },
-};
-
-static inline u32 qe_ic_read(volatile __be32  __iomem * base, unsigned int reg)
-{
-       return in_be32(base + (reg >> 2));
-}
-
-static inline void qe_ic_write(volatile __be32  __iomem * base, unsigned int reg,
-                              u32 value)
-{
-       out_be32(base + (reg >> 2), value);
-}
-
-static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
-{
-       return irq_get_chip_data(virq);
-}
-
-static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
-{
-       return irq_data_get_irq_chip_data(d);
-}
-
-static void qe_ic_unmask_irq(struct irq_data *d)
-{
-       struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
-       unsigned int src = irqd_to_hwirq(d);
-       unsigned long flags;
-       u32 temp;
-
-       raw_spin_lock_irqsave(&qe_ic_lock, flags);
-
-       temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
-       qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
-                   temp | qe_ic_info[src].mask);
-
-       raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
-}
-
-static void qe_ic_mask_irq(struct irq_data *d)
-{
-       struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
-       unsigned int src = irqd_to_hwirq(d);
-       unsigned long flags;
-       u32 temp;
-
-       raw_spin_lock_irqsave(&qe_ic_lock, flags);
-
-       temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
-       qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
-                   temp & ~qe_ic_info[src].mask);
-
-       /* Flush the above write before enabling interrupts; otherwise,
-        * spurious interrupts will sometimes happen.  To be 100% sure
-        * that the write has reached the device before interrupts are
-        * enabled, the mask register would have to be read back; however,
-        * this is not required for correctness, only to avoid wasting
-        * time on a large number of spurious interrupts.  In testing,
-        * a sync reduced the observed spurious interrupts to zero.
-        */
-       mb();
-
-       raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
-}
-
-static struct irq_chip qe_ic_irq_chip = {
-       .name = "QEIC",
-       .irq_unmask = qe_ic_unmask_irq,
-       .irq_mask = qe_ic_mask_irq,
-       .irq_mask_ack = qe_ic_mask_irq,
-};
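
The comment in qe_ic_mask_irq() above settles for mb() rather than the
fully synchronous alternative it describes. For illustration, a hedged
sketch of that read-back variant (locking elided; qe_ic_mask_irq_sync is
a hypothetical name, not the in-tree code):

static void qe_ic_mask_irq_sync(struct qe_ic *qe_ic, unsigned int src)
{
	u32 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);

	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
		    temp & ~qe_ic_info[src].mask);

	/* a read from the same device flushes the posted write */
	(void)qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
}
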
-
-static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
-                           enum irq_domain_bus_token bus_token)
-{
-       /* Exact match, unless qe_ic node is NULL */
-       struct device_node *of_node = irq_domain_get_of_node(h);
-       return of_node == NULL || of_node == node;
-}
-
-static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
-                         irq_hw_number_t hw)
-{
-       struct qe_ic *qe_ic = h->host_data;
-       struct irq_chip *chip;
-
-       if (qe_ic_info[hw].mask == 0) {
-               printk(KERN_ERR "Can't map reserved IRQ\n");
-               return -EINVAL;
-       }
-       /* Default chip */
-       chip = &qe_ic->hc_irq;
-
-       irq_set_chip_data(virq, qe_ic);
-       irq_set_status_flags(virq, IRQ_LEVEL);
-
-       irq_set_chip_and_handler(virq, chip, handle_level_irq);
-
-       return 0;
-}
-
-static const struct irq_domain_ops qe_ic_host_ops = {
-       .match = qe_ic_host_match,
-       .map = qe_ic_host_map,
-       .xlate = irq_domain_xlate_onetwocell,
-};
-
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
-{
-       int irq;
-
-       BUG_ON(qe_ic == NULL);
-
-       /* get the interrupt source vector. */
-       irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
-
-       if (irq == 0)
-               return NO_IRQ;
-
-       return irq_linear_revmap(qe_ic->irqhost, irq);
-}
-
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
-{
-       int irq;
-
-       BUG_ON(qe_ic == NULL);
-
-       /* get the interrupt source vector. */
-       irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
-
-       if (irq == 0)
-               return NO_IRQ;
-
-       return irq_linear_revmap(qe_ic->irqhost, irq);
-}
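
Both getters above decode the vector the same way: the pending source
number sits in the top six bits of CIVEC/CHIVEC, hence the shift by 26
before the linear revmap. As a one-line restatement (hypothetical helper
name):

static inline unsigned int qeic_vector_to_src(u32 vec)
{
	return vec >> 26;	/* source in bits 31:26; 0 means none pending */
}
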
-
-void __init qe_ic_init(struct device_node *node, unsigned int flags,
-                      void (*low_handler)(struct irq_desc *desc),
-                      void (*high_handler)(struct irq_desc *desc))
-{
-       struct qe_ic *qe_ic;
-       struct resource res;
-       u32 temp = 0, ret, high_active = 0;
-
-       ret = of_address_to_resource(node, 0, &res);
-       if (ret)
-               return;
-
-       qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
-       if (qe_ic == NULL)
-               return;
-
-       qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
-                                              &qe_ic_host_ops, qe_ic);
-       if (qe_ic->irqhost == NULL) {
-               kfree(qe_ic);
-               return;
-       }
-
-       qe_ic->regs = ioremap(res.start, resource_size(&res));
-
-       qe_ic->hc_irq = qe_ic_irq_chip;
-
-       qe_ic->virq_high = irq_of_parse_and_map(node, 0);
-       qe_ic->virq_low = irq_of_parse_and_map(node, 1);
-
-       if (qe_ic->virq_low == NO_IRQ) {
-               printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
-               kfree(qe_ic);
-               return;
-       }
-
-       /*
-        * The default priority scheme is grouped. If spread mode is
-        * required, configure cicr accordingly.
-        */
-       if (flags & QE_IC_SPREADMODE_GRP_W)
-               temp |= CICR_GWCC;
-       if (flags & QE_IC_SPREADMODE_GRP_X)
-               temp |= CICR_GXCC;
-       if (flags & QE_IC_SPREADMODE_GRP_Y)
-               temp |= CICR_GYCC;
-       if (flags & QE_IC_SPREADMODE_GRP_Z)
-               temp |= CICR_GZCC;
-       if (flags & QE_IC_SPREADMODE_GRP_RISCA)
-               temp |= CICR_GRTA;
-       if (flags & QE_IC_SPREADMODE_GRP_RISCB)
-               temp |= CICR_GRTB;
-
-       /* choose destination signal for highest priority interrupt */
-       if (flags & QE_IC_HIGH_SIGNAL) {
-               temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
-               high_active = 1;
-       }
-
-       qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
-
-       irq_set_handler_data(qe_ic->virq_low, qe_ic);
-       irq_set_chained_handler(qe_ic->virq_low, low_handler);
-
-       if (qe_ic->virq_high != NO_IRQ &&
-                       qe_ic->virq_high != qe_ic->virq_low) {
-               irq_set_handler_data(qe_ic->virq_high, qe_ic);
-               irq_set_chained_handler(qe_ic->virq_high, high_handler);
-       }
-}
-
-void qe_ic_set_highest_priority(unsigned int virq, int high)
-{
-       struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-       unsigned int src = virq_to_hw(virq);
-       u32 temp = 0;
-
-       temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
-
-       temp &= ~CICR_HP_MASK;
-       temp |= src << CICR_HP_SHIFT;
-
-       temp &= ~CICR_HPIT_MASK;
-       temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
-
-       qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
-}
-
-/* Set Priority level within its group, from 1 to 8 */
-int qe_ic_set_priority(unsigned int virq, unsigned int priority)
-{
-       struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-       unsigned int src = virq_to_hw(virq);
-       u32 temp;
-
-       if (priority > 8 || priority == 0)
-               return -EINVAL;
-       if (src > 127)
-               return -EINVAL;
-       if (qe_ic_info[src].pri_reg == 0)
-               return -EINVAL;
-
-       temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
-
-       if (priority < 4) {
-               temp &= ~(0x7 << (32 - priority * 3));
-               temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
-       } else {
-               temp &= ~(0x7 << (24 - priority * 3));
-               temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
-       }
-
-       qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
-
-       return 0;
-}
-
-/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
-{
-       struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-       unsigned int src = virq_to_hw(virq);
-       u32 temp, control_reg = QEIC_CICNR, shift = 0;
-
-       if (priority > 2 || priority == 0)
-               return -EINVAL;
-
-       switch (qe_ic_info[src].pri_reg) {
-       case QEIC_CIPZCC:
-               shift = CICNR_ZCC1T_SHIFT;
-               break;
-       case QEIC_CIPWCC:
-               shift = CICNR_WCC1T_SHIFT;
-               break;
-       case QEIC_CIPYCC:
-               shift = CICNR_YCC1T_SHIFT;
-               break;
-       case QEIC_CIPXCC:
-               shift = CICNR_XCC1T_SHIFT;
-               break;
-       case QEIC_CIPRTA:
-               shift = CRICR_RTA1T_SHIFT;
-               control_reg = QEIC_CRICR;
-               break;
-       case QEIC_CIPRTB:
-               shift = CRICR_RTB1T_SHIFT;
-               control_reg = QEIC_CRICR;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       shift += (2 - priority) * 2;
-       temp = qe_ic_read(qe_ic->regs, control_reg);
-       temp &= ~(SIGNAL_MASK << shift);
-       temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
-       qe_ic_write(qe_ic->regs, control_reg, temp);
-
-       return 0;
-}
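
qe_ic_set_high_priority() updates one 2-bit signal selector per priority
slot: SIGNAL_MASK is 3, and 'shift += (2 - priority) * 2' places the
priority-1 slot two bits above the priority-2 slot within each group's
field. The field update itself, isolated as a sketch (hypothetical
helper name):

static u32 qe_ic_signal_field(u32 reg, unsigned int shift, int high)
{
	reg &= ~(SIGNAL_MASK << shift);			   /* clear old selector */
	reg |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift; /* install new one */
	return reg;
}
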
-
-static struct bus_type qe_ic_subsys = {
-       .name = "qe_ic",
-       .dev_name = "qe_ic",
-};
-
-static struct device device_qe_ic = {
-       .id = 0,
-       .bus = &qe_ic_subsys,
-};
-
-static int __init init_qe_ic_sysfs(void)
-{
-       int rc;
-
-       printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
-
-       rc = subsys_system_register(&qe_ic_subsys, NULL);
-       if (rc) {
-               printk(KERN_ERR "Failed registering qe_ic sys class\n");
-               return -ENODEV;
-       }
-       rc = device_register(&device_qe_ic);
-       if (rc) {
-               printk(KERN_ERR "Failed registering qe_ic sys device\n");
-               return -ENODEV;
-       }
-       return 0;
-}
-
-subsys_initcall(init_qe_ic_sysfs);
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
deleted file mode 100644 (file)
index efef7ab..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * arch/powerpc/sysdev/qe_lib/qe_ic.h
- *
- * QUICC ENGINE Interrupt Controller Header
- *
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Li Yang <leoli@freescale.com>
- * Based on code from Shlomi Gridish <gridish@freescale.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#ifndef _POWERPC_SYSDEV_QE_IC_H
-#define _POWERPC_SYSDEV_QE_IC_H
-
-#include <asm/qe_ic.h>
-
-#define NR_QE_IC_INTS          64
-
-/* QE IC registers offset */
-#define QEIC_CICR              0x00
-#define QEIC_CIVEC             0x04
-#define QEIC_CRIPNR            0x08
-#define QEIC_CIPNR             0x0c
-#define QEIC_CIPXCC            0x10
-#define QEIC_CIPYCC            0x14
-#define QEIC_CIPWCC            0x18
-#define QEIC_CIPZCC            0x1c
-#define QEIC_CIMR              0x20
-#define QEIC_CRIMR             0x24
-#define QEIC_CICNR             0x28
-#define QEIC_CIPRTA            0x30
-#define QEIC_CIPRTB            0x34
-#define QEIC_CRICR             0x3c
-#define QEIC_CHIVEC            0x60
-
-/* Interrupt priority registers */
-#define CIPCC_SHIFT_PRI0       29
-#define CIPCC_SHIFT_PRI1       26
-#define CIPCC_SHIFT_PRI2       23
-#define CIPCC_SHIFT_PRI3       20
-#define CIPCC_SHIFT_PRI4       13
-#define CIPCC_SHIFT_PRI5       10
-#define CIPCC_SHIFT_PRI6       7
-#define CIPCC_SHIFT_PRI7       4
-
-/* CICR priority modes */
-#define CICR_GWCC              0x00040000
-#define CICR_GXCC              0x00020000
-#define CICR_GYCC              0x00010000
-#define CICR_GZCC              0x00080000
-#define CICR_GRTA              0x00200000
-#define CICR_GRTB              0x00400000
-#define CICR_HPIT_SHIFT                8
-#define CICR_HPIT_MASK         0x00000300
-#define CICR_HP_SHIFT          24
-#define CICR_HP_MASK           0x3f000000
-
-/* CICNR */
-#define CICNR_WCC1T_SHIFT      20
-#define CICNR_ZCC1T_SHIFT      28
-#define CICNR_YCC1T_SHIFT      12
-#define CICNR_XCC1T_SHIFT      4
-
-/* CRICR */
-#define CRICR_RTA1T_SHIFT      20
-#define CRICR_RTB1T_SHIFT      28
-
-/* Signal indicator */
-#define SIGNAL_MASK            3
-#define SIGNAL_HIGH            2
-#define SIGNAL_LOW             0
-
-struct qe_ic {
-       /* Control registers offset */
-       volatile u32 __iomem *regs;
-
-       /* The remapper for this QEIC */
-       struct irq_domain *irqhost;
-
-       /* The "linux" controller struct */
-       struct irq_chip hc_irq;
-
-       /* VIRQ numbers of QE high/low irqs */
-       unsigned int virq_high;
-       unsigned int virq_low;
-};
-
-/*
- * QE interrupt controller internal structure
- */
-struct qe_ic_info {
-       u32     mask;     /* bit for this source in its mask (CIMR/CRIMR) register */
-       u32     mask_reg; /* Mask register offset */
-       u8      pri_code; /* for grouped interrupt sources - the interrupt
-                            code as it appears in the group priority register */
-       u32     pri_reg;  /* Group priority register offset */
-};
-
-#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
deleted file mode 100644 (file)
index 7ea0174..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * arch/powerpc/sysdev/qe_lib/qe_io.c
- *
- * QE Parallel I/O ports configuration routines
- *
- * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Li Yang <LeoLi@freescale.com>
- * Based on code from Shlomi Gridish <gridish@freescale.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-
-#include <asm/io.h>
-#include <asm/qe.h>
-#include <asm/prom.h>
-#include <sysdev/fsl_soc.h>
-
-#undef DEBUG
-
-static struct qe_pio_regs __iomem *par_io;
-static int num_par_io_ports = 0;
-
-int par_io_init(struct device_node *np)
-{
-       struct resource res;
-       int ret;
-       const u32 *num_ports;
-
-       /* Map Parallel I/O ports registers */
-       ret = of_address_to_resource(np, 0, &res);
-       if (ret)
-               return ret;
-       par_io = ioremap(res.start, resource_size(&res));
-
-       num_ports = of_get_property(np, "num-ports", NULL);
-       if (num_ports)
-               num_par_io_ports = *num_ports;
-
-       return 0;
-}
-
-void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
-                        int open_drain, int assignment, int has_irq)
-{
-       u32 pin_mask1bit;
-       u32 pin_mask2bits;
-       u32 new_mask2bits;
-       u32 tmp_val;
-
-       /* calculate pin location for single and 2 bits information */
-       pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
-
-       /* Set open drain, if required */
-       tmp_val = in_be32(&par_io->cpodr);
-       if (open_drain)
-               out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
-       else
-               out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
-
-       /* define direction */
-       tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-               in_be32(&par_io->cpdir2) :
-               in_be32(&par_io->cpdir1);
-
-       /* get all bits mask for 2 bit per port */
-       pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
-                               (pin % (QE_PIO_PINS / 2) + 1) * 2));
-
-       /* Get the final mask we need for the right definition */
-       new_mask2bits = (u32) (dir << (QE_PIO_PINS -
-                               (pin % (QE_PIO_PINS / 2) + 1) * 2));
-
-       /* clear and set 2 bits mask */
-       if (pin > (QE_PIO_PINS / 2) - 1) {
-               out_be32(&par_io->cpdir2,
-                        ~pin_mask2bits & tmp_val);
-               tmp_val &= ~pin_mask2bits;
-               out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
-       } else {
-               out_be32(&par_io->cpdir1,
-                        ~pin_mask2bits & tmp_val);
-               tmp_val &= ~pin_mask2bits;
-               out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
-       }
-       /* define pin assignment */
-       tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-               in_be32(&par_io->cppar2) :
-               in_be32(&par_io->cppar1);
-
-       new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
-                       (pin % (QE_PIO_PINS / 2) + 1) * 2));
-       /* clear and set 2 bits mask */
-       if (pin > (QE_PIO_PINS / 2) - 1) {
-               out_be32(&par_io->cppar2,
-                        ~pin_mask2bits & tmp_val);
-               tmp_val &= ~pin_mask2bits;
-               out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
-       } else {
-               out_be32(&par_io->cppar1,
-                        ~pin_mask2bits & tmp_val);
-               tmp_val &= ~pin_mask2bits;
-               out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
-       }
-}
-EXPORT_SYMBOL(__par_io_config_pin);
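
The helper above packs one bit per pin, MSB first, for cpodr/cpdata, and
two bits per pin split across the cpdir1/cpdir2 (and cppar1/cppar2)
register pairs. A worked sketch of the mask arithmetic, assuming
QE_PIO_PINS is 32 as in the QE headers (qe_pio_masks is hypothetical):

static void qe_pio_masks(u8 pin, u32 *mask1, u32 *mask2, int *second_half)
{
	*mask1 = 1U << (QE_PIO_PINS - (pin + 1));	/* 1 bit per pin */
	*mask2 = 0x3U << (QE_PIO_PINS -
			  (pin % (QE_PIO_PINS / 2) + 1) * 2); /* 2 bits per pin */
	*second_half = pin > (QE_PIO_PINS / 2) - 1;	/* cpdir2/cppar2 half */
}

/* e.g. pin 0  -> mask1 0x80000000, mask2 0xc0000000, first registers;
 *      pin 16 -> mask1 0x00008000, mask2 0xc0000000, second registers */
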
-
-int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
-                     int assignment, int has_irq)
-{
-       if (!par_io || port >= num_par_io_ports)
-               return -EINVAL;
-
-       __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
-                           has_irq);
-       return 0;
-}
-EXPORT_SYMBOL(par_io_config_pin);
-
-int par_io_data_set(u8 port, u8 pin, u8 val)
-{
-       u32 pin_mask, tmp_val;
-
-       if (port >= num_par_io_ports)
-               return -EINVAL;
-       if (pin >= QE_PIO_PINS)
-               return -EINVAL;
-       /* calculate pin location */
-       pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
-
-       tmp_val = in_be32(&par_io[port].cpdata);
-
-       if (val == 0)           /* clear */
-               out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
-       else                    /* set */
-               out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
-
-       return 0;
-}
-EXPORT_SYMBOL(par_io_data_set);
-
-int par_io_of_config(struct device_node *np)
-{
-       struct device_node *pio;
-       const phandle *ph;
-       int pio_map_len;
-       const unsigned int *pio_map;
-
-       if (par_io == NULL) {
-               printk(KERN_ERR "par_io not initialized\n");
-               return -1;
-       }
-
-       ph = of_get_property(np, "pio-handle", NULL);
-       if (ph == NULL) {
-               printk(KERN_ERR "pio-handle not available\n");
-               return -1;
-       }
-
-       pio = of_find_node_by_phandle(*ph);
-
-       pio_map = of_get_property(pio, "pio-map", &pio_map_len);
-       if (pio_map == NULL) {
-               printk(KERN_ERR "pio-map is not set!\n");
-               return -1;
-       }
-       pio_map_len /= sizeof(unsigned int);
-       if ((pio_map_len % 6) != 0) {
-               printk(KERN_ERR "pio-map format wrong!\n");
-               return -1;
-       }
-
-       while (pio_map_len > 0) {
-               par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
-                               (int) pio_map[2], (int) pio_map[3],
-                               (int) pio_map[4], (int) pio_map[5]);
-               pio_map += 6;
-               pio_map_len -= 6;
-       }
-       of_node_put(pio);
-       return 0;
-}
-EXPORT_SYMBOL(par_io_of_config);
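
par_io_of_config() consumes "pio-map" as a flat list of 6-cell records
whose fields line up with par_io_config_pin()'s arguments, in order. A
sketch of one decoded entry (the struct is illustrative only; the
property itself is just a flat array of cells):

struct qe_pio_map_entry {
	u32 port;	 /* parallel I/O port number */
	u32 pin;	 /* pin within that port */
	u32 dir;	 /* direction code */
	u32 open_drain;	 /* nonzero selects open drain */
	u32 assignment;	 /* dedicated-function selector */
	u32 has_irq;	 /* interrupt capability flag */
};
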
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
deleted file mode 100644 (file)
index 621575b..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * arch/powerpc/sysdev/qe_lib/ucc.c
- *
- * QE UCC API Set - UCC specific routines implementations.
- *
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/stddef.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-#include <asm/ucc.h>
-
-int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
-{
-       unsigned long flags;
-
-       if (ucc_num > UCC_MAX_NUM - 1)
-               return -EINVAL;
-
-       spin_lock_irqsave(&cmxgcr_lock, flags);
-       clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
-               ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
-       spin_unlock_irqrestore(&cmxgcr_lock, flags);
-
-       return 0;
-}
-EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
-
-/* Configure the UCC to either Slow or Fast.
- *
- * A given UCC can be configured to support either "slow" devices (e.g. UART)
- * or "fast" devices (e.g. Ethernet).
- *
- * 'ucc_num' is the UCC number, from 0 - 7.
- *
- * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
- * must always be set to 1.
- */
-int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
-{
-       u8 __iomem *guemr;
-
-       /* The GUEMR register is at the same location for both slow and fast
-          devices, so we just use uccX.slow.guemr. */
-       switch (ucc_num) {
-       case 0: guemr = &qe_immr->ucc1.slow.guemr;
-               break;
-       case 1: guemr = &qe_immr->ucc2.slow.guemr;
-               break;
-       case 2: guemr = &qe_immr->ucc3.slow.guemr;
-               break;
-       case 3: guemr = &qe_immr->ucc4.slow.guemr;
-               break;
-       case 4: guemr = &qe_immr->ucc5.slow.guemr;
-               break;
-       case 5: guemr = &qe_immr->ucc6.slow.guemr;
-               break;
-       case 6: guemr = &qe_immr->ucc7.slow.guemr;
-               break;
-       case 7: guemr = &qe_immr->ucc8.slow.guemr;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
-               UCC_GUEMR_SET_RESERVED3 | speed);
-
-       return 0;
-}
-
-static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
-       unsigned int *reg_num, unsigned int *shift)
-{
-       unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
-
-       *reg_num = cmx + 1;
-       *cmxucr = &qe_immr->qmx.cmxucr[cmx];
-       *shift = 16 - 8 * (ucc_num & 2);
-}
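
get_cmxucr_reg() hides a non-obvious pairing: UCC1/UCC3 share CMXUCR1,
UCC2/UCC4 share CMXUCR3, UCC5/UCC7 share CMXUCR2 and UCC6/UCC8 share
CMXUCR4, with the first UCC of each pair in the upper half-word (shift
16). A self-contained program to tabulate the mapping, reusing the same
expressions:

#include <stdio.h>

int main(void)
{
	unsigned int ucc_num;

	for (ucc_num = 0; ucc_num < 8; ucc_num++) {
		unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
		unsigned int shift = 16 - 8 * (ucc_num & 2);

		printf("UCC%u -> CMXUCR%u, shift %u\n",
		       ucc_num + 1, cmx + 1, shift);
	}
	return 0;
}
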
-
-int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
-{
-       __be32 __iomem *cmxucr;
-       unsigned int reg_num;
-       unsigned int shift;
-
-       /* check if the UCC number is in range. */
-       if (ucc_num > UCC_MAX_NUM - 1)
-               return -EINVAL;
-
-       get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
-
-       if (set)
-               setbits32(cmxucr, mask << shift);
-       else
-               clrbits32(cmxucr, mask << shift);
-
-       return 0;
-}
-
-int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
-       enum comm_dir mode)
-{
-       __be32 __iomem *cmxucr;
-       unsigned int reg_num;
-       unsigned int shift;
-       u32 clock_bits = 0;
-
-       /* check if the UCC number is in range. */
-       if (ucc_num > UCC_MAX_NUM - 1)
-               return -EINVAL;
-
-       /* The communications direction must be RX or TX */
-       if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
-               return -EINVAL;
-
-       get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
-
-       switch (reg_num) {
-       case 1:
-               switch (clock) {
-               case QE_BRG1:   clock_bits = 1; break;
-               case QE_BRG2:   clock_bits = 2; break;
-               case QE_BRG7:   clock_bits = 3; break;
-               case QE_BRG8:   clock_bits = 4; break;
-               case QE_CLK9:   clock_bits = 5; break;
-               case QE_CLK10:  clock_bits = 6; break;
-               case QE_CLK11:  clock_bits = 7; break;
-               case QE_CLK12:  clock_bits = 8; break;
-               case QE_CLK15:  clock_bits = 9; break;
-               case QE_CLK16:  clock_bits = 10; break;
-               default: break;
-               }
-               break;
-       case 2:
-               switch (clock) {
-               case QE_BRG5:   clock_bits = 1; break;
-               case QE_BRG6:   clock_bits = 2; break;
-               case QE_BRG7:   clock_bits = 3; break;
-               case QE_BRG8:   clock_bits = 4; break;
-               case QE_CLK13:  clock_bits = 5; break;
-               case QE_CLK14:  clock_bits = 6; break;
-               case QE_CLK19:  clock_bits = 7; break;
-               case QE_CLK20:  clock_bits = 8; break;
-               case QE_CLK15:  clock_bits = 9; break;
-               case QE_CLK16:  clock_bits = 10; break;
-               default: break;
-               }
-               break;
-       case 3:
-               switch (clock) {
-               case QE_BRG9:   clock_bits = 1; break;
-               case QE_BRG10:  clock_bits = 2; break;
-               case QE_BRG15:  clock_bits = 3; break;
-               case QE_BRG16:  clock_bits = 4; break;
-               case QE_CLK3:   clock_bits = 5; break;
-               case QE_CLK4:   clock_bits = 6; break;
-               case QE_CLK17:  clock_bits = 7; break;
-               case QE_CLK18:  clock_bits = 8; break;
-               case QE_CLK7:   clock_bits = 9; break;
-               case QE_CLK8:   clock_bits = 10; break;
-               case QE_CLK16:  clock_bits = 11; break;
-               default: break;
-               }
-               break;
-       case 4:
-               switch (clock) {
-               case QE_BRG13:  clock_bits = 1; break;
-               case QE_BRG14:  clock_bits = 2; break;
-               case QE_BRG15:  clock_bits = 3; break;
-               case QE_BRG16:  clock_bits = 4; break;
-               case QE_CLK5:   clock_bits = 5; break;
-               case QE_CLK6:   clock_bits = 6; break;
-               case QE_CLK21:  clock_bits = 7; break;
-               case QE_CLK22:  clock_bits = 8; break;
-               case QE_CLK7:   clock_bits = 9; break;
-               case QE_CLK8:   clock_bits = 10; break;
-               case QE_CLK16:  clock_bits = 11; break;
-               default: break;
-               }
-               break;
-       default: break;
-       }
-
-       /* Check for invalid combination of clock and UCC number */
-       if (!clock_bits)
-               return -ENOENT;
-
-       if (mode == COMM_DIR_RX)
-               shift += 4;
-
-       clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
-               clock_bits << shift);
-
-       return 0;
-}
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
deleted file mode 100644 (file)
index 65aaf15..0000000
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- *
- * Description:
- * QE UCC Fast API Set - UCC Fast specific routines implementations.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/stddef.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/export.h>
-
-#include <asm/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-#include <asm/ucc.h>
-#include <asm/ucc_fast.h>
-
-void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
-{
-       printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
-       printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
-
-       printk(KERN_INFO "gumr  : addr=0x%p, val=0x%08x\n",
-                 &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
-       printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
-                 &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
-       printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
-                 &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
-       printk(KERN_INFO "udsr  : addr=0x%p, val=0x%04x\n",
-                 &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
-       printk(KERN_INFO "ucce  : addr=0x%p, val=0x%08x\n",
-                 &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
-       printk(KERN_INFO "uccm  : addr=0x%p, val=0x%08x\n",
-                 &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
-       printk(KERN_INFO "uccs  : addr=0x%p, val=0x%02x\n",
-                 &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
-       printk(KERN_INFO "urfb  : addr=0x%p, val=0x%08x\n",
-                 &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
-       printk(KERN_INFO "urfs  : addr=0x%p, val=0x%04x\n",
-                 &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
-       printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
-                 &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
-       printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
-                 &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
-       printk(KERN_INFO "utfb  : addr=0x%p, val=0x%08x\n",
-                 &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
-       printk(KERN_INFO "utfs  : addr=0x%p, val=0x%04x\n",
-                 &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
-       printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
-                 &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
-       printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
-                 &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
-       printk(KERN_INFO "utpt  : addr=0x%p, val=0x%04x\n",
-                 &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
-       printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
-                 &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
-       printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
-                 &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
-}
-EXPORT_SYMBOL(ucc_fast_dump_regs);
-
-u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
-{
-       switch (uccf_num) {
-       case 0: return QE_CR_SUBBLOCK_UCCFAST1;
-       case 1: return QE_CR_SUBBLOCK_UCCFAST2;
-       case 2: return QE_CR_SUBBLOCK_UCCFAST3;
-       case 3: return QE_CR_SUBBLOCK_UCCFAST4;
-       case 4: return QE_CR_SUBBLOCK_UCCFAST5;
-       case 5: return QE_CR_SUBBLOCK_UCCFAST6;
-       case 6: return QE_CR_SUBBLOCK_UCCFAST7;
-       case 7: return QE_CR_SUBBLOCK_UCCFAST8;
-       default: return QE_CR_SUBBLOCK_INVALID;
-       }
-}
-EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
-
-void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
-{
-       out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
-}
-EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
-
-void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
-{
-       struct ucc_fast __iomem *uf_regs;
-       u32 gumr;
-
-       uf_regs = uccf->uf_regs;
-
-       /* Enable reception and/or transmission on this UCC. */
-       gumr = in_be32(&uf_regs->gumr);
-       if (mode & COMM_DIR_TX) {
-               gumr |= UCC_FAST_GUMR_ENT;
-               uccf->enabled_tx = 1;
-       }
-       if (mode & COMM_DIR_RX) {
-               gumr |= UCC_FAST_GUMR_ENR;
-               uccf->enabled_rx = 1;
-       }
-       out_be32(&uf_regs->gumr, gumr);
-}
-EXPORT_SYMBOL(ucc_fast_enable);
-
-void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
-{
-       struct ucc_fast __iomem *uf_regs;
-       u32 gumr;
-
-       uf_regs = uccf->uf_regs;
-
-       /* Disable reception and/or transmission on this UCC. */
-       gumr = in_be32(&uf_regs->gumr);
-       if (mode & COMM_DIR_TX) {
-               gumr &= ~UCC_FAST_GUMR_ENT;
-               uccf->enabled_tx = 0;
-       }
-       if (mode & COMM_DIR_RX) {
-               gumr &= ~UCC_FAST_GUMR_ENR;
-               uccf->enabled_rx = 0;
-       }
-       out_be32(&uf_regs->gumr, gumr);
-}
-EXPORT_SYMBOL(ucc_fast_disable);
-
-int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
-{
-       struct ucc_fast_private *uccf;
-       struct ucc_fast __iomem *uf_regs;
-       u32 gumr;
-       int ret;
-
-       if (!uf_info)
-               return -EINVAL;
-
-       /* check if the UCC port number is in range. */
-       if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
-               printk(KERN_ERR "%s: illegal UCC number\n", __func__);
-               return -EINVAL;
-       }
-
-       /* Check that 'max_rx_buf_length' is properly aligned (4). */
-       if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
-               printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
-                       __func__);
-               return -EINVAL;
-       }
-
-       /* Validate Virtual Fifo register values */
-       if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
-               printk(KERN_ERR "%s: urfs is too small\n", __func__);
-               return -EINVAL;
-       }
-
-       if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-               printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
-               return -EINVAL;
-       }
-
-       if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-               printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
-               return -EINVAL;
-       }
-
-       if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-               printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
-               return -EINVAL;
-       }
-
-       if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-               printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
-               return -EINVAL;
-       }
-
-       if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-               printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
-               return -EINVAL;
-       }
-
-       if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-               printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
-               return -EINVAL;
-       }
-
-       uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
-       if (!uccf) {
-               printk(KERN_ERR "%s: Cannot allocate private data\n",
-                       __func__);
-               return -ENOMEM;
-       }
-
-       /* Fill fast UCC structure */
-       uccf->uf_info = uf_info;
-       /* Set the PHY base address */
-       uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
-       if (uccf->uf_regs == NULL) {
-               printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
-               kfree(uccf);
-               return -ENOMEM;
-       }
-
-       uccf->enabled_tx = 0;
-       uccf->enabled_rx = 0;
-       uccf->stopped_tx = 0;
-       uccf->stopped_rx = 0;
-       uf_regs = uccf->uf_regs;
-       uccf->p_ucce = &uf_regs->ucce;
-       uccf->p_uccm = &uf_regs->uccm;
-#ifdef CONFIG_UGETH_TX_ON_DEMAND
-       uccf->p_utodr = &uf_regs->utodr;
-#endif
-#ifdef STATISTICS
-       uccf->tx_frames = 0;
-       uccf->rx_frames = 0;
-       uccf->rx_discarded = 0;
-#endif                         /* STATISTICS */
-
-       /* Set UCC to fast type */
-       ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
-       if (ret) {
-               printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
-               ucc_fast_free(uccf);
-               return ret;
-       }
-
-       uccf->mrblr = uf_info->max_rx_buf_length;
-
-       /* Set GUMR */
-       /* For more details see the hardware spec. */
-       gumr = uf_info->ttx_trx;
-       if (uf_info->tci)
-               gumr |= UCC_FAST_GUMR_TCI;
-       if (uf_info->cdp)
-               gumr |= UCC_FAST_GUMR_CDP;
-       if (uf_info->ctsp)
-               gumr |= UCC_FAST_GUMR_CTSP;
-       if (uf_info->cds)
-               gumr |= UCC_FAST_GUMR_CDS;
-       if (uf_info->ctss)
-               gumr |= UCC_FAST_GUMR_CTSS;
-       if (uf_info->txsy)
-               gumr |= UCC_FAST_GUMR_TXSY;
-       if (uf_info->rsyn)
-               gumr |= UCC_FAST_GUMR_RSYN;
-       gumr |= uf_info->synl;
-       if (uf_info->rtsm)
-               gumr |= UCC_FAST_GUMR_RTSM;
-       gumr |= uf_info->renc;
-       if (uf_info->revd)
-               gumr |= UCC_FAST_GUMR_REVD;
-       gumr |= uf_info->tenc;
-       gumr |= uf_info->tcrc;
-       gumr |= uf_info->mode;
-       out_be32(&uf_regs->gumr, gumr);
-
-       /* Allocate memory for Tx Virtual Fifo */
-       uccf->ucc_fast_tx_virtual_fifo_base_offset =
-           qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-       if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
-               printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
-                       __func__);
-               uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
-               ucc_fast_free(uccf);
-               return -ENOMEM;
-       }
-
-       /* Allocate memory for Rx Virtual Fifo */
-       uccf->ucc_fast_rx_virtual_fifo_base_offset =
-               qe_muram_alloc(uf_info->urfs +
-                          UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
-                          UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-       if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
-               printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
-                       __func__);
-               uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
-               ucc_fast_free(uccf);
-               return -ENOMEM;
-       }
-
-       /* Set Virtual Fifo registers */
-       out_be16(&uf_regs->urfs, uf_info->urfs);
-       out_be16(&uf_regs->urfet, uf_info->urfet);
-       out_be16(&uf_regs->urfset, uf_info->urfset);
-       out_be16(&uf_regs->utfs, uf_info->utfs);
-       out_be16(&uf_regs->utfet, uf_info->utfet);
-       out_be16(&uf_regs->utftt, uf_info->utftt);
-       /* utfb, urfb are offsets from MURAM base */
-       out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
-       out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
-
-       /* Mux clocking */
-       /* Grant Support */
-       ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
-       /* Breakpoint Support */
-       ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
-       /* Set Tsa or NMSI mode. */
-       ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
-       /* If NMSI (not Tsa), set Tx and Rx clock. */
-       if (!uf_info->tsa) {
-               /* Rx clock routing */
-               if ((uf_info->rx_clock != QE_CLK_NONE) &&
-                   ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
-                                       COMM_DIR_RX)) {
-                       printk(KERN_ERR "%s: illegal value for RX clock\n",
-                              __func__);
-                       ucc_fast_free(uccf);
-                       return -EINVAL;
-               }
-               /* Tx clock routing */
-               if ((uf_info->tx_clock != QE_CLK_NONE) &&
-                   ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
-                                       COMM_DIR_TX)) {
-                       printk(KERN_ERR "%s: illegal value for TX clock\n",
-                              __func__);
-                       ucc_fast_free(uccf);
-                       return -EINVAL;
-               }
-       }
-
-       /* Set interrupt mask register at UCC level. */
-       out_be32(&uf_regs->uccm, uf_info->uccm_mask);
-
-       /* First, clear anything pending at UCC level,
-        * otherwise, old garbage may come through
-        * as soon as the dam is opened. */
-
-       /* Writing '1' clears */
-       out_be32(&uf_regs->ucce, 0xffffffff);
-
-       *uccf_ret = uccf;
-       return 0;
-}
-EXPORT_SYMBOL(ucc_fast_init);
-
-void ucc_fast_free(struct ucc_fast_private * uccf)
-{
-       if (!uccf)
-               return;
-
-       if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
-               qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
-
-       if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
-               qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
-
-       if (uccf->uf_regs)
-               iounmap(uccf->uf_regs);
-
-       kfree(uccf);
-}
-EXPORT_SYMBOL(ucc_fast_free);
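
ucc_fast_init() validates every virtual-FIFO parameter with the same
'value & (alignment - 1)' test, which is correct because
UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT is a power of two. The idiom, as a
sketch (hypothetical helper name):

static inline int qe_is_aligned(u32 value, u32 alignment)
{
	/* only valid when 'alignment' is a power of two */
	return (value & (alignment - 1)) == 0;
}
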
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
deleted file mode 100644 (file)
index 5f91628..0000000
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors:    Shlomi Gridish <gridish@freescale.com>
- *             Li Yang <leoli@freescale.com>
- *
- * Description:
- * QE UCC Slow API Set - UCC Slow specific routines implementations.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/stddef.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/export.h>
-
-#include <asm/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-#include <asm/ucc.h>
-#include <asm/ucc_slow.h>
-
-u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
-{
-       switch (uccs_num) {
-       case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
-       case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
-       case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
-       case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
-       case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
-       case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
-       case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
-       case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
-       default: return QE_CR_SUBBLOCK_INVALID;
-       }
-}
-EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
-
-void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
-{
-       struct ucc_slow_info *us_info = uccs->us_info;
-       u32 id;
-
-       id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
-       qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
-                        QE_CR_PROTOCOL_UNSPECIFIED, 0);
-}
-EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
-
-void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
-{
-       struct ucc_slow_info *us_info = uccs->us_info;
-       u32 id;
-
-       id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
-       qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
-}
-EXPORT_SYMBOL(ucc_slow_stop_tx);
-
-void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
-{
-       struct ucc_slow_info *us_info = uccs->us_info;
-       u32 id;
-
-       id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
-       qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
-}
-EXPORT_SYMBOL(ucc_slow_restart_tx);
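
The three Tx helpers above differ only in the command code they pass to
qe_issue_cmd(); the subblock id is always derived from the UCC number.
Their shared shape, condensed as a sketch (ucc_slow_tx_cmd is a
hypothetical name):

static void ucc_slow_tx_cmd(struct ucc_slow_private *uccs, u32 cmd)
{
	u32 id = ucc_slow_get_qe_cr_subblock(uccs->us_info->ucc_num);

	qe_issue_cmd(cmd, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
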
-
-void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
-{
-       struct ucc_slow *us_regs;
-       u32 gumr_l;
-
-       us_regs = uccs->us_regs;
-
-       /* Enable reception and/or transmission on this UCC. */
-       gumr_l = in_be32(&us_regs->gumr_l);
-       if (mode & COMM_DIR_TX) {
-               gumr_l |= UCC_SLOW_GUMR_L_ENT;
-               uccs->enabled_tx = 1;
-       }
-       if (mode & COMM_DIR_RX) {
-               gumr_l |= UCC_SLOW_GUMR_L_ENR;
-               uccs->enabled_rx = 1;
-       }
-       out_be32(&us_regs->gumr_l, gumr_l);
-}
-EXPORT_SYMBOL(ucc_slow_enable);
-
-void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
-{
-       struct ucc_slow *us_regs;
-       u32 gumr_l;
-
-       us_regs = uccs->us_regs;
-
-       /* Disable reception and/or transmission on this UCC. */
-       gumr_l = in_be32(&us_regs->gumr_l);
-       if (mode & COMM_DIR_TX) {
-               gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
-               uccs->enabled_tx = 0;
-       }
-       if (mode & COMM_DIR_RX) {
-               gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
-               uccs->enabled_rx = 0;
-       }
-       out_be32(&us_regs->gumr_l, gumr_l);
-}
-EXPORT_SYMBOL(ucc_slow_disable);
-
-/* Initialize the UCC for Slow operations
- *
- * The caller should initialize the following us_info
- */
-int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
-{
-       struct ucc_slow_private *uccs;
-       u32 i;
-       struct ucc_slow __iomem *us_regs;
-       u32 gumr;
-       struct qe_bd *bd;
-       u32 id;
-       u32 command;
-       int ret = 0;
-
-       if (!us_info)
-               return -EINVAL;
-
-       /* check if the UCC port number is in range. */
-       if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
-               printk(KERN_ERR "%s: illegal UCC number\n", __func__);
-               return -EINVAL;
-       }
-
-       /*
-        * Set mrblr
-        * Check that 'max_rx_buf_length' is properly aligned (4), unless
-        * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
-        * case when QE accepts 32 bits at a time.
-        */
-       if ((!us_info->rfw) &&
-               (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
-               printk(KERN_ERR "max_rx_buf_length not aligned.\n");
-               return -EINVAL;
-       }
-
-       uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
-       if (!uccs) {
-               printk(KERN_ERR "%s: Cannot allocate private data\n",
-                       __func__);
-               return -ENOMEM;
-       }
-
-       /* Fill slow UCC structure */
-       uccs->us_info = us_info;
-       /* Set the PHY base address */
-       uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
-       if (uccs->us_regs == NULL) {
-               printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
-               kfree(uccs);
-               return -ENOMEM;
-       }
-
-       uccs->saved_uccm = 0;
-       uccs->p_rx_frame = 0;
-       us_regs = uccs->us_regs;
-       uccs->p_ucce = (u16 *) & (us_regs->ucce);
-       uccs->p_uccm = (u16 *) & (us_regs->uccm);
-#ifdef STATISTICS
-       uccs->rx_frames = 0;
-       uccs->tx_frames = 0;
-       uccs->rx_discarded = 0;
-#endif                         /* STATISTICS */
-
-       /* Get PRAM base */
-       uccs->us_pram_offset =
-               qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
-       if (IS_ERR_VALUE(uccs->us_pram_offset)) {
-               printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
-               ucc_slow_free(uccs);
-               return -ENOMEM;
-       }
-       id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
-       qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
-                    uccs->us_pram_offset);
-
-       uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
-
-       /* Set UCC to slow type */
-       ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
-       if (ret) {
-               printk(KERN_ERR "%s: cannot set UCC type", __func__);
-               ucc_slow_free(uccs);
-               return ret;
-       }
-
-       out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
-
-       INIT_LIST_HEAD(&uccs->confQ);
-
-       /* Allocate BDs. */
-       uccs->rx_base_offset =
-               qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
-                               QE_ALIGNMENT_OF_BD);
-       if (IS_ERR_VALUE(uccs->rx_base_offset)) {
-               printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
-                       us_info->rx_bd_ring_len);
-               uccs->rx_base_offset = 0;
-               ucc_slow_free(uccs);
-               return -ENOMEM;
-       }
-
-       uccs->tx_base_offset =
-               qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
-                       QE_ALIGNMENT_OF_BD);
-       if (IS_ERR_VALUE(uccs->tx_base_offset)) {
-               printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
-               uccs->tx_base_offset = 0;
-               ucc_slow_free(uccs);
-               return -ENOMEM;
-       }
-
-       /* Init Tx bds */
-       bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
-       for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
-               /* clear bd buffer */
-               out_be32(&bd->buf, 0);
-               /* set bd status and length */
-               out_be32((u32 *) bd, 0);
-               bd++;
-       }
-       /* for last BD set Wrap bit */
-       out_be32(&bd->buf, 0);
-       out_be32((u32 *) bd, cpu_to_be32(T_W));
-
-       /* Init Rx bds */
-       bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
-       for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
-               /* set bd status and length */
-               out_be32((u32*)bd, 0);
-               /* clear bd buffer */
-               out_be32(&bd->buf, 0);
-               bd++;
-       }
-       /* for last BD set Wrap bit */
-       out_be32((u32*)bd, cpu_to_be32(R_W));
-       out_be32(&bd->buf, 0);
-
-       /* Set GUMR (For more details see the hardware spec.). */
-       /* gumr_h */
-       gumr = us_info->tcrc;
-       if (us_info->cdp)
-               gumr |= UCC_SLOW_GUMR_H_CDP;
-       if (us_info->ctsp)
-               gumr |= UCC_SLOW_GUMR_H_CTSP;
-       if (us_info->cds)
-               gumr |= UCC_SLOW_GUMR_H_CDS;
-       if (us_info->ctss)
-               gumr |= UCC_SLOW_GUMR_H_CTSS;
-       if (us_info->tfl)
-               gumr |= UCC_SLOW_GUMR_H_TFL;
-       if (us_info->rfw)
-               gumr |= UCC_SLOW_GUMR_H_RFW;
-       if (us_info->txsy)
-               gumr |= UCC_SLOW_GUMR_H_TXSY;
-       if (us_info->rtsm)
-               gumr |= UCC_SLOW_GUMR_H_RTSM;
-       out_be32(&us_regs->gumr_h, gumr);
-
-       /* gumr_l */
-       gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
-               us_info->diag | us_info->mode;
-       if (us_info->tci)
-               gumr |= UCC_SLOW_GUMR_L_TCI;
-       if (us_info->rinv)
-               gumr |= UCC_SLOW_GUMR_L_RINV;
-       if (us_info->tinv)
-               gumr |= UCC_SLOW_GUMR_L_TINV;
-       if (us_info->tend)
-               gumr |= UCC_SLOW_GUMR_L_TEND;
-       out_be32(&us_regs->gumr_l, gumr);
-
-       /* Function code registers */
-
-       /* if the data is in cachable memory, the 'global' */
-       /* in the function code should be set. */
-       uccs->us_pram->tbmr = UCC_BMR_BO_BE;
-       uccs->us_pram->rbmr = UCC_BMR_BO_BE;
-
-       /* rbase, tbase are offsets from MURAM base */
-       out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
-       out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
-
-       /* Mux clocking */
-       /* Grant Support */
-       ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
-       /* Breakpoint Support */
-       ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
-       /* Set Tsa or NMSI mode. */
-       ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
-       /* If NMSI (not Tsa), set Tx and Rx clock. */
-       if (!us_info->tsa) {
-               /* Rx clock routing */
-               if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
-                                       COMM_DIR_RX)) {
-                       printk(KERN_ERR "%s: illegal value for RX clock\n",
-                              __func__);
-                       ucc_slow_free(uccs);
-                       return -EINVAL;
-               }
-               /* Tx clock routing */
-               if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
-                                       COMM_DIR_TX)) {
-                       printk(KERN_ERR "%s: illegal value for TX clock\n",
-                              __func__);
-                       ucc_slow_free(uccs);
-                       return -EINVAL;
-               }
-       }
-
-       /* Set interrupt mask register at UCC level. */
-       out_be16(&us_regs->uccm, us_info->uccm_mask);
-
-       /* First, clear anything pending at UCC level,
-        * otherwise, old garbage may come through
-        * as soon as the dam is opened. */
-
-       /* Writing '1' clears */
-       out_be16(&us_regs->ucce, 0xffff);
-
-       /* Issue QE Init command */
-       if (us_info->init_tx && us_info->init_rx)
-               command = QE_INIT_TX_RX;
-       else if (us_info->init_tx)
-               command = QE_INIT_TX;
-       else
-               command = QE_INIT_RX;   /* We know at least one is TRUE */
-
-       qe_issue_cmd(command, id, us_info->protocol, 0);
-
-       *uccs_ret = uccs;
-       return 0;
-}
-EXPORT_SYMBOL(ucc_slow_init);
-
-void ucc_slow_free(struct ucc_slow_private * uccs)
-{
-       if (!uccs)
-               return;
-
-       if (uccs->rx_base_offset)
-               qe_muram_free(uccs->rx_base_offset);
-
-       if (uccs->tx_base_offset)
-               qe_muram_free(uccs->tx_base_offset);
-
-       if (uccs->us_pram)
-               qe_muram_free(uccs->us_pram_offset);
-
-       if (uccs->us_regs)
-               iounmap(uccs->us_regs);
-
-       kfree(uccs);
-}
-EXPORT_SYMBOL(ucc_slow_free);
-
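
Note: the deleted ucc_slow.c above carried the entire slow-UCC lifecycle
(init, per-direction enable/disable, graceful stop, free). A minimal sketch
of how a caller drives that API; the function names and the init_tx/init_rx
choices are illustrative assumptions, not taken from this commit:

/* Editorial sketch, not part of the commit. */
static struct ucc_slow_private *uccs;

static int example_probe(struct ucc_slow_info *us_info)
{
        int ret;

        us_info->init_tx = 1;   /* ucc_slow_init() then issues QE_INIT_TX_RX */
        us_info->init_rx = 1;
        ret = ucc_slow_init(us_info, &uccs);
        if (ret)
                return ret;

        ucc_slow_enable(uccs, COMM_DIR_RX | COMM_DIR_TX);
        return 0;
}

static void example_remove(void)
{
        ucc_slow_graceful_stop_tx(uccs);
        ucc_slow_disable(uccs, COMM_DIR_RX | COMM_DIR_TX);
        ucc_slow_free(uccs);
}
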
diff --git a/arch/powerpc/sysdev/qe_lib/usb.c b/arch/powerpc/sysdev/qe_lib/usb.c
deleted file mode 100644 (file)
index 27f23bd..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * QE USB routines
- *
- * Copyright 2006 Freescale Semiconductor, Inc.
- *               Shlomi Gridish <gridish@freescale.com>
- *               Jerry Huang <Chang-Ming.Huang@freescale.com>
- * Copyright (c) MontaVista Software, Inc. 2008.
- *               Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/export.h>
-#include <linux/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-int qe_usb_clock_set(enum qe_clock clk, int rate)
-{
-       struct qe_mux __iomem *mux = &qe_immr->qmx;
-       unsigned long flags;
-       u32 val;
-
-       switch (clk) {
-       case QE_CLK3:  val = QE_CMXGCR_USBCS_CLK3;  break;
-       case QE_CLK5:  val = QE_CMXGCR_USBCS_CLK5;  break;
-       case QE_CLK7:  val = QE_CMXGCR_USBCS_CLK7;  break;
-       case QE_CLK9:  val = QE_CMXGCR_USBCS_CLK9;  break;
-       case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break;
-       case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break;
-       case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break;
-       case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break;
-       case QE_BRG9:  val = QE_CMXGCR_USBCS_BRG9;  break;
-       case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break;
-       default:
-               pr_err("%s: requested unknown clock %d\n", __func__, clk);
-               return -EINVAL;
-       }
-
-       if (qe_clock_is_brg(clk))
-               qe_setbrg(clk, rate, 1);
-
-       spin_lock_irqsave(&cmxgcr_lock, flags);
-
-       clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
-
-       spin_unlock_irqrestore(&cmxgcr_lock, flags);
-
-       return 0;
-}
-EXPORT_SYMBOL(qe_usb_clock_set);
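
Note: qe_usb_clock_set() was the single entry point for routing one of the
QE clocks or BRGs to the USB block; when the source is a BRG it also
programs the BRG rate via qe_setbrg(). A caller sketch (the BRG choice and
the 48 MHz rate are assumptions for illustration):

/* Editorial sketch, not part of the commit. */
static int example_usb_clock_setup(void)
{
        return qe_usb_clock_set(QE_BRG9, 48000000);
}
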
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 786bf01..07a8508 100644 (file)
@@ -320,6 +320,7 @@ static inline void disable_surveillance(void)
 #ifdef CONFIG_PPC_PSERIES
        /* Since this can't be a module, args should end up below 4GB. */
        static struct rtas_args args;
+       int token;
 
        /*
         * At this point we have got all the cpus we can into
@@ -328,17 +329,12 @@ static inline void disable_surveillance(void)
         * If we did try to take rtas.lock there would be a
         * real possibility of deadlock.
         */
-       args.token = rtas_token("set-indicator");
-       if (args.token == RTAS_UNKNOWN_SERVICE)
+       token = rtas_token("set-indicator");
+       if (token == RTAS_UNKNOWN_SERVICE)
                return;
-       args.token = cpu_to_be32(args.token);
-       args.nargs = cpu_to_be32(3);
-       args.nret = cpu_to_be32(1);
-       args.rets = &args.args[3];
-       args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
-       args.args[1] = 0;
-       args.args[2] = 0;
-       enter_rtas(__pa(&args));
+
+       rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
+
 #endif /* CONFIG_PPC_PSERIES */
 }
 
@@ -1522,6 +1518,8 @@ static void excprint(struct pt_regs *fp)
 
        if (trap == 0x700)
                print_bug_trap(fp);
+
+       printf(linux_banner);
 }
 
 static void prregs(struct pt_regs *fp)
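
Note: rtas_call_unlocked() is the helper this hunk switches to. It performs,
on the caller-supplied rtas_args, the marshalling the removed lines did by
hand, without taking rtas.lock -- the point here, since with the other CPUs
stopped that lock may never be released. Roughly (an editorial sketch of the
helper's job, not its actual implementation; see arch/powerpc/kernel/rtas.c):

/* Editorial sketch, not part of the commit. */
static void example_marshal(struct rtas_args *args, int token,
                            int nargs, int nret, const u32 *input)
{
        int i;

        args->token = cpu_to_be32(token);
        args->nargs = cpu_to_be32(nargs);
        args->nret  = cpu_to_be32(nret);
        args->rets  = &args->args[nargs];
        for (i = 0; i < nargs; i++)
                args->args[i] = cpu_to_be32(input[i]);
        enter_rtas(__pa(args));
}
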
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index cb50138..547890f 100644 (file)
@@ -586,7 +586,7 @@ static int __init powernv_cpufreq_init(void)
        int rc = 0;
 
        /* Don't probe on pseries (guest) platforms */
-       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+       if (!firmware_has_feature(FW_FEATURE_OPAL))
                return -ENODEV;
 
        /* Discover pstates from device tree and init */
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 845bafc..e12dc30 100644 (file)
@@ -264,7 +264,7 @@ static int powernv_idle_probe(void)
        if (cpuidle_disable != IDLE_NO_OVERRIDE)
                return -ENODEV;
 
-       if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+       if (firmware_has_feature(FW_FEATURE_OPAL)) {
                cpuidle_state_table = powernv_states;
                /* Device tree can indicate more idle states */
                max_idle_state = powernv_add_idle_states();
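
Note: both guards above serve the same purpose -- bail out on pseries
guests, where the platform speaks RTAS rather than OPAL. With the
version-specific OPALv3 bit gone, FW_FEATURE_OPAL alone identifies a
bare-metal powernv host. The pattern (function name is a placeholder):

/* Editorial sketch, not part of the commit. */
static int example_powernv_probe(void)
{
        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return -ENODEV; /* not running on OPAL firmware */

        return 0;
}
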
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 263af70..022c7ab 100644 (file)
@@ -83,10 +83,10 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
 
        preempt_disable();
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
 
@@ -103,9 +103,9 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
        } else {
                preempt_disable();
                pagefault_disable();
-               enable_kernel_altivec();
                enable_kernel_vsx();
                aes_p8_encrypt(src, dst, &ctx->enc_key);
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
@@ -120,9 +120,9 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
        } else {
                preempt_disable();
                pagefault_disable();
-               enable_kernel_altivec();
                enable_kernel_vsx();
                aes_p8_decrypt(src, dst, &ctx->dec_key);
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 78a9786..495577b 100644 (file)
@@ -84,10 +84,10 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 
        preempt_disable();
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
 
@@ -115,7 +115,6 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
        } else {
                preempt_disable();
                pagefault_disable();
-               enable_kernel_altivec();
                enable_kernel_vsx();
 
                blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -129,6 +128,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
 
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
@@ -156,7 +156,6 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
        } else {
                preempt_disable();
                pagefault_disable();
-               enable_kernel_altivec();
                enable_kernel_vsx();
 
                blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -170,6 +169,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
 
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 1febc4f..0a3c1b0 100644 (file)
@@ -81,9 +81,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
        struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       disable_kernel_vsx();
        pagefault_enable();
 
        ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
@@ -100,9 +100,9 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
        unsigned int nbytes = walk->nbytes;
 
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
        aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+       disable_kernel_vsx();
        pagefault_enable();
 
        crypto_xor(keystream, src, nbytes);
@@ -133,7 +133,6 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
                while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                        pagefault_disable();
-                       enable_kernel_altivec();
                        enable_kernel_vsx();
                        aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
                                                    walk.dst.virt.addr,
@@ -142,6 +141,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                                                    AES_BLOCK_SIZE,
                                                    &ctx->enc_key,
                                                    walk.iv);
+                       disable_kernel_vsx();
                        pagefault_enable();
 
                        /* We need to update IV mostly for last bytes/round */
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 2183a2e..6c999cb 100644 (file)
@@ -118,10 +118,9 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
 
        preempt_disable();
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
-       enable_kernel_fp();
        gcm_init_p8(ctx->htable, (const u64 *) key);
+       disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
        return crypto_shash_setkey(ctx->fallback, key, keylen);
@@ -149,11 +148,10 @@ static int p8_ghash_update(struct shash_desc *desc,
                               GHASH_DIGEST_SIZE - dctx->bytes);
                        preempt_disable();
                        pagefault_disable();
-                       enable_kernel_altivec();
                        enable_kernel_vsx();
-                       enable_kernel_fp();
                        gcm_ghash_p8(dctx->shash, ctx->htable,
                                     dctx->buffer, GHASH_DIGEST_SIZE);
+                       disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();
                        src += GHASH_DIGEST_SIZE - dctx->bytes;
@@ -164,10 +162,9 @@ static int p8_ghash_update(struct shash_desc *desc,
                if (len) {
                        preempt_disable();
                        pagefault_disable();
-                       enable_kernel_altivec();
                        enable_kernel_vsx();
-                       enable_kernel_fp();
                        gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+                       disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();
                        src += len;
@@ -195,11 +192,10 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
                                dctx->buffer[i] = 0;
                        preempt_disable();
                        pagefault_disable();
-                       enable_kernel_altivec();
                        enable_kernel_vsx();
-                       enable_kernel_fp();
                        gcm_ghash_p8(dctx->shash, ctx->htable,
                                     dctx->buffer, GHASH_DIGEST_SIZE);
+                       disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();
                        dctx->bytes = 0;
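
Note: all four vmx hunks above converge on one discipline: the P8 crypto
primitives run only between enable_kernel_vsx() and the newly used
disable_kernel_vsx(), with preemption and page faults off; the separate
enable_kernel_altivec()/enable_kernel_fp() calls are dropped as redundant
with the VSX enable. Distilled:

/* Editorial sketch, not part of the commit. */
preempt_disable();
pagefault_disable();
enable_kernel_vsx();

/* ... VSX-accelerated work, e.g. aes_p8_encrypt(src, dst, &ctx->enc_key) ... */

disable_kernel_vsx();
pagefault_enable();
preempt_enable();
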
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 048901a..caaec65 100644 (file)
@@ -582,6 +582,7 @@ static struct of_device_id rackmeter_match[] = {
        { .name = "i2s" },
        { }
 };
+MODULE_DEVICE_TABLE(of, rackmeter_match);
 
 static struct macio_driver rackmeter_driver = {
        .driver = {
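
Note: without a MODULE_DEVICE_TABLE() entry the OF match table is never
exported in the module's alias list, so userspace cannot autoload the
driver when the device appears. Generic shape (the compatible string is a
placeholder):

/* Editorial sketch, not part of the commit. */
static const struct of_device_id example_match[] = {
        { .compatible = "example,device" },
        { }
};
MODULE_DEVICE_TABLE(of, example_match);
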
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index f9512bf..01ee736 100644 (file)
@@ -425,8 +425,9 @@ static int __init via_pmu_start(void)
                        gpio_irq = irq_of_parse_and_map(gpio_node, 0);
 
                if (gpio_irq != NO_IRQ) {
-                       if (request_irq(gpio_irq, gpio1_interrupt, IRQF_TIMER,
-                                       "GPIO1 ADB", (void *)0))
+                       if (request_irq(gpio_irq, gpio1_interrupt,
+                                       IRQF_NO_SUSPEND, "GPIO1 ADB",
+                                       (void *)0))
                                printk(KERN_ERR "pmu: can't get irq %d"
                                       " (GPIO1)\n", gpio_irq);
                        else
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 6982f60..be2ac5c 100644 (file)
@@ -1,4 +1,5 @@
-ccflags-y := -Werror -Wno-unused-const-variable
+ccflags-y                      := $(call cc-disable-warning, unused-const-variable)
+ccflags-$(CONFIG_PPC_WERROR)   += -Werror
 
 cxl-y                          += main.o file.o irq.o fault.o native.o
 cxl-y                          += context.o sysfs.o debugfs.o pci.o trace.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 103baf0..ea3eeb7 100644 (file)
@@ -25,7 +25,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 
        afu = cxl_pci_to_afu(dev);
 
-       get_device(&afu->dev);
        ctx = cxl_context_alloc();
        if (IS_ERR(ctx)) {
                rc = PTR_ERR(ctx);
@@ -61,7 +60,6 @@ err_mapping:
 err_ctx:
        kfree(ctx);
 err_dev:
-       put_device(&afu->dev);
        return ERR_PTR(rc);
 }
 EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -87,8 +85,6 @@ int cxl_release_context(struct cxl_context *ctx)
        if (ctx->status >= STARTED)
                return -EBUSY;
 
-       put_device(&ctx->afu->dev);
-
        cxl_context_free(ctx);
 
        return 0;
@@ -176,7 +172,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
 
        if (task) {
                ctx->pid = get_task_pid(task, PIDTYPE_PID);
-               get_pid(ctx->pid);
+               ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
                kernel = false;
        }
 
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2faa127..262b88e 100644 (file)
@@ -42,7 +42,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
        spin_lock_init(&ctx->sste_lock);
        ctx->afu = afu;
        ctx->master = master;
-       ctx->pid = NULL; /* Set in start work ioctl */
+       ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
        mutex_init(&ctx->mapping_lock);
        ctx->mapping = mapping;
 
@@ -97,6 +97,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
        ctx->pe = i;
        ctx->elem = &ctx->afu->spa[i];
        ctx->pe_inserted = false;
+
+       /*
+        * Take a reference on the AFU so that it stays alive at least
+        * until this context is reclaimed in reclaim_ctx().
+        */
+       cxl_afu_get(afu);
        return 0;
 }
 
@@ -211,7 +217,11 @@ int __detach_context(struct cxl_context *ctx)
        WARN_ON(cxl_detach_process(ctx) &&
                cxl_adapter_link_ok(ctx->afu->adapter));
        flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+
+       /* release the reference to the group leader and mm handling pid */
        put_pid(ctx->pid);
+       put_pid(ctx->glpid);
+
        cxl_ctx_put();
        return 0;
 }
@@ -278,6 +288,9 @@ static void reclaim_ctx(struct rcu_head *rcu)
        if (ctx->irq_bitmap)
                kfree(ctx->irq_bitmap);
 
+       /* Drop ref to the afu device taken during cxl_context_init */
+       cxl_afu_put(ctx->afu);
+
        kfree(ctx);
 }
 
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 0cfb9c1..a521bc7 100644 (file)
@@ -403,6 +403,18 @@ struct cxl_afu {
        bool enabled;
 };
 
+/* AFU refcount management */
+static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
+{
+
+       return (get_device(&afu->dev) == NULL) ? NULL : afu;
+}
+
+static inline void  cxl_afu_put(struct cxl_afu *afu)
+{
+       put_device(&afu->dev);
+}
+
 
 struct cxl_irq_name {
        struct list_head list;
@@ -433,6 +445,9 @@ struct cxl_context {
        unsigned int sst_size, sst_lru;
 
        wait_queue_head_t wq;
+       /* pid of the group leader associated with the pid */
+       struct pid *glpid;
+       /* use mm context associated with this pid for ds faults */
        struct pid *pid;
        spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
        /* Only used in PR mode */
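
Note: the new cxl_afu_get()/cxl_afu_put() helpers pin the AFU's struct
device; elsewhere in this diff the reference is taken in cxl_context_init()
and dropped in reclaim_ctx(), so a context can no longer outlive its AFU.
In isolation (the function name is a placeholder):

/* Editorial sketch, not part of the commit. */
static int example_pin_afu(struct cxl_afu *afu)
{
        if (!cxl_afu_get(afu))          /* takes a ref on afu->dev */
                return -ENODEV;

        /* ... afu is safe to use here ... */

        cxl_afu_put(afu);               /* balance the reference */
        return 0;
}
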
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 25a5418..81c3f75 100644 (file)
@@ -166,13 +166,92 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
        cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
 }
 
+/*
+ * Returns the mm_struct corresponding to the context ctx via ctx->pid.
+ * In case the task has exited, we use the task group leader accessible
+ * via ctx->glpid to find the next task in the thread group that has a
+ * valid mm_struct associated with it. If such a task is found, ctx->pid
+ * is updated to use that task for subsequent translations. If no valid
+ * mm_struct is found in the task group to service the fault, NULL is
+ * returned.
+ */
+static struct mm_struct *get_mem_context(struct cxl_context *ctx)
+{
+       struct task_struct *task = NULL;
+       struct mm_struct *mm = NULL;
+       struct pid *old_pid = ctx->pid;
+
+       if (old_pid == NULL) {
+               pr_warn("%s: Invalid context for pe=%d\n",
+                        __func__, ctx->pe);
+               return NULL;
+       }
+
+       task = get_pid_task(old_pid, PIDTYPE_PID);
+
+       /*
+        * pid_alive may look racy but this saves us from costly
+        * get_task_mm when the task is a zombie. In worst case
+        * we may think a task is alive, which is about to die
+        * but get_task_mm will return NULL.
+        */
+       if (task != NULL && pid_alive(task))
+               mm = get_task_mm(task);
+
+       /* release the task struct that was taken earlier */
+       if (task)
+               put_task_struct(task);
+       else
+               pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
+                       __func__, pid_nr(old_pid), ctx->pe);
+
+       /*
+        * If we couldn't find the mm context then use the group
+        * leader to iterate over the task group and find a task
+        * that gives us mm_struct.
+        */
+       if (unlikely(mm == NULL && ctx->glpid != NULL)) {
+
+               rcu_read_lock();
+               task = pid_task(ctx->glpid, PIDTYPE_PID);
+               if (task)
+                       do {
+                               mm = get_task_mm(task);
+                               if (mm) {
+                                       ctx->pid = get_task_pid(task,
+                                                               PIDTYPE_PID);
+                                       break;
+                               }
+                               task = next_thread(task);
+                       } while (task && !thread_group_leader(task));
+               rcu_read_unlock();
+
+               /* check if we switched pid */
+               if (ctx->pid != old_pid) {
+                       if (mm)
+                               pr_devel("%s:pe=%i switch pid %i->%i\n",
+                                        __func__, ctx->pe, pid_nr(old_pid),
+                                        pid_nr(ctx->pid));
+                       else
+                               pr_devel("%s:Cannot find mm for pid=%i\n",
+                                        __func__, pid_nr(old_pid));
+
+                       /* drop the reference to older pid */
+                       put_pid(old_pid);
+               }
+       }
+
+       return mm;
+}
+
+
+
 void cxl_handle_fault(struct work_struct *fault_work)
 {
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
-       struct task_struct *task = NULL;
        struct mm_struct *mm = NULL;
 
        if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
@@ -195,17 +274,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
                "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
 
        if (!ctx->kernel) {
-               if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-                       pr_devel("cxl_handle_fault unable to get task %i\n",
-                                pid_nr(ctx->pid));
+
+               mm = get_mem_context(ctx);
+               /* indicates all threads in the task group have exited */
+               if (mm == NULL) {
+                       pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
+                                __func__, ctx->pe, pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        return;
-               }
-               if (!(mm = get_task_mm(task))) {
-                       pr_devel("cxl_handle_fault unable to get mm %i\n",
-                                pid_nr(ctx->pid));
-                       cxl_ack_ae(ctx);
-                       goto out;
+               } else {
+                       pr_devel("Handling page fault for pe=%d pid=%i\n",
+                                ctx->pe, pid_nr(ctx->pid));
                }
        }
 
@@ -218,33 +297,22 @@ void cxl_handle_fault(struct work_struct *fault_work)
 
        if (mm)
                mmput(mm);
-out:
-       if (task)
-               put_task_struct(task);
 }
 
 static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
 {
-       int rc;
-       struct task_struct *task;
        struct mm_struct *mm;
 
-       if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-               pr_devel("cxl_prefault_one unable to get task %i\n",
-                        pid_nr(ctx->pid));
-               return;
-       }
-       if (!(mm = get_task_mm(task))) {
+       mm = get_mem_context(ctx);
+       if (mm == NULL) {
                pr_devel("cxl_prefault_one unable to get mm %i\n",
                         pid_nr(ctx->pid));
-               put_task_struct(task);
                return;
        }
 
-       rc = cxl_fault_segment(ctx, mm, ea);
+       cxl_fault_segment(ctx, mm, ea);
 
        mmput(mm);
-       put_task_struct(task);
 }
 
 static u64 next_segment(u64 ea, u64 vsid)
@@ -263,18 +331,13 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
        struct copro_slb slb;
        struct vm_area_struct *vma;
        int rc;
-       struct task_struct *task;
        struct mm_struct *mm;
 
-       if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-               pr_devel("cxl_prefault_vma unable to get task %i\n",
-                        pid_nr(ctx->pid));
-               return;
-       }
-       if (!(mm = get_task_mm(task))) {
+       mm = get_mem_context(ctx);
+       if (mm == NULL) {
                pr_devel("cxl_prefault_vm unable to get mm %i\n",
                         pid_nr(ctx->pid));
-               goto out1;
+               return;
        }
 
        down_read(&mm->mmap_sem);
@@ -295,8 +358,6 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
        up_read(&mm->mmap_sem);
 
        mmput(mm);
-out1:
-       put_task_struct(task);
 }
 
 void cxl_prefault(struct cxl_context *ctx, u64 wed)
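
Note: the net effect on the fault path is that a fault can now be serviced
as long as any thread in the attaching task's group still has an mm, with
ctx->pid silently re-pointed at that surviving thread. Condensed (the
function name is a placeholder):

/* Editorial sketch, not part of the commit. */
static void example_service_fault(struct cxl_context *ctx)
{
        struct mm_struct *mm = get_mem_context(ctx);

        if (mm == NULL) {
                cxl_ack_ae(ctx);        /* whole thread group has exited */
                return;
        }

        /* ... resolve the fault against mm ... */
        mmput(mm);
}
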
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 7ccd299..783337d 100644 (file)
@@ -67,7 +67,13 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
                spin_unlock(&adapter->afu_list_lock);
                goto err_put_adapter;
        }
-       get_device(&afu->dev);
+
+       /*
+        * Take a reference on the AFU so that it doesn't go away for
+        * the rest of the function; the reference is released before
+        * we return.
+        */
+       cxl_afu_get(afu);
        spin_unlock(&adapter->afu_list_lock);
 
        if (!afu->current_mode)
@@ -90,13 +96,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
        file->private_data = ctx;
        cxl_ctx_get();
 
-       /* Our ref on the AFU will now hold the adapter */
-       put_device(&adapter->dev);
-
-       return 0;
+       /* indicate success */
+       rc = 0;
 
 err_put_afu:
-       put_device(&afu->dev);
+       /* release the ref taken earlier */
+       cxl_afu_put(afu);
 err_put_adapter:
        put_device(&adapter->dev);
        return rc;
@@ -131,8 +136,6 @@ int afu_release(struct inode *inode, struct file *file)
                mutex_unlock(&ctx->mapping_lock);
        }
 
-       put_device(&ctx->afu->dev);
-
        /*
         * At this point all bottom halves have finished and we should be
         * getting no more IRQs from the hardware for this context.  Once it's
@@ -198,8 +201,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
         * where a process (master, some daemon, etc) has opened the chardev on
         * behalf of another process, so the AFU's mm gets bound to the process
         * that performs this ioctl and not the process that opened the file.
+        * Also we grab the PID of the group leader so that if the task that
+        * has performed the attach operation exits the mm context of the
+        * process is still accessible.
         */
-       ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
+       ctx->pid = get_task_pid(current, PIDTYPE_PID);
+       ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
 
        trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
 
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 85761d7..4c1903f 100644 (file)
@@ -138,6 +138,7 @@ static const struct pci_device_id cxl_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
        { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
        { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
+       { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
        { PCI_DEVICE_CLASS(0x120000, ~0), },
 
        { }
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index c241e15..cbd4331 100644 (file)
@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
        mask <<= shift;
        val <<= shift;
 
-       v = (in_le32(ioaddr) & ~mask) || (val & mask);
+       v = (in_le32(ioaddr) & ~mask) | (val & mask);
 
        out_le32(ioaddr, v);
        return PCIBIOS_SUCCESSFUL;
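
Note: the hunk above is a one-character fix worth spelling out: logical OR
collapses the merged value to 0 or 1, so every masked config-space write
was storing 0 or 1 instead of the combined word. Worked through:

/* Editorial sketch, not part of the commit. */
static u32 example_rmw(u32 cur, u32 mask, u32 val)
{
        u32 bad  = (cur & ~mask) || (val & mask); /* always 0 or 1! */
        u32 good = (cur & ~mask) |  (val & mask); /* proper merge   */

        /* e.g. cur=0xdeadbeef, mask=0x0000ff00, val=0x00004200:
         * bad == 1, good == 0xdead42ef */
        (void)bad;
        return good;
}
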
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 622005a..f3c63dc 100644 (file)
@@ -29,7 +29,7 @@
 
 #include <asm/io.h>
 #if IS_ENABLED(CONFIG_UCC_GETH)
-#include <asm/ucc.h>   /* for ucc_set_qe_mux_mii_mng() */
+#include <soc/fsl/qe/ucc.h>
 #endif
 
 #include "gianfar.h"
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index cbddbe2..5bf1ade 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-#include <asm/ucc.h>
-#include <asm/ucc_fast.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
 #include <asm/machdep.h>
 
 #include "ucc_geth.h"
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 75f3371..5da19b4 100644 (file)
 #include <linux/list.h>
 #include <linux/if_ether.h>
 
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
 
-#include <asm/ucc.h>
-#include <asm/ucc_fast.h>
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
 
 #define DRV_DESC "QE UCC Gigabit Ethernet Controller"
 #define DRV_NAME "ucc_geth"
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index df39ce0..9c18d6f 100644 (file)
@@ -40,7 +40,7 @@ static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
        tm->tm_min  = bcd2bin((h_m_s_ms >> 48) & 0xff);
        tm->tm_sec  = bcd2bin((h_m_s_ms >> 40) & 0xff);
 
-       GregorianDay(tm);
+       tm->tm_wday = -1;
 }
 
 static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 4e853ed..ad0df75 100644 (file)
@@ -1,6 +1,7 @@
 menu "SOC (System On Chip) specific Drivers"
 
 source "drivers/soc/brcmstb/Kconfig"
+source "drivers/soc/fsl/qe/Kconfig"
 source "drivers/soc/mediatek/Kconfig"
 source "drivers/soc/qcom/Kconfig"
 source "drivers/soc/rockchip/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index f2ba2e9..9536b80 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_SOC_BRCMSTB)      += brcmstb/
 obj-$(CONFIG_MACH_DOVE)                += dove/
+obj-y                          += fsl/
 obj-$(CONFIG_ARCH_MEDIATEK)    += mediatek/
 obj-$(CONFIG_ARCH_QCOM)                += qcom/
 obj-$(CONFIG_ARCH_ROCKCHIP)            += rockchip/
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
new file mode 100644 (file)
index 0000000..203307f
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Makefile for the Linux Kernel SOC fsl specific device drivers
+#
+
+obj-$(CONFIG_QUICC_ENGINE)             += qe/
+obj-$(CONFIG_CPM)                      += qe/
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
new file mode 100644 (file)
index 0000000..20978f2
--- /dev/null
@@ -0,0 +1,38 @@
+#
+# QE Communication options
+#
+
+config QUICC_ENGINE
+       bool "Freescale QUICC Engine (QE) Support"
+       depends on FSL_SOC && PPC32
+       select GENERIC_ALLOCATOR
+       select CRC32
+       help
+         The QUICC Engine (QE) is a new generation of communications
+         coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
+         Selecting this option means that you wish to build a kernel
+         for a machine with a QE coprocessor.
+
+config UCC_SLOW
+       bool
+       default y if SERIAL_QE
+       help
+         This option provides qe_lib support to UCC slow
+         protocols: UART, BISYNC, QMC
+
+config UCC_FAST
+       bool
+       default y if UCC_GETH
+       help
+         This option provides qe_lib support to UCC fast
+         protocols: HDLC, Ethernet, ATM, transparent
+
+config UCC
+       bool
+       default y if UCC_FAST || UCC_SLOW
+
+config QE_USB
+       bool
+       default y if USB_FSL_QE
+       help
+         QE USB Controller support
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
new file mode 100644 (file)
index 0000000..ffac541
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for the linux ppc-specific parts of QE
+#
+obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
+obj-$(CONFIG_CPM)      += qe_common.o
+obj-$(CONFIG_UCC)      += ucc.o
+obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
+obj-$(CONFIG_UCC_FAST) += ucc_fast.o
+obj-$(CONFIG_QE_USB)   += usb.o
+obj-$(CONFIG_QE_GPIO)  += gpio.o
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
new file mode 100644 (file)
index 0000000..aa5c11a
--- /dev/null
@@ -0,0 +1,317 @@
+/*
+ * QUICC Engine GPIOs
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <soc/fsl/qe/qe.h>
+
+struct qe_gpio_chip {
+       struct of_mm_gpio_chip mm_gc;
+       spinlock_t lock;
+
+       unsigned long pin_flags[QE_PIO_PINS];
+#define QE_PIN_REQUESTED 0
+
+       /* shadowed data register to clear/set bits safely */
+       u32 cpdata;
+
+       /* saved_regs used to restore dedicated functions */
+       struct qe_pio_regs saved_regs;
+};
+
+static inline struct qe_gpio_chip *
+to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc)
+{
+       return container_of(mm_gc, struct qe_gpio_chip, mm_gc);
+}
+
+static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+       struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+       struct qe_pio_regs __iomem *regs = mm_gc->regs;
+
+       qe_gc->cpdata = in_be32(&regs->cpdata);
+       qe_gc->saved_regs.cpdata = qe_gc->cpdata;
+       qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
+       qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
+       qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
+       qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
+       qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
+}
+
+static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct qe_pio_regs __iomem *regs = mm_gc->regs;
+       u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+       return in_be32(&regs->cpdata) & pin_mask;
+}
+
+static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+       struct qe_pio_regs __iomem *regs = mm_gc->regs;
+       unsigned long flags;
+       u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+       spin_lock_irqsave(&qe_gc->lock, flags);
+
+       if (val)
+               qe_gc->cpdata |= pin_mask;
+       else
+               qe_gc->cpdata &= ~pin_mask;
+
+       out_be32(&regs->cpdata, qe_gc->cpdata);
+
+       spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&qe_gc->lock, flags);
+
+       __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+
+       spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+       return 0;
+}
+
+static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+       unsigned long flags;
+
+       qe_gpio_set(gc, gpio, val);
+
+       spin_lock_irqsave(&qe_gc->lock, flags);
+
+       __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+
+       spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+       return 0;
+}
+
+struct qe_pin {
+       /*
+        * The qe_gpio_chip name is unfortunate, we should change that to
+        * something like qe_pio_controller. Someday.
+        */
+       struct qe_gpio_chip *controller;
+       int num;
+};
+
+/**
+ * qe_pin_request - Request a QE pin
+ * @np:                device node to get a pin from
+ * @index:     index of a pin in the device tree
+ * Context:    non-atomic
+ *
+ * This function returns a qe_pin so that you can use it with the rest of
+ * the QE Pin Multiplexing API.
+ */
+struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+       struct qe_pin *qe_pin;
+       struct gpio_chip *gc;
+       struct of_mm_gpio_chip *mm_gc;
+       struct qe_gpio_chip *qe_gc;
+       int err;
+       unsigned long flags;
+
+       qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
+       if (!qe_pin) {
+               pr_debug("%s: can't allocate memory\n", __func__);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       err = of_get_gpio(np, index);
+       if (err < 0)
+               goto err0;
+       gc = gpio_to_chip(err);
+       if (WARN_ON(!gc))
+               goto err0;
+
+       if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
+               pr_debug("%s: tried to get a non-qe pin\n", __func__);
+               err = -EINVAL;
+               goto err0;
+       }
+
+       mm_gc = to_of_mm_gpio_chip(gc);
+       qe_gc = to_qe_gpio_chip(mm_gc);
+
+       spin_lock_irqsave(&qe_gc->lock, flags);
+
+       err -= gc->base;
+       if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
+               qe_pin->controller = qe_gc;
+               qe_pin->num = err;
+               err = 0;
+       } else {
+               err = -EBUSY;
+       }
+
+       spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+       if (!err)
+               return qe_pin;
+err0:
+       kfree(qe_pin);
+       pr_debug("%s failed with status %d\n", __func__, err);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL(qe_pin_request);
+
+/**
+ * qe_pin_free - Free a pin
+ * @qe_pin:    pointer to the qe_pin structure
+ * Context:    any
+ *
+ * This function frees the qe_pin structure and makes a pin available
+ * for further qe_pin_request() calls.
+ */
+void qe_pin_free(struct qe_pin *qe_pin)
+{
+       struct qe_gpio_chip *qe_gc = qe_pin->controller;
+       unsigned long flags;
+       const int pin = qe_pin->num;
+
+       spin_lock_irqsave(&qe_gc->lock, flags);
+       test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
+       spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+       kfree(qe_pin);
+}
+EXPORT_SYMBOL(qe_pin_free);
+
+/**
+ * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
+ * @qe_pin:    pointer to the qe_pin structure
+ * Context:    any
+ *
+ * This function resets a pin to a dedicated peripheral function that
+ * has been set up by the firmware.
+ */
+void qe_pin_set_dedicated(struct qe_pin *qe_pin)
+{
+       struct qe_gpio_chip *qe_gc = qe_pin->controller;
+       struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+       struct qe_pio_regs *sregs = &qe_gc->saved_regs;
+       int pin = qe_pin->num;
+       u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
+       u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
+       bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
+       unsigned long flags;
+
+       spin_lock_irqsave(&qe_gc->lock, flags);
+
+       if (second_reg) {
+               clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
+               clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+       } else {
+               clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
+               clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+       }
+
+       if (sregs->cpdata & mask1)
+               qe_gc->cpdata |= mask1;
+       else
+               qe_gc->cpdata &= ~mask1;
+
+       out_be32(&regs->cpdata, qe_gc->cpdata);
+       clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+
+       spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_dedicated);
+
+/**
+ * qe_pin_set_gpio - Set a pin to the GPIO mode
+ * @qe_pin:    pointer to the qe_pin structure
+ * Context:    any
+ *
+ * This function sets a pin to the GPIO mode.
+ */
+void qe_pin_set_gpio(struct qe_pin *qe_pin)
+{
+       struct qe_gpio_chip *qe_gc = qe_pin->controller;
+       struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+       unsigned long flags;
+
+       spin_lock_irqsave(&qe_gc->lock, flags);
+
+       /* Let's make it input by default, GPIO API is able to change that. */
+       __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
+
+       spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_gpio);
+
+static int __init qe_add_gpiochips(void)
+{
+       struct device_node *np;
+
+       for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
+               int ret;
+               struct qe_gpio_chip *qe_gc;
+               struct of_mm_gpio_chip *mm_gc;
+               struct gpio_chip *gc;
+
+               qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
+               if (!qe_gc) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               spin_lock_init(&qe_gc->lock);
+
+               mm_gc = &qe_gc->mm_gc;
+               gc = &mm_gc->gc;
+
+               mm_gc->save_regs = qe_gpio_save_regs;
+               gc->ngpio = QE_PIO_PINS;
+               gc->direction_input = qe_gpio_dir_in;
+               gc->direction_output = qe_gpio_dir_out;
+               gc->get = qe_gpio_get;
+               gc->set = qe_gpio_set;
+
+               ret = of_mm_gpiochip_add(np, mm_gc);
+               if (ret)
+                       goto err;
+               continue;
+err:
+               pr_err("%s: registration failed with status %d\n",
+                      np->full_name, ret);
+               kfree(qe_gc);
+               /* try others anyway */
+       }
+       return 0;
+}
+arch_initcall(qe_add_gpiochips);
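
Note: besides the gpiolib registration, gpio.c above exports a small
pin-multiplexing API. As a consumer would use it (the device node and
index are assumptions):

/* Editorial sketch, not part of the commit. */
static int example_claim_pin(struct device_node *np)
{
        struct qe_pin *pin = qe_pin_request(np, 0);

        if (IS_ERR(pin))
                return PTR_ERR(pin);

        qe_pin_set_gpio(pin);           /* drive the pin as a GPIO ... */
        qe_pin_set_dedicated(pin);      /* ... or return it to its
                                         * firmware-configured function */
        qe_pin_free(pin);
        return 0;
}
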
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
new file mode 100644 (file)
index 0000000..709fc63
--- /dev/null
@@ -0,0 +1,719 @@
+/*
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
+ *
+ * Description:
+ * General Purpose functions for the global management of the
+ * QUICC Engine (QE).
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_platform.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <asm/prom.h>
+#include <asm/rheap.h>
+
+static void qe_snums_init(void);
+static int qe_sdma_init(void);
+
+static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
+
+/* QE snum state */
+enum qe_snum_state {
+       QE_SNUM_STATE_USED,
+       QE_SNUM_STATE_FREE
+};
+
+/* QE snum */
+struct qe_snum {
+       u8 num;
+       enum qe_snum_state state;
+};
+
+/* We allocate this here because it is used almost exclusively for
+ * the communication processor devices.
+ */
+struct qe_immap __iomem *qe_immr;
+EXPORT_SYMBOL(qe_immr);
+
+static struct qe_snum snums[QE_NUM_OF_SNUM];   /* Dynamically allocated SNUMs */
+static unsigned int qe_num_of_snum;
+
+static phys_addr_t qebase = -1;
+
+phys_addr_t get_qe_base(void)
+{
+       struct device_node *qe;
+       int size;
+       const u32 *prop;
+
+       if (qebase != -1)
+               return qebase;
+
+       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+       if (!qe) {
+               qe = of_find_node_by_type(NULL, "qe");
+               if (!qe)
+                       return qebase;
+       }
+
+       prop = of_get_property(qe, "reg", &size);
+       if (prop && size >= sizeof(*prop))
+               qebase = of_translate_address(qe, prop);
+       of_node_put(qe);
+
+       return qebase;
+}
+
+EXPORT_SYMBOL(get_qe_base);
+
+void qe_reset(void)
+{
+       if (qe_immr == NULL)
+               qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
+
+       qe_snums_init();
+
+       qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
+                    QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+       /* Reclaim the MURAM memory for our use. */
+       qe_muram_init();
+
+       if (qe_sdma_init())
+               panic("sdma init failed!");
+}
+
+int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
+{
+       unsigned long flags;
+       u8 mcn_shift = 0, dev_shift = 0;
+       u32 ret;
+
+       spin_lock_irqsave(&qe_lock, flags);
+       if (cmd == QE_RESET) {
+               out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
+       } else {
+               if (cmd == QE_ASSIGN_PAGE) {
+                       /* Here device is the SNUM, not sub-block */
+                       dev_shift = QE_CR_SNUM_SHIFT;
+               } else if (cmd == QE_ASSIGN_RISC) {
+                       /* Here device is the SNUM, and mcnProtocol is
+                        * e_QeCmdRiscAssignment value */
+                       dev_shift = QE_CR_SNUM_SHIFT;
+                       mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
+               } else {
+                       if (device == QE_CR_SUBBLOCK_USB)
+                               mcn_shift = QE_CR_MCN_USB_SHIFT;
+                       else
+                               mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
+               }
+
+               out_be32(&qe_immr->cp.cecdr, cmd_input);
+               out_be32(&qe_immr->cp.cecr,
+                        (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
+                         mcn_protocol << mcn_shift));
+       }
+
+       /* wait for the QE_CR_FLG to clear */
+       ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+                          100, 0);
+       /* On timeout (e.g. failure), the expression will be false (ret == 0),
+          otherwise it will be true (ret == 1). */
+       spin_unlock_irqrestore(&qe_lock, flags);
+
+       return ret == 1;
+}
+EXPORT_SYMBOL(qe_issue_cmd);
+
+/* Set a baud rate generator. This needs lots of work. There are
+ * 16 BRGs, which can be connected to the QE channels or output
+ * as clocks. The BRGs are in two different blocks of internal
+ * memory mapped space.
+ * The BRG clock is the QE clock divided by 2.
+ * It was set up long ago during the initial boot phase and is
+ * given to us.
+ * Baud rate clocks are zero-based in the driver code (as that maps
+ * to port numbers). Documentation uses 1-based numbering.
+ */
+static unsigned int brg_clk = 0;
+
+unsigned int qe_get_brg_clk(void)
+{
+       struct device_node *qe;
+       int size;
+       const u32 *prop;
+
+       if (brg_clk)
+               return brg_clk;
+
+       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+       if (!qe) {
+               qe = of_find_node_by_type(NULL, "qe");
+               if (!qe)
+                       return brg_clk;
+       }
+
+       prop = of_get_property(qe, "brg-frequency", &size);
+       if (prop && size == sizeof(*prop))
+               brg_clk = *prop;
+
+       of_node_put(qe);
+
+       return brg_clk;
+}
+EXPORT_SYMBOL(qe_get_brg_clk);
+
+/* Program the BRG to the given sampling rate and multiplier
+ *
+ * @brg: the BRG, QE_BRG1 - QE_BRG16
+ * @rate: the desired sampling rate
+ * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+ * GUMR_L[TDCR].  E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+ * then 'multiplier' should be 8.
+ */
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
+{
+       u32 divisor, tempval;
+       u32 div16 = 0;
+
+       if ((brg < QE_BRG1) || (brg > QE_BRG16))
+               return -EINVAL;
+
+       divisor = qe_get_brg_clk() / (rate * multiplier);
+
+       if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+               div16 = QE_BRGC_DIV16;
+               divisor /= 16;
+       }
+
+       /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+          that the BRG divisor must be even if you're not using divide-by-16
+          mode. */
+       if (!div16 && (divisor & 1) && (divisor > 3))
+               divisor++;
+
+       tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+               QE_BRGC_ENABLE | div16;
+
+       out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+
+       return 0;
+}
+EXPORT_SYMBOL(qe_setbrg);
+
+/* Convert a string to a QE clock source enum
+ *
+ * This function takes a string, typically from a property in the device
+ * tree, and returns the corresponding "enum qe_clock" value.
+*/
+enum qe_clock qe_clock_source(const char *source)
+{
+       unsigned int i;
+
+       if (strcasecmp(source, "none") == 0)
+               return QE_CLK_NONE;
+
+       if (strncasecmp(source, "brg", 3) == 0) {
+               i = simple_strtoul(source + 3, NULL, 10);
+               if ((i >= 1) && (i <= 16))
+                       return (QE_BRG1 - 1) + i;
+               else
+                       return QE_CLK_DUMMY;
+       }
+
+       if (strncasecmp(source, "clk", 3) == 0) {
+               i = simple_strtoul(source + 3, NULL, 10);
+               if ((i >= 1) && (i <= 24))
+                       return (QE_CLK1 - 1) + i;
+               else
+                       return QE_CLK_DUMMY;
+       }
+
+       return QE_CLK_DUMMY;
+}
+EXPORT_SYMBOL(qe_clock_source);
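A sketch of the intended use, feeding a clock-name string from the device tree into the parser (the "rx-clock-name" property and the function name are illustrative, not mandated by this file):

    static enum qe_clock example_parse_rx_clock(struct device_node *np)
    {
            const char *name = NULL;

            if (of_property_read_string(np, "rx-clock-name", &name))
                    return QE_CLK_NONE;
            return qe_clock_source(name);   /* e.g. "brg5" -> QE_BRG5 */
    }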
+
+/* Initialize SNUMs (thread serial numbers) according to
+ * QE Module Control chapter, SNUM table
+ */
+static void qe_snums_init(void)
+{
+       int i;
+       static const u8 snum_init_76[] = {
+               0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+               0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+               0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+               0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
+               0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
+               0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
+               0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
+               0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
+               0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
+               0xF4, 0xF5, 0xFC, 0xFD,
+       };
+       static const u8 snum_init_46[] = {
+               0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+               0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+               0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+               0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
+               0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
+               0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
+       };
+       static const u8 *snum_init;
+
+       qe_num_of_snum = qe_get_num_of_snums();
+
+       if (qe_num_of_snum == 76)
+               snum_init = snum_init_76;
+       else
+               snum_init = snum_init_46;
+
+       for (i = 0; i < qe_num_of_snum; i++) {
+               snums[i].num = snum_init[i];
+               snums[i].state = QE_SNUM_STATE_FREE;
+       }
+}
+
+int qe_get_snum(void)
+{
+       unsigned long flags;
+       int snum = -EBUSY;
+       int i;
+
+       spin_lock_irqsave(&qe_lock, flags);
+       for (i = 0; i < qe_num_of_snum; i++) {
+               if (snums[i].state == QE_SNUM_STATE_FREE) {
+                       snums[i].state = QE_SNUM_STATE_USED;
+                       snum = snums[i].num;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&qe_lock, flags);
+
+       return snum;
+}
+EXPORT_SYMBOL(qe_get_snum);
+
+void qe_put_snum(u8 snum)
+{
+       int i;
+
+       for (i = 0; i < qe_num_of_snum; i++) {
+               if (snums[i].num == snum) {
+                       snums[i].state = QE_SNUM_STATE_FREE;
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL(qe_put_snum);
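The two calls above pair up into the usual SNUM lifecycle; a hypothetical sketch:

    static int example_claim_snum(void)
    {
            int snum = qe_get_snum();

            if (snum < 0)
                    return snum;            /* -EBUSY: no free SNUMs */
            /* ... program a QE thread with this serial number ... */
            qe_put_snum((u8)snum);          /* back to the free pool */
            return 0;
    }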
+
+static int qe_sdma_init(void)
+{
+       struct sdma __iomem *sdma = &qe_immr->sdma;
+       static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
+
+       if (!sdma)
+               return -ENODEV;
+
+       /* allocate 2 internal temporary buffers (512 bytes each) for
+        * the SDMA */
+       if (IS_ERR_VALUE(sdma_buf_offset)) {
+               sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
+               if (IS_ERR_VALUE(sdma_buf_offset))
+                       return -ENOMEM;
+       }
+
+       out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
+       out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
+                                       (0x1 << QE_SDMR_CEN_SHIFT)));
+
+       return 0;
+}
+
+/* The maximum number of RISCs we support */
+#define MAX_QE_RISC     4
+
+/* Firmware information stored here for qe_get_firmware_info() */
+static struct qe_firmware_info qe_firmware_info;
+
+/*
+ * Set to 1 if QE firmware has been uploaded, and therefore
+ * qe_firmware_info contains valid data.
+ */
+static int qe_firmware_uploaded;
+
+/*
+ * Upload a QE microcode
+ *
+ * This function is a worker function for qe_upload_firmware().  It does
+ * the actual uploading of the microcode.
+ */
+static void qe_upload_microcode(const void *base,
+       const struct qe_microcode *ucode)
+{
+       const __be32 *code = base + be32_to_cpu(ucode->code_offset);
+       unsigned int i;
+
+       if (ucode->major || ucode->minor || ucode->revision)
+               printk(KERN_INFO "qe-firmware: "
+                       "uploading microcode '%s' version %u.%u.%u\n",
+                       ucode->id, ucode->major, ucode->minor, ucode->revision);
+       else
+               printk(KERN_INFO "qe-firmware: "
+                       "uploading microcode '%s'\n", ucode->id);
+
+       /* Use auto-increment */
+       out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
+               QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+
+       for (i = 0; i < be32_to_cpu(ucode->count); i++)
+               out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+
+       /* Set I-RAM Ready Register */
+       out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
+}
+
+/*
+ * Upload a microcode to the I-RAM at a specific address.
+ *
+ * See Documentation/powerpc/qe_firmware.txt for information on QE microcode
+ * uploading.
+ *
+ * Currently, only version 1 is supported, so the 'version' field must be
+ * set to 1.
+ *
+ * The SOC model and revision are not validated, they are only displayed for
+ * informational purposes.
+ *
+ * 'calc_size' is the calculated size, in bytes, of the firmware structure and
+ * all of the microcode structures, minus the CRC.
+ *
+ * 'length' is the size that the structure says it is, including the CRC.
+ */
+int qe_upload_firmware(const struct qe_firmware *firmware)
+{
+       unsigned int i;
+       unsigned int j;
+       u32 crc;
+       size_t calc_size = sizeof(struct qe_firmware);
+       size_t length;
+       const struct qe_header *hdr;
+
+       if (!firmware) {
+               printk(KERN_ERR "qe-firmware: invalid pointer\n");
+               return -EINVAL;
+       }
+
+       hdr = &firmware->header;
+       length = be32_to_cpu(hdr->length);
+
+       /* Check the magic */
+       if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
+           (hdr->magic[2] != 'F')) {
+               printk(KERN_ERR "qe-firmware: not a microcode\n");
+               return -EPERM;
+       }
+
+       /* Check the version */
+       if (hdr->version != 1) {
+               printk(KERN_ERR "qe-firmware: unsupported version\n");
+               return -EPERM;
+       }
+
+       /* Validate some of the fields */
+       if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+               printk(KERN_ERR "qe-firmware: invalid data\n");
+               return -EINVAL;
+       }
+
+       /* Validate the length and check if there's a CRC */
+       calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
+
+       for (i = 0; i < firmware->count; i++)
+               /*
+                * For situations where the second RISC uses the same microcode
+                * as the first, the 'code_offset' and 'count' fields will be
+                * zero, so it's okay to add those.
+                */
+               calc_size += sizeof(__be32) *
+                       be32_to_cpu(firmware->microcode[i].count);
+
+       /* Validate the length */
+       if (length != calc_size + sizeof(__be32)) {
+               printk(KERN_ERR "qe-firmware: invalid length\n");
+               return -EPERM;
+       }
+
+       /* Validate the CRC */
+       crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
+       if (crc != crc32(0, firmware, calc_size)) {
+               printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
+               return -EIO;
+       }
+
+       /*
+        * If the microcode calls for it, split the I-RAM.
+        */
+       if (!firmware->split)
+               setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+
+       if (firmware->soc.model)
+               printk(KERN_INFO
+                       "qe-firmware: firmware '%s' for %u V%u.%u\n",
+                       firmware->id, be16_to_cpu(firmware->soc.model),
+                       firmware->soc.major, firmware->soc.minor);
+       else
+               printk(KERN_INFO "qe-firmware: firmware '%s'\n",
+                       firmware->id);
+
+       /*
+        * The QE only supports one microcode per RISC, so clear out all the
+        * saved microcode information and put in the new.
+        */
+       memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
+       strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
+       qe_firmware_info.extended_modes = firmware->extended_modes;
+       memcpy(qe_firmware_info.vtraps, firmware->vtraps,
+               sizeof(firmware->vtraps));
+
+       /* Loop through each microcode. */
+       for (i = 0; i < firmware->count; i++) {
+               const struct qe_microcode *ucode = &firmware->microcode[i];
+
+               /* Upload a microcode if it's present */
+               if (ucode->code_offset)
+                       qe_upload_microcode(firmware, ucode);
+
+               /* Program the traps for this processor */
+               for (j = 0; j < 16; j++) {
+                       u32 trap = be32_to_cpu(ucode->traps[j]);
+
+                       if (trap)
+                               out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+               }
+
+               /* Enable traps */
+               out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
+       }
+
+       qe_firmware_uploaded = 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(qe_upload_firmware);
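The length check above pins down the on-disk layout. With illustrative counts, an image carrying two microcodes of 1000 and 0 code words (the second RISC reusing the first's code) has to satisfy:

    /*
     *   length == sizeof(struct qe_firmware)            (header, incl.
     *                                                    one descriptor)
     *           + (2 - 1) * sizeof(struct qe_microcode) (extra descriptor)
     *           + (1000 + 0) * sizeof(__be32)           (code words)
     *           + sizeof(__be32)                        (trailing CRC32)
     */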
+
+/*
+ * Get info on the currently-loaded firmware
+ *
+ * This function also checks the device tree to see if the boot loader has
+ * uploaded a firmware already.
+ */
+struct qe_firmware_info *qe_get_firmware_info(void)
+{
+       static int initialized;
+       struct property *prop;
+       struct device_node *qe;
+       struct device_node *fw = NULL;
+       const char *sprop;
+       unsigned int i;
+
+       /*
+        * If we haven't checked yet, and a driver hasn't uploaded a firmware
+        * yet, then check the device tree for information.
+        */
+       if (qe_firmware_uploaded)
+               return &qe_firmware_info;
+
+       if (initialized)
+               return NULL;
+
+       initialized = 1;
+
+       /*
+        * Newer device trees have an "fsl,qe" compatible property for the QE
+        * node, but we still need to support older device trees.
+        */
+       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+       if (!qe) {
+               qe = of_find_node_by_type(NULL, "qe");
+               if (!qe)
+                       return NULL;
+       }
+
+       /* Find the 'firmware' child node */
+       for_each_child_of_node(qe, fw) {
+               if (strcmp(fw->name, "firmware") == 0)
+                       break;
+       }
+
+       of_node_put(qe);
+
+       /* Did we find the 'firmware' node? */
+       if (!fw)
+               return NULL;
+
+       qe_firmware_uploaded = 1;
+
+       /* Copy the data into qe_firmware_info */
+       sprop = of_get_property(fw, "id", NULL);
+       if (sprop)
+               strlcpy(qe_firmware_info.id, sprop,
+                       sizeof(qe_firmware_info.id));
+
+       prop = of_find_property(fw, "extended-modes", NULL);
+       if (prop && (prop->length == sizeof(u64))) {
+               const u64 *iprop = prop->value;
+
+               qe_firmware_info.extended_modes = *iprop;
+       }
+
+       prop = of_find_property(fw, "virtual-traps", NULL);
+       if (prop && (prop->length == 32)) {
+               const u32 *iprop = prop->value;
+
+               for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
+                       qe_firmware_info.vtraps[i] = iprop[i];
+       }
+
+       of_node_put(fw);
+
+       return &qe_firmware_info;
+}
+EXPORT_SYMBOL(qe_get_firmware_info);
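A minimal consumer of the API above (hypothetical function name; fields as defined by struct qe_firmware_info):

    static void example_report_firmware(void)
    {
            struct qe_firmware_info *fw = qe_get_firmware_info();

            if (!fw) {
                    pr_warn("no QE firmware uploaded\n");
                    return;
            }
            pr_info("QE firmware '%s', extended modes 0x%llx\n",
                    fw->id, (unsigned long long)fw->extended_modes);
    }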
+
+unsigned int qe_get_num_of_risc(void)
+{
+       struct device_node *qe;
+       int size;
+       unsigned int num_of_risc = 0;
+       const u32 *prop;
+
+       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+       if (!qe) {
+               /* Older device trees did not have an "fsl,qe"
+                * compatible property, so we need to look for
+                * the QE node by name.
+                */
+               qe = of_find_node_by_type(NULL, "qe");
+               if (!qe)
+                       return num_of_risc;
+       }
+
+       prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
+       if (prop && size == sizeof(*prop))
+               num_of_risc = *prop;
+
+       of_node_put(qe);
+
+       return num_of_risc;
+}
+EXPORT_SYMBOL(qe_get_num_of_risc);
+
+unsigned int qe_get_num_of_snums(void)
+{
+       struct device_node *qe;
+       int size;
+       unsigned int num_of_snums;
+       const u32 *prop;
+
+       num_of_snums = 28; /* The default number of SNUMs for threads is 28 */
+       qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+       if (!qe) {
+               /* Older device trees did not have an "fsl,qe"
+                * compatible property, so we need to look for
+                * the QE node by name.
+                */
+               qe = of_find_node_by_type(NULL, "qe");
+               if (!qe)
+                       return num_of_snums;
+       }
+
+       prop = of_get_property(qe, "fsl,qe-num-snums", &size);
+       if (prop && size == sizeof(*prop)) {
+               num_of_snums = *prop;
+               if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
+                       /* No QE ever has fewer than 28 SNUMs */
+                       pr_err("QE: number of snum is invalid\n");
+                       of_node_put(qe);
+                       return -EINVAL;
+               }
+       }
+
+       of_node_put(qe);
+
+       return num_of_snums;
+}
+EXPORT_SYMBOL(qe_get_num_of_snums);
+
+static int __init qe_init(void)
+{
+       struct device_node *np;
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+       if (!np)
+               return -ENODEV;
+       qe_reset();
+       of_node_put(np);
+       return 0;
+}
+subsys_initcall(qe_init);
+
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
+static int qe_resume(struct platform_device *ofdev)
+{
+       if (!qe_alive_during_sleep())
+               qe_reset();
+       return 0;
+}
+
+static int qe_probe(struct platform_device *ofdev)
+{
+       return 0;
+}
+
+static const struct of_device_id qe_ids[] = {
+       { .compatible = "fsl,qe", },
+       { },
+};
+
+static struct platform_driver qe_driver = {
+       .driver = {
+               .name = "fsl-qe",
+               .of_match_table = qe_ids,
+       },
+       .probe = qe_probe,
+       .resume = qe_resume,
+};
+
+static int __init qe_drv_init(void)
+{
+       return platform_driver_register(&qe_driver);
+}
+device_initcall(qe_drv_init);
+#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
new file mode 100644 (file)
index 0000000..419fa5b
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Common CPM code
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
+ *
+ * Some parts derived from commproc.c/cpm2_common.c, which is:
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
+ * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
+ * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/qe.h>
+
+static struct gen_pool *muram_pool;
+static spinlock_t cpm_muram_lock;
+static u8 __iomem *muram_vbase;
+static phys_addr_t muram_pbase;
+
+struct muram_block {
+       struct list_head head;
+       unsigned long start;
+       int size;
+};
+
+static LIST_HEAD(muram_block_list);
+
+/* max address size we deal with */
+#define OF_MAX_ADDR_CELLS      4
+#define GENPOOL_OFFSET         (4096 * 8)
+
+int cpm_muram_init(void)
+{
+       struct device_node *np;
+       struct resource r;
+       u32 zero[OF_MAX_ADDR_CELLS] = {};
+       resource_size_t max = 0;
+       int i = 0;
+       int ret = 0;
+
+       if (muram_pbase)
+               return 0;
+
+       spin_lock_init(&cpm_muram_lock);
+       np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
+       if (!np) {
+               /* try legacy bindings */
+               np = of_find_node_by_name(NULL, "data-only");
+               if (!np) {
+                       pr_err("Cannot find CPM muram data node\n");
+                       ret = -ENODEV;
+                       goto out_muram;
+               }
+       }
+
+       muram_pool = gen_pool_create(0, -1);
+       muram_pbase = of_translate_address(np, zero);
+       if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
+               pr_err("Cannot translate zero through CPM muram node\n");
+               ret = -ENODEV;
+               goto out_pool;
+       }
+
+       while (of_address_to_resource(np, i++, &r) == 0) {
+               if (r.end > max)
+                       max = r.end;
+               ret = gen_pool_add(muram_pool, r.start - muram_pbase +
+                                  GENPOOL_OFFSET, resource_size(&r), -1);
+               if (ret) {
+                       pr_err("QE: couldn't add muram to pool!\n");
+                       goto out_pool;
+               }
+       }
+
+       muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
+       if (!muram_vbase) {
+               pr_err("Cannot map QE muram\n");
+               ret = -ENOMEM;
+               goto out_pool;
+       }
+       goto out_muram;
+out_pool:
+       gen_pool_destroy(muram_pool);
+out_muram:
+       of_node_put(np);
+       return ret;
+}
+
+/**
+ * cpm_muram_alloc - allocate the requested size worth of multi-user ram
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns an offset into the muram area.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
+{
+       unsigned long start;
+       unsigned long flags;
+       struct genpool_data_align muram_pool_data;
+
+       spin_lock_irqsave(&cpm_muram_lock, flags);
+       muram_pool_data.align = align;
+       start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
+                                      &muram_pool_data);
+       spin_unlock_irqrestore(&cpm_muram_lock, flags);
+       return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc);
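A sketch of the allocation round trip described in the comment above (sizes illustrative, kernel context assumed):

    static int example_muram_roundtrip(void)
    {
            unsigned long off = cpm_muram_alloc(64, 32); /* 64 B, 32-B aligned */
            void __iomem *va;

            if (IS_ERR_VALUE(off))
                    return -ENOMEM;
            va = cpm_muram_addr(off);       /* CPU-visible mapping */
            memset_io(va, 0, 64);           /* a typical access; the
                                               allocator already zeroes */
            cpm_muram_free(off);
            return 0;
    }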
+
+/**
+ * cpm_muram_free - free a chunk of multi-user ram
+ * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
+ */
+int cpm_muram_free(unsigned long offset)
+{
+       unsigned long flags;
+       int size;
+       struct muram_block *tmp;
+
+       size = 0;
+       spin_lock_irqsave(&cpm_muram_lock, flags);
+       list_for_each_entry(tmp, &muram_block_list, head) {
+               if (tmp->start == offset) {
+                       size = tmp->size;
+                       list_del(&tmp->head);
+                       kfree(tmp);
+                       break;
+               }
+       }
+       gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
+       spin_unlock_irqrestore(&cpm_muram_lock, flags);
+       return size;
+}
+EXPORT_SYMBOL(cpm_muram_free);
+
+/**
+ * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
+ * @offset: offset of allocation start address
+ * @size: number of bytes to allocate
+ *
+ * This function returns an offset into the muram area.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+{
+       unsigned long start;
+       unsigned long flags;
+       struct genpool_data_fixed muram_pool_data_fixed;
+
+       spin_lock_irqsave(&cpm_muram_lock, flags);
+       muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
+       start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
+                                      &muram_pool_data_fixed);
+       spin_unlock_irqrestore(&cpm_muram_lock, flags);
+       return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc_fixed);
+
+/**
+ * cpm_muram_alloc_common - cpm_muram_alloc common code
+ * @size: number of bytes to allocate
+ * @algo: algorithm for alloc.
+ * @data: data for genalloc's algorithm.
+ *
+ * This function returns an offset into the muram area.
+ */
+unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo,
+                                    void *data)
+{
+       struct muram_block *entry;
+       unsigned long start;
+
+       start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+       if (!start)
+               goto out2;
+       start = start - GENPOOL_OFFSET;
+       memset_io(cpm_muram_addr(start), 0, size);
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               goto out1;
+       entry->start = start;
+       entry->size = size;
+       list_add(&entry->head, &muram_block_list);
+
+       return start;
+out1:
+       gen_pool_free(muram_pool, start, size);
+out2:
+       return (unsigned long)-ENOMEM;
+}
+
+/**
+ * cpm_muram_addr - turn a muram offset into a virtual address
+ * @offset: muram offset to convert
+ */
+void __iomem *cpm_muram_addr(unsigned long offset)
+{
+       return muram_vbase + offset;
+}
+EXPORT_SYMBOL(cpm_muram_addr);
+
+unsigned long cpm_muram_offset(void __iomem *addr)
+{
+       return addr - (void __iomem *)muram_vbase;
+}
+EXPORT_SYMBOL(cpm_muram_offset);
+
+/**
+ * cpm_muram_dma - turn a muram virtual address into a DMA address
+ * @addr: virtual address from cpm_muram_addr() to convert
+ */
+dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+       return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
+}
+EXPORT_SYMBOL(cpm_muram_dma);
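Taken together, the three helpers above implement one linear relation between the muram address spaces (a summary of the code above, not new behavior):

    /*
     *   cpm_muram_addr(off)   == muram_vbase + off
     *   cpm_muram_offset(va)  == va - muram_vbase
     *   cpm_muram_dma(va)     == muram_pbase + (va - muram_vbase)
     */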
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
new file mode 100644 (file)
index 0000000..b77d01f
--- /dev/null
@@ -0,0 +1,503 @@
+/*
+ * drivers/soc/fsl/qe/qe_ic.c
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.  All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * QUICC ENGINE Interrupt Controller
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/qe_ic.h>
+
+#include "qe_ic.h"
+
+static DEFINE_RAW_SPINLOCK(qe_ic_lock);
+
+static struct qe_ic_info qe_ic_info[] = {
+       [1] = {
+              .mask = 0x00008000,
+              .mask_reg = QEIC_CIMR,
+              .pri_code = 0,
+              .pri_reg = QEIC_CIPWCC,
+              },
+       [2] = {
+              .mask = 0x00004000,
+              .mask_reg = QEIC_CIMR,
+              .pri_code = 1,
+              .pri_reg = QEIC_CIPWCC,
+              },
+       [3] = {
+              .mask = 0x00002000,
+              .mask_reg = QEIC_CIMR,
+              .pri_code = 2,
+              .pri_reg = QEIC_CIPWCC,
+              },
+       [10] = {
+               .mask = 0x00000040,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 1,
+               .pri_reg = QEIC_CIPZCC,
+               },
+       [11] = {
+               .mask = 0x00000020,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 2,
+               .pri_reg = QEIC_CIPZCC,
+               },
+       [12] = {
+               .mask = 0x00000010,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 3,
+               .pri_reg = QEIC_CIPZCC,
+               },
+       [13] = {
+               .mask = 0x00000008,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 4,
+               .pri_reg = QEIC_CIPZCC,
+               },
+       [14] = {
+               .mask = 0x00000004,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 5,
+               .pri_reg = QEIC_CIPZCC,
+               },
+       [15] = {
+               .mask = 0x00000002,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 6,
+               .pri_reg = QEIC_CIPZCC,
+               },
+       [20] = {
+               .mask = 0x10000000,
+               .mask_reg = QEIC_CRIMR,
+               .pri_code = 3,
+               .pri_reg = QEIC_CIPRTA,
+               },
+       [25] = {
+               .mask = 0x00800000,
+               .mask_reg = QEIC_CRIMR,
+               .pri_code = 0,
+               .pri_reg = QEIC_CIPRTB,
+               },
+       [26] = {
+               .mask = 0x00400000,
+               .mask_reg = QEIC_CRIMR,
+               .pri_code = 1,
+               .pri_reg = QEIC_CIPRTB,
+               },
+       [27] = {
+               .mask = 0x00200000,
+               .mask_reg = QEIC_CRIMR,
+               .pri_code = 2,
+               .pri_reg = QEIC_CIPRTB,
+               },
+       [28] = {
+               .mask = 0x00100000,
+               .mask_reg = QEIC_CRIMR,
+               .pri_code = 3,
+               .pri_reg = QEIC_CIPRTB,
+               },
+       [32] = {
+               .mask = 0x80000000,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 0,
+               .pri_reg = QEIC_CIPXCC,
+               },
+       [33] = {
+               .mask = 0x40000000,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 1,
+               .pri_reg = QEIC_CIPXCC,
+               },
+       [34] = {
+               .mask = 0x20000000,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 2,
+               .pri_reg = QEIC_CIPXCC,
+               },
+       [35] = {
+               .mask = 0x10000000,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 3,
+               .pri_reg = QEIC_CIPXCC,
+               },
+       [36] = {
+               .mask = 0x08000000,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 4,
+               .pri_reg = QEIC_CIPXCC,
+               },
+       [40] = {
+               .mask = 0x00800000,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 0,
+               .pri_reg = QEIC_CIPYCC,
+               },
+       [41] = {
+               .mask = 0x00400000,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 1,
+               .pri_reg = QEIC_CIPYCC,
+               },
+       [42] = {
+               .mask = 0x00200000,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 2,
+               .pri_reg = QEIC_CIPYCC,
+               },
+       [43] = {
+               .mask = 0x00100000,
+               .mask_reg = QEIC_CIMR,
+               .pri_code = 3,
+               .pri_reg = QEIC_CIPYCC,
+               },
+};
+
+static inline u32 qe_ic_read(volatile __be32 __iomem *base, unsigned int reg)
+{
+       return in_be32(base + (reg >> 2));
+}
+
+static inline void qe_ic_write(volatile __be32 __iomem *base,
+                              unsigned int reg, u32 value)
+{
+       out_be32(base + (reg >> 2), value);
+}
+
+static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
+{
+       return irq_get_chip_data(virq);
+}
+
+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
+{
+       return irq_data_get_irq_chip_data(d);
+}
+
+static void qe_ic_unmask_irq(struct irq_data *d)
+{
+       struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+       unsigned int src = irqd_to_hwirq(d);
+       unsigned long flags;
+       u32 temp;
+
+       raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+       temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+       qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+                   temp | qe_ic_info[src].mask);
+
+       raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static void qe_ic_mask_irq(struct irq_data *d)
+{
+       struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+       unsigned int src = irqd_to_hwirq(d);
+       unsigned long flags;
+       u32 temp;
+
+       raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+       temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+       qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+                   temp & ~qe_ic_info[src].mask);
+
+       /* Flush the above write before enabling interrupts; otherwise,
+        * spurious interrupts will sometimes happen.  To be 100% sure
+        * that the write has reached the device before interrupts are
+        * enabled, the mask register would have to be read back; however,
+        * this is not required for correctness, only to avoid wasting
+        * time on a large number of spurious interrupts.  In testing,
+        * a sync reduced the observed spurious interrupts to zero.
+        */
+       mb();
+
+       raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static struct irq_chip qe_ic_irq_chip = {
+       .name = "QEIC",
+       .irq_unmask = qe_ic_unmask_irq,
+       .irq_mask = qe_ic_mask_irq,
+       .irq_mask_ack = qe_ic_mask_irq,
+};
+
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
+                           enum irq_domain_bus_token bus_token)
+{
+       /* Exact match, unless qe_ic node is NULL */
+       struct device_node *of_node = irq_domain_get_of_node(h);
+       return of_node == NULL || of_node == node;
+}
+
+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
+                         irq_hw_number_t hw)
+{
+       struct qe_ic *qe_ic = h->host_data;
+       struct irq_chip *chip;
+
+       if (qe_ic_info[hw].mask == 0) {
+               printk(KERN_ERR "Can't map reserved IRQ\n");
+               return -EINVAL;
+       }
+       /* Default chip */
+       chip = &qe_ic->hc_irq;
+
+       irq_set_chip_data(virq, qe_ic);
+       irq_set_status_flags(virq, IRQ_LEVEL);
+
+       irq_set_chip_and_handler(virq, chip, handle_level_irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops qe_ic_host_ops = {
+       .match = qe_ic_host_match,
+       .map = qe_ic_host_map,
+       .xlate = irq_domain_xlate_onetwocell,
+};
+
+/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+{
+       int irq;
+
+       BUG_ON(qe_ic == NULL);
+
+       /* get the interrupt source vector. */
+       irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
+
+       if (irq == 0)
+               return NO_IRQ;
+
+       return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+{
+       int irq;
+
+       BUG_ON(qe_ic == NULL);
+
+       /* get the interrupt source vector. */
+       irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
+
+       if (irq == 0)
+               return NO_IRQ;
+
+       return irq_linear_revmap(qe_ic->irqhost, irq);
+}
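These getters are meant to be called from the chained handlers that platform code passes to qe_ic_init(); a minimal cascade handler looks roughly like this (hypothetical name, kernel context assumed):

    static void example_qe_ic_cascade_low(struct irq_desc *desc)
    {
            struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
            unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);

            if (cascade_irq != NO_IRQ)
                    generic_handle_irq(cascade_irq);
    }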
+
+void __init qe_ic_init(struct device_node *node, unsigned int flags,
+                      void (*low_handler)(struct irq_desc *desc),
+                      void (*high_handler)(struct irq_desc *desc))
+{
+       struct qe_ic *qe_ic;
+       struct resource res;
+       u32 temp = 0, ret, high_active = 0;
+
+       ret = of_address_to_resource(node, 0, &res);
+       if (ret)
+               return;
+
+       qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+       if (qe_ic == NULL)
+               return;
+
+       qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+                                              &qe_ic_host_ops, qe_ic);
+       if (qe_ic->irqhost == NULL) {
+               kfree(qe_ic);
+               return;
+       }
+
+       qe_ic->regs = ioremap(res.start, resource_size(&res));
+
+       qe_ic->hc_irq = qe_ic_irq_chip;
+
+       qe_ic->virq_high = irq_of_parse_and_map(node, 0);
+       qe_ic->virq_low = irq_of_parse_and_map(node, 1);
+
+       if (qe_ic->virq_low == NO_IRQ) {
+               printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
+               kfree(qe_ic);
+               return;
+       }
+
+       /* Default priority scheme is grouped. If spread mode is
+        * required, configure cicr accordingly.
+        */
+       if (flags & QE_IC_SPREADMODE_GRP_W)
+               temp |= CICR_GWCC;
+       if (flags & QE_IC_SPREADMODE_GRP_X)
+               temp |= CICR_GXCC;
+       if (flags & QE_IC_SPREADMODE_GRP_Y)
+               temp |= CICR_GYCC;
+       if (flags & QE_IC_SPREADMODE_GRP_Z)
+               temp |= CICR_GZCC;
+       if (flags & QE_IC_SPREADMODE_GRP_RISCA)
+               temp |= CICR_GRTA;
+       if (flags & QE_IC_SPREADMODE_GRP_RISCB)
+               temp |= CICR_GRTB;
+
+       /* choose destination signal for highest priority interrupt */
+       if (flags & QE_IC_HIGH_SIGNAL) {
+               temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
+               high_active = 1;
+       }
+
+       qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+
+       irq_set_handler_data(qe_ic->virq_low, qe_ic);
+       irq_set_chained_handler(qe_ic->virq_low, low_handler);
+
+       if (qe_ic->virq_high != NO_IRQ &&
+                       qe_ic->virq_high != qe_ic->virq_low) {
+               irq_set_handler_data(qe_ic->virq_high, qe_ic);
+               irq_set_chained_handler(qe_ic->virq_high, high_handler);
+       }
+}
+
+void qe_ic_set_highest_priority(unsigned int virq, int high)
+{
+       struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+       unsigned int src = virq_to_hw(virq);
+       u32 temp = 0;
+
+       temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
+
+       temp &= ~CICR_HP_MASK;
+       temp |= src << CICR_HP_SHIFT;
+
+       temp &= ~CICR_HPIT_MASK;
+       temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
+
+       qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+}
+
+/* Set Priority level within its group, from 1 to 8 */
+int qe_ic_set_priority(unsigned int virq, unsigned int priority)
+{
+       struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+       unsigned int src = virq_to_hw(virq);
+       u32 temp;
+
+       if (priority > 8 || priority == 0)
+               return -EINVAL;
+       if (src > 127)
+               return -EINVAL;
+       if (qe_ic_info[src].pri_reg == 0)
+               return -EINVAL;
+
+       temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
+
+       if (priority < 4) {
+               temp &= ~(0x7 << (32 - priority * 3));
+               temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
+       } else {
+               temp &= ~(0x7 << (24 - priority * 3));
+               temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
+       }
+
+       qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
+
+       return 0;
+}
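A worked instance of the first branch above, for priority 1:

    /*
     *   shift = 32 - 1 * 3 = 29
     *
     * so the 3-bit priority code occupies bits 31:29 of the group
     * register, matching CIPCC_SHIFT_PRI0 in qe_ic.h.
     */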
+
+/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
+int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
+{
+       struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+       unsigned int src = virq_to_hw(virq);
+       u32 temp, control_reg = QEIC_CICNR, shift = 0;
+
+       if (priority > 2 || priority == 0)
+               return -EINVAL;
+
+       switch (qe_ic_info[src].pri_reg) {
+       case QEIC_CIPZCC:
+               shift = CICNR_ZCC1T_SHIFT;
+               break;
+       case QEIC_CIPWCC:
+               shift = CICNR_WCC1T_SHIFT;
+               break;
+       case QEIC_CIPYCC:
+               shift = CICNR_YCC1T_SHIFT;
+               break;
+       case QEIC_CIPXCC:
+               shift = CICNR_XCC1T_SHIFT;
+               break;
+       case QEIC_CIPRTA:
+               shift = CRICR_RTA1T_SHIFT;
+               control_reg = QEIC_CRICR;
+               break;
+       case QEIC_CIPRTB:
+               shift = CRICR_RTB1T_SHIFT;
+               control_reg = QEIC_CRICR;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       shift += (2 - priority) * 2;
+       temp = qe_ic_read(qe_ic->regs, control_reg);
+       temp &= ~(SIGNAL_MASK << shift);
+       temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
+       qe_ic_write(qe_ic->regs, control_reg, temp);
+
+       return 0;
+}
+
+static struct bus_type qe_ic_subsys = {
+       .name = "qe_ic",
+       .dev_name = "qe_ic",
+};
+
+static struct device device_qe_ic = {
+       .id = 0,
+       .bus = &qe_ic_subsys,
+};
+
+static int __init init_qe_ic_sysfs(void)
+{
+       int rc;
+
+       printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
+
+       rc = subsys_system_register(&qe_ic_subsys, NULL);
+       if (rc) {
+               printk(KERN_ERR "Failed registering qe_ic sys class\n");
+               return -ENODEV;
+       }
+       rc = device_register(&device_qe_ic);
+       if (rc) {
+               printk(KERN_ERR "Failed registering qe_ic sys device\n");
+               return -ENODEV;
+       }
+       return 0;
+}
+
+subsys_initcall(init_qe_ic_sysfs);
diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h
new file mode 100644 (file)
index 0000000..926a2ed
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * drivers/soc/fsl/qe/qe_ic.h
+ *
+ * QUICC ENGINE Interrupt Controller Header
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef _POWERPC_SYSDEV_QE_IC_H
+#define _POWERPC_SYSDEV_QE_IC_H
+
+#include <soc/fsl/qe/qe_ic.h>
+
+#define NR_QE_IC_INTS          64
+
+/* QE IC registers offset */
+#define QEIC_CICR              0x00
+#define QEIC_CIVEC             0x04
+#define QEIC_CRIPNR            0x08
+#define QEIC_CIPNR             0x0c
+#define QEIC_CIPXCC            0x10
+#define QEIC_CIPYCC            0x14
+#define QEIC_CIPWCC            0x18
+#define QEIC_CIPZCC            0x1c
+#define QEIC_CIMR              0x20
+#define QEIC_CRIMR             0x24
+#define QEIC_CICNR             0x28
+#define QEIC_CIPRTA            0x30
+#define QEIC_CIPRTB            0x34
+#define QEIC_CRICR             0x3c
+#define QEIC_CHIVEC            0x60
+
+/* Interrupt priority registers */
+#define CIPCC_SHIFT_PRI0       29
+#define CIPCC_SHIFT_PRI1       26
+#define CIPCC_SHIFT_PRI2       23
+#define CIPCC_SHIFT_PRI3       20
+#define CIPCC_SHIFT_PRI4       13
+#define CIPCC_SHIFT_PRI5       10
+#define CIPCC_SHIFT_PRI6       7
+#define CIPCC_SHIFT_PRI7       4
+
+/* CICR priority modes */
+#define CICR_GWCC              0x00040000
+#define CICR_GXCC              0x00020000
+#define CICR_GYCC              0x00010000
+#define CICR_GZCC              0x00080000
+#define CICR_GRTA              0x00200000
+#define CICR_GRTB              0x00400000
+#define CICR_HPIT_SHIFT                8
+#define CICR_HPIT_MASK         0x00000300
+#define CICR_HP_SHIFT          24
+#define CICR_HP_MASK           0x3f000000
+
+/* CICNR */
+#define CICNR_WCC1T_SHIFT      20
+#define CICNR_ZCC1T_SHIFT      28
+#define CICNR_YCC1T_SHIFT      12
+#define CICNR_XCC1T_SHIFT      4
+
+/* CRICR */
+#define CRICR_RTA1T_SHIFT      20
+#define CRICR_RTB1T_SHIFT      28
+
+/* Signal indicator */
+#define SIGNAL_MASK            3
+#define SIGNAL_HIGH            2
+#define SIGNAL_LOW             0
+
+struct qe_ic {
+       /* Control registers offset */
+       volatile u32 __iomem *regs;
+
+       /* The remapper for this QEIC */
+       struct irq_domain *irqhost;
+
+       /* The "linux" controller struct */
+       struct irq_chip hc_irq;
+
+       /* VIRQ numbers of QE high/low irqs */
+       unsigned int virq_high;
+       unsigned int virq_low;
+};
+
+/*
+ * QE interrupt controller internal structure
+ */
+struct qe_ic_info {
+       u32     mask;     /* location of this source at the QIMR register. */
+       u32     mask_reg; /* Mask register offset */
+       u8      pri_code; /* for grouped interrupts sources - the interrupt
+                            code as appears at the group priority register */
+       u32     pri_reg;  /* Group priority register offset */
+};
+
+#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
new file mode 100644 (file)
index 0000000..7ae59ab
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ * drivers/soc/fsl/qe/qe_io.c
+ *
+ * QE Parallel I/O ports configuration routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <LeoLi@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
+#include <asm/prom.h>
+#include <sysdev/fsl_soc.h>
+
+#undef DEBUG
+
+static struct qe_pio_regs __iomem *par_io;
+static int num_par_io_ports = 0;
+
+int par_io_init(struct device_node *np)
+{
+       struct resource res;
+       int ret;
+       const u32 *num_ports;
+
+       /* Map Parallel I/O ports registers */
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret)
+               return ret;
+       par_io = ioremap(res.start, resource_size(&res));
+
+       num_ports = of_get_property(np, "num-ports", NULL);
+       if (num_ports)
+               num_par_io_ports = *num_ports;
+
+       return 0;
+}
+
+void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
+                        int open_drain, int assignment, int has_irq)
+{
+       u32 pin_mask1bit;
+       u32 pin_mask2bits;
+       u32 new_mask2bits;
+       u32 tmp_val;
+
+       /* calculate pin location for single and 2 bits information */
+       pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
+
+       /* Set open drain, if required */
+       tmp_val = in_be32(&par_io->cpodr);
+       if (open_drain)
+               out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
+       else
+               out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
+
+       /* define direction */
+       tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+               in_be32(&par_io->cpdir2) :
+               in_be32(&par_io->cpdir1);
+
+       /* get all bits mask for 2 bit per port */
+       pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
+                               (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+       /* Get the final mask we need for the right definition */
+       new_mask2bits = (u32) (dir << (QE_PIO_PINS -
+                               (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+       /* clear and set 2 bits mask */
+       if (pin > (QE_PIO_PINS / 2) - 1) {
+               out_be32(&par_io->cpdir2,
+                        ~pin_mask2bits & tmp_val);
+               tmp_val &= ~pin_mask2bits;
+               out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
+       } else {
+               out_be32(&par_io->cpdir1,
+                        ~pin_mask2bits & tmp_val);
+               tmp_val &= ~pin_mask2bits;
+               out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
+       }
+       /* define pin assignment */
+       tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+               in_be32(&par_io->cppar2) :
+               in_be32(&par_io->cppar1);
+
+       new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
+                       (pin % (QE_PIO_PINS / 2) + 1) * 2));
+       /* clear and set 2 bits mask */
+       if (pin > (QE_PIO_PINS / 2) - 1) {
+               out_be32(&par_io->cppar2,
+                        ~pin_mask2bits & tmp_val);
+               tmp_val &= ~pin_mask2bits;
+               out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
+       } else {
+               out_be32(&par_io->cppar1,
+                        ~pin_mask2bits & tmp_val);
+               tmp_val &= ~pin_mask2bits;
+               out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
+       }
+}
+EXPORT_SYMBOL(__par_io_config_pin);
+
+int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+                     int assignment, int has_irq)
+{
+       if (!par_io || port >= num_par_io_ports)
+               return -EINVAL;
+
+       __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
+                           has_irq);
+       return 0;
+}
+EXPORT_SYMBOL(par_io_config_pin);
+
+int par_io_data_set(u8 port, u8 pin, u8 val)
+{
+       u32 pin_mask, tmp_val;
+
+       if (port >= num_par_io_ports)
+               return -EINVAL;
+       if (pin >= QE_PIO_PINS)
+               return -EINVAL;
+       /* calculate pin location */
+       pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
+
+       tmp_val = in_be32(&par_io[port].cpdata);
+
+       if (val == 0)           /* clear */
+               out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
+       else                    /* set */
+               out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
+
+       return 0;
+}
+EXPORT_SYMBOL(par_io_data_set);
+
+int par_io_of_config(struct device_node *np)
+{
+       struct device_node *pio;
+       const phandle *ph;
+       int pio_map_len;
+       const unsigned int *pio_map;
+
+       if (par_io == NULL) {
+               printk(KERN_ERR "par_io not initialized\n");
+               return -1;
+       }
+
+       ph = of_get_property(np, "pio-handle", NULL);
+       if (ph == NULL) {
+               printk(KERN_ERR "pio-handle not available\n");
+               return -1;
+       }
+
+       pio = of_find_node_by_phandle(*ph);
+
+       pio_map = of_get_property(pio, "pio-map", &pio_map_len);
+       if (pio_map == NULL) {
+               printk(KERN_ERR "pio-map is not set!\n");
+               return -1;
+       }
+       pio_map_len /= sizeof(unsigned int);
+       if ((pio_map_len % 6) != 0) {
+               printk(KERN_ERR "pio-map format wrong!\n");
+               return -1;
+       }
+
+       while (pio_map_len > 0) {
+               par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
+                               (int) pio_map[2], (int) pio_map[3],
+                               (int) pio_map[4], (int) pio_map[5]);
+               pio_map += 6;
+               pio_map_len -= 6;
+       }
+       of_node_put(pio);
+       return 0;
+}
+EXPORT_SYMBOL(par_io_of_config);
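Each pio-map entry is six cells, consumed by the loop above as <port pin dir open_drain assignment has_irq>. The equivalent direct call for one entry looks like this sketch (all values illustrative; the dir and assignment encodings come from the QE reference manual, not this file):

    static int example_config_pin(void)
    {
            /* port A (0), pin 3, output (1), push-pull, dedicated
             * function 1, no interrupt */
            return par_io_config_pin(0, 3, 1, 0, 1, 0);
    }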
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
new file mode 100644 (file)
index 0000000..b59d335
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * drivers/soc/fsl/qe/ucc.c
+ *
+ * QE UCC API Set - UCC specific routines implementations.
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
+
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
+{
+       unsigned long flags;
+
+       if (ucc_num > UCC_MAX_NUM - 1)
+               return -EINVAL;
+
+       spin_lock_irqsave(&cmxgcr_lock, flags);
+       clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+               ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+       spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
+
+/* Configure the UCC to either Slow or Fast.
+ *
+ * A given UCC can be configured to support either "slow" devices (e.g. UART)
+ * or "fast" devices (e.g. Ethernet).
+ *
+ * 'ucc_num' is the UCC number, from 0 to 7.
+ *
+ * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
+ * must always be set to 1.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
+{
+       u8 __iomem *guemr;
+
+       /* The GUEMR register is at the same location for both slow and fast
+          devices, so we just use uccX.slow.guemr. */
+       switch (ucc_num) {
+       case 0: guemr = &qe_immr->ucc1.slow.guemr;
+               break;
+       case 1: guemr = &qe_immr->ucc2.slow.guemr;
+               break;
+       case 2: guemr = &qe_immr->ucc3.slow.guemr;
+               break;
+       case 3: guemr = &qe_immr->ucc4.slow.guemr;
+               break;
+       case 4: guemr = &qe_immr->ucc5.slow.guemr;
+               break;
+       case 5: guemr = &qe_immr->ucc6.slow.guemr;
+               break;
+       case 6: guemr = &qe_immr->ucc7.slow.guemr;
+               break;
+       case 7: guemr = &qe_immr->ucc8.slow.guemr;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+               UCC_GUEMR_SET_RESERVED3 | speed);
+
+       return 0;
+}
+
+static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
+       unsigned int *reg_num, unsigned int *shift)
+{
+       unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
+
+       *reg_num = cmx + 1;
+       *cmxucr = &qe_immr->qmx.cmxucr[cmx];
+       *shift = 16 - 8 * (ucc_num & 2);
+}
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
+{
+       __be32 __iomem *cmxucr;
+       unsigned int reg_num;
+       unsigned int shift;
+
+       /* check if the UCC number is in range. */
+       if (ucc_num > UCC_MAX_NUM - 1)
+               return -EINVAL;
+
+       get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+       if (set)
+               setbits32(cmxucr, mask << shift);
+       else
+               clrbits32(cmxucr, mask << shift);
+
+       return 0;
+}
+
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+       enum comm_dir mode)
+{
+       __be32 __iomem *cmxucr;
+       unsigned int reg_num;
+       unsigned int shift;
+       u32 clock_bits = 0;
+
+       /* check if the UCC number is in range. */
+       if (ucc_num > UCC_MAX_NUM - 1)
+               return -EINVAL;
+
+       /* The communications direction must be RX or TX */
+       if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
+               return -EINVAL;
+
+       get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+       switch (reg_num) {
+       case 1:
+               switch (clock) {
+               case QE_BRG1:   clock_bits = 1; break;
+               case QE_BRG2:   clock_bits = 2; break;
+               case QE_BRG7:   clock_bits = 3; break;
+               case QE_BRG8:   clock_bits = 4; break;
+               case QE_CLK9:   clock_bits = 5; break;
+               case QE_CLK10:  clock_bits = 6; break;
+               case QE_CLK11:  clock_bits = 7; break;
+               case QE_CLK12:  clock_bits = 8; break;
+               case QE_CLK15:  clock_bits = 9; break;
+               case QE_CLK16:  clock_bits = 10; break;
+               default: break;
+               }
+               break;
+       case 2:
+               switch (clock) {
+               case QE_BRG5:   clock_bits = 1; break;
+               case QE_BRG6:   clock_bits = 2; break;
+               case QE_BRG7:   clock_bits = 3; break;
+               case QE_BRG8:   clock_bits = 4; break;
+               case QE_CLK13:  clock_bits = 5; break;
+               case QE_CLK14:  clock_bits = 6; break;
+               case QE_CLK19:  clock_bits = 7; break;
+               case QE_CLK20:  clock_bits = 8; break;
+               case QE_CLK15:  clock_bits = 9; break;
+               case QE_CLK16:  clock_bits = 10; break;
+               default: break;
+               }
+               break;
+       case 3:
+               switch (clock) {
+               case QE_BRG9:   clock_bits = 1; break;
+               case QE_BRG10:  clock_bits = 2; break;
+               case QE_BRG15:  clock_bits = 3; break;
+               case QE_BRG16:  clock_bits = 4; break;
+               case QE_CLK3:   clock_bits = 5; break;
+               case QE_CLK4:   clock_bits = 6; break;
+               case QE_CLK17:  clock_bits = 7; break;
+               case QE_CLK18:  clock_bits = 8; break;
+               case QE_CLK7:   clock_bits = 9; break;
+               case QE_CLK8:   clock_bits = 10; break;
+               case QE_CLK16:  clock_bits = 11; break;
+               default: break;
+               }
+               break;
+       case 4:
+               switch (clock) {
+               case QE_BRG13:  clock_bits = 1; break;
+               case QE_BRG14:  clock_bits = 2; break;
+               case QE_BRG15:  clock_bits = 3; break;
+               case QE_BRG16:  clock_bits = 4; break;
+               case QE_CLK5:   clock_bits = 5; break;
+               case QE_CLK6:   clock_bits = 6; break;
+               case QE_CLK21:  clock_bits = 7; break;
+               case QE_CLK22:  clock_bits = 8; break;
+               case QE_CLK7:   clock_bits = 9; break;
+               case QE_CLK8:   clock_bits = 10; break;
+               case QE_CLK16:  clock_bits = 11; break;
+               default: break;
+               }
+               break;
+       default: break;
+       }
+
+       /* Check for invalid combination of clock and UCC number */
+       if (!clock_bits)
+               return -ENOENT;
+
+       if (mode == COMM_DIR_RX)
+               shift += 4;
+
+       clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+               clock_bits << shift);
+
+       return 0;
+}
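A sketch of one valid clock/UCC pairing from the tables above (hypothetical wrapper): UCC1 (ucc_num 0) resolves to cmxucr register 1, where QE_CLK9 encodes as clock_bits 5:

    static int example_route_clk9_to_ucc1_tx(void)
    {
            return ucc_set_qe_mux_rxtx(0, QE_CLK9, COMM_DIR_TX);
    }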
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
new file mode 100644 (file)
index 0000000..a768931
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Fast API Set - UCC Fast specific routines implementations.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+void ucc_fast_dump_regs(struct ucc_fast_private *uccf)
+{
+       printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
+       printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
+
+       printk(KERN_INFO "gumr  : addr=0x%p, val=0x%08x\n",
+                 &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+       printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+                 &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+       printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+                 &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+       printk(KERN_INFO "udsr  : addr=0x%p, val=0x%04x\n",
+                 &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+       printk(KERN_INFO "ucce  : addr=0x%p, val=0x%08x\n",
+                 &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+       printk(KERN_INFO "uccm  : addr=0x%p, val=0x%08x\n",
+                 &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+       printk(KERN_INFO "uccs  : addr=0x%p, val=0x%02x\n",
+                 &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
+       printk(KERN_INFO "urfb  : addr=0x%p, val=0x%08x\n",
+                 &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+       printk(KERN_INFO "urfs  : addr=0x%p, val=0x%04x\n",
+                 &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+       printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+                 &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+       printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+                 &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+       printk(KERN_INFO "utfb  : addr=0x%p, val=0x%08x\n",
+                 &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+       printk(KERN_INFO "utfs  : addr=0x%p, val=0x%04x\n",
+                 &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+       printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+                 &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+       printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+                 &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+       printk(KERN_INFO "utpt  : addr=0x%p, val=0x%04x\n",
+                 &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+       printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+                 &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+       printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+                 &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
+}
+EXPORT_SYMBOL(ucc_fast_dump_regs);
+
+u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
+{
+       switch (uccf_num) {
+       case 0: return QE_CR_SUBBLOCK_UCCFAST1;
+       case 1: return QE_CR_SUBBLOCK_UCCFAST2;
+       case 2: return QE_CR_SUBBLOCK_UCCFAST3;
+       case 3: return QE_CR_SUBBLOCK_UCCFAST4;
+       case 4: return QE_CR_SUBBLOCK_UCCFAST5;
+       case 5: return QE_CR_SUBBLOCK_UCCFAST6;
+       case 6: return QE_CR_SUBBLOCK_UCCFAST7;
+       case 7: return QE_CR_SUBBLOCK_UCCFAST8;
+       default: return QE_CR_SUBBLOCK_INVALID;
+       }
+}
+EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
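
[Reviewer note] A sketch of how this lookup is typically consumed; the wrapper name is hypothetical, but qe_issue_cmd() is used exactly this way by ucc_slow_graceful_stop_tx() later in this patch:

    /* Hypothetical helper: gracefully stop transmission on a fast UCC. */
    static int example_fast_graceful_stop_tx(int ucc_num)
    {
    	u32 id = ucc_fast_get_qe_cr_subblock(ucc_num);

    	if (id == QE_CR_SUBBLOCK_INVALID)
    		return -EINVAL;

    	return qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
    			    QE_CR_PROTOCOL_UNSPECIFIED, 0);
    }
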
+
+void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
+{
+       out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+}
+EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
+
+void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
+{
+       struct ucc_fast __iomem *uf_regs;
+       u32 gumr;
+
+       uf_regs = uccf->uf_regs;
+
+       /* Enable reception and/or transmission on this UCC. */
+       gumr = in_be32(&uf_regs->gumr);
+       if (mode & COMM_DIR_TX) {
+               gumr |= UCC_FAST_GUMR_ENT;
+               uccf->enabled_tx = 1;
+       }
+       if (mode & COMM_DIR_RX) {
+               gumr |= UCC_FAST_GUMR_ENR;
+               uccf->enabled_rx = 1;
+       }
+       out_be32(&uf_regs->gumr, gumr);
+}
+EXPORT_SYMBOL(ucc_fast_enable);
+
+void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
+{
+       struct ucc_fast __iomem *uf_regs;
+       u32 gumr;
+
+       uf_regs = uccf->uf_regs;
+
+       /* Disable reception and/or transmission on this UCC. */
+       gumr = in_be32(&uf_regs->gumr);
+       if (mode & COMM_DIR_TX) {
+               gumr &= ~UCC_FAST_GUMR_ENT;
+               uccf->enabled_tx = 0;
+       }
+       if (mode & COMM_DIR_RX) {
+               gumr &= ~UCC_FAST_GUMR_ENR;
+               uccf->enabled_rx = 0;
+       }
+       out_be32(&uf_regs->gumr, gumr);
+}
+EXPORT_SYMBOL(ucc_fast_disable);
+
+int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
+{
+       struct ucc_fast_private *uccf;
+       struct ucc_fast __iomem *uf_regs;
+       u32 gumr;
+       int ret;
+
+       if (!uf_info)
+               return -EINVAL;
+
+       /* check if the UCC port number is in range. */
+       if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
+               printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Check that 'max_rx_buf_length' is properly aligned (4). */
+       if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
+               printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       /* Validate Virtual Fifo register values */
+       if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
+               printk(KERN_ERR "%s: urfs is too small\n", __func__);
+               return -EINVAL;
+       }
+
+       if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+               printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
+               return -EINVAL;
+       }
+
+       if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+               printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
+               return -EINVAL;
+       }
+
+       if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+               printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
+               return -EINVAL;
+       }
+
+       if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+               printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
+               return -EINVAL;
+       }
+
+       if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+               printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
+               return -EINVAL;
+       }
+
+       if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+               printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
+               return -EINVAL;
+       }
+
+       uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
+       if (!uccf) {
+               printk(KERN_ERR "%s: Cannot allocate private data\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       /* Fill fast UCC structure */
+       uccf->uf_info = uf_info;
+       /* Map the UCC register block at its physical base address */
+       uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
+       if (uccf->uf_regs == NULL) {
+               printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+               kfree(uccf);
+               return -ENOMEM;
+       }
+
+       uccf->enabled_tx = 0;
+       uccf->enabled_rx = 0;
+       uccf->stopped_tx = 0;
+       uccf->stopped_rx = 0;
+       uf_regs = uccf->uf_regs;
+       uccf->p_ucce = &uf_regs->ucce;
+       uccf->p_uccm = &uf_regs->uccm;
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+       uccf->p_utodr = &uf_regs->utodr;
+#endif
+#ifdef STATISTICS
+       uccf->tx_frames = 0;
+       uccf->rx_frames = 0;
+       uccf->rx_discarded = 0;
+#endif                         /* STATISTICS */
+
+       /* Set UCC to fast type */
+       ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
+       if (ret) {
+               printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
+               ucc_fast_free(uccf);
+               return ret;
+       }
+
+       uccf->mrblr = uf_info->max_rx_buf_length;
+
+       /* Set GUMR */
+       /* For more details see the hardware spec. */
+       gumr = uf_info->ttx_trx;
+       if (uf_info->tci)
+               gumr |= UCC_FAST_GUMR_TCI;
+       if (uf_info->cdp)
+               gumr |= UCC_FAST_GUMR_CDP;
+       if (uf_info->ctsp)
+               gumr |= UCC_FAST_GUMR_CTSP;
+       if (uf_info->cds)
+               gumr |= UCC_FAST_GUMR_CDS;
+       if (uf_info->ctss)
+               gumr |= UCC_FAST_GUMR_CTSS;
+       if (uf_info->txsy)
+               gumr |= UCC_FAST_GUMR_TXSY;
+       if (uf_info->rsyn)
+               gumr |= UCC_FAST_GUMR_RSYN;
+       gumr |= uf_info->synl;
+       if (uf_info->rtsm)
+               gumr |= UCC_FAST_GUMR_RTSM;
+       gumr |= uf_info->renc;
+       if (uf_info->revd)
+               gumr |= UCC_FAST_GUMR_REVD;
+       gumr |= uf_info->tenc;
+       gumr |= uf_info->tcrc;
+       gumr |= uf_info->mode;
+       out_be32(&uf_regs->gumr, gumr);
+
+       /* Allocate memory for Tx Virtual Fifo */
+       uccf->ucc_fast_tx_virtual_fifo_base_offset =
+           qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+       if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+               printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+                       __func__);
+               uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
+               ucc_fast_free(uccf);
+               return -ENOMEM;
+       }
+
+       /* Allocate memory for Rx Virtual Fifo */
+       uccf->ucc_fast_rx_virtual_fifo_base_offset =
+               qe_muram_alloc(uf_info->urfs +
+                          UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
+                          UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+       if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+               printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+                       __func__);
+               uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
+               ucc_fast_free(uccf);
+               return -ENOMEM;
+       }
+
+       /* Set Virtual Fifo registers */
+       out_be16(&uf_regs->urfs, uf_info->urfs);
+       out_be16(&uf_regs->urfet, uf_info->urfet);
+       out_be16(&uf_regs->urfset, uf_info->urfset);
+       out_be16(&uf_regs->utfs, uf_info->utfs);
+       out_be16(&uf_regs->utfet, uf_info->utfet);
+       out_be16(&uf_regs->utftt, uf_info->utftt);
+       /* utfb, urfb are offsets from MURAM base */
+       out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
+       out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+       /* Mux clocking */
+       /* Grant Support */
+       ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
+       /* Breakpoint Support */
+       ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
+       /* Set Tsa or NMSI mode. */
+       ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
+       /* If NMSI (not Tsa), set Tx and Rx clock. */
+       if (!uf_info->tsa) {
+               /* Rx clock routing */
+               if ((uf_info->rx_clock != QE_CLK_NONE) &&
+                   ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
+                                       COMM_DIR_RX)) {
+                       printk(KERN_ERR "%s: illegal value for RX clock\n",
+                              __func__);
+                       ucc_fast_free(uccf);
+                       return -EINVAL;
+               }
+               /* Tx clock routing */
+               if ((uf_info->tx_clock != QE_CLK_NONE) &&
+                   ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
+                                       COMM_DIR_TX)) {
+                       printk(KERN_ERR "%s: illegal value for TX clock\n",
+                              __func__);
+                       ucc_fast_free(uccf);
+                       return -EINVAL;
+               }
+       }
+
+       /* Set interrupt mask register at UCC level. */
+       out_be32(&uf_regs->uccm, uf_info->uccm_mask);
+
+       /* First, clear anything pending at UCC level,
+        * otherwise, old garbage may come through
+        * as soon as the dam is opened. */
+
+       /* Writing '1' clears */
+       out_be32(&uf_regs->ucce, 0xffffffff);
+
+       *uccf_ret = uccf;
+       return 0;
+}
+EXPORT_SYMBOL(ucc_fast_init);
+
+void ucc_fast_free(struct ucc_fast_private * uccf)
+{
+       if (!uccf)
+               return;
+
+       if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
+               qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+
+       if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
+               qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+       if (uccf->uf_regs)
+               iounmap(uccf->uf_regs);
+
+       kfree(uccf);
+}
+EXPORT_SYMBOL(ucc_fast_free);
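
[Reviewer note] End to end, the intended lifecycle is init -> enable -> traffic -> disable -> free. A minimal sketch, assuming a fully populated ucc_fast_info that already satisfies the alignment and URFS-minimum checks in ucc_fast_init():

    /* Hypothetical bring-up/teardown of a fast UCC. */
    static struct ucc_fast_private *
    example_fast_bringup(struct ucc_fast_info *uf_info)
    {
    	struct ucc_fast_private *uccf;

    	if (ucc_fast_init(uf_info, &uccf))
    		return NULL;	/* init logs the specific failure */

    	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);
    	return uccf;
    }

    static void example_fast_teardown(struct ucc_fast_private *uccf)
    {
    	ucc_fast_disable(uccf, COMM_DIR_RX | COMM_DIR_TX);
    	ucc_fast_free(uccf);
    }
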
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
new file mode 100644 (file)
index 0000000..9334bdb
--- /dev/null
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Slow API Set - UCC Slow specific routines implementations.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_slow.h>
+
+u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
+{
+       switch (uccs_num) {
+       case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
+       case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
+       case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
+       case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
+       case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
+       case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
+       case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
+       case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
+       default: return QE_CR_SUBBLOCK_INVALID;
+       }
+}
+EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
+
+void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
+{
+       struct ucc_slow_info *us_info = uccs->us_info;
+       u32 id;
+
+       id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+       qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
+                        QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
+
+void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
+{
+       struct ucc_slow_info *us_info = uccs->us_info;
+       u32 id;
+
+       id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+       qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_stop_tx);
+
+void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
+{
+       struct ucc_slow_info *us_info = uccs->us_info;
+       u32 id;
+
+       id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+       qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_restart_tx);
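
[Reviewer note] Taken together, these helpers give the usual pause/resume sequence for a slow UCC transmitter. A minimal sketch; the driver-specific reconfiguration step is a placeholder:

    /* Hypothetical pause/resume of a slow UCC transmitter. */
    static void example_pause_resume_tx(struct ucc_slow_private *uccs)
    {
    	/* Let in-flight frames drain before stopping. */
    	ucc_slow_graceful_stop_tx(uccs);

    	/* ... driver-specific reconfiguration would go here ... */

    	ucc_slow_restart_tx(uccs);
    }
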
+
+void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
+{
+       struct ucc_slow *us_regs;
+       u32 gumr_l;
+
+       us_regs = uccs->us_regs;
+
+       /* Enable reception and/or transmission on this UCC. */
+       gumr_l = in_be32(&us_regs->gumr_l);
+       if (mode & COMM_DIR_TX) {
+               gumr_l |= UCC_SLOW_GUMR_L_ENT;
+               uccs->enabled_tx = 1;
+       }
+       if (mode & COMM_DIR_RX) {
+               gumr_l |= UCC_SLOW_GUMR_L_ENR;
+               uccs->enabled_rx = 1;
+       }
+       out_be32(&us_regs->gumr_l, gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_enable);
+
+void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
+{
+       struct ucc_slow *us_regs;
+       u32 gumr_l;
+
+       us_regs = uccs->us_regs;
+
+       /* Disable reception and/or transmission on this UCC. */
+       gumr_l = in_be32(&us_regs->gumr_l);
+       if (mode & COMM_DIR_TX) {
+               gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
+               uccs->enabled_tx = 0;
+       }
+       if (mode & COMM_DIR_RX) {
+               gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
+               uccs->enabled_rx = 0;
+       }
+       out_be32(&us_regs->gumr_l, gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_disable);
+
+/* Initialize the UCC for Slow operations
+ *
+ * The caller must have initialized all of the required us_info fields
+ * before calling this function.
+ */
+int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
+{
+       struct ucc_slow_private *uccs;
+       u32 i;
+       struct ucc_slow __iomem *us_regs;
+       u32 gumr;
+       struct qe_bd *bd;
+       u32 id;
+       u32 command;
+       int ret = 0;
+
+       if (!us_info)
+               return -EINVAL;
+
+       /* check if the UCC port number is in range. */
+       if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
+               printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+               return -EINVAL;
+       }
+
+       /*
+        * Set mrblr
+        * Check that 'max_rx_buf_length' is properly aligned (4), unless
+        * rfw is 1, meaning that the QE accepts one byte at a time, unlike
+        * the normal case when the QE accepts 32 bits at a time.
+        */
+       if ((!us_info->rfw) &&
+               (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
+               printk(KERN_ERR "max_rx_buf_length not aligned.\n");
+               return -EINVAL;
+       }
+
+       uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
+       if (!uccs) {
+               printk(KERN_ERR "%s: Cannot allocate private data\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       /* Fill slow UCC structure */
+       uccs->us_info = us_info;
+       /* Map the UCC register block at its physical base address */
+       uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
+       if (uccs->us_regs == NULL) {
+               printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+               kfree(uccs);
+               return -ENOMEM;
+       }
+
+       uccs->saved_uccm = 0;
+       uccs->p_rx_frame = 0;
+       us_regs = uccs->us_regs;
+       uccs->p_ucce = (u16 *) & (us_regs->ucce);
+       uccs->p_uccm = (u16 *) & (us_regs->uccm);
+#ifdef STATISTICS
+       uccs->rx_frames = 0;
+       uccs->tx_frames = 0;
+       uccs->rx_discarded = 0;
+#endif                         /* STATISTICS */
+
+       /* Get PRAM base */
+       uccs->us_pram_offset =
+               qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
+       if (IS_ERR_VALUE(uccs->us_pram_offset)) {
+               printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
+               ucc_slow_free(uccs);
+               return -ENOMEM;
+       }
+       id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+       qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
+                    uccs->us_pram_offset);
+
+       uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
+
+       /* Set UCC to slow type */
+       ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
+       if (ret) {
+               printk(KERN_ERR "%s: cannot set UCC type", __func__);
+               ucc_slow_free(uccs);
+               return ret;
+       }
+
+       out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
+
+       INIT_LIST_HEAD(&uccs->confQ);
+
+       /* Allocate BDs. */
+       uccs->rx_base_offset =
+               qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
+                               QE_ALIGNMENT_OF_BD);
+       if (IS_ERR_VALUE(uccs->rx_base_offset)) {
+               printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
+                       us_info->rx_bd_ring_len);
+               uccs->rx_base_offset = 0;
+               ucc_slow_free(uccs);
+               return -ENOMEM;
+       }
+
+       uccs->tx_base_offset =
+               qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
+                       QE_ALIGNMENT_OF_BD);
+       if (IS_ERR_VALUE(uccs->tx_base_offset)) {
+               printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
+               uccs->tx_base_offset = 0;
+               ucc_slow_free(uccs);
+               return -ENOMEM;
+       }
+
+       /* Init Tx bds */
+       bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
+       for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
+               /* clear bd buffer */
+               out_be32(&bd->buf, 0);
+               /* set bd status and length */
+               out_be32((u32 *) bd, 0);
+               bd++;
+       }
+       /* for last BD set Wrap bit */
+       out_be32(&bd->buf, 0);
+       out_be32((u32 *) bd, cpu_to_be32(T_W));
+
+       /* Init Rx bds */
+       bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
+       for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
+               /* set bd status and length */
+               out_be32((u32*)bd, 0);
+               /* clear bd buffer */
+               out_be32(&bd->buf, 0);
+               bd++;
+       }
+       /* for last BD set Wrap bit */
+       out_be32((u32*)bd, cpu_to_be32(R_W));
+       out_be32(&bd->buf, 0);
+
+       /* Set GUMR (For more details see the hardware spec.). */
+       /* gumr_h */
+       gumr = us_info->tcrc;
+       if (us_info->cdp)
+               gumr |= UCC_SLOW_GUMR_H_CDP;
+       if (us_info->ctsp)
+               gumr |= UCC_SLOW_GUMR_H_CTSP;
+       if (us_info->cds)
+               gumr |= UCC_SLOW_GUMR_H_CDS;
+       if (us_info->ctss)
+               gumr |= UCC_SLOW_GUMR_H_CTSS;
+       if (us_info->tfl)
+               gumr |= UCC_SLOW_GUMR_H_TFL;
+       if (us_info->rfw)
+               gumr |= UCC_SLOW_GUMR_H_RFW;
+       if (us_info->txsy)
+               gumr |= UCC_SLOW_GUMR_H_TXSY;
+       if (us_info->rtsm)
+               gumr |= UCC_SLOW_GUMR_H_RTSM;
+       out_be32(&us_regs->gumr_h, gumr);
+
+       /* gumr_l */
+       gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
+               us_info->diag | us_info->mode;
+       if (us_info->tci)
+               gumr |= UCC_SLOW_GUMR_L_TCI;
+       if (us_info->rinv)
+               gumr |= UCC_SLOW_GUMR_L_RINV;
+       if (us_info->tinv)
+               gumr |= UCC_SLOW_GUMR_L_TINV;
+       if (us_info->tend)
+               gumr |= UCC_SLOW_GUMR_L_TEND;
+       out_be32(&us_regs->gumr_l, gumr);
+
+       /* Function code registers */
+
+       /* if the data is in cachable memory, the 'global' */
+       /* in the function code should be set. */
+       uccs->us_pram->tbmr = UCC_BMR_BO_BE;
+       uccs->us_pram->rbmr = UCC_BMR_BO_BE;
+
+       /* rbase, tbase are offsets from MURAM base */
+       out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
+       out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
+
+       /* Mux clocking */
+       /* Grant Support */
+       ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
+       /* Breakpoint Support */
+       ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
+       /* Set Tsa or NMSI mode. */
+       ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
+       /* If NMSI (not Tsa), set Tx and Rx clock. */
+       if (!us_info->tsa) {
+               /* Rx clock routing */
+               if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
+                                       COMM_DIR_RX)) {
+                       printk(KERN_ERR "%s: illegal value for RX clock\n",
+                              __func__);
+                       ucc_slow_free(uccs);
+                       return -EINVAL;
+               }
+               /* Tx clock routing */
+               if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
+                                       COMM_DIR_TX)) {
+                       printk(KERN_ERR "%s: illegal value for TX clock\n",
+                              __func__);
+                       ucc_slow_free(uccs);
+                       return -EINVAL;
+               }
+       }
+
+       /* Set interrupt mask register at UCC level. */
+       out_be16(&us_regs->uccm, us_info->uccm_mask);
+
+       /* First, clear anything pending at UCC level,
+        * otherwise, old garbage may come through
+        * as soon as the dam is opened. */
+
+       /* Writing '1' clears */
+       out_be16(&us_regs->ucce, 0xffff);
+
+       /* Issue QE Init command */
+       if (us_info->init_tx && us_info->init_rx)
+               command = QE_INIT_TX_RX;
+       else if (us_info->init_tx)
+               command = QE_INIT_TX;
+       else
+               command = QE_INIT_RX;   /* We know at least one is TRUE */
+
+       qe_issue_cmd(command, id, us_info->protocol, 0);
+
+       *uccs_ret = uccs;
+       return 0;
+}
+EXPORT_SYMBOL(ucc_slow_init);
+
+void ucc_slow_free(struct ucc_slow_private * uccs)
+{
+       if (!uccs)
+               return;
+
+       if (uccs->rx_base_offset)
+               qe_muram_free(uccs->rx_base_offset);
+
+       if (uccs->tx_base_offset)
+               qe_muram_free(uccs->tx_base_offset);
+
+       if (uccs->us_pram)
+               qe_muram_free(uccs->us_pram_offset);
+
+       if (uccs->us_regs)
+               iounmap(uccs->us_regs);
+
+       kfree(uccs);
+}
+EXPORT_SYMBOL(ucc_slow_free);
+
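
[Reviewer note] Unlike the fast variant, ucc_slow_init() also allocates the BD rings and issues the QE Init command itself, so a caller only has to enable the port afterwards. A minimal sketch, assuming us_info already describes the port (ucc_num, regs, clocks, BD ring lengths, mode bits):

    /* Hypothetical bring-up of a slow UCC. */
    static struct ucc_slow_private *
    example_slow_bringup(struct ucc_slow_info *us_info)
    {
    	struct ucc_slow_private *uccs;
    	int ret;

    	ret = ucc_slow_init(us_info, &uccs);
    	if (ret) {
    		pr_err("ucc_slow_init failed: %d\n", ret);
    		return NULL;
    	}

    	ucc_slow_enable(uccs, COMM_DIR_RX | COMM_DIR_TX);
    	return uccs;	/* ucc_slow_free() undoes all of the above */
    }
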
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
new file mode 100644 (file)
index 0000000..111f7ab
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * QE USB routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+int qe_usb_clock_set(enum qe_clock clk, int rate)
+{
+       struct qe_mux __iomem *mux = &qe_immr->qmx;
+       unsigned long flags;
+       u32 val;
+
+       switch (clk) {
+       case QE_CLK3:  val = QE_CMXGCR_USBCS_CLK3;  break;
+       case QE_CLK5:  val = QE_CMXGCR_USBCS_CLK5;  break;
+       case QE_CLK7:  val = QE_CMXGCR_USBCS_CLK7;  break;
+       case QE_CLK9:  val = QE_CMXGCR_USBCS_CLK9;  break;
+       case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break;
+       case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break;
+       case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break;
+       case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break;
+       case QE_BRG9:  val = QE_CMXGCR_USBCS_BRG9;  break;
+       case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break;
+       default:
+               pr_err("%s: requested unknown clock %d\n", __func__, clk);
+               return -EINVAL;
+       }
+
+       if (qe_clock_is_brg(clk))
+               qe_setbrg(clk, rate, 1);
+
+       spin_lock_irqsave(&cmxgcr_lock, flags);
+
+       clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+
+       spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(qe_usb_clock_set);
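
[Reviewer note] A sketch of a board-level caller; the rate argument only matters for BRG sources (CLKx inputs are externally driven), and 48000000 here is assumed to match the USB_CLOCK constant used by the FHCI host glue elsewhere in this series:

    /* Hypothetical board hook: clock the QE USB controller from BRG9. */
    static int example_board_usb_clock(void)
    {
    	return qe_usb_clock_set(QE_BRG9, 48000000);
    }
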
index 896add8..8f7b26e 100644 (file)
@@ -16,7 +16,7 @@
  * option) any later version.
  */
 #include <asm/cpm.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <linux/dma-mapping.h>
 #include <linux/fsl_devices.h>
 #include <linux/kernel.h>
index 73190f5..1a7dc3c 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/dma-mapping.h>
 
 #include <linux/fs_uart_pd.h>
-#include <asm/ucc_slow.h>
+#include <soc/fsl/qe/ucc_slow.h>
 
 #include <linux/firmware.h>
 #include <asm/reg.h>
index 5fb6f8b..53c0692 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 #include <linux/usb/otg.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <asm/cpm.h>
 #include <asm/dma.h>
 #include <asm/reg.h>
index c6cebb9..0960f41 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 #include <linux/slab.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <asm/fsl_gtm.h>
 #include "fhci.h"
 
index 3bacdd7..60d55eb 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
 #include <linux/gpio.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include "fhci.h"
 
 /* virtual root hub specific descriptor */
index 95ca598..a9609a3 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/io.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <asm/fsl_gtm.h>
 #include "fhci.h"
 
index 154e6a0..3fc82c1 100644 (file)
@@ -27,8 +27,8 @@
 #include <linux/io.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
-#include <asm/qe.h>
-#include <asm/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
 
 #define USB_CLOCK      48000000
 
index 7ff168d..29d4385 100644 (file)
 #ifndef __GENALLOC_H__
 #define __GENALLOC_H__
 
+#include <linux/types.h>
 #include <linux/spinlock_types.h>
 
 struct device;
 struct device_node;
+struct gen_pool;
 
 /**
  * Allocation callback function type definition
@@ -47,7 +49,7 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map,
                        unsigned long size,
                        unsigned long start,
                        unsigned int nr,
-                       void *data);
+                       void *data, struct gen_pool *pool);
 
 /*
  *  General purpose special memory pool descriptor.
@@ -75,6 +77,20 @@ struct gen_pool_chunk {
        unsigned long bits[0];          /* bitmap for allocating memory chunk */
 };
 
+/*
+ *  gen_pool data descriptor for gen_pool_first_fit_align.
+ */
+struct genpool_data_align {
+       int align;              /* alignment by bytes for starting address */
+};
+
+/*
+ *  gen_pool data descriptor for gen_pool_fixed_alloc.
+ */
+struct genpool_data_fixed {
+       unsigned long offset;           /* The offset of the specific region */
+};
+
 extern struct gen_pool *gen_pool_create(int, int);
 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
 extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
@@ -98,6 +114,8 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
 }
 extern void gen_pool_destroy(struct gen_pool *);
 extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t,
+               genpool_algo_t algo, void *data);
 extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
                dma_addr_t *dma);
 extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
@@ -110,14 +128,26 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
                void *data);
 
 extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
-               unsigned long start, unsigned int nr, void *data);
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool);
+
+extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
+               unsigned long size, unsigned long start, unsigned int nr,
+               void *data, struct gen_pool *pool);
+
+extern unsigned long gen_pool_first_fit_align(unsigned long *map,
+               unsigned long size, unsigned long start, unsigned int nr,
+               void *data, struct gen_pool *pool);
+
 
 extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start, unsigned int nr,
-               void *data);
+               void *data, struct gen_pool *pool);
 
 extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
-               unsigned long start, unsigned int nr, void *data);
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool);
+
 
 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
                int min_alloc_order, int nid, const char *name);
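
[Reviewer note] The new hooks are easiest to see through gen_pool_alloc_algo(): the caller picks an algorithm per call and passes its descriptor via the data pointer. A minimal sketch, assuming a pool already populated with gen_pool_add(); the size and alignment values are placeholders:

    #include <linux/genalloc.h>

    /* Hypothetical: carve a 512-byte, 64-byte-aligned region out of a pool. */
    static unsigned long example_aligned_alloc(struct gen_pool *pool)
    {
    	struct genpool_data_align align_data = { .align = 64 };

    	return gen_pool_alloc_algo(pool, 512,
    				   gen_pool_first_fit_align, &align_data);
    }
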
diff --git a/include/soc/fsl/qe/immap_qe.h b/include/soc/fsl/qe/immap_qe.h
new file mode 100644 (file)
index 0000000..bedbff8
--- /dev/null
@@ -0,0 +1,491 @@
+/*
+ * QUICC Engine (QE) Internal Memory Map.
+ * The Internal Memory Map for devices with QE on them. This
+ * is the superset of all QE devices (8360, etc.).
+ *
+ * Copyright (C) 2006. Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_IMMAP_QE_H
+#define _ASM_POWERPC_IMMAP_QE_H
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <asm/io.h>
+
+#define QE_IMMAP_SIZE  (1024 * 1024)   /* 1MB from 1MB+IMMR */
+
+/* QE I-RAM */
+struct qe_iram {
+       __be32  iadd;           /* I-RAM Address Register */
+       __be32  idata;          /* I-RAM Data Register */
+       u8      res0[0x04];
+       __be32  iready;         /* I-RAM Ready Register */
+       u8      res1[0x70];
+} __attribute__ ((packed));
+
+/* QE Interrupt Controller */
+struct qe_ic_regs {
+       __be32  qicr;
+       __be32  qivec;
+       __be32  qripnr;
+       __be32  qipnr;
+       __be32  qipxcc;
+       __be32  qipycc;
+       __be32  qipwcc;
+       __be32  qipzcc;
+       __be32  qimr;
+       __be32  qrimr;
+       __be32  qicnr;
+       u8      res0[0x4];
+       __be32  qiprta;
+       __be32  qiprtb;
+       u8      res1[0x4];
+       __be32  qricr;
+       u8      res2[0x20];
+       __be32  qhivec;
+       u8      res3[0x1C];
+} __attribute__ ((packed));
+
+/* Communications Processor */
+struct cp_qe {
+       __be32  cecr;           /* QE command register */
+       __be32  ceccr;          /* QE controller configuration register */
+       __be32  cecdr;          /* QE command data register */
+       u8      res0[0xA];
+       __be16  ceter;          /* QE timer event register */
+       u8      res1[0x2];
+       __be16  cetmr;          /* QE timers mask register */
+       __be32  cetscr;         /* QE time-stamp timer control register */
+       __be32  cetsr1;         /* QE time-stamp register 1 */
+       __be32  cetsr2;         /* QE time-stamp register 2 */
+       u8      res2[0x8];
+       __be32  cevter;         /* QE virtual tasks event register */
+       __be32  cevtmr;         /* QE virtual tasks mask register */
+       __be16  cercr;          /* QE RAM control register */
+       u8      res3[0x2];
+       u8      res4[0x24];
+       __be16  ceexe1;         /* QE external request 1 event register */
+       u8      res5[0x2];
+       __be16  ceexm1;         /* QE external request 1 mask register */
+       u8      res6[0x2];
+       __be16  ceexe2;         /* QE external request 2 event register */
+       u8      res7[0x2];
+       __be16  ceexm2;         /* QE external request 2 mask register */
+       u8      res8[0x2];
+       __be16  ceexe3;         /* QE external request 3 event register */
+       u8      res9[0x2];
+       __be16  ceexm3;         /* QE external request 3 mask register */
+       u8      res10[0x2];
+       __be16  ceexe4;         /* QE external request 4 event register */
+       u8      res11[0x2];
+       __be16  ceexm4;         /* QE external request 4 mask register */
+       u8      res12[0x3A];
+       __be32  ceurnr;         /* QE microcode revision number register */
+       u8      res13[0x244];
+} __attribute__ ((packed));
+
+/* QE Multiplexer */
+struct qe_mux {
+       __be32  cmxgcr;         /* CMX general clock route register */
+       __be32  cmxsi1cr_l;     /* CMX SI1 clock route low register */
+       __be32  cmxsi1cr_h;     /* CMX SI1 clock route high register */
+       __be32  cmxsi1syr;      /* CMX SI1 SYNC route register */
+       __be32  cmxucr[4];      /* CMX UCCx clock route registers */
+       __be32  cmxupcr;        /* CMX UPC clock route register */
+       u8      res0[0x1C];
+} __attribute__ ((packed));
+
+/* QE Timers */
+struct qe_timers {
+       u8      gtcfr1;         /* Timer 1 and Timer 2 global config register*/
+       u8      res0[0x3];
+       u8      gtcfr2;         /* Timer 3 and timer 4 global config register*/
+       u8      res1[0xB];
+       __be16  gtmdr1;         /* Timer 1 mode register */
+       __be16  gtmdr2;         /* Timer 2 mode register */
+       __be16  gtrfr1;         /* Timer 1 reference register */
+       __be16  gtrfr2;         /* Timer 2 reference register */
+       __be16  gtcpr1;         /* Timer 1 capture register */
+       __be16  gtcpr2;         /* Timer 2 capture register */
+       __be16  gtcnr1;         /* Timer 1 counter */
+       __be16  gtcnr2;         /* Timer 2 counter */
+       __be16  gtmdr3;         /* Timer 3 mode register */
+       __be16  gtmdr4;         /* Timer 4 mode register */
+       __be16  gtrfr3;         /* Timer 3 reference register */
+       __be16  gtrfr4;         /* Timer 4 reference register */
+       __be16  gtcpr3;         /* Timer 3 capture register */
+       __be16  gtcpr4;         /* Timer 4 capture register */
+       __be16  gtcnr3;         /* Timer 3 counter */
+       __be16  gtcnr4;         /* Timer 4 counter */
+       __be16  gtevr1;         /* Timer 1 event register */
+       __be16  gtevr2;         /* Timer 2 event register */
+       __be16  gtevr3;         /* Timer 3 event register */
+       __be16  gtevr4;         /* Timer 4 event register */
+       __be16  gtps;           /* Timer 1 prescale register */
+       u8 res2[0x46];
+} __attribute__ ((packed));
+
+/* BRG */
+struct qe_brg {
+       __be32  brgc[16];       /* BRG configuration registers */
+       u8      res0[0x40];
+} __attribute__ ((packed));
+
+/* SPI */
+struct spi {
+       u8      res0[0x20];
+       __be32  spmode;         /* SPI mode register */
+       u8      res1[0x2];
+       u8      spie;           /* SPI event register */
+       u8      res2[0x1];
+       u8      res3[0x2];
+       u8      spim;           /* SPI mask register */
+       u8      res4[0x1];
+       u8      res5[0x1];
+       u8      spcom;          /* SPI command register */
+       u8      res6[0x2];
+       __be32  spitd;          /* SPI transmit data register (cpu mode) */
+       __be32  spird;          /* SPI receive data register (cpu mode) */
+       u8      res7[0x8];
+} __attribute__ ((packed));
+
+/* SI */
+struct si1 {
+       __be16  siamr1;         /* SI1 TDMA mode register */
+       __be16  sibmr1;         /* SI1 TDMB mode register */
+       __be16  sicmr1;         /* SI1 TDMC mode register */
+       __be16  sidmr1;         /* SI1 TDMD mode register */
+       u8      siglmr1_h;      /* SI1 global mode register high */
+       u8      res0[0x1];
+       u8      sicmdr1_h;      /* SI1 command register high */
+       u8      res2[0x1];
+       u8      sistr1_h;       /* SI1 status register high */
+       u8      res3[0x1];
+       __be16  sirsr1_h;       /* SI1 RAM shadow address register high */
+       u8      sitarc1;        /* SI1 RAM counter Tx TDMA */
+       u8      sitbrc1;        /* SI1 RAM counter Tx TDMB */
+       u8      sitcrc1;        /* SI1 RAM counter Tx TDMC */
+       u8      sitdrc1;        /* SI1 RAM counter Tx TDMD */
+       u8      sirarc1;        /* SI1 RAM counter Rx TDMA */
+       u8      sirbrc1;        /* SI1 RAM counter Rx TDMB */
+       u8      sircrc1;        /* SI1 RAM counter Rx TDMC */
+       u8      sirdrc1;        /* SI1 RAM counter Rx TDMD */
+       u8      res4[0x8];
+       __be16  siemr1;         /* SI1 TDME mode register 16 bits */
+       __be16  sifmr1;         /* SI1 TDMF mode register 16 bits */
+       __be16  sigmr1;         /* SI1 TDMG mode register 16 bits */
+       __be16  sihmr1;         /* SI1 TDMH mode register 16 bits */
+       u8      siglmg1_l;      /* SI1 global mode register low 8 bits */
+       u8      res5[0x1];
+       u8      sicmdr1_l;      /* SI1 command register low 8 bits */
+       u8      res6[0x1];
+       u8      sistr1_l;       /* SI1 status register low 8 bits */
+       u8      res7[0x1];
+       __be16  sirsr1_l;       /* SI1 RAM shadow address register low 16 bits*/
+       u8      siterc1;        /* SI1 RAM counter Tx TDME 8 bits */
+       u8      sitfrc1;        /* SI1 RAM counter Tx TDMF 8 bits */
+       u8      sitgrc1;        /* SI1 RAM counter Tx TDMG 8 bits */
+       u8      sithrc1;        /* SI1 RAM counter Tx TDMH 8 bits */
+       u8      sirerc1;        /* SI1 RAM counter Rx TDME 8 bits */
+       u8      sirfrc1;        /* SI1 RAM counter Rx TDMF 8 bits */
+       u8      sirgrc1;        /* SI1 RAM counter Rx TDMG 8 bits */
+       u8      sirhrc1;        /* SI1 RAM counter Rx TDMH 8 bits */
+       u8      res8[0x8];
+       __be32  siml1;          /* SI1 multiframe limit register */
+       u8      siedm1;         /* SI1 extended diagnostic mode register */
+       u8      res9[0xBB];
+} __attribute__ ((packed));
+
+/* SI Routing Tables */
+struct sir {
+       u8      tx[0x400];
+       u8      rx[0x400];
+       u8      res0[0x800];
+} __attribute__ ((packed));
+
+/* USB Controller */
+struct qe_usb_ctlr {
+       u8      usb_usmod;
+       u8      usb_usadr;
+       u8      usb_uscom;
+       u8      res1[1];
+       __be16  usb_usep[4];
+       u8      res2[4];
+       __be16  usb_usber;
+       u8      res3[2];
+       __be16  usb_usbmr;
+       u8      res4[1];
+       u8      usb_usbs;
+       __be16  usb_ussft;
+       u8      res5[2];
+       __be16  usb_usfrn;
+       u8      res6[0x22];
+} __attribute__ ((packed));
+
+/* MCC */
+struct qe_mcc {
+       __be32  mcce;           /* MCC event register */
+       __be32  mccm;           /* MCC mask register */
+       __be32  mccf;           /* MCC configuration register */
+       __be32  merl;           /* MCC emergency request level register */
+       u8      res0[0xF0];
+} __attribute__ ((packed));
+
+/* QE UCC Slow */
+struct ucc_slow {
+       __be32  gumr_l;         /* UCCx general mode register (low) */
+       __be32  gumr_h;         /* UCCx general mode register (high) */
+       __be16  upsmr;          /* UCCx protocol-specific mode register */
+       u8      res0[0x2];
+       __be16  utodr;          /* UCCx transmit on demand register */
+       __be16  udsr;           /* UCCx data synchronization register */
+       __be16  ucce;           /* UCCx event register */
+       u8      res1[0x2];
+       __be16  uccm;           /* UCCx mask register */
+       u8      res2[0x1];
+       u8      uccs;           /* UCCx status register */
+       u8      res3[0x24];
+       __be16  utpt;
+       u8      res4[0x52];
+       u8      guemr;          /* UCC general extended mode register */
+} __attribute__ ((packed));
+
+/* QE UCC Fast */
+struct ucc_fast {
+       __be32  gumr;           /* UCCx general mode register */
+       __be32  upsmr;          /* UCCx protocol-specific mode register */
+       __be16  utodr;          /* UCCx transmit on demand register */
+       u8      res0[0x2];
+       __be16  udsr;           /* UCCx data synchronization register */
+       u8      res1[0x2];
+       __be32  ucce;           /* UCCx event register */
+       __be32  uccm;           /* UCCx mask register */
+       u8      uccs;           /* UCCx status register */
+       u8      res2[0x7];
+       __be32  urfb;           /* UCC receive FIFO base */
+       __be16  urfs;           /* UCC receive FIFO size */
+       u8      res3[0x2];
+       __be16  urfet;          /* UCC receive FIFO emergency threshold */
+       __be16  urfset;         /* UCC receive FIFO special emergency
+                                  threshold */
+       __be32  utfb;           /* UCC transmit FIFO base */
+       __be16  utfs;           /* UCC transmit FIFO size */
+       u8      res4[0x2];
+       __be16  utfet;          /* UCC transmit FIFO emergency threshold */
+       u8      res5[0x2];
+       __be16  utftt;          /* UCC transmit FIFO transmit threshold */
+       u8      res6[0x2];
+       __be16  utpt;           /* UCC transmit polling timer */
+       u8      res7[0x2];
+       __be32  urtry;          /* UCC retry counter register */
+       u8      res8[0x4C];
+       u8      guemr;          /* UCC general extended mode register */
+} __attribute__ ((packed));
+
+struct ucc {
+       union {
+               struct  ucc_slow slow;
+               struct  ucc_fast fast;
+               u8      res[0x200];     /* UCC blocks are 512 bytes each */
+       };
+} __attribute__ ((packed));
+
+/* MultiPHY UTOPIA POS Controllers (UPC) */
+struct upc {
+       __be32  upgcr;          /* UTOPIA/POS general configuration register */
+       __be32  uplpa;          /* UTOPIA/POS last PHY address */
+       __be32  uphec;          /* ATM HEC register */
+       __be32  upuc;           /* UTOPIA/POS UCC configuration */
+       __be32  updc1;          /* UTOPIA/POS device 1 configuration */
+       __be32  updc2;          /* UTOPIA/POS device 2 configuration */
+       __be32  updc3;          /* UTOPIA/POS device 3 configuration */
+       __be32  updc4;          /* UTOPIA/POS device 4 configuration */
+       __be32  upstpa;         /* UTOPIA/POS STPA threshold */
+       u8      res0[0xC];
+       __be32  updrs1_h;       /* UTOPIA/POS device 1 rate select */
+       __be32  updrs1_l;       /* UTOPIA/POS device 1 rate select */
+       __be32  updrs2_h;       /* UTOPIA/POS device 2 rate select */
+       __be32  updrs2_l;       /* UTOPIA/POS device 2 rate select */
+       __be32  updrs3_h;       /* UTOPIA/POS device 3 rate select */
+       __be32  updrs3_l;       /* UTOPIA/POS device 3 rate select */
+       __be32  updrs4_h;       /* UTOPIA/POS device 4 rate select */
+       __be32  updrs4_l;       /* UTOPIA/POS device 4 rate select */
+       __be32  updrp1;         /* UTOPIA/POS device 1 receive priority low */
+       __be32  updrp2;         /* UTOPIA/POS device 2 receive priority low */
+       __be32  updrp3;         /* UTOPIA/POS device 3 receive priority low */
+       __be32  updrp4;         /* UTOPIA/POS device 4 receive priority low */
+       __be32  upde1;          /* UTOPIA/POS device 1 event */
+       __be32  upde2;          /* UTOPIA/POS device 2 event */
+       __be32  upde3;          /* UTOPIA/POS device 3 event */
+       __be32  upde4;          /* UTOPIA/POS device 4 event */
+       __be16  uprp1;
+       __be16  uprp2;
+       __be16  uprp3;
+       __be16  uprp4;
+       u8      res1[0x8];
+       __be16  uptirr1_0;      /* Device 1 transmit internal rate 0 */
+       __be16  uptirr1_1;      /* Device 1 transmit internal rate 1 */
+       __be16  uptirr1_2;      /* Device 1 transmit internal rate 2 */
+       __be16  uptirr1_3;      /* Device 1 transmit internal rate 3 */
+       __be16  uptirr2_0;      /* Device 2 transmit internal rate 0 */
+       __be16  uptirr2_1;      /* Device 2 transmit internal rate 1 */
+       __be16  uptirr2_2;      /* Device 2 transmit internal rate 2 */
+       __be16  uptirr2_3;      /* Device 2 transmit internal rate 3 */
+       __be16  uptirr3_0;      /* Device 3 transmit internal rate 0 */
+       __be16  uptirr3_1;      /* Device 3 transmit internal rate 1 */
+       __be16  uptirr3_2;      /* Device 3 transmit internal rate 2 */
+       __be16  uptirr3_3;      /* Device 3 transmit internal rate 3 */
+       __be16  uptirr4_0;      /* Device 4 transmit internal rate 0 */
+       __be16  uptirr4_1;      /* Device 4 transmit internal rate 1 */
+       __be16  uptirr4_2;      /* Device 4 transmit internal rate 2 */
+       __be16  uptirr4_3;      /* Device 4 transmit internal rate 3 */
+       __be32  uper1;          /* Device 1 port enable register */
+       __be32  uper2;          /* Device 2 port enable register */
+       __be32  uper3;          /* Device 3 port enable register */
+       __be32  uper4;          /* Device 4 port enable register */
+       u8      res2[0x150];
+} __attribute__ ((packed));
+
+/* SDMA */
+struct sdma {
+       __be32  sdsr;           /* Serial DMA status register */
+       __be32  sdmr;           /* Serial DMA mode register */
+       __be32  sdtr1;          /* SDMA system bus threshold register */
+       __be32  sdtr2;          /* SDMA secondary bus threshold register */
+       __be32  sdhy1;          /* SDMA system bus hysteresis register */
+       __be32  sdhy2;          /* SDMA secondary bus hysteresis register */
+       __be32  sdta1;          /* SDMA system bus address register */
+       __be32  sdta2;          /* SDMA secondary bus address register */
+       __be32  sdtm1;          /* SDMA system bus MSNUM register */
+       __be32  sdtm2;          /* SDMA secondary bus MSNUM register */
+       u8      res0[0x10];
+       __be32  sdaqr;          /* SDMA address bus qualify register */
+       __be32  sdaqmr;         /* SDMA address bus qualify mask register */
+       u8      res1[0x4];
+       __be32  sdebcr;         /* SDMA CAM entries base register */
+       u8      res2[0x38];
+} __attribute__ ((packed));
+
+/* Debug Space */
+struct dbg {
+       __be32  bpdcr;          /* Breakpoint debug command register */
+       __be32  bpdsr;          /* Breakpoint debug status register */
+       __be32  bpdmr;          /* Breakpoint debug mask register */
+       __be32  bprmrr0;        /* Breakpoint request mode risc register 0 */
+       __be32  bprmrr1;        /* Breakpoint request mode risc register 1 */
+       u8      res0[0x8];
+       __be32  bprmtr0;        /* Breakpoint request mode trb register 0 */
+       __be32  bprmtr1;        /* Breakpoint request mode trb register 1 */
+       u8      res1[0x8];
+       __be32  bprmir;         /* Breakpoint request mode immediate register */
+       __be32  bprmsr;         /* Breakpoint request mode serial register */
+       __be32  bpemr;          /* Breakpoint exit mode register */
+       u8      res2[0x48];
+} __attribute__ ((packed));
+
+/*
+ * RISC Special Registers (Trap and Breakpoint).  These are described in
+ * the QE Developer's Handbook.
+ */
+struct rsp {
+       __be32 tibcr[16];       /* Trap/instruction breakpoint control regs */
+       u8 res0[64];
+       __be32 ibcr0;
+       __be32 ibs0;
+       __be32 ibcnr0;
+       u8 res1[4];
+       __be32 ibcr1;
+       __be32 ibs1;
+       __be32 ibcnr1;
+       __be32 npcr;
+       __be32 dbcr;
+       __be32 dbar;
+       __be32 dbamr;
+       __be32 dbsr;
+       __be32 dbcnr;
+       u8 res2[12];
+       __be32 dbdr_h;
+       __be32 dbdr_l;
+       __be32 dbdmr_h;
+       __be32 dbdmr_l;
+       __be32 bsr;
+       __be32 bor;
+       __be32 bior;
+       u8 res3[4];
+       __be32 iatr[4];
+       __be32 eccr;            /* Exception control configuration register */
+       __be32 eicr;
+       u8 res4[0x100-0xf8];
+} __attribute__ ((packed));
+
+struct qe_immap {
+       struct qe_iram          iram;           /* I-RAM */
+       struct qe_ic_regs       ic;             /* Interrupt Controller */
+       struct cp_qe            cp;             /* Communications Processor */
+       struct qe_mux           qmx;            /* QE Multiplexer */
+       struct qe_timers        qet;            /* QE Timers */
+       struct spi              spi[0x2];       /* spi */
+       struct qe_mcc           mcc;            /* mcc */
+       struct qe_brg           brg;            /* brg */
+       struct qe_usb_ctlr      usb;            /* USB */
+       struct si1              si1;            /* SI */
+       u8                      res11[0x800];
+       struct sir              sir;            /* SI Routing Tables */
+       struct ucc              ucc1;           /* ucc1 */
+       struct ucc              ucc3;           /* ucc3 */
+       struct ucc              ucc5;           /* ucc5 */
+       struct ucc              ucc7;           /* ucc7 */
+       u8                      res12[0x600];
+       struct upc              upc1;           /* MultiPHY UTOPIA POS Ctrlr 1*/
+       struct ucc              ucc2;           /* ucc2 */
+       struct ucc              ucc4;           /* ucc4 */
+       struct ucc              ucc6;           /* ucc6 */
+       struct ucc              ucc8;           /* ucc8 */
+       u8                      res13[0x600];
+       struct upc              upc2;           /* MultiPHY UTOPIA POS Ctrlr 2*/
+       struct sdma             sdma;           /* SDMA */
+       struct dbg              dbg;            /* 0x104080 - 0x1040FF
+                                                  Debug Space */
+       struct rsp              rsp[0x2];       /* 0x104100 - 0x1042FF
+                                                  RISC Special Registers
+                                                  (Trap and Breakpoint) */
+       u8                      res14[0x300];   /* 0x104300 - 0x1045FF */
+       u8                      res15[0x3A00];  /* 0x104600 - 0x107FFF */
+       u8                      res16[0x8000];  /* 0x108000 - 0x110000 */
+       u8                      muram[0xC000];  /* 0x110000 - 0x11C000
+                                                  Multi-user RAM */
+       u8                      res17[0x24000]; /* 0x11C000 - 0x140000 */
+       u8                      res18[0xC0000]; /* 0x140000 - 0x200000 */
+} __attribute__ ((packed));
+
+extern struct qe_immap __iomem *qe_immr;
+extern phys_addr_t get_qe_base(void);
+
+/*
+ * Returns the physical address corresponding to the given pointer, which
+ * may point into the QE register/MURAM window or at kmalloc'ed memory.
+ *
+ * Note that the QE does not support 36-bit physical addresses, so if
+ * get_qe_base() returns a number above 4GB, the caller will probably fail.
+ */
+static inline phys_addr_t immrbar_virt_to_phys(void *address)
+{
+       void *q = (void *)qe_immr;
+
+       /* Is it a MURAM address? */
+       if ((address >= q) && (address < (q + QE_IMMAP_SIZE)))
+               return get_qe_base() + (address - q);
+
+       /* It's an address returned by kmalloc */
+       return virt_to_phys(address);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_IMMAP_QE_H */
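
[Reviewer note] A sketch of the translation helper in use; the caller and the __iomem cast are hypothetical, and the pointer must come either from within the 1MB qe_immr window or from kmalloc():

    /* Hypothetical: hand a MURAM buffer's bus address to a DMA engine. */
    static phys_addr_t example_muram_phys(void __iomem *muram_buf)
    {
    	/* Pointers inside the QE window translate via get_qe_base();
    	 * anything else is assumed to be kmalloc'ed memory. */
    	return immrbar_virt_to_phys((void *)muram_buf);
    }
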
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
new file mode 100644 (file)
index 0000000..c7fa36c
--- /dev/null
@@ -0,0 +1,790 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QUICC Engine (QE) external definitions and structure.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_QE_H
+#define _ASM_POWERPC_QE_H
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/genalloc.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <asm/cpm.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/types.h>
+
+#define QE_NUM_OF_SNUM 256     /* There are 256 serial numbers in the QE */
+#define QE_NUM_OF_BRGS 16
+#define QE_NUM_OF_PORTS        1024
+
+/* Memory partitions */
+#define MEM_PART_SYSTEM                0
+#define MEM_PART_SECONDARY     1
+#define MEM_PART_MURAM         2
+
+/* Clocks and BRGs */
+enum qe_clock {
+       QE_CLK_NONE = 0,
+       QE_BRG1,                /* Baud Rate Generator 1 */
+       QE_BRG2,                /* Baud Rate Generator 2 */
+       QE_BRG3,                /* Baud Rate Generator 3 */
+       QE_BRG4,                /* Baud Rate Generator 4 */
+       QE_BRG5,                /* Baud Rate Generator 5 */
+       QE_BRG6,                /* Baud Rate Generator 6 */
+       QE_BRG7,                /* Baud Rate Generator 7 */
+       QE_BRG8,                /* Baud Rate Generator 8 */
+       QE_BRG9,                /* Baud Rate Generator 9 */
+       QE_BRG10,               /* Baud Rate Generator 10 */
+       QE_BRG11,               /* Baud Rate Generator 11 */
+       QE_BRG12,               /* Baud Rate Generator 12 */
+       QE_BRG13,               /* Baud Rate Generator 13 */
+       QE_BRG14,               /* Baud Rate Generator 14 */
+       QE_BRG15,               /* Baud Rate Generator 15 */
+       QE_BRG16,               /* Baud Rate Generator 16 */
+       QE_CLK1,                /* Clock 1 */
+       QE_CLK2,                /* Clock 2 */
+       QE_CLK3,                /* Clock 3 */
+       QE_CLK4,                /* Clock 4 */
+       QE_CLK5,                /* Clock 5 */
+       QE_CLK6,                /* Clock 6 */
+       QE_CLK7,                /* Clock 7 */
+       QE_CLK8,                /* Clock 8 */
+       QE_CLK9,                /* Clock 9 */
+       QE_CLK10,               /* Clock 10 */
+       QE_CLK11,               /* Clock 11 */
+       QE_CLK12,               /* Clock 12 */
+       QE_CLK13,               /* Clock 13 */
+       QE_CLK14,               /* Clock 14 */
+       QE_CLK15,               /* Clock 15 */
+       QE_CLK16,               /* Clock 16 */
+       QE_CLK17,               /* Clock 17 */
+       QE_CLK18,               /* Clock 18 */
+       QE_CLK19,               /* Clock 19 */
+       QE_CLK20,               /* Clock 20 */
+       QE_CLK21,               /* Clock 21 */
+       QE_CLK22,               /* Clock 22 */
+       QE_CLK23,               /* Clock 23 */
+       QE_CLK24,               /* Clock 24 */
+       QE_CLK_DUMMY
+};
+
+static inline bool qe_clock_is_brg(enum qe_clock clk)
+{
+       return clk >= QE_BRG1 && clk <= QE_BRG16;
+}
+
+extern spinlock_t cmxgcr_lock;
+
+/* Export QE common operations */
+#ifdef CONFIG_QUICC_ENGINE
+extern void qe_reset(void);
+#else
+static inline void qe_reset(void) {}
+#endif
+
+int cpm_muram_init(void);
+
+#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
+unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
+int cpm_muram_free(unsigned long offset);
+unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo,
+                                    void *data);
+void __iomem *cpm_muram_addr(unsigned long offset);
+unsigned long cpm_muram_offset(void __iomem *addr);
+dma_addr_t cpm_muram_dma(void __iomem *addr);
+#else
+static inline unsigned long cpm_muram_alloc(unsigned long size,
+                                           unsigned long align)
+{
+       return -ENOSYS;
+}
+
+static inline int cpm_muram_free(unsigned long offset)
+{
+       return -ENOSYS;
+}
+
+static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset,
+                                                 unsigned long size)
+{
+       return -ENOSYS;
+}
+
+static inline void __iomem *cpm_muram_addr(unsigned long offset)
+{
+       return NULL;
+}
+
+static inline unsigned long cpm_muram_offset(void __iomem *addr)
+{
+       return -ENOSYS;
+}
+
+static inline dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+       return 0;
+}
+#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */
+
+/* QE PIO */
+#define QE_PIO_PINS 32
+
+struct qe_pio_regs {
+       __be32  cpodr;          /* Open drain register */
+       __be32  cpdata;         /* Data register */
+       __be32  cpdir1;         /* Direction register */
+       __be32  cpdir2;         /* Direction register */
+       __be32  cppar1;         /* Pin assignment register */
+       __be32  cppar2;         /* Pin assignment register */
+#ifdef CONFIG_PPC_85xx
+       u8      pad[8];
+#endif
+};
+
+#define QE_PIO_DIR_IN  2
+#define QE_PIO_DIR_OUT 1
+extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin,
+                               int dir, int open_drain, int assignment,
+                               int has_irq);
+#ifdef CONFIG_QUICC_ENGINE
+extern int par_io_init(struct device_node *np);
+extern int par_io_of_config(struct device_node *np);
+extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+                            int assignment, int has_irq);
+extern int par_io_data_set(u8 port, u8 pin, u8 val);
+#else
+static inline int par_io_init(struct device_node *np) { return -ENOSYS; }
+static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; }
+static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+               int assignment, int has_irq) { return -ENOSYS; }
+static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
+#endif /* CONFIG_QUICC_ENGINE */
+
+/*
+ * Pin multiplexing functions.
+ */
+struct qe_pin;
+#ifdef CONFIG_QE_GPIO
+extern struct qe_pin *qe_pin_request(struct device_node *np, int index);
+extern void qe_pin_free(struct qe_pin *qe_pin);
+extern void qe_pin_set_gpio(struct qe_pin *qe_pin);
+extern void qe_pin_set_dedicated(struct qe_pin *pin);
+#else
+static inline struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+       return ERR_PTR(-ENOSYS);
+}
+static inline void qe_pin_free(struct qe_pin *qe_pin) {}
+static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {}
+static inline void qe_pin_set_dedicated(struct qe_pin *pin) {}
+#endif /* CONFIG_QE_GPIO */
+
+#ifdef CONFIG_QUICC_ENGINE
+int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
+#else
+static inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol,
+                              u32 cmd_input)
+{
+       return -ENOSYS;
+}
+#endif /* CONFIG_QUICC_ENGINE */
+
+/* QE internal API */
+enum qe_clock qe_clock_source(const char *source);
+unsigned int qe_get_brg_clk(void);
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier);
+int qe_get_snum(void);
+void qe_put_snum(u8 snum);
+unsigned int qe_get_num_of_risc(void);
+unsigned int qe_get_num_of_snums(void);
+
+static inline int qe_alive_during_sleep(void)
+{
+       /*
+        * MPC8568E reference manual says:
+        *
+        * "...power down sequence waits for all I/O interfaces to become idle.
+        *  In some applications this may happen eventually without actively
+        *  shutting down interfaces, but most likely, software will have to
+        *  take steps to shut down the eTSEC, QUICC Engine Block, and PCI
+        *  interfaces before issuing the command (either the write to the core
+        *  MSR[WE] as described above or writing to POWMGTCSR) to put the
+        *  device into sleep state."
+        *
+        * MPC8569E reference manual has a similar paragraph.
+        */
+#ifdef CONFIG_PPC_85xx
+       return 0;
+#else
+       return 1;
+#endif
+}
+
+/* we actually use cpm_muram implementation, define this for convenience */
+#define qe_muram_init cpm_muram_init
+#define qe_muram_alloc cpm_muram_alloc
+#define qe_muram_alloc_fixed cpm_muram_alloc_fixed
+#define qe_muram_free cpm_muram_free
+#define qe_muram_addr cpm_muram_addr
+#define qe_muram_offset cpm_muram_offset
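
A sketch of the allocate/map/free pattern these wrappers support (editorial;
error handling is abbreviated and PRAM_SIZE is a hypothetical constant):

	unsigned long off = qe_muram_alloc(PRAM_SIZE, QE_ALIGNMENT_OF_PRAM);
	if (!IS_ERR_VALUE(off)) {
		void __iomem *pram = qe_muram_addr(off); /* virtual view */
		/* ... program the parameter RAM through pram ... */
		qe_muram_free(off);
	}
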
+
+/* Structure that defines QE firmware binary files.
+ *
+ * See Documentation/powerpc/qe_firmware.txt for a description of these
+ * fields.
+ */
+struct qe_firmware {
+       struct qe_header {
+               __be32 length;  /* Length of the entire structure, in bytes */
+               u8 magic[3];    /* Set to { 'Q', 'E', 'F' } */
+               u8 version;     /* Version of this layout. First ver is '1' */
+       } header;
+       u8 id[62];      /* Null-terminated identifier string */
+       u8 split;       /* 0 = shared I-RAM, 1 = split I-RAM */
+       u8 count;       /* Number of microcode[] structures */
+       struct {
+               __be16 model;           /* The SOC model  */
+               u8 major;               /* The SOC revision major */
+               u8 minor;               /* The SOC revision minor */
+       } __attribute__ ((packed)) soc;
+       u8 padding[4];                  /* Reserved, for alignment */
+       __be64 extended_modes;          /* Extended modes */
+       __be32 vtraps[8];               /* Virtual trap addresses */
+       u8 reserved[4];                 /* Reserved, for future expansion */
+       struct qe_microcode {
+               u8 id[32];              /* Null-terminated identifier */
+               __be32 traps[16];       /* Trap addresses, 0 == ignore */
+               __be32 eccr;            /* The value for the ECCR register */
+               __be32 iram_offset;     /* Offset into I-RAM for the code */
+               __be32 count;           /* Number of 32-bit words of the code */
+               __be32 code_offset;     /* Offset of the actual microcode */
+               u8 major;               /* The microcode version major */
+               u8 minor;               /* The microcode version minor */
+               u8 revision;            /* The microcode version revision */
+               u8 padding;             /* Reserved, for alignment */
+               u8 reserved[4];         /* Reserved, for future expansion */
+       } __attribute__ ((packed)) microcode[1];
+       /* All microcode binaries should be located here */
+       /* CRC32 should be located here, after the microcode binaries */
+} __attribute__ ((packed));
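
As the trailing comments note, the CRC32 occupies the last four bytes of the
region described by header.length. A sketch of locating it (hypothetical
helper; assumes the image has already been validated against its magic):

	static u32 qe_firmware_crc(const struct qe_firmware *fw)
	{
		u32 length = be32_to_cpu(fw->header.length);

		/* big-endian CRC32 in the last 4 bytes of the image */
		return be32_to_cpu(*(const __be32 *)((const u8 *)fw + length - 4));
	}
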
+
+struct qe_firmware_info {
+       char id[64];            /* Firmware name */
+       u32 vtraps[8];          /* Virtual trap addresses */
+       u64 extended_modes;     /* Extended modes */
+};
+
+#ifdef CONFIG_QUICC_ENGINE
+/* Upload a firmware to the QE */
+int qe_upload_firmware(const struct qe_firmware *firmware);
+#else
+static inline int qe_upload_firmware(const struct qe_firmware *firmware)
+{
+       return -ENOSYS;
+}
+#endif /* CONFIG_QUICC_ENGINE */
+
+/* Obtain information on the uploaded firmware */
+struct qe_firmware_info *qe_get_firmware_info(void);
+
+/* QE USB */
+int qe_usb_clock_set(enum qe_clock clk, int rate);
+
+/* Buffer descriptors */
+struct qe_bd {
+       __be16 status;
+       __be16 length;
+       __be32 buf;
+} __attribute__ ((packed));
+
+#define BD_STATUS_MASK 0xffff0000
+#define BD_LENGTH_MASK 0x0000ffff
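
A sketch of how the two masks split a buffer descriptor's first word when it
is read as one 32-bit quantity (`bd' is a hypothetical struct qe_bd __iomem
pointer; in_be32() is the usual powerpc accessor):

	u32 word   = in_be32((u32 __iomem *)bd);   /* status:16 | length:16 */
	u16 status = (word & BD_STATUS_MASK) >> 16;
	u16 length = word & BD_LENGTH_MASK;
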
+
+/* Alignment */
+#define QE_INTR_TABLE_ALIGN    16      /* ??? */
+#define QE_ALIGNMENT_OF_BD     8
+#define QE_ALIGNMENT_OF_PRAM   64
+
+/* RISC allocation */
+#define QE_RISC_ALLOCATION_RISC1       0x1  /* RISC 1 */
+#define QE_RISC_ALLOCATION_RISC2       0x2  /* RISC 2 */
+#define QE_RISC_ALLOCATION_RISC3       0x4  /* RISC 3 */
+#define QE_RISC_ALLOCATION_RISC4       0x8  /* RISC 4 */
+#define QE_RISC_ALLOCATION_RISC1_AND_RISC2     (QE_RISC_ALLOCATION_RISC1 | \
+                                                QE_RISC_ALLOCATION_RISC2)
+#define QE_RISC_ALLOCATION_FOUR_RISCS  (QE_RISC_ALLOCATION_RISC1 | \
+                                        QE_RISC_ALLOCATION_RISC2 | \
+                                        QE_RISC_ALLOCATION_RISC3 | \
+                                        QE_RISC_ALLOCATION_RISC4)
+
+/* QE extended filtering Table Lookup Key Size */
+enum qe_fltr_tbl_lookup_key_size {
+       QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES
+               = 0x3f,         /* LookupKey parsed by the Generate LookupKey
+                                  CMD is truncated to 8 bytes */
+       QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES
+               = 0x5f,         /* LookupKey parsed by the Generate LookupKey
+                                  CMD is truncated to 16 bytes */
+};
+
+/* QE FLTR extended filtering Largest External Table Lookup Key Size */
+enum qe_fltr_largest_external_tbl_lookup_key_size {
+       QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE
+               = 0x0,/* not used */
+       QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES
+               = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES,        /* 8 bytes */
+       QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES
+               = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES,       /* 16 bytes */
+};
+
+/* structure representing QE parameter RAM */
+struct qe_timer_tables {
+       u16 tm_base;            /* QE timer table base adr */
+       u16 tm_ptr;             /* QE timer table pointer */
+       u16 r_tmr;              /* QE timer mode register */
+       u16 r_tmv;              /* QE timer valid register */
+       u32 tm_cmd;             /* QE timer cmd register */
+       u32 tm_cnt;             /* QE timer internal cnt */
+} __attribute__ ((packed));
+
+#define QE_FLTR_TAD_SIZE       8
+
+/* QE extended filtering Termination Action Descriptor (TAD) */
+struct qe_fltr_tad {
+       u8 serialized[QE_FLTR_TAD_SIZE];
+} __attribute__ ((packed));
+
+/* Communication Direction */
+enum comm_dir {
+       COMM_DIR_NONE = 0,
+       COMM_DIR_RX = 1,
+       COMM_DIR_TX = 2,
+       COMM_DIR_RX_AND_TX = 3
+};
+
+/* QE CMXUCR Registers.
+ * There are two UCCs represented in each of the four CMXUCR registers.
+ * These values are for the UCC in the LSBs.
+ */
+#define QE_CMXUCR_MII_ENET_MNG         0x00007000
+#define QE_CMXUCR_MII_ENET_MNG_SHIFT   12
+#define QE_CMXUCR_GRANT                        0x00008000
+#define QE_CMXUCR_TSA                  0x00004000
+#define QE_CMXUCR_BKPT                 0x00000100
+#define QE_CMXUCR_TX_CLK_SRC_MASK      0x0000000F
+
+/* QE CMXGCR Registers */
+#define QE_CMXGCR_MII_ENET_MNG         0x00007000
+#define QE_CMXGCR_MII_ENET_MNG_SHIFT   12
+#define QE_CMXGCR_USBCS                        0x0000000f
+#define QE_CMXGCR_USBCS_CLK3           0x1
+#define QE_CMXGCR_USBCS_CLK5           0x2
+#define QE_CMXGCR_USBCS_CLK7           0x3
+#define QE_CMXGCR_USBCS_CLK9           0x4
+#define QE_CMXGCR_USBCS_CLK13          0x5
+#define QE_CMXGCR_USBCS_CLK17          0x6
+#define QE_CMXGCR_USBCS_CLK19          0x7
+#define QE_CMXGCR_USBCS_CLK21          0x8
+#define QE_CMXGCR_USBCS_BRG9           0x9
+#define QE_CMXGCR_USBCS_BRG10          0xa
+
+/* QE CECR Commands */
+#define QE_CR_FLG                      0x00010000
+#define QE_RESET                       0x80000000
+#define QE_INIT_TX_RX                  0x00000000
+#define QE_INIT_RX                     0x00000001
+#define QE_INIT_TX                     0x00000002
+#define QE_ENTER_HUNT_MODE             0x00000003
+#define QE_STOP_TX                     0x00000004
+#define QE_GRACEFUL_STOP_TX            0x00000005
+#define QE_RESTART_TX                  0x00000006
+#define QE_CLOSE_RX_BD                 0x00000007
+#define QE_SWITCH_COMMAND              0x00000007
+#define QE_SET_GROUP_ADDRESS           0x00000008
+#define QE_START_IDMA                  0x00000009
+#define QE_MCC_STOP_RX                 0x00000009
+#define QE_ATM_TRANSMIT                        0x0000000a
+#define QE_HPAC_CLEAR_ALL              0x0000000b
+#define QE_GRACEFUL_STOP_RX            0x0000001a
+#define QE_RESTART_RX                  0x0000001b
+#define QE_HPAC_SET_PRIORITY           0x0000010b
+#define QE_HPAC_STOP_TX                        0x0000020b
+#define QE_HPAC_STOP_RX                        0x0000030b
+#define QE_HPAC_GRACEFUL_STOP_TX       0x0000040b
+#define QE_HPAC_GRACEFUL_STOP_RX       0x0000050b
+#define QE_HPAC_START_TX               0x0000060b
+#define QE_HPAC_START_RX               0x0000070b
+#define QE_USB_STOP_TX                 0x0000000a
+#define QE_USB_RESTART_TX              0x0000000c
+#define QE_QMC_STOP_TX                 0x0000000c
+#define QE_QMC_STOP_RX                 0x0000000d
+#define QE_SS7_SU_FIL_RESET            0x0000000e
+/* jonathbr added from here down for 83xx */
+#define QE_RESET_BCS                   0x0000000a
+#define QE_MCC_INIT_TX_RX_16           0x00000003
+#define QE_MCC_STOP_TX                 0x00000004
+#define QE_MCC_INIT_TX_1               0x00000005
+#define QE_MCC_INIT_RX_1               0x00000006
+#define QE_MCC_RESET                   0x00000007
+#define QE_SET_TIMER                   0x00000008
+#define QE_RANDOM_NUMBER               0x0000000c
+#define QE_ATM_MULTI_THREAD_INIT       0x00000011
+#define QE_ASSIGN_PAGE                 0x00000012
+#define QE_ADD_REMOVE_HASH_ENTRY       0x00000013
+#define QE_START_FLOW_CONTROL          0x00000014
+#define QE_STOP_FLOW_CONTROL           0x00000015
+#define QE_ASSIGN_PAGE_TO_DEVICE       0x00000016
+
+#define QE_ASSIGN_RISC                 0x00000010
+#define QE_CR_MCN_NORMAL_SHIFT         6
+#define QE_CR_MCN_USB_SHIFT            4
+#define QE_CR_MCN_RISC_ASSIGN_SHIFT    8
+#define QE_CR_SNUM_SHIFT               17
+
+/* QE CECR Sub Block - sub block of QE command */
+#define QE_CR_SUBBLOCK_INVALID         0x00000000
+#define QE_CR_SUBBLOCK_USB             0x03200000
+#define QE_CR_SUBBLOCK_UCCFAST1                0x02000000
+#define QE_CR_SUBBLOCK_UCCFAST2                0x02200000
+#define QE_CR_SUBBLOCK_UCCFAST3                0x02400000
+#define QE_CR_SUBBLOCK_UCCFAST4                0x02600000
+#define QE_CR_SUBBLOCK_UCCFAST5                0x02800000
+#define QE_CR_SUBBLOCK_UCCFAST6                0x02a00000
+#define QE_CR_SUBBLOCK_UCCFAST7                0x02c00000
+#define QE_CR_SUBBLOCK_UCCFAST8                0x02e00000
+#define QE_CR_SUBBLOCK_UCCSLOW1                0x00000000
+#define QE_CR_SUBBLOCK_UCCSLOW2                0x00200000
+#define QE_CR_SUBBLOCK_UCCSLOW3                0x00400000
+#define QE_CR_SUBBLOCK_UCCSLOW4                0x00600000
+#define QE_CR_SUBBLOCK_UCCSLOW5                0x00800000
+#define QE_CR_SUBBLOCK_UCCSLOW6                0x00a00000
+#define QE_CR_SUBBLOCK_UCCSLOW7                0x00c00000
+#define QE_CR_SUBBLOCK_UCCSLOW8                0x00e00000
+#define QE_CR_SUBBLOCK_MCC1            0x03800000
+#define QE_CR_SUBBLOCK_MCC2            0x03a00000
+#define QE_CR_SUBBLOCK_MCC3            0x03000000
+#define QE_CR_SUBBLOCK_IDMA1           0x02800000
+#define QE_CR_SUBBLOCK_IDMA2           0x02a00000
+#define QE_CR_SUBBLOCK_IDMA3           0x02c00000
+#define QE_CR_SUBBLOCK_IDMA4           0x02e00000
+#define QE_CR_SUBBLOCK_HPAC            0x01e00000
+#define QE_CR_SUBBLOCK_SPI1            0x01400000
+#define QE_CR_SUBBLOCK_SPI2            0x01600000
+#define QE_CR_SUBBLOCK_RAND            0x01c00000
+#define QE_CR_SUBBLOCK_TIMER           0x01e00000
+#define QE_CR_SUBBLOCK_GENERAL         0x03c00000
+
+/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command */
+#define QE_CR_PROTOCOL_UNSPECIFIED     0x00    /* For all other protocols */
+#define QE_CR_PROTOCOL_HDLC_TRANSPARENT        0x00
+#define QE_CR_PROTOCOL_QMC             0x02
+#define QE_CR_PROTOCOL_UART            0x04
+#define QE_CR_PROTOCOL_ATM_POS         0x0A
+#define QE_CR_PROTOCOL_ETHERNET                0x0C
+#define QE_CR_PROTOCOL_L2_SWITCH       0x0D
+
+/* BRG configuration register */
+#define QE_BRGC_ENABLE         0x00010000
+#define QE_BRGC_DIVISOR_SHIFT  1
+#define QE_BRGC_DIVISOR_MAX    0xFFF
+#define QE_BRGC_DIV16          1
+
+/* QE Timers registers */
+#define QE_GTCFR1_PCAS 0x80
+#define QE_GTCFR1_STP2 0x20
+#define QE_GTCFR1_RST2 0x10
+#define QE_GTCFR1_GM2  0x08
+#define QE_GTCFR1_GM1  0x04
+#define QE_GTCFR1_STP1 0x02
+#define QE_GTCFR1_RST1 0x01
+
+/* SDMA registers */
+#define QE_SDSR_BER1   0x02000000
+#define QE_SDSR_BER2   0x01000000
+
+#define QE_SDMR_GLB_1_MSK      0x80000000
+#define QE_SDMR_ADR_SEL                0x20000000
+#define QE_SDMR_BER1_MSK       0x02000000
+#define QE_SDMR_BER2_MSK       0x01000000
+#define QE_SDMR_EB1_MSK                0x00800000
+#define QE_SDMR_ER1_MSK                0x00080000
+#define QE_SDMR_ER2_MSK                0x00040000
+#define QE_SDMR_CEN_MASK       0x0000E000
+#define QE_SDMR_SBER_1         0x00000200
+#define QE_SDMR_SBER_2         0x00000200
+#define QE_SDMR_EB1_PR_MASK    0x000000C0
+#define QE_SDMR_ER1_PR         0x00000008
+
+#define QE_SDMR_CEN_SHIFT      13
+#define QE_SDMR_EB1_PR_SHIFT   6
+
+#define QE_SDTM_MSNUM_SHIFT    24
+
+#define QE_SDEBCR_BA_MASK      0x01FFFFFF
+
+/* Communication Processor */
+#define QE_CP_CERCR_MEE                0x8000  /* Multi-user RAM ECC enable */
+#define QE_CP_CERCR_IEE                0x4000  /* Instruction RAM ECC enable */
+#define QE_CP_CERCR_CIR                0x0800  /* Common instruction RAM */
+
+/* I-RAM */
+#define QE_IRAM_IADD_AIE       0x80000000      /* Auto Increment Enable */
+#define QE_IRAM_IADD_BADDR     0x00080000      /* Base Address */
+#define QE_IRAM_READY           0x80000000      /* Ready */
+
+/* UPC */
+#define UPGCR_PROTOCOL 0x80000000      /* protocol ul2 or pl2 */
+#define UPGCR_TMS      0x40000000      /* Transmit master/slave mode */
+#define UPGCR_RMS      0x20000000      /* Receive master/slave mode */
+#define UPGCR_ADDR     0x10000000      /* Master MPHY Addr multiplexing */
+#define UPGCR_DIAG     0x01000000      /* Diagnostic mode */
+
+/* UCC GUEMR register */
+#define UCC_GUEMR_MODE_MASK_RX 0x02
+#define UCC_GUEMR_MODE_FAST_RX 0x02
+#define UCC_GUEMR_MODE_SLOW_RX 0x00
+#define UCC_GUEMR_MODE_MASK_TX 0x01
+#define UCC_GUEMR_MODE_FAST_TX 0x01
+#define UCC_GUEMR_MODE_SLOW_TX 0x00
+#define UCC_GUEMR_MODE_MASK (UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX)
+#define UCC_GUEMR_SET_RESERVED3        0x10    /* Bit 3 in the guemr is reserved
+                                          but must be set to 1 */
+
+/* structure representing UCC SLOW parameter RAM */
+struct ucc_slow_pram {
+       __be16 rbase;           /* RX BD base address */
+       __be16 tbase;           /* TX BD base address */
+       u8 rbmr;                /* RX bus mode register (same as CPM's RFCR) */
+       u8 tbmr;                /* TX bus mode register (same as CPM's TFCR) */
+       __be16 mrblr;           /* Rx buffer length */
+       __be32 rstate;          /* Rx internal state */
+       __be32 rptr;            /* Rx internal data pointer */
+       __be16 rbptr;           /* rb BD Pointer */
+       __be16 rcount;          /* Rx internal byte count */
+       __be32 rtemp;           /* Rx temp */
+       __be32 tstate;          /* Tx internal state */
+       __be32 tptr;            /* Tx internal data pointer */
+       __be16 tbptr;           /* Tx BD pointer */
+       __be16 tcount;          /* Tx byte count */
+       __be32 ttemp;           /* Tx temp */
+       __be32 rcrc;            /* temp receive CRC */
+       __be32 tcrc;            /* temp transmit CRC */
+} __attribute__ ((packed));
+
+/* General UCC SLOW Mode Register (GUMRH & GUMRL) */
+#define UCC_SLOW_GUMR_H_SAM_QMC                0x00000000
+#define UCC_SLOW_GUMR_H_SAM_SATM       0x00008000
+#define UCC_SLOW_GUMR_H_REVD           0x00002000
+#define UCC_SLOW_GUMR_H_TRX            0x00001000
+#define UCC_SLOW_GUMR_H_TTX            0x00000800
+#define UCC_SLOW_GUMR_H_CDP            0x00000400
+#define UCC_SLOW_GUMR_H_CTSP           0x00000200
+#define UCC_SLOW_GUMR_H_CDS            0x00000100
+#define UCC_SLOW_GUMR_H_CTSS           0x00000080
+#define UCC_SLOW_GUMR_H_TFL            0x00000040
+#define UCC_SLOW_GUMR_H_RFW            0x00000020
+#define UCC_SLOW_GUMR_H_TXSY           0x00000010
+#define UCC_SLOW_GUMR_H_4SYNC          0x00000004
+#define UCC_SLOW_GUMR_H_8SYNC          0x00000008
+#define UCC_SLOW_GUMR_H_16SYNC         0x0000000c
+#define UCC_SLOW_GUMR_H_RTSM           0x00000002
+#define UCC_SLOW_GUMR_H_RSYN           0x00000001
+
+#define UCC_SLOW_GUMR_L_TCI            0x10000000
+#define UCC_SLOW_GUMR_L_RINV           0x02000000
+#define UCC_SLOW_GUMR_L_TINV           0x01000000
+#define UCC_SLOW_GUMR_L_TEND           0x00040000
+#define UCC_SLOW_GUMR_L_TDCR_MASK      0x00030000
+#define UCC_SLOW_GUMR_L_TDCR_32                0x00030000
+#define UCC_SLOW_GUMR_L_TDCR_16                0x00020000
+#define UCC_SLOW_GUMR_L_TDCR_8         0x00010000
+#define UCC_SLOW_GUMR_L_TDCR_1         0x00000000
+#define UCC_SLOW_GUMR_L_RDCR_MASK      0x0000c000
+#define UCC_SLOW_GUMR_L_RDCR_32                0x0000c000
+#define UCC_SLOW_GUMR_L_RDCR_16                0x00008000
+#define UCC_SLOW_GUMR_L_RDCR_8         0x00004000
+#define UCC_SLOW_GUMR_L_RDCR_1         0x00000000
+#define UCC_SLOW_GUMR_L_RENC_NRZI      0x00000800
+#define UCC_SLOW_GUMR_L_RENC_NRZ       0x00000000
+#define UCC_SLOW_GUMR_L_TENC_NRZI      0x00000100
+#define UCC_SLOW_GUMR_L_TENC_NRZ       0x00000000
+#define UCC_SLOW_GUMR_L_DIAG_MASK      0x000000c0
+#define UCC_SLOW_GUMR_L_DIAG_LE                0x000000c0
+#define UCC_SLOW_GUMR_L_DIAG_ECHO      0x00000080
+#define UCC_SLOW_GUMR_L_DIAG_LOOP      0x00000040
+#define UCC_SLOW_GUMR_L_DIAG_NORM      0x00000000
+#define UCC_SLOW_GUMR_L_ENR            0x00000020
+#define UCC_SLOW_GUMR_L_ENT            0x00000010
+#define UCC_SLOW_GUMR_L_MODE_MASK      0x0000000F
+#define UCC_SLOW_GUMR_L_MODE_BISYNC    0x00000008
+#define UCC_SLOW_GUMR_L_MODE_AHDLC     0x00000006
+#define UCC_SLOW_GUMR_L_MODE_UART      0x00000004
+#define UCC_SLOW_GUMR_L_MODE_QMC       0x00000002
+
+/* General UCC FAST Mode Register */
+#define UCC_FAST_GUMR_TCI      0x20000000
+#define UCC_FAST_GUMR_TRX      0x10000000
+#define UCC_FAST_GUMR_TTX      0x08000000
+#define UCC_FAST_GUMR_CDP      0x04000000
+#define UCC_FAST_GUMR_CTSP     0x02000000
+#define UCC_FAST_GUMR_CDS      0x01000000
+#define UCC_FAST_GUMR_CTSS     0x00800000
+#define UCC_FAST_GUMR_TXSY     0x00020000
+#define UCC_FAST_GUMR_RSYN     0x00010000
+#define UCC_FAST_GUMR_RTSM     0x00002000
+#define UCC_FAST_GUMR_REVD     0x00000400
+#define UCC_FAST_GUMR_ENR      0x00000020
+#define UCC_FAST_GUMR_ENT      0x00000010
+
+/* UART Slow UCC Event Register (UCCE) */
+#define UCC_UART_UCCE_AB       0x0200
+#define UCC_UART_UCCE_IDLE     0x0100
+#define UCC_UART_UCCE_GRA      0x0080
+#define UCC_UART_UCCE_BRKE     0x0040
+#define UCC_UART_UCCE_BRKS     0x0020
+#define UCC_UART_UCCE_CCR      0x0008
+#define UCC_UART_UCCE_BSY      0x0004
+#define UCC_UART_UCCE_TX       0x0002
+#define UCC_UART_UCCE_RX       0x0001
+
+/* HDLC Slow UCC Event Register (UCCE) */
+#define UCC_HDLC_UCCE_GLR      0x1000
+#define UCC_HDLC_UCCE_GLT      0x0800
+#define UCC_HDLC_UCCE_IDLE     0x0100
+#define UCC_HDLC_UCCE_BRKE     0x0040
+#define UCC_HDLC_UCCE_BRKS     0x0020
+#define UCC_HDLC_UCCE_TXE      0x0010
+#define UCC_HDLC_UCCE_RXF      0x0008
+#define UCC_HDLC_UCCE_BSY      0x0004
+#define UCC_HDLC_UCCE_TXB      0x0002
+#define UCC_HDLC_UCCE_RXB      0x0001
+
+/* BISYNC Slow UCC Event Register (UCCE) */
+#define UCC_BISYNC_UCCE_GRA    0x0080
+#define UCC_BISYNC_UCCE_TXE    0x0010
+#define UCC_BISYNC_UCCE_RCH    0x0008
+#define UCC_BISYNC_UCCE_BSY    0x0004
+#define UCC_BISYNC_UCCE_TXB    0x0002
+#define UCC_BISYNC_UCCE_RXB    0x0001
+
+/* Gigabit Ethernet Fast UCC Event Register (UCCE) */
+#define UCC_GETH_UCCE_MPD       0x80000000
+#define UCC_GETH_UCCE_SCAR      0x40000000
+#define UCC_GETH_UCCE_GRA       0x20000000
+#define UCC_GETH_UCCE_CBPR      0x10000000
+#define UCC_GETH_UCCE_BSY       0x08000000
+#define UCC_GETH_UCCE_RXC       0x04000000
+#define UCC_GETH_UCCE_TXC       0x02000000
+#define UCC_GETH_UCCE_TXE       0x01000000
+#define UCC_GETH_UCCE_TXB7      0x00800000
+#define UCC_GETH_UCCE_TXB6      0x00400000
+#define UCC_GETH_UCCE_TXB5      0x00200000
+#define UCC_GETH_UCCE_TXB4      0x00100000
+#define UCC_GETH_UCCE_TXB3      0x00080000
+#define UCC_GETH_UCCE_TXB2      0x00040000
+#define UCC_GETH_UCCE_TXB1      0x00020000
+#define UCC_GETH_UCCE_TXB0      0x00010000
+#define UCC_GETH_UCCE_RXB7      0x00008000
+#define UCC_GETH_UCCE_RXB6      0x00004000
+#define UCC_GETH_UCCE_RXB5      0x00002000
+#define UCC_GETH_UCCE_RXB4      0x00001000
+#define UCC_GETH_UCCE_RXB3      0x00000800
+#define UCC_GETH_UCCE_RXB2      0x00000400
+#define UCC_GETH_UCCE_RXB1      0x00000200
+#define UCC_GETH_UCCE_RXB0      0x00000100
+#define UCC_GETH_UCCE_RXF7      0x00000080
+#define UCC_GETH_UCCE_RXF6      0x00000040
+#define UCC_GETH_UCCE_RXF5      0x00000020
+#define UCC_GETH_UCCE_RXF4      0x00000010
+#define UCC_GETH_UCCE_RXF3      0x00000008
+#define UCC_GETH_UCCE_RXF2      0x00000004
+#define UCC_GETH_UCCE_RXF1      0x00000002
+#define UCC_GETH_UCCE_RXF0      0x00000001
+
+/* UCC Protocol Specific Mode Register (UPSMR), when used for UART */
+#define UCC_UART_UPSMR_FLC             0x8000
+#define UCC_UART_UPSMR_SL              0x4000
+#define UCC_UART_UPSMR_CL_MASK         0x3000
+#define UCC_UART_UPSMR_CL_8            0x3000
+#define UCC_UART_UPSMR_CL_7            0x2000
+#define UCC_UART_UPSMR_CL_6            0x1000
+#define UCC_UART_UPSMR_CL_5            0x0000
+#define UCC_UART_UPSMR_UM_MASK         0x0c00
+#define UCC_UART_UPSMR_UM_NORMAL       0x0000
+#define UCC_UART_UPSMR_UM_MAN_MULTI    0x0400
+#define UCC_UART_UPSMR_UM_AUTO_MULTI   0x0c00
+#define UCC_UART_UPSMR_FRZ             0x0200
+#define UCC_UART_UPSMR_RZS             0x0100
+#define UCC_UART_UPSMR_SYN             0x0080
+#define UCC_UART_UPSMR_DRT             0x0040
+#define UCC_UART_UPSMR_PEN             0x0010
+#define UCC_UART_UPSMR_RPM_MASK                0x000c
+#define UCC_UART_UPSMR_RPM_ODD         0x0000
+#define UCC_UART_UPSMR_RPM_LOW         0x0004
+#define UCC_UART_UPSMR_RPM_EVEN                0x0008
+#define UCC_UART_UPSMR_RPM_HIGH                0x000C
+#define UCC_UART_UPSMR_TPM_MASK                0x0003
+#define UCC_UART_UPSMR_TPM_ODD         0x0000
+#define UCC_UART_UPSMR_TPM_LOW         0x0001
+#define UCC_UART_UPSMR_TPM_EVEN                0x0002
+#define UCC_UART_UPSMR_TPM_HIGH                0x0003
+
+/* UCC Protocol Specific Mode Register (UPSMR), when used for Ethernet */
+#define UCC_GETH_UPSMR_FTFE     0x80000000
+#define UCC_GETH_UPSMR_PTPE     0x40000000
+#define UCC_GETH_UPSMR_ECM      0x04000000
+#define UCC_GETH_UPSMR_HSE      0x02000000
+#define UCC_GETH_UPSMR_PRO      0x00400000
+#define UCC_GETH_UPSMR_CAP      0x00200000
+#define UCC_GETH_UPSMR_RSH      0x00100000
+#define UCC_GETH_UPSMR_RPM      0x00080000
+#define UCC_GETH_UPSMR_R10M     0x00040000
+#define UCC_GETH_UPSMR_RLPB     0x00020000
+#define UCC_GETH_UPSMR_TBIM     0x00010000
+#define UCC_GETH_UPSMR_RES1     0x00002000
+#define UCC_GETH_UPSMR_RMM      0x00001000
+#define UCC_GETH_UPSMR_CAM      0x00000400
+#define UCC_GETH_UPSMR_BRO      0x00000200
+#define UCC_GETH_UPSMR_SMM     0x00000080
+#define UCC_GETH_UPSMR_SGMM    0x00000020
+
+/* UCC Transmit On Demand Register (UTODR) */
+#define UCC_SLOW_TOD   0x8000
+#define UCC_FAST_TOD   0x8000
+
+/* UCC Bus Mode Register masks */
+/* Not to be confused with the Bundle Mode Register */
+#define UCC_BMR_GBL            0x20
+#define UCC_BMR_BO_BE          0x10
+#define UCC_BMR_CETM           0x04
+#define UCC_BMR_DTB            0x02
+#define UCC_BMR_BDB            0x01
+
+/* Function code masks */
+#define FC_GBL                         0x20
+#define FC_DTB_LCL                     0x02
+#define UCC_FAST_FUNCTION_CODE_GBL     0x20
+#define UCC_FAST_FUNCTION_CODE_DTB_LCL 0x02
+#define UCC_FAST_FUNCTION_CODE_BDB_LCL 0x01
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_QE_H */
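
A sketch tying the clock helpers above together (editorial; the clock-source
string "brg1" and the helper name are assumptions):

	static int qe_uart_set_baud(unsigned int baud)
	{
		enum qe_clock clk = qe_clock_source("brg1");

		if (!qe_clock_is_brg(clk))
			return -EINVAL;
		/* divide-by-16 is the usual choice for UART-style rates */
		return qe_setbrg(clk, baud, 16);
	}
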
diff --git a/include/soc/fsl/qe/qe_ic.h b/include/soc/fsl/qe/qe_ic.h
new file mode 100644 (file)
index 0000000..1e155ca
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE IC external definitions and structure.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_QE_IC_H
+#define _ASM_POWERPC_QE_IC_H
+
+#include <linux/irq.h>
+
+struct device_node;
+struct qe_ic;
+
+#define NUM_OF_QE_IC_GROUPS    6
+
+/* Flags when we init the QE IC */
+#define QE_IC_SPREADMODE_GRP_W                 0x00000001
+#define QE_IC_SPREADMODE_GRP_X                 0x00000002
+#define QE_IC_SPREADMODE_GRP_Y                 0x00000004
+#define QE_IC_SPREADMODE_GRP_Z                 0x00000008
+#define QE_IC_SPREADMODE_GRP_RISCA             0x00000010
+#define QE_IC_SPREADMODE_GRP_RISCB             0x00000020
+
+#define QE_IC_LOW_SIGNAL                       0x00000100
+#define QE_IC_HIGH_SIGNAL                      0x00000200
+
+#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH      0x00001000
+#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH      0x00002000
+#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH      0x00004000
+#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH      0x00008000
+#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH      0x00010000
+#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH      0x00020000
+#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH      0x00040000
+#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH      0x00080000
+#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH  0x00100000
+#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH  0x00200000
+#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH  0x00400000
+#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH  0x00800000
+#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT          (12)
+
+/* QE interrupt sources groups */
+enum qe_ic_grp_id {
+       QE_IC_GRP_W = 0,        /* QE interrupt controller group W */
+       QE_IC_GRP_X,            /* QE interrupt controller group X */
+       QE_IC_GRP_Y,            /* QE interrupt controller group Y */
+       QE_IC_GRP_Z,            /* QE interrupt controller group Z */
+       QE_IC_GRP_RISCA,        /* QE interrupt controller RISC group A */
+       QE_IC_GRP_RISCB         /* QE interrupt controller RISC group B */
+};
+
+#ifdef CONFIG_QUICC_ENGINE
+void qe_ic_init(struct device_node *node, unsigned int flags,
+               void (*low_handler)(struct irq_desc *desc),
+               void (*high_handler)(struct irq_desc *desc));
+unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
+unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
+#else
+static inline void qe_ic_init(struct device_node *node, unsigned int flags,
+               void (*low_handler)(struct irq_desc *desc),
+               void (*high_handler)(struct irq_desc *desc))
+{}
+static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+{ return 0; }
+static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+{ return 0; }
+#endif /* CONFIG_QUICC_ENGINE */
+
+void qe_ic_set_highest_priority(unsigned int virq, int high);
+int qe_ic_set_priority(unsigned int virq, unsigned int priority);
+int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
+
+static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
+{
+       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+       unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+       if (cascade_irq != NO_IRQ)
+               generic_handle_irq(cascade_irq);
+}
+
+static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
+{
+       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+       unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+
+       if (cascade_irq != NO_IRQ)
+               generic_handle_irq(cascade_irq);
+}
+
+static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
+{
+       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+       unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+
+       if (cascade_irq != NO_IRQ)
+               generic_handle_irq(cascade_irq);
+
+       chip->irq_eoi(&desc->irq_data);
+}
+
+static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
+{
+       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+       unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+
+       if (cascade_irq != NO_IRQ)
+               generic_handle_irq(cascade_irq);
+
+       chip->irq_eoi(&desc->irq_data);
+}
+
+static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
+{
+       struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+       unsigned int cascade_irq;
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+
+       cascade_irq = qe_ic_get_high_irq(qe_ic);
+       if (cascade_irq == NO_IRQ)
+               cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+       if (cascade_irq != NO_IRQ)
+               generic_handle_irq(cascade_irq);
+
+       chip->irq_eoi(&desc->irq_data);
+}
+
+#endif /* _ASM_POWERPC_QE_IC_H */
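
Platform code typically locates the QE IC node and passes the cascade
handlers matching its parent interrupt controller to qe_ic_init(). A sketch
assuming an MPIC parent and the "fsl,qe-ic" compatible string:

	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
	if (np) {
		qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
			   qe_ic_cascade_high_mpic);
		of_node_put(np);
	}
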
diff --git a/include/soc/fsl/qe/ucc.h b/include/soc/fsl/qe/ucc.h
new file mode 100644 (file)
index 0000000..894f14c
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * Internal header file for UCC unit routines.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_H__
+#define __UCC_H__
+
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#define STATISTICS
+
+#define UCC_MAX_NUM    8
+
+/* Slow or fast type for UCCs */
+enum ucc_speed_type {
+       UCC_SPEED_TYPE_FAST = UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX,
+       UCC_SPEED_TYPE_SLOW = UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX
+};
+
+/* ucc_set_type
+ * Sets UCC to slow or fast mode.
+ *
+ * ucc_num - (In) number of UCC (0-7).
+ * speed   - (In) slow or fast mode for UCC.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed);
+
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num);
+
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+       enum comm_dir mode);
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask);
+
+/* QE MUX clock routing for UCC */
+static inline int ucc_set_qe_mux_grant(unsigned int ucc_num, int set)
+{
+       return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_GRANT);
+}
+
+static inline int ucc_set_qe_mux_tsa(unsigned int ucc_num, int set)
+{
+       return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_TSA);
+}
+
+static inline int ucc_set_qe_mux_bkpt(unsigned int ucc_num, int set)
+{
+       return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_BKPT);
+}
+
+#endif                         /* __UCC_H__ */
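
A sketch of bringing a UCC's clocking online with these helpers (UCC
numbering is zero-based, so 0 is UCC1; the clock choices are assumptions):

	int ret;

	ret = ucc_set_type(0, UCC_SPEED_TYPE_FAST);
	if (!ret)
		ret = ucc_set_qe_mux_rxtx(0, QE_CLK9, COMM_DIR_RX);
	if (!ret)
		ret = ucc_set_qe_mux_rxtx(0, QE_CLK10, COMM_DIR_TX);
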
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
new file mode 100644 (file)
index 0000000..df8ea79
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Internal header file for UCC FAST unit routines.
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_FAST_H__
+#define __UCC_FAST_H__
+
+#include <linux/kernel.h>
+
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+
+/* Receive BD's status */
+#define R_E    0x80000000      /* buffer empty */
+#define R_W    0x20000000      /* wrap bit */
+#define R_I    0x10000000      /* interrupt on reception */
+#define R_L    0x08000000      /* last */
+#define R_F    0x04000000      /* first */
+
+/* transmit BD's status */
+#define T_R    0x80000000      /* ready bit */
+#define T_W    0x20000000      /* wrap bit */
+#define T_I    0x10000000      /* interrupt on completion */
+#define T_L    0x08000000      /* last */
+
+/* Rx data buffers must be 4-byte aligned in most cases */
+#define UCC_FAST_RX_ALIGN                      4
+#define UCC_FAST_MRBLR_ALIGNMENT               4
+#define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT      8
+
+/* Sizes */
+#define UCC_FAST_URFS_MIN_VAL                          0x88
+#define UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR        8
+
+/* ucc_fast_channel_protocol_mode - UCC FAST mode */
+enum ucc_fast_channel_protocol_mode {
+       UCC_FAST_PROTOCOL_MODE_HDLC = 0x00000000,
+       UCC_FAST_PROTOCOL_MODE_RESERVED01 = 0x00000001,
+       UCC_FAST_PROTOCOL_MODE_RESERVED_QMC = 0x00000002,
+       UCC_FAST_PROTOCOL_MODE_RESERVED02 = 0x00000003,
+       UCC_FAST_PROTOCOL_MODE_RESERVED_UART = 0x00000004,
+       UCC_FAST_PROTOCOL_MODE_RESERVED03 = 0x00000005,
+       UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_1 = 0x00000006,
+       UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_2 = 0x00000007,
+       UCC_FAST_PROTOCOL_MODE_RESERVED_BISYNC = 0x00000008,
+       UCC_FAST_PROTOCOL_MODE_RESERVED04 = 0x00000009,
+       UCC_FAST_PROTOCOL_MODE_ATM = 0x0000000A,
+       UCC_FAST_PROTOCOL_MODE_RESERVED05 = 0x0000000B,
+       UCC_FAST_PROTOCOL_MODE_ETHERNET = 0x0000000C,
+       UCC_FAST_PROTOCOL_MODE_RESERVED06 = 0x0000000D,
+       UCC_FAST_PROTOCOL_MODE_POS = 0x0000000E,
+       UCC_FAST_PROTOCOL_MODE_RESERVED07 = 0x0000000F
+};
+
+/* ucc_fast_transparent_txrx - UCC Fast Transparent TX & RX */
+enum ucc_fast_transparent_txrx {
+       UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL = 0x00000000,
+       UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_TRANSPARENT = 0x18000000
+};
+
+/* UCC fast diagnostic mode */
+enum ucc_fast_diag_mode {
+       UCC_FAST_DIAGNOSTIC_NORMAL = 0x0,
+       UCC_FAST_DIAGNOSTIC_LOCAL_LOOP_BACK = 0x40000000,
+       UCC_FAST_DIAGNOSTIC_AUTO_ECHO = 0x80000000,
+       UCC_FAST_DIAGNOSTIC_LOOP_BACK_AND_ECHO = 0xC0000000
+};
+
+/* UCC fast Sync length (transparent mode only) */
+enum ucc_fast_sync_len {
+       UCC_FAST_SYNC_LEN_NOT_USED = 0x0,
+       UCC_FAST_SYNC_LEN_AUTOMATIC = 0x00004000,
+       UCC_FAST_SYNC_LEN_8_BIT = 0x00008000,
+       UCC_FAST_SYNC_LEN_16_BIT = 0x0000C000
+};
+
+/* UCC fast RTS mode */
+enum ucc_fast_ready_to_send {
+       UCC_FAST_SEND_IDLES_BETWEEN_FRAMES = 0x00000000,
+       UCC_FAST_SEND_FLAGS_BETWEEN_FRAMES = 0x00002000
+};
+
+/* UCC fast receiver decoding mode */
+enum ucc_fast_rx_decoding_method {
+       UCC_FAST_RX_ENCODING_NRZ = 0x00000000,
+       UCC_FAST_RX_ENCODING_NRZI = 0x00000800,
+       UCC_FAST_RX_ENCODING_RESERVED0 = 0x00001000,
+       UCC_FAST_RX_ENCODING_RESERVED1 = 0x00001800
+};
+
+/* UCC fast transmitter encoding mode */
+enum ucc_fast_tx_encoding_method {
+       UCC_FAST_TX_ENCODING_NRZ = 0x00000000,
+       UCC_FAST_TX_ENCODING_NRZI = 0x00000100,
+       UCC_FAST_TX_ENCODING_RESERVED0 = 0x00000200,
+       UCC_FAST_TX_ENCODING_RESERVED1 = 0x00000300
+};
+
+/* UCC fast CRC length */
+enum ucc_fast_transparent_tcrc {
+       UCC_FAST_16_BIT_CRC = 0x00000000,
+       UCC_FAST_CRC_RESERVED0 = 0x00000040,
+       UCC_FAST_32_BIT_CRC = 0x00000080,
+       UCC_FAST_CRC_RESERVED1 = 0x000000C0
+};
+
+/* Fast UCC initialization structure */
+struct ucc_fast_info {
+       int ucc_num;
+       enum qe_clock rx_clock;
+       enum qe_clock tx_clock;
+       u32 regs;
+       int irq;
+       u32 uccm_mask;
+       int bd_mem_part;
+       int brkpt_support;
+       int grant_support;
+       int tsa;
+       int cdp;
+       int cds;
+       int ctsp;
+       int ctss;
+       int tci;
+       int txsy;
+       int rtsm;
+       int revd;
+       int rsyn;
+       u16 max_rx_buf_length;
+       u16 urfs;
+       u16 urfet;
+       u16 urfset;
+       u16 utfs;
+       u16 utfet;
+       u16 utftt;
+       u16 ufpt;
+       enum ucc_fast_channel_protocol_mode mode;
+       enum ucc_fast_transparent_txrx ttx_trx;
+       enum ucc_fast_tx_encoding_method tenc;
+       enum ucc_fast_rx_decoding_method renc;
+       enum ucc_fast_transparent_tcrc tcrc;
+       enum ucc_fast_sync_len synl;
+};
+
+struct ucc_fast_private {
+       struct ucc_fast_info *uf_info;
+       struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */
+       u32 __iomem *p_ucce;    /* a pointer to the event register in memory. */
+       u32 __iomem *p_uccm;    /* a pointer to the mask register in memory. */
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+       u16 __iomem *p_utodr;   /* pointer to the transmit on demand register */
+#endif
+       int enabled_tx;         /* Whether channel is enabled for Tx (ENT) */
+       int enabled_rx;         /* Whether channel is enabled for Rx (ENR) */
+       int stopped_tx;         /* Whether channel has been stopped for Tx
+                                  (STOP_TX, etc.) */
+       int stopped_rx;         /* Whether channel has been stopped for Rx */
+       u32 ucc_fast_tx_virtual_fifo_base_offset;/* offset of the Tx
+                                                   virtual FIFO base */
+       u32 ucc_fast_rx_virtual_fifo_base_offset;/* offset of the Rx
+                                                   virtual FIFO base */
+#ifdef STATISTICS
+       u32 tx_frames;          /* Transmitted frames counter. */
+       u32 rx_frames;          /* Received frames counter (only frames
+                                  passed to application). */
+       u32 tx_discarded;       /* Discarded Tx frames counter (frames that
+                                  were discarded by the driver due to
+                                  errors) */
+       u32 rx_discarded;       /* Discarded Rx frames counter (frames that
+                                  were discarded by the driver due to
+                                  errors) */
+#endif                         /* STATISTICS */
+       u16 mrblr;              /* maximum receive buffer length */
+};
+
+/* ucc_fast_init
+ * Initializes Fast UCC according to user provided parameters.
+ *
+ * uf_info  - (In) pointer to the fast UCC info structure.
+ * uccf_ret - (Out) pointer to the fast UCC structure.
+ */
+int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret);
+
+/* ucc_fast_free
+ * Frees all resources for fast UCC.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ */
+void ucc_fast_free(struct ucc_fast_private * uccf);
+
+/* ucc_fast_enable
+ * Enables a fast UCC port.
+ * This routine enables Tx and/or Rx through the General UCC Mode Register.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ * mode - (In) TX, RX, or both.
+ */
+void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode);
+
+/* ucc_fast_disable
+ * Disables a fast UCC port.
+ * This routine disables Tx and/or Rx through the General UCC Mode Register.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ * mode - (In) TX, RX, or both.
+ */
+void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode);
+
+/* ucc_fast_irq
+ * Handles interrupts on fast UCC.
+ * Called from the general interrupt routine to handle interrupts on fast UCC.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ */
+void ucc_fast_irq(struct ucc_fast_private * uccf);
+
+/* ucc_fast_transmit_on_demand
+ * Immediately forces a poll of the transmitter for data to be sent.
+ * Normally the hardware polls periodically for data that the transmit
+ * routine has set up to be transmitted. When that polling cycle is not
+ * soon enough, this optional routine forces a poll right away. To use
+ * it, call the transmit routine and then call this routine immediately
+ * afterwards.
+ *
+ * uccf - (In) pointer to the fast UCC structure.
+ */
+void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf);
+
+u32 ucc_fast_get_qe_cr_subblock(int uccf_num);
+
+void ucc_fast_dump_regs(struct ucc_fast_private * uccf);
+
+#endif                         /* __UCC_FAST_H__ */
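
A sketch of the init/enable/kick sequence documented above (editorial;
assumes a driver-filled uf_info and that a non-zero return means failure):

	static void uccf_start(struct ucc_fast_info *uf_info)
	{
		struct ucc_fast_private *uccf;

		if (ucc_fast_init(uf_info, &uccf))
			return;
		ucc_fast_enable(uccf, COMM_DIR_RX_AND_TX);
		/* ... driver queues a Tx BD here ... */
		ucc_fast_transmit_on_demand(uccf); /* immediate Tx poll */
	}
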
diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h
new file mode 100644 (file)
index 0000000..6c0573a
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:    Shlomi Gridish <gridish@freescale.com>
+ *             Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * Internal header file for UCC SLOW unit routines.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_SLOW_H__
+#define __UCC_SLOW_H__
+
+#include <linux/kernel.h>
+
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+
+/* transmit BD's status */
+#define T_R    0x80000000      /* ready bit */
+#define T_PAD  0x40000000      /* add pads to short frames */
+#define T_W    0x20000000      /* wrap bit */
+#define T_I    0x10000000      /* interrupt on completion */
+#define T_L    0x08000000      /* last */
+
+#define T_A    0x04000000      /* Address - the data transmitted as address
+                                  chars */
+#define T_TC   0x04000000      /* transmit CRC */
+#define T_CM   0x02000000      /* continuous mode */
+#define T_DEF  0x02000000      /* collision on previous attempt to transmit */
+#define T_P    0x01000000      /* Preamble - send Preamble sequence before
+                                  data */
+#define T_HB   0x01000000      /* heartbeat */
+#define T_NS   0x00800000      /* No Stop */
+#define T_LC   0x00800000      /* late collision */
+#define T_RL   0x00400000      /* retransmission limit */
+#define T_UN   0x00020000      /* underrun */
+#define T_CT   0x00010000      /* CTS lost */
+#define T_CSL  0x00010000      /* carrier sense lost */
+#define T_RC   0x003c0000      /* retry count */
+
+/* Receive BD's status */
+#define R_E    0x80000000      /* buffer empty */
+#define R_W    0x20000000      /* wrap bit */
+#define R_I    0x10000000      /* interrupt on reception */
+#define R_L    0x08000000      /* last */
+#define R_C    0x08000000      /* the last byte in this buffer is a cntl
+                                  char */
+#define R_F    0x04000000      /* first */
+#define R_A    0x04000000      /* the first byte in this buffer is address
+                                  byte */
+#define R_CM   0x02000000      /* continuous mode */
+#define R_ID   0x01000000      /* buffer close on reception of idles */
+#define R_M    0x01000000      /* Frame received because of promiscuous
+                                  mode */
+#define R_AM   0x00800000      /* Address match */
+#define R_DE   0x00800000      /* DPLL error */
+#define R_LG   0x00200000      /* Frame length violation */
+#define R_BR   0x00200000      /* Break received */
+#define R_NO   0x00100000      /* Rx Non Octet Aligned Packet */
+#define R_FR   0x00100000      /* Framing Error (no stop bit) character
+                                  received */
+#define R_PR   0x00080000      /* Parity Error character received */
+#define R_AB   0x00080000      /* Frame Aborted */
+#define R_SH   0x00080000      /* frame is too short */
+#define R_CR   0x00040000      /* CRC Error */
+#define R_OV   0x00020000      /* Overrun */
+#define R_CD   0x00010000      /* CD lost */
+#define R_CL   0x00010000      /* this frame is closed because of a
+                                  collision */
+
+/* Rx data buffers must be 4-byte aligned in most cases. */
+#define UCC_SLOW_RX_ALIGN              4
+#define UCC_SLOW_MRBLR_ALIGNMENT       4
+#define UCC_SLOW_PRAM_SIZE             0x100
+#define ALIGNMENT_OF_UCC_SLOW_PRAM     64
+
+/* UCC Slow Channel Protocol Mode */
+enum ucc_slow_channel_protocol_mode {
+       UCC_SLOW_CHANNEL_PROTOCOL_MODE_QMC = 0x00000002,
+       UCC_SLOW_CHANNEL_PROTOCOL_MODE_UART = 0x00000004,
+       UCC_SLOW_CHANNEL_PROTOCOL_MODE_BISYNC = 0x00000008,
+};
+
+/* UCC Slow Transparent Transmit CRC (TCRC) */
+enum ucc_slow_transparent_tcrc {
+       /* 16-bit CCITT CRC (HDLC).  (X16 + X12 + X5 + 1) */
+       UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC16 = 0x00000000,
+       /* CRC16 (BISYNC).  (X16 + X15 + X2 + 1) */
+       UCC_SLOW_TRANSPARENT_TCRC_CRC16 = 0x00004000,
+       /* 32-bit CCITT CRC (Ethernet and HDLC) */
+       UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC32 = 0x00008000,
+};
+
+/* UCC Slow oversampling rate for transmitter (TDCR) */
+enum ucc_slow_tx_oversampling_rate {
+       /* 1x clock mode */
+       UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_1 = 0x00000000,
+       /* 8x clock mode */
+       UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_8 = 0x00010000,
+       /* 16x clock mode */
+       UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_16 = 0x00020000,
+       /* 32x clock mode */
+       UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_32 = 0x00030000,
+};
+
+/* UCC Slow oversampling rate for receiver (RDCR) */
+enum ucc_slow_rx_oversampling_rate {
+       /* 1x clock mode */
+       UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_1 = 0x00000000,
+       /* 8x clock mode */
+       UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_8 = 0x00004000,
+       /* 16x clock mode */
+       UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_16 = 0x00008000,
+       /* 32x clock mode */
+       UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_32 = 0x0000c000,
+};
+
+/* UCC Slow Transmitter encoding method (TENC) */
+enum ucc_slow_tx_encoding_method {
+       UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZ = 0x00000000,
+       UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZI = 0x00000100
+};
+
+/* UCC Slow Receiver decoding method (RENC) */
+enum ucc_slow_rx_decoding_method {
+       UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZ = 0x00000000,
+       UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZI = 0x00000800
+};
+
+/* UCC Slow Diagnostic mode (DIAG) */
+enum ucc_slow_diag_mode {
+       UCC_SLOW_DIAG_MODE_NORMAL = 0x00000000,
+       UCC_SLOW_DIAG_MODE_LOOPBACK = 0x00000040,
+       UCC_SLOW_DIAG_MODE_ECHO = 0x00000080,
+       UCC_SLOW_DIAG_MODE_LOOPBACK_ECHO = 0x000000c0
+};
+
+struct ucc_slow_info {
+       int ucc_num;
+       int protocol;                   /* QE_CR_PROTOCOL_xxx */
+       enum qe_clock rx_clock;
+       enum qe_clock tx_clock;
+       phys_addr_t regs;
+       int irq;
+       u16 uccm_mask;
+       int data_mem_part;
+       int init_tx;
+       int init_rx;
+       u32 tx_bd_ring_len;
+       u32 rx_bd_ring_len;
+       int rx_interrupts;
+       int brkpt_support;
+       int grant_support;
+       int tsa;
+       int cdp;
+       int cds;
+       int ctsp;
+       int ctss;
+       int rinv;
+       int tinv;
+       int rtsm;
+       int rfw;
+       int tci;
+       int tend;
+       int tfl;
+       int txsy;
+       u16 max_rx_buf_length;
+       enum ucc_slow_transparent_tcrc tcrc;
+       enum ucc_slow_channel_protocol_mode mode;
+       enum ucc_slow_diag_mode diag;
+       enum ucc_slow_tx_oversampling_rate tdcr;
+       enum ucc_slow_rx_oversampling_rate rdcr;
+       enum ucc_slow_tx_encoding_method tenc;
+       enum ucc_slow_rx_decoding_method renc;
+};
+
+struct ucc_slow_private {
+       struct ucc_slow_info *us_info;
+       struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
+       struct ucc_slow_pram *us_pram;  /* a pointer to the parameter RAM */
+       u32 us_pram_offset;
+       int enabled_tx;         /* Whether channel is enabled for Tx (ENT) */
+       int enabled_rx;         /* Whether channel is enabled for Rx (ENR) */
+       int stopped_tx;         /* Whether channel has been stopped for Tx
+                                  (STOP_TX, etc.) */
+       int stopped_rx;         /* Whether channel has been stopped for Rx */
+       struct list_head confQ; /* frames passed to chip waiting for tx */
+       u32 first_tx_bd_mask;   /* mask is used in Tx routine to save status
+                                  and length for first BD in a frame */
+       u32 tx_base_offset;     /* offset of the first BD in the Tx BD
+                                  table (in MURAM) */
+       u32 rx_base_offset;     /* offset of the first BD in the Rx BD
+                                  table (in MURAM) */
+       struct qe_bd *confBd;   /* next BD for confirm after Tx */
+       struct qe_bd *tx_bd;    /* next BD for new Tx request */
+       struct qe_bd *rx_bd;    /* next BD to collect after Rx */
+       void *p_rx_frame;       /* accumulating receive frame */
+       u16 *p_ucce;            /* a pointer to the event register in memory */
+       u16 *p_uccm;            /* a pointer to the mask register in memory */
+       u16 saved_uccm;         /* a saved mask for the RX Interrupt bits */
+#ifdef STATISTICS
+       u32 tx_frames;          /* Transmitted frames counters */
+       u32 rx_frames;          /* Received frames counters (only frames
+                                  passed to application) */
+       u32 rx_discarded;       /* Discarded frames counters (frames that
+                                  were discarded by the driver due to
+                                  errors) */
+#endif                         /* STATISTICS */
+};
+
+/* ucc_slow_init
+ * Initializes Slow UCC according to provided parameters.
+ *
+ * us_info  - (In) pointer to the slow UCC info structure.
+ * uccs_ret - (Out) pointer to the slow UCC structure.
+ */
+int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret);
+
+/* ucc_slow_free
+ * Frees all resources for slow UCC.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ */
+void ucc_slow_free(struct ucc_slow_private * uccs);
+
+/* ucc_slow_enable
+ * Enables a slow UCC port.
+ * This routine enables Tx and/or Rx through the General UCC Mode Register.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ * mode - (In) TX, RX, or both.
+ */
+void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode);
+
+/* ucc_slow_disable
+ * Disables a slow UCC port.
+ * This routine disables Tx and/or Rx through the General UCC Mode Register.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ * mode - (In) TX, RX, or both.
+ */
+void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode);
+
+/* ucc_slow_graceful_stop_tx
+ * Smoothly stops transmission on a specified slow UCC.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ */
+void ucc_slow_graceful_stop_tx(struct ucc_slow_private *uccs);
+
+/* ucc_slow_stop_tx
+ * Stops transmission on a specified slow UCC.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ */
+void ucc_slow_stop_tx(struct ucc_slow_private *uccs);
+
+/* ucc_slow_restart_tx
+ * Restarts transmitting on a specified slow UCC.
+ *
+ * uccs - (In) pointer to the slow UCC structure.
+ */
+void ucc_slow_restart_tx(struct ucc_slow_private *uccs);
+
+u32 ucc_slow_get_qe_cr_subblock(int uccs_num);
+
+#endif                         /* __UCC_SLOW_H__ */
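For orientation, a consumer of this header would typically drive the API as below. This is a hypothetical driver fragment, not part of the patch; it assumes the QE headers' enum comm_dir provides COMM_DIR_RX_AND_TX, as other UCC users do:

    static struct ucc_slow_private *uccs;

    static int example_ucc_start(struct ucc_slow_info *us_info)
    {
            int ret;

            /* Allocate BDs/parameter RAM and program the UCC registers */
            ret = ucc_slow_init(us_info, &uccs);
            if (ret)
                    return ret;

            /* Set ENT/ENR in the General UCC Mode Register */
            ucc_slow_enable(uccs, COMM_DIR_RX_AND_TX);
            return 0;
    }

    static void example_ucc_stop(void)
    {
            ucc_slow_graceful_stop_tx(uccs);        /* drain pending Tx */
            ucc_slow_disable(uccs, COMM_DIR_RX_AND_TX);
            ucc_slow_free(uccs);
    }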
index 116a166..0a11396 100644 (file)
@@ -269,6 +269,25 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * NMI-safe cmpxchg implementation.
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+{
+       return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+/**
+ * gen_pool_alloc_algo - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @algo: allocation algorithm to use for this request
+ * @data: data passed to @algo
+ *
+ * Allocate the requested number of bytes from the specified pool,
+ * using the given allocation function. Cannot be used in an NMI
+ * handler on architectures without an NMI-safe cmpxchg
+ * implementation.
+ */
+unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+               genpool_algo_t algo, void *data)
 {
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
@@ -290,8 +309,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 
                end_bit = chunk_size(chunk) >> order;
 retry:
-               start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
-                               pool->data);
+               start_bit = algo(chunk->bits, end_bit, start_bit,
+                                nbits, data, pool);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -310,7 +329,7 @@ retry:
        rcu_read_unlock();
        return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_algo);
 
 /**
  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
@@ -501,14 +520,73 @@ EXPORT_SYMBOL(gen_pool_set_algo);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
 unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
-               unsigned long start, unsigned int nr, void *data)
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool)
 {
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
 }
 EXPORT_SYMBOL(gen_pool_first_fit);
 
+/**
+ * gen_pool_first_fit_align - find the first available region
+ * of memory matching the size requirement (alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool)
+{
+       struct genpool_data_align *alignment;
+       unsigned long align_mask;
+       int order;
+
+       alignment = data;
+       order = pool->min_alloc_order;
+       align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
+       return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_align);
+
+/**
+ * gen_pool_fixed_alloc - reserve a specific region
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data specifying the fixed offset to reserve (struct genpool_data_fixed)
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool)
+{
+       struct genpool_data_fixed *fixed_data;
+       int order;
+       unsigned long offset_bit;
+       unsigned long start_bit;
+
+       fixed_data = data;
+       order = pool->min_alloc_order;
+       offset_bit = fixed_data->offset >> order;
+       if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
+               return size;
+
+       start_bit = bitmap_find_next_zero_area(map, size,
+                       start + offset_bit, nr, 0);
+       if (start_bit != offset_bit)
+               start_bit = size;
+       return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_fixed_alloc);
+
 /**
  * gen_pool_first_fit_order_align - find the first available region
  * of memory matching the size requirement. The region will be aligned
@@ -518,10 +596,11 @@ EXPORT_SYMBOL(gen_pool_first_fit);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
 unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
-               unsigned int nr, void *data)
+               unsigned int nr, void *data, struct gen_pool *pool)
 {
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;
 
@@ -537,12 +616,14 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  *
  * Iterate over the bitmap to find the smallest free region
  * which we can allocate the memory.
  */
 unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
-               unsigned long start, unsigned int nr, void *data)
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool)
 {
        unsigned long start_bit = size;
        unsigned long len = size + 1;
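Taken together, gen_pool_alloc_algo() lets callers choose the placement policy per allocation instead of per pool. A minimal sketch, assuming the companion types struct genpool_data_align and struct genpool_data_fixed added alongside the new algorithms:

    #include <linux/genalloc.h>

    /* Allocate 256 bytes aligned to 64 bytes, first fit within the pool */
    static unsigned long alloc_aligned(struct gen_pool *pool)
    {
            struct genpool_data_align align = { .align = 64 };

            return gen_pool_alloc_algo(pool, 256,
                                       gen_pool_first_fit_align, &align);
    }

    /* Reserve 256 bytes at a fixed offset from the start of the pool;
     * the offset must be a multiple of 1 << min_alloc_order, or
     * gen_pool_fixed_alloc() fails via the WARN_ON above.
     */
    static unsigned long alloc_fixed(struct gen_pool *pool)
    {
            struct genpool_data_fixed fixed = { .offset = 0x1000 };

            return gen_pool_alloc_algo(pool, 256,
                                       gen_pool_fixed_alloc, &fixed);
    }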
index bec27fc..682aae8 100644 (file)
@@ -101,6 +101,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
 
        raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
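The one-line change above pairs the enable_kernel_altivec() call earlier in this function with an explicit disable; after the FP/VMX/VSX context-switch rework in this merge, kernel vector sections are expected to be bracketed on both sides. The pattern, sketched (helper names as used by this file):

    preempt_disable();
    enable_kernel_altivec();
    /* ... AltiVec syndrome computation ... */
    disable_kernel_altivec();       /* new: drop AltiVec before re-enabling preemption */
    preempt_enable();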
 
index 826470d..96e2486 100755 (executable)
@@ -263,7 +263,8 @@ if ($arch eq "x86_64") {
 
 } elsif ($arch eq "powerpc") {
     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
-    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
+    # See comment in the sparc64 section for why we use '\w'.
+    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
 
     if ($bits == 64) {
index 5fa4870..912445f 100644 (file)
@@ -1,4 +1,4 @@
-TEST_PROGS := gettimeofday
+TEST_PROGS := gettimeofday context_switch
 
 CFLAGS += -O2
 
@@ -6,6 +6,9 @@ all: $(TEST_PROGS)
 
 $(TEST_PROGS): ../harness.c
 
+context_switch: ../utils.c
+context_switch: LDLIBS += -lpthread
+
 include ../../lib.mk
 
 clean:
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
new file mode 100644 (file)
index 0000000..7b78594
--- /dev/null
@@ -0,0 +1,466 @@
+/*
+ * Context switch microbenchmark.
+ *
+ * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <signal.h>
+#include <assert.h>
+#include <pthread.h>
+#include <limits.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/shm.h>
+#include <linux/futex.h>
+
+#include "../utils.h"
+
+static unsigned int timeout = 30;
+
+static int touch_vdso;
+struct timeval tv;
+
+static int touch_fp = 1;
+double fp;
+
+static int touch_vector = 1;
+typedef int v4si __attribute__ ((vector_size (16)));
+v4si a, b, c;
+
+#ifdef __powerpc__
+static int touch_altivec = 1;
+
+static void __attribute__((__target__("no-vsx"))) altivec_touch_fn(void)
+{
+       c = a + b;
+}
+#endif
+
+static void touch(void)
+{
+       if (touch_vdso)
+               gettimeofday(&tv, NULL);
+
+       if (touch_fp)
+               fp += 0.1;
+
+#ifdef __powerpc__
+       if (touch_altivec)
+               altivec_touch_fn();
+#endif
+
+       if (touch_vector)
+               c = a + b;
+
+       asm volatile("# %0 %1 %2": : "r"(&tv), "r"(&fp), "r"(&c));
+}
+
+static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
+{
+       pthread_t tid;
+       cpu_set_t cpuset;
+       pthread_attr_t attr;
+
+       CPU_ZERO(&cpuset);
+       CPU_SET(cpu, &cpuset);
+
+       pthread_attr_init(&attr);
+
+       if (pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset)) {
+               perror("pthread_attr_setaffinity_np");
+               exit(1);
+       }
+
+       if (pthread_create(&tid, &attr, fn, arg)) {
+               perror("pthread_create");
+               exit(1);
+       }
+}
+
+static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
+{
+       int pid;
+       cpu_set_t cpuset;
+
+       pid = fork();
+       if (pid == -1) {
+               perror("fork");
+               exit(1);
+       }
+
+       if (pid)
+               return;
+
+       CPU_ZERO(&cpuset);
+       CPU_SET(cpu, &cpuset);
+
+       if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
+               perror("sched_setaffinity");
+               exit(1);
+       }
+
+       fn(arg);
+
+       exit(0);
+}
+
+static unsigned long iterations;
+static unsigned long iterations_prev;
+
+static void sigalrm_handler(int junk)
+{
+       unsigned long i = iterations;
+
+       printf("%ld\n", i - iterations_prev);
+       iterations_prev = i;
+
+       if (--timeout == 0)
+               kill(0, SIGUSR1);
+
+       alarm(1);
+}
+
+static void sigusr1_handler(int junk)
+{
+       exit(0);
+}
+
+struct actions {
+       void (*setup)(int, int);
+       void *(*thread1)(void *);
+       void *(*thread2)(void *);
+};
+
+#define READ 0
+#define WRITE 1
+
+static int pipe_fd1[2];
+static int pipe_fd2[2];
+
+static void pipe_setup(int cpu1, int cpu2)
+{
+       if (pipe(pipe_fd1) || pipe(pipe_fd2))
+               exit(1);
+}
+
+static void *pipe_thread1(void *arg)
+{
+       signal(SIGALRM, sigalrm_handler);
+       alarm(1);
+
+       while (1) {
+               assert(read(pipe_fd1[READ], &c, 1) == 1);
+               touch();
+
+               assert(write(pipe_fd2[WRITE], &c, 1) == 1);
+               touch();
+
+               iterations += 2;
+       }
+
+       return NULL;
+}
+
+static void *pipe_thread2(void *arg)
+{
+       while (1) {
+               assert(write(pipe_fd1[WRITE], &c, 1) == 1);
+               touch();
+
+               assert(read(pipe_fd2[READ], &c, 1) == 1);
+               touch();
+       }
+
+       return NULL;
+}
+
+static struct actions pipe_actions = {
+       .setup = pipe_setup,
+       .thread1 = pipe_thread1,
+       .thread2 = pipe_thread2,
+};
+
+static void yield_setup(int cpu1, int cpu2)
+{
+       if (cpu1 != cpu2) {
+               fprintf(stderr, "Both threads must be on the same CPU for yield test\n");
+               exit(1);
+       }
+}
+
+static void *yield_thread1(void *arg)
+{
+       signal(SIGALRM, sigalrm_handler);
+       alarm(1);
+
+       while (1) {
+               sched_yield();
+               touch();
+
+               iterations += 2;
+       }
+
+       return NULL;
+}
+
+static void *yield_thread2(void *arg)
+{
+       while (1) {
+               sched_yield();
+               touch();
+       }
+
+       return NULL;
+}
+
+static struct actions yield_actions = {
+       .setup = yield_setup,
+       .thread1 = yield_thread1,
+       .thread2 = yield_thread2,
+};
+
+static long sys_futex(void *addr1, int op, int val1, struct timespec *timeout,
+                     void *addr2, int val3)
+{
+       return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
+}
+
+static unsigned long cmpxchg(unsigned long *p, unsigned long expected,
+                            unsigned long desired)
+{
+       unsigned long exp = expected;
+
+       __atomic_compare_exchange_n(p, &exp, desired, 0,
+                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+       return exp;
+}
+
+static unsigned long xchg(unsigned long *p, unsigned long val)
+{
+       return __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);
+}
+
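+/*
+ * The two helpers below form a minimal three-state futex mutex in the
+ * style of Drepper's "Futexes Are Tricky": 0 = unlocked, 1 = locked
+ * with no waiters, 2 = locked with possible waiters.  Only state 2
+ * forces a FUTEX_WAKE on unlock, so uncontended lock/unlock never
+ * enters the kernel.
+ */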
+static int mutex_lock(unsigned long *m)
+{
+       int c;
+
+       c = cmpxchg(m, 0, 1);
+       if (!c)
+               return 0;
+
+       if (c == 1)
+               c = xchg(m, 2);
+
+       while (c) {
+               sys_futex(m, FUTEX_WAIT, 2, NULL, NULL, 0);
+               c = xchg(m, 2);
+       }
+
+       return 0;
+}
+
+static int mutex_unlock(unsigned long *m)
+{
+       if (*m == 2)
+               *m = 0;
+       else if (xchg(m, 0) == 1)
+               return 0;
+
+       sys_futex(m, FUTEX_WAKE, 1, NULL, NULL, 0);
+
+       return 0;
+}
+
+static unsigned long *m1, *m2;
+
+static void futex_setup(int cpu1, int cpu2)
+{
+       int shmid;
+       void *shmaddr;
+
+       shmid = shmget(IPC_PRIVATE, getpagesize(), SHM_R | SHM_W);
+       if (shmid < 0) {
+               perror("shmget");
+               exit(1);
+       }
+
+       shmaddr = shmat(shmid, NULL, 0);
+       if (shmaddr == (char *)-1) {
+               perror("shmat");
+               shmctl(shmid, IPC_RMID, NULL);
+               exit(1);
+       }
+
+       shmctl(shmid, IPC_RMID, NULL);
+
+       m1 = shmaddr;
+       m2 = shmaddr + sizeof(*m1);
+
+       *m1 = 0;
+       *m2 = 0;
+
+       mutex_lock(m1);
+       mutex_lock(m2);
+}
+
+static void *futex_thread1(void *arg)
+{
+       signal(SIGALRM, sigalrm_handler);
+       alarm(1);
+
+       while (1) {
+               mutex_lock(m2);
+               mutex_unlock(m1);
+
+               iterations += 2;
+       }
+
+       return NULL;
+}
+
+static void *futex_thread2(void *arg)
+{
+       while (1) {
+               mutex_unlock(m2);
+               mutex_lock(m1);
+       }
+
+       return NULL;
+}
+
+static struct actions futex_actions = {
+       .setup = futex_setup,
+       .thread1 = futex_thread1,
+       .thread2 = futex_thread2,
+};
+
+static int processes;
+
+static struct option options[] = {
+       { "test", required_argument, 0, 't' },
+       { "process", no_argument, &processes, 1 },
+       { "timeout", required_argument, 0, 's' },
+       { "vdso", no_argument, &touch_vdso, 1 },
+       { "no-fp", no_argument, &touch_fp, 0 },
+#ifdef __powerpc__
+       { "no-altivec", no_argument, &touch_altivec, 0 },
+#endif
+       { "no-vector", no_argument, &touch_vector, 0 },
+       { 0, },
+};
+
+static void usage(void)
+{
+       fprintf(stderr, "Usage: context_switch2 <options> CPU1 CPU2\n\n");
+       fprintf(stderr, "\t\t--test=X\tpipe, futex or yield (default)\n");
+       fprintf(stderr, "\t\t--process\tUse processes (default threads)\n");
+       fprintf(stderr, "\t\t--timeout=X\tDuration in seconds to run (default 30)\n");
+       fprintf(stderr, "\t\t--vdso\t\ttouch VDSO\n");
+       fprintf(stderr, "\t\t--fp\t\ttouch FP\n");
+#ifdef __powerpc__
+       fprintf(stderr, "\t\t--altivec\ttouch altivec\n");
+#endif
+       fprintf(stderr, "\t\t--vector\ttouch vector\n");
+}
+
+int main(int argc, char *argv[])
+{
+       signed char c;
+       struct actions *actions = &yield_actions;
+       int cpu1;
+       int cpu2;
+       static void (*start_fn)(void *(*fn)(void *), void *arg, unsigned long cpu);
+
+       while (1) {
+               int option_index = 0;
+
+               c = getopt_long(argc, argv, "", options, &option_index);
+
+               if (c == -1)
+                       break;
+
+               switch (c) {
+               case 0:
+                       if (options[option_index].flag != 0)
+                               break;
+
+                       usage();
+                       exit(1);
+                       break;
+
+               case 't':
+                       if (!strcmp(optarg, "pipe")) {
+                               actions = &pipe_actions;
+                       } else if (!strcmp(optarg, "yield")) {
+                               actions = &yield_actions;
+                       } else if (!strcmp(optarg, "futex")) {
+                               actions = &futex_actions;
+                       } else {
+                               usage();
+                               exit(1);
+                       }
+                       break;
+
+               case 's':
+                       timeout = atoi(optarg);
+                       break;
+
+               default:
+                       usage();
+                       exit(1);
+               }
+       }
+
+       if (processes)
+               start_fn = start_process_on;
+       else
+               start_fn = start_thread_on;
+
+       if ((argc - optind) != 2) {
+               cpu1 = cpu2 = pick_online_cpu();
+       } else {
+               cpu1 = atoi(argv[optind++]);
+               cpu2 = atoi(argv[optind++]);
+       }
+
+       printf("Using %s with ", processes ? "processes" : "threads");
+
+       if (actions == &pipe_actions)
+               printf("pipe");
+       else if (actions == &yield_actions)
+               printf("yield");
+       else
+               printf("futex");
+
+       printf(" on cpus %d/%d touching FP:%s altivec:%s vector:%s vdso:%s\n",
+              cpu1, cpu2, touch_fp ?  "yes" : "no", touch_altivec ? "yes" : "no",
+              touch_vector ? "yes" : "no", touch_vdso ? "yes" : "no");
+
+       /* Create a new process group so we can signal everyone for exit */
+       setpgid(getpid(), getpid());
+
+       signal(SIGUSR1, sigusr1_handler);
+
+       actions->setup(cpu1, cpu2);
+
+       start_fn(actions->thread1, NULL, cpu1);
+       start_fn(actions->thread2, NULL, cpu2);
+
+       while (1)
+               sleep(3600);
+
+       return 0;
+}
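Usage note: with the defaults above, ./context_switch --test=pipe 0 8 pins the two tasks to CPUs 0 and 8 and prints one iteration count per second until the 30-second timeout; invoked with no CPU arguments, it runs both tasks on a single CPU chosen by pick_online_cpu().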
index 8265504..08a8b95 100644 (file)
@@ -60,14 +60,6 @@ int dscr_inherit_exec(void)
                else
                        set_dscr(dscr);
 
-               /*
-                * XXX: Force a context switch out so that DSCR
-                * current value is copied into the thread struct
-                * which is required for the child to inherit the
-                * changed value.
-                */
-               sleep(1);
-
                pid = fork();
                if (pid == -1) {
                        perror("fork() failed");
index 4e414ca..3e5a6d1 100644 (file)
@@ -40,14 +40,6 @@ int dscr_inherit(void)
                else
                        set_dscr(dscr);
 
-               /*
-                * XXX: Force a context switch out so that DSCR
-                * current value is copied into the thread struct
-                * which is required for the child to inherit the
-                * changed value.
-                */
-               sleep(1);
-
                pid = fork();
                if (pid == -1) {
                        perror("fork() failed");
index f7997af..52f9be7 100644 (file)
@@ -116,46 +116,3 @@ int test_harness(int (test_function)(void), char *name)
 
        return rc;
 }
-
-static char auxv[4096];
-
-void *get_auxv_entry(int type)
-{
-       ElfW(auxv_t) *p;
-       void *result;
-       ssize_t num;
-       int fd;
-
-       fd = open("/proc/self/auxv", O_RDONLY);
-       if (fd == -1) {
-               perror("open");
-               return NULL;
-       }
-
-       result = NULL;
-
-       num = read(fd, auxv, sizeof(auxv));
-       if (num < 0) {
-               perror("read");
-               goto out;
-       }
-
-       if (num > sizeof(auxv)) {
-               printf("Overflowed auxv buffer\n");
-               goto out;
-       }
-
-       p = (ElfW(auxv_t) *)auxv;
-
-       while (p->a_type != AT_NULL) {
-               if (p->a_type == type) {
-                       result = (void *)p->a_un.a_val;
-                       break;
-               }
-
-               p++;
-       }
-out:
-       close(fd);
-       return result;
-}
index a9099d9..ac41a71 100644 (file)
@@ -2,7 +2,7 @@ noarg:
        $(MAKE) -C ../
 
 TEST_PROGS := count_instructions l3_bank_test per_event_excludes
-EXTRA_SOURCES := ../harness.c event.c lib.c
+EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
 
 all: $(TEST_PROGS) ebb
 
@@ -12,6 +12,8 @@ $(TEST_PROGS): $(EXTRA_SOURCES)
 count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES)
        $(CC) $(CFLAGS) -m64 -o $@ $^
 
+per_event_excludes: ../utils.c
+
 include ../../lib.mk
 
 DEFAULT_RUN_TESTS := $(RUN_TESTS)
index 5cdc9db..8d2279c 100644 (file)
@@ -18,7 +18,8 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test       \
 
 all: $(TEST_PROGS)
 
-$(TEST_PROGS): ../../harness.c ../event.c ../lib.c ebb.c ebb_handler.S trace.c busy_loop.S
+$(TEST_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
+              ebb.c ebb_handler.S trace.c busy_loop.S
 
 instruction_count_test: ../loop.S
 
index 9729d9f..e67452f 100644 (file)
@@ -13,7 +13,6 @@
 #include <stdlib.h>
 #include <string.h>
 #include <sys/ioctl.h>
-#include <linux/auxvec.h>
 
 #include "trace.h"
 #include "reg.h"
@@ -324,7 +323,7 @@ bool ebb_is_supported(void)
 {
 #ifdef PPC_FEATURE2_EBB
        /* EBB requires at least POWER8 */
-       return ((long)get_auxv_entry(AT_HWCAP2) & PPC_FEATURE2_EBB);
+       return have_hwcap2(PPC_FEATURE2_EBB);
 #else
        return false;
 #endif
index a07104c..a361ad3 100644 (file)
 #include "lib.h"
 
 
-int pick_online_cpu(void)
-{
-       cpu_set_t mask;
-       int cpu;
-
-       CPU_ZERO(&mask);
-
-       if (sched_getaffinity(0, sizeof(mask), &mask)) {
-               perror("sched_getaffinity");
-               return -1;
-       }
-
-       /* We prefer a primary thread, but skip 0 */
-       for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
-               if (CPU_ISSET(cpu, &mask))
-                       return cpu;
-
-       /* Search for anything, but in reverse */
-       for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
-               if (CPU_ISSET(cpu, &mask))
-                       return cpu;
-
-       printf("No cpus in affinity mask?!\n");
-       return -1;
-}
-
 int bind_to_cpu(int cpu)
 {
        cpu_set_t mask;
index ca5d72a..0213af4 100644 (file)
@@ -19,7 +19,6 @@ union pipe {
        int fds[2];
 };
 
-extern int pick_online_cpu(void);
 extern int bind_to_cpu(int cpu);
 extern int kill_child_and_wait(pid_t child_pid);
 extern int wait_for_child(pid_t child_pid);
diff --git a/tools/testing/selftests/powerpc/scripts/hmi.sh b/tools/testing/selftests/powerpc/scripts/hmi.sh
new file mode 100755 (executable)
index 0000000..83fb253
--- /dev/null
@@ -0,0 +1,89 @@
+#!/bin/sh
+#
+# Copyright 2015, Daniel Axtens, IBM Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+
+# do we have ./getscom, ./putscom?
+if [ -x ./getscom ] && [ -x ./putscom ]; then
+       GETSCOM=./getscom
+       PUTSCOM=./putscom
+elif which getscom > /dev/null; then
+       GETSCOM=$(which getscom)
+       PUTSCOM=$(which putscom)
+else
+       cat <<EOF
+Can't find getscom/putscom in . or \$PATH.
+See https://github.com/open-power/skiboot.
+The tool is in external/xscom-utils
+EOF
+       exit 1
+fi
+
+# We will get 8 HMI events per injection
+# todo: deal with things being offline
+expected_hmis=8
+COUNT_HMIS() {
+    dmesg | grep -c 'Harmless Hypervisor Maintenance interrupt'
+}
+
+# massively expand snooze delay, allowing injection on all cores
+ppc64_cpu --smt-snooze-delay=1000000000
+
+# when we exit, restore it
+trap "ppc64_cpu --smt-snooze-delay=100" 0 1
+
+# for each chip+core combination
+# todo - less fragile parsing
+egrep -o 'OCC: Chip [0-9a-f]+ Core [0-9a-f]' < /sys/firmware/opal/msglog |
+while read chipcore; do
+       chip=$(echo "$chipcore"|awk '{print $3}')
+       core=$(echo "$chipcore"|awk '{print $5}')
+       fir="0x1${core}013100"
+
+       # verify that Core FIR is zero as expected
+       if [ "$($GETSCOM -c 0x${chip} $fir)" != 0 ]; then
+               echo "FIR was not zero before injection for chip $chip, core $core. Aborting!"
+               echo "Result of $GETSCOM -c 0x${chip} $fir:"
+               $GETSCOM -c 0x${chip} $fir
+               echo "If you get a -5 error, the core may be in idle state. Try stress-ng."
+               echo "Otherwise, try $PUTSCOM -c 0x${chip} $fir 0"
+               exit 1
+       fi
+
+       # keep track of the number of HMIs handled
+       old_hmis=$(COUNT_HMIS)
+
+       # do injection, adding a marker to dmesg for clarity
+       echo "Injecting HMI on core $core, chip $chip" | tee /dev/kmsg
+       # inject a RegFile recoverable error
+       if ! $PUTSCOM -c 0x${chip} $fir 2000000000000000 > /dev/null; then
+               echo "Error injecting. Aborting!"
+               exit 1
+       fi
+
+       # now we want to wait for all the HMIs to be processed
+       # we expect one per thread on the core
+       i=0;
+       new_hmis=$(COUNT_HMIS)
+       while [ $new_hmis -lt $((old_hmis + expected_hmis)) ] && [ $i -lt 12 ]; do
+           echo "Seen $((new_hmis - old_hmis)) HMI(s) out of $expected_hmis expected, sleeping"
+           sleep 5;
+           i=$((i + 1))
+           new_hmis=$(COUNT_HMIS)
+       done
+       if [ $i = 12 ]; then
+           echo "Haven't seen expected $expected_hmis recoveries after 1 min. Aborting."
+           exit 1
+       fi
+       echo "Processed $expected_hmis events; presumed success. Check dmesg."
+       echo ""
+done
index 2699635..7d0f14b 100644 (file)
@@ -1,2 +1,5 @@
 tm-resched-dscr
 tm-syscall
+tm-signal-msr-resv
+tm-signal-stack
+tm-vmxcopy
index 4bea62a..737f72c 100644 (file)
@@ -1,8 +1,8 @@
-TEST_PROGS := tm-resched-dscr tm-syscall
+TEST_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack tm-vmxcopy
 
 all: $(TEST_PROGS)
 
-$(TEST_PROGS): ../harness.c
+$(TEST_PROGS): ../harness.c ../utils.c
 
 tm-syscall: tm-syscall-asm.S
 tm-syscall: CFLAGS += -mhtm -I../../../../../usr/include
index 42d4c8c..8fde93d 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/tm.h>
 
 #include "utils.h"
+#include "tm.h"
 
 #define TBEGIN          ".long 0x7C00051D ;"
 #define TEND            ".long 0x7C00055D ;"
@@ -42,6 +43,8 @@ int test_body(void)
 {
        uint64_t rv, dscr1 = 1, dscr2, texasr;
 
+       SKIP_IF(!have_htm());
+
        printf("Check DSCR TM context switch: ");
        fflush(stdout);
        for (;;) {
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c b/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c
new file mode 100644 (file)
index 0000000..d86653f
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Test the kernel's signal return code to ensure that it doesn't
+ * crash when both the transactional and suspend MSR bits are set in
+ * the signal context.
+ *
+ * For this test, we send ourselves a SIGUSR1.  In the SIGUSR1 handler
+ * we modify the signal context to set both MSR TM S and T bits (which
+ * is "reserved" by the PowerISA). When we return from the signal
+ * handler (implicit sigreturn), the kernel should detect the reserved
+ * MSR value and send us a SIGSEGV.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include "utils.h"
+#include "tm.h"
+
+int segv_expected = 0;
+
+void signal_segv(int signum)
+{
+       if (segv_expected && (signum == SIGSEGV))
+               _exit(0);
+       _exit(1);
+}
+
+void signal_usr1(int signum, siginfo_t *info, void *uc)
+{
+       ucontext_t *ucp = uc;
+
+       /* Link tm checkpointed context to normal context */
+       ucp->uc_link = ucp;
+       /* Set all TM bits so that the context is now invalid */
+#ifdef __powerpc64__
+       ucp->uc_mcontext.gp_regs[PT_MSR] |= (7ULL << 32);
+#else
+       ucp->uc_mcontext.regs->gpr[PT_MSR] |= (7ULL);
+#endif
+       /* Should segv on return because of the invalid context */
+       segv_expected = 1;
+}
+
+int tm_signal_msr_resv(void)
+{
+       struct sigaction act;
+
+       SKIP_IF(!have_htm());
+
+       act.sa_sigaction = signal_usr1;
+       sigemptyset(&act.sa_mask);
+       act.sa_flags = SA_SIGINFO;
+       if (sigaction(SIGUSR1, &act, NULL) < 0) {
+               perror("sigaction sigusr1");
+               exit(1);
+       }
+       if (signal(SIGSEGV, signal_segv) == SIG_ERR)
+               exit(1);
+
+       raise(SIGUSR1);
+
+       /* We shouldn't get here as we exit in the segv handler */
+       return 1;
+}
+
+int main(void)
+{
+       return test_harness(tm_signal_msr_resv, "tm_signal_msr_resv");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-stack.c b/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
new file mode 100644 (file)
index 0000000..e44a238
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Test the kernel's signal delivery code to ensure that we don't
+ * treclaim twice in the kernel signal delivery code.  This can happen
+ * if we trigger a signal when in a transaction and the stack pointer
+ * is bogus.
+ *
+ * This test case registers a SEGV handler, sets the stack pointer
+ * (r1) to NULL, starts a transaction and then generates a SEGV.  The
+ * SEGV should be handled but we exit here as the stack pointer is
+ * invalid and hence we can't sigreturn.  We only need to check that
+ * this flow doesn't crash the kernel.
+ */
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+
+#include "utils.h"
+#include "tm.h"
+
+void signal_segv(int signum)
+{
+       /* This should never actually run since the stack pointer is bogus */
+       exit(1);
+}
+
+int tm_signal_stack(void)
+{
+       int pid;
+
+       SKIP_IF(!have_htm());
+
+       pid = fork();
+       if (pid < 0)
+               exit(1);
+
+       if (pid) { /* Parent */
+               /*
+                * It's likely the whole machine will crash here so if
+                * the child ever exits, we are good.
+                */
+               wait(NULL);
+               return 0;
+       }
+
+       /*
+        * The flow here is:
+        * 1) register a signal handler (so signal delivery occurs)
+        * 2) make stack pointer (r1) = NULL
+        * 3) start transaction
+        * 4) cause segv
+        */
+       if (signal(SIGSEGV, signal_segv) == SIG_ERR)
+               exit(1);
+       asm volatile("li 1, 0 ;"                /* stack ptr == NULL */
+                    "1:"
+                    ".long 0x7C00051D ;"       /* tbegin */
+                    "beq 1b ;"                 /* retry forever */
+                    ".long 0x7C0005DD ; ;"     /* tsuspend */
+                    "ld 2, 0(1) ;"             /* trigger segv" */
+                    : : : "memory");
+
+       /* This should never get here due to above segv */
+       return 1;
+}
+
+int main(void)
+{
+       return test_harness(tm_signal_stack, "tm_signal_stack");
+}
index e835bf7..60560cb 100644 (file)
 #include <unistd.h>
 #include <sys/syscall.h>
 #include <asm/tm.h>
-#include <asm/cputable.h>
-#include <linux/auxvec.h>
 #include <sys/time.h>
 #include <stdlib.h>
 
 #include "utils.h"
+#include "tm.h"
 
 extern int getppid_tm_active(void);
 extern int getppid_tm_suspended(void);
@@ -77,16 +76,6 @@ pid_t getppid_tm(bool suspend)
        exit(-1);
 }
 
-static inline bool have_htm_nosc(void)
-{
-#ifdef PPC_FEATURE2_HTM_NOSC
-       return ((long)get_auxv_entry(AT_HWCAP2) & PPC_FEATURE2_HTM_NOSC);
-#else
-       printf("PPC_FEATURE2_HTM_NOSC not defined, can't check AT_HWCAP2\n");
-       return false;
-#endif
-}
-
 int tm_syscall(void)
 {
        unsigned count = 0;
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
new file mode 100644 (file)
index 0000000..0274de7
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2015, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Original: Michael Neuling 4/12/2013
+ * Edited: Rashmica Gupta 4/12/2015
+ *
+ * See if the altivec state is leaked out of an aborted transaction due to
+ * kernel vmx copy loops.
+ *
+ * When the transaction aborts, VSR values should roll back to the values
+ * they held before the transaction commenced. Using VSRs while the
+ * transaction is suspended should not affect the checkpointed values.
+ *
+ * (1) write A to a VSR
+ * (2) start transaction
+ * (3) suspend transaction
+ * (4) change the VSR to B
+ * (5) trigger kernel vmx copy loop
+ * (6) abort transaction
+ * (7) check that the VSR value is A
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <string.h>
+#include <assert.h>
+
+#include "tm.h"
+#include "utils.h"
+
+int test_vmxcopy(void)
+{
+       long double vecin = 1.3;
+       long double vecout;
+       unsigned long pgsize = getpagesize();
+       int i;
+       int fd;
+       int size = pgsize*16;
+       char tmpfile[] = "/tmp/page_faultXXXXXX";
+       char buf[pgsize];
+       char *a;
+       uint64_t aborted = 0;
+
+       SKIP_IF(!have_htm());
+
+       fd = mkstemp(tmpfile);
+       assert(fd >= 0);
+
+       memset(buf, 0, pgsize);
+       for (i = 0; i < size; i += pgsize)
+               assert(write(fd, buf, pgsize) == pgsize);
+
+       unlink(tmpfile);
+
+       a = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
+       assert(a != MAP_FAILED);
+
+       asm __volatile__(
+               "lxvd2x 40,0,%[vecinptr];"      /* set 40 to initial value*/
+               "tbegin.;"
+               "beq    3f;"
+               "tsuspend.;"
+               "xxlxor 40,40,40;"              /* set 40 to 0 */
+               "std    5, 0(%[map]);"          /* cause kernel vmx copy page */
+               "tabort. 0;"
+               "tresume.;"
+               "tend.;"
+               "li     %[res], 0;"
+               "b      5f;"
+
+               /* Abort handler */
+               "3:;"
+               "li     %[res], 1;"
+
+               "5:;"
+               "stxvd2x 40,0,%[vecoutptr];"
+               : [res]"=r"(aborted)
+               : [vecinptr]"r"(&vecin),
+                 [vecoutptr]"r"(&vecout),
+                 [map]"r"(a)
+               : "memory", "r0", "r3", "r4", "r5", "r6", "r7");
+
+       if (aborted && (vecin != vecout)) {
+               printf("FAILED: vector state leaked on abort %f != %f\n",
+                      (double)vecin, (double)vecout);
+               return 1;
+       }
+
+       munmap(a, size);
+
+       close(fd);
+
+       return 0;
+}
+
+int main(void)
+{
+       return test_harness(test_vmxcopy, "tm_vmxcopy");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
new file mode 100644 (file)
index 0000000..24144b2
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _SELFTESTS_POWERPC_TM_TM_H
+#define _SELFTESTS_POWERPC_TM_TM_H
+
+#include <stdbool.h>
+#include <asm/cputable.h>
+
+#include "../utils.h"
+
+static inline bool have_htm(void)
+{
+#ifdef PPC_FEATURE2_HTM
+       return have_hwcap2(PPC_FEATURE2_HTM);
+#else
+       printf("PPC_FEATURE2_HTM not defined, can't check AT_HWCAP2\n");
+       return false;
+#endif
+}
+
+static inline bool have_htm_nosc(void)
+{
+#ifdef PPC_FEATURE2_HTM_NOSC
+       return have_hwcap2(PPC_FEATURE2_HTM_NOSC);
+#else
+       printf("PPC_FEATURE2_HTM_NOSC not defined, can't check AT_HWCAP2\n");
+       return false;
+#endif
+}
+
+#endif /* _SELFTESTS_POWERPC_TM_TM_H */
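A new test in this directory would combine these helpers with the shared harness roughly as follows. This is a sketch using only the APIs visible above; tm_smoke_test is a hypothetical name, not part of the patch:

    #include "utils.h"
    #include "tm.h"

    static int tm_smoke_test(void)
    {
            /* Skip (rather than fail) on pre-POWER8 hardware */
            SKIP_IF(!have_htm());

            /* ... exercise transactional memory here ... */
            return 0;
    }

    int main(void)
    {
            return test_harness(tm_smoke_test, "tm_smoke_test");
    }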
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
new file mode 100644 (file)
index 0000000..dcf7418
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2013-2015, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#define _GNU_SOURCE    /* For CPU_ZERO etc. */
+
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <link.h>
+#include <sched.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+static char auxv[4096];
+
+void *get_auxv_entry(int type)
+{
+       ElfW(auxv_t) *p;
+       void *result;
+       ssize_t num;
+       int fd;
+
+       fd = open("/proc/self/auxv", O_RDONLY);
+       if (fd == -1) {
+               perror("open");
+               return NULL;
+       }
+
+       result = NULL;
+
+       num = read(fd, auxv, sizeof(auxv));
+       if (num < 0) {
+               perror("read");
+               goto out;
+       }
+
+       if (num > sizeof(auxv)) {
+               printf("Overflowed auxv buffer\n");
+               goto out;
+       }
+
+       p = (ElfW(auxv_t) *)auxv;
+
+       while (p->a_type != AT_NULL) {
+               if (p->a_type == type) {
+                       result = (void *)p->a_un.a_val;
+                       break;
+               }
+
+               p++;
+       }
+out:
+       close(fd);
+       return result;
+}
+
+int pick_online_cpu(void)
+{
+       cpu_set_t mask;
+       int cpu;
+
+       CPU_ZERO(&mask);
+
+       if (sched_getaffinity(0, sizeof(mask), &mask)) {
+               perror("sched_getaffinity");
+               return -1;
+       }
+
+       /* We prefer a primary thread, but skip 0 */
+       for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
+               if (CPU_ISSET(cpu, &mask))
+                       return cpu;
+
+       /* Search for anything, but in reverse */
+       for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
+               if (CPU_ISSET(cpu, &mask))
+                       return cpu;
+
+       printf("No cpus in affinity mask?!\n");
+       return -1;
+}
index b7d4108..175ac6a 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <stdint.h>
 #include <stdbool.h>
+#include <linux/auxvec.h>
 
 /* Avoid headaches with PRI?64 - just use %ll? always */
 typedef unsigned long long u64;
@@ -21,6 +22,12 @@ typedef uint8_t u8;
 
 int test_harness(int (test_function)(void), char *name);
 extern void *get_auxv_entry(int type);
+int pick_online_cpu(void);
+
+static inline bool have_hwcap2(unsigned long ftr2)
+{
+       return ((unsigned long)get_auxv_entry(AT_HWCAP2) & ftr2) == ftr2;
+}
 
 /* Yes, this is evil */
 #define FAIL_IF(x)                                             \