Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 21 May 2010 22:23:54 +0000 (15:23 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 21 May 2010 22:23:54 +0000 (15:23 -0700)
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (25 commits)
  MIPS: Use GCC __builtin_prefetch() to implement prefetch().
  MIPS: Octeon: Serial port fixes for OCTEON simulator.
  MIPS: Octeon: Get rid of early serial.
  MIPS: AR7: prevent race between clock initialization and devices registration
  MIPS: AR7: use ar7_has_high_vlynq() to determine watchdog base address
  MIPS: BCM63xx: Avoid namespace clash on GPIO_DIR_{IN,OUT}
  MIPS: MTX-1: Update defconfig
  MIPS: BCM47xx: Update defconfig
  MIPS: RB532: Update defconfig
  MIPS: AR7: Update defconfig
  RTC: rtc-cmos: Fix binary mode support
  MIPS: Oprofile: Loongson: Cleanup the comments
  MIPS: Oprofile: Loongson: Cleanup of the macros
  MIPS: Oprofile: Loongson: Remove unused variable from loongson2_cpu_setup()
  MIPS: Oprofile: Loongson: Remove useless parentheses
  MIPS: Oprofile: Loongson: Unify macro for setting events
  MIPS: nofpu and nodsp only affect CPU0
  MIPS: Clean up tables for bootmem allocation
  MIPS: Coding style cleanups of access of FCSR rounding mode bits
  MIPS: Loongson 2F: Add gpio/gpioilb support
  ...

413 files changed:
Documentation/DocBook/Makefile
Documentation/DocBook/drm.tmpl [new file with mode: 0644]
Documentation/DocBook/kgdb.tmpl
Documentation/kernel-parameters.txt
Documentation/powerpc/dts-bindings/4xx/reboot.txt [new file with mode: 0644]
Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
MAINTAINERS
arch/arm/configs/am3517_evm_defconfig
arch/arm/configs/omap3_evm_defconfig
arch/arm/configs/rx51_defconfig
arch/arm/include/asm/kmap_types.h
arch/arm/kernel/kgdb.c
arch/arm/mach-msm/dma.c
arch/arm/mach-msm/include/mach/dma.h
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/board-rx51-video.c [new file with mode: 0644]
arch/arm/mach-omap2/board-rx51.c
arch/arm/mach-omap2/clock2420_data.c
arch/arm/mach-omap2/clock2430_data.c
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/devices.c
arch/arm/plat-omap/include/plat/omap34xx.h
arch/blackfin/kernel/kgdb.c
arch/mips/include/asm/kgdb.h
arch/mips/kernel/kgdb.c
arch/mips/kernel/traps.c
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/boot/Makefile
arch/powerpc/boot/dts/iss4xx-mpic.dts [new file with mode: 0644]
arch/powerpc/boot/dts/iss4xx.dts [new file with mode: 0644]
arch/powerpc/boot/dts/mpc8315erdb.dts
arch/powerpc/boot/dts/mpc8377_rdb.dts
arch/powerpc/boot/dts/mpc8378_rdb.dts
arch/powerpc/boot/dts/mpc8379_rdb.dts
arch/powerpc/boot/dts/p1020rdb.dts
arch/powerpc/boot/treeboot-iss4xx.c [new file with mode: 0644]
arch/powerpc/boot/wrapper
arch/powerpc/configs/44x/iss476-smp_defconfig [new file with mode: 0644]
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/include/asm/cache.h
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/hvcall.h
arch/powerpc/include/asm/kexec.h
arch/powerpc/include/asm/kmap_types.h
arch/powerpc/include/asm/mmu-44x.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/mmzone.h
arch/powerpc/include/asm/mpic.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/parport.h
arch/powerpc/include/asm/pgalloc-64.h
arch/powerpc/include/asm/pgtable-ppc32.h
arch/powerpc/include/asm/ptrace.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/reg_booke.h
arch/powerpc/include/asm/smp.h
arch/powerpc/include/asm/topology.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_44x.S
arch/powerpc/kernel/head_8xx.S
arch/powerpc/kernel/head_booke.h
arch/powerpc/kernel/head_fsl_booke.S
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/kgdb.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/lparcfg.c
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/pci_of_scan.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/rtasd.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vio.c
arch/powerpc/lib/string.S
arch/powerpc/mm/44x_mmu.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/fsl_booke_mmu.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/mmu_context_nohash.c
arch/powerpc/mm/mmu_decl.h
arch/powerpc/mm/numa.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/tlb_nohash_low.S
arch/powerpc/platforms/44x/Kconfig
arch/powerpc/platforms/44x/Makefile
arch/powerpc/platforms/44x/iss4xx.c [new file with mode: 0644]
arch/powerpc/platforms/83xx/mpc831x_rdb.c
arch/powerpc/platforms/83xx/mpc837x_rdb.c
arch/powerpc/platforms/86xx/mpc8610_hpcd.c
arch/powerpc/platforms/Kconfig.cputype
arch/powerpc/platforms/cell/cbe_cpufreq.c
arch/powerpc/platforms/iseries/exception.S
arch/powerpc/platforms/iseries/pci.c
arch/powerpc/platforms/iseries/smp.c
arch/powerpc/platforms/pasemi/cpufreq.c
arch/powerpc/platforms/powermac/cpufreq_64.c
arch/powerpc/platforms/powermac/low_i2c.c
arch/powerpc/platforms/powermac/pmac.h
arch/powerpc/platforms/powermac/setup.c
arch/powerpc/platforms/powermac/smp.c
arch/powerpc/platforms/pseries/Makefile
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/event_sources.c [new file with mode: 0644]
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/hvCall.S
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/plpar_wrappers.h
arch/powerpc/platforms/pseries/pseries.h
arch/powerpc/platforms/pseries/ras.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/platforms/pseries/smp.c
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/sysdev/mpc8xxx_gpio.c
arch/powerpc/sysdev/mpic.c
arch/powerpc/sysdev/ppc4xx_soc.c
arch/sh/kernel/kgdb.c
arch/sparc/kernel/kgdb_32.c
arch/sparc/kernel/kgdb_64.c
arch/x86/crypto/aesni-intel_asm.S
arch/x86/crypto/aesni-intel_glue.c
arch/x86/include/asm/cacheflush.h
arch/x86/include/asm/inst.h
arch/x86/include/asm/kgdb.h
arch/x86/include/asm/processor.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/early_printk.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/setup.c
arch/x86/kernel/traps.c
arch/x86/mm/pageattr.c
crypto/ablkcipher.c
crypto/algapi.c
crypto/authenc.c
crypto/internal.h
crypto/pcrypt.c
crypto/scatterwalk.c
crypto/shash.c
crypto/tcrypt.c
crypto/tcrypt.h
crypto/testmgr.c
crypto/testmgr.h
crypto/vmac.c
drivers/char/agp/agp.h
drivers/char/agp/ali-agp.c
drivers/char/agp/amd-k7-agp.c
drivers/char/agp/amd64-agp.c
drivers/char/agp/ati-agp.c
drivers/char/agp/efficeon-agp.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h [new file with mode: 0644]
drivers/char/agp/intel-gtt.c [new file with mode: 0644]
drivers/char/agp/nvidia-agp.c
drivers/char/agp/sis-agp.c
drivers/char/agp/uninorth-agp.c
drivers/char/agp/via-agp.c
drivers/char/random.c
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/geode-aes.c
drivers/crypto/hifn_795x.c
drivers/crypto/mv_cesa.c
drivers/crypto/mv_cesa.h
drivers/crypto/n2_asm.S [new file with mode: 0644]
drivers/crypto/n2_core.c [new file with mode: 0644]
drivers/crypto/n2_core.h [new file with mode: 0644]
drivers/crypto/omap-sham.c [new file with mode: 0644]
drivers/crypto/talitos.c
drivers/crypto/talitos.h
drivers/gpu/drm/Kconfig
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_dma.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo.h
drivers/gpu/drm/i915/dvo_ch7017.c
drivers/gpu/drm/i915/dvo_ch7xxx.c
drivers/gpu/drm/i915/dvo_ivch.c
drivers/gpu/drm/i915/dvo_sil164.c
drivers/gpu/drm/i915/dvo_tfp410.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_debug.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_encoder.h
drivers/gpu/drm/nouveau/nouveau_fb.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_grctx.c
drivers/gpu/drm/nouveau/nouveau_i2c.c
drivers/gpu/drm/nouveau/nouveau_irq.c
drivers/gpu/drm/nouveau/nouveau_reg.h
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nv04_fbcon.c
drivers/gpu/drm/nouveau/nv04_graph.c
drivers/gpu/drm/nouveau/nv40_grctx.c
drivers/gpu/drm/nouveau/nv50_calc.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv50_crtc.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nv50_sor.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreend.h [new file with mode: 0644]
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r100d.h
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r300d.h
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r500_reg.h
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600_reg.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_fixed.h [deleted file]
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_reg.h
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs600d.h
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv515d.h
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/savage/savage_bci.c
drivers/gpu/drm/ttm/Makefile
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_page_alloc.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
drivers/gpu/vga/Kconfig
drivers/macintosh/macio-adb.c
drivers/macintosh/smu.c
drivers/macintosh/therm_adt746x.c
drivers/macintosh/windfarm_pm81.c
drivers/macintosh/windfarm_pm91.c
drivers/misc/Makefile
drivers/misc/hdpuftrs/Makefile [deleted file]
drivers/misc/hdpuftrs/hdpu_cpustate.c [deleted file]
drivers/misc/hdpuftrs/hdpu_nexus.c [deleted file]
drivers/mmc/host/msm_sdcc.c
drivers/mmc/host/msm_sdcc.h
drivers/serial/8250.c
drivers/serial/amba-pl011.c
drivers/serial/kgdboc.c
drivers/serial/mpsc.c
drivers/serial/sh-sci.c
drivers/serial/sunzilog.c
drivers/usb/early/ehci-dbgp.c
drivers/video/Kconfig
drivers/video/efifb.c
drivers/video/fbmem.c
drivers/video/fbsysfs.c
drivers/video/offb.c
drivers/video/omap2/displays/Kconfig
drivers/video/omap2/displays/Makefile
drivers/video/omap2/displays/panel-acx565akm.c [new file with mode: 0644]
drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
drivers/video/omap2/displays/panel-taal.c
drivers/video/omap2/dss/Kconfig
drivers/video/omap2/dss/Makefile
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/display.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/dss.h
drivers/video/omap2/dss/manager.c
drivers/video/omap2/dss/sdi.c
drivers/video/omap2/dss/venc.c
drivers/video/omap2/omapfb/omapfb-ioctl.c
drivers/video/omap2/omapfb/omapfb-sysfs.c
drivers/video/vesafb.c
drivers/video/vga16fb.c
fs/udf/dir.c
fs/udf/file.c
fs/udf/udfdecl.h
include/asm-generic/kmap_types.h
include/crypto/algapi.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_edid.h
include/drm/drm_fb_helper.h
include/drm/drm_fixed.h [new file with mode: 0644]
include/drm/radeon_drm.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_page_alloc.h [new file with mode: 0644]
include/linux/fb.h
include/linux/hdpu_features.h [deleted file]
include/linux/kdb.h [new file with mode: 0644]
include/linux/kgdb.h
include/linux/msm_mdp.h [new file with mode: 0644]
include/linux/padata.h
include/linux/serial_core.h
init/main.c
kernel/Makefile
kernel/debug/Makefile [new file with mode: 0644]
kernel/debug/debug_core.c [new file with mode: 0644]
kernel/debug/debug_core.h [new file with mode: 0644]
kernel/debug/gdbstub.c [new file with mode: 0644]
kernel/debug/kdb/.gitignore [new file with mode: 0644]
kernel/debug/kdb/Makefile [new file with mode: 0644]
kernel/debug/kdb/kdb_bp.c [new file with mode: 0644]
kernel/debug/kdb/kdb_bt.c [new file with mode: 0644]
kernel/debug/kdb/kdb_cmds [new file with mode: 0644]
kernel/debug/kdb/kdb_debugger.c [new file with mode: 0644]
kernel/debug/kdb/kdb_io.c [new file with mode: 0644]
kernel/debug/kdb/kdb_keyboard.c [new file with mode: 0644]
kernel/debug/kdb/kdb_main.c [new file with mode: 0644]
kernel/debug/kdb/kdb_private.h [new file with mode: 0644]
kernel/debug/kdb/kdb_support.c [new file with mode: 0644]
kernel/kallsyms.c
kernel/kgdb.c [deleted file]
kernel/module.c
kernel/padata.c
kernel/printk.c
kernel/sched.c
kernel/signal.c
kernel/sysctl.c
lib/Kconfig.kgdb
sound/aoa/core/gpio-pmf.c

index 325cfd1..c7e5dc7 100644
@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
            genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
            mac80211.xml debugobjects.xml sh.xml regulator.xml \
            alsa-driver-api.xml writing-an-alsa-driver.xml \
-           tracepoint.xml media.xml
+           tracepoint.xml media.xml drm.xml
 
 ###
 # The build process is as follows (targets):
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
new file mode 100644
index 0000000..7583dc7
--- /dev/null
@@ -0,0 +1,839 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+       "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="drmDevelopersGuide">
+  <bookinfo>
+    <title>Linux DRM Developer's Guide</title>
+
+    <copyright>
+      <year>2008-2009</year>
+      <holder>
+       Intel Corporation (Jesse Barnes &lt;jesse.barnes@intel.com&gt;)
+      </holder>
+    </copyright>
+
+    <legalnotice>
+      <para>
+       The contents of this file may be used under the terms of the GNU
+       General Public License version 2 (the "GPL") as distributed in
+       the kernel source COPYING file.
+      </para>
+    </legalnotice>
+  </bookinfo>
+
+<toc></toc>
+
+  <!-- Introduction -->
+
+  <chapter id="drmIntroduction">
+    <title>Introduction</title>
+    <para>
+      The Linux DRM layer contains code intended to support the needs
+      of complex graphics devices, usually containing programmable
+      pipelines well suited to 3D graphics acceleration.  Graphics
+      drivers in the kernel can make use of DRM functions to make
+      tasks like memory management, interrupt handling and DMA easier,
+      and provide a uniform interface to applications.
+    </para>
+    <para>
+      A note on versions: this guide covers features found in the DRM
+      tree, including the TTM memory manager, output configuration and
+      mode setting, and the new vblank internals, in addition to all
+      the regular features found in current kernels.
+    </para>
+    <para>
+      [Insert diagram of typical DRM stack here]
+    </para>
+  </chapter>
+
+  <!-- Internals -->
+
+  <chapter id="drmInternals">
+    <title>DRM Internals</title>
+    <para>
+      This chapter documents DRM internals relevant to driver authors
+      and developers working to add support for the latest features to
+      existing drivers.
+    </para>
+    <para>
+      First, we'll go over some typical driver initialization
+      requirements, like setting up command buffers, creating an
+      initial output configuration, and initializing core services.
+      Subsequent sections will cover core internals in more detail,
+      providing implementation notes and examples.
+    </para>
+    <para>
+      The DRM layer provides several services to graphics drivers,
+      many of them driven by the application interfaces it provides
+      through libdrm, the library that wraps most of the DRM ioctls.
+      These include vblank event handling, memory
+      management, output management, framebuffer management, command
+      submission &amp; fencing, suspend/resume support, and DMA
+      services.
+    </para>
+    <para>
+      The core of every DRM driver is struct drm_device.  Drivers
+      will typically statically initialize a drm_device structure,
+      then pass it to drm_init() at load time.
+    </para>
+
+  <!-- Internals: driver init -->
+
+  <sect1>
+    <title>Driver initialization</title>
+    <para>
+      Before calling the DRM initialization routines, the driver must
+      first create and fill out a struct drm_driver structure.
+    </para>
+    <programlisting>
+      static struct drm_driver driver = {
+       /* don't use mtrr's here, the Xserver or user space app should
+        * deal with them for intel hardware.
+        */
+       .driver_features =
+           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
+       .load = i915_driver_load,
+       .unload = i915_driver_unload,
+       .firstopen = i915_driver_firstopen,
+       .lastclose = i915_driver_lastclose,
+       .preclose = i915_driver_preclose,
+       .save = i915_save,
+       .restore = i915_restore,
+       .device_is_agp = i915_driver_device_is_agp,
+       .get_vblank_counter = i915_get_vblank_counter,
+       .enable_vblank = i915_enable_vblank,
+       .disable_vblank = i915_disable_vblank,
+       .irq_preinstall = i915_driver_irq_preinstall,
+       .irq_postinstall = i915_driver_irq_postinstall,
+       .irq_uninstall = i915_driver_irq_uninstall,
+       .irq_handler = i915_driver_irq_handler,
+       .reclaim_buffers = drm_core_reclaim_buffers,
+       .get_map_ofs = drm_core_get_map_ofs,
+       .get_reg_ofs = drm_core_get_reg_ofs,
+       .fb_probe = intelfb_probe,
+       .fb_remove = intelfb_remove,
+       .fb_resize = intelfb_resize,
+       .master_create = i915_master_create,
+       .master_destroy = i915_master_destroy,
+#if defined(CONFIG_DEBUG_FS)
+       .debugfs_init = i915_debugfs_init,
+       .debugfs_cleanup = i915_debugfs_cleanup,
+#endif
+       .gem_init_object = i915_gem_init_object,
+       .gem_free_object = i915_gem_free_object,
+       .gem_vm_ops = &amp;i915_gem_vm_ops,
+       .ioctls = i915_ioctls,
+       .fops = {
+               .owner = THIS_MODULE,
+               .open = drm_open,
+               .release = drm_release,
+               .ioctl = drm_ioctl,
+               .mmap = drm_mmap,
+               .poll = drm_poll,
+               .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+               .compat_ioctl = i915_compat_ioctl,
+#endif
+               },
+       .pci_driver = {
+               .name = DRIVER_NAME,
+               .id_table = pciidlist,
+               .probe = probe,
+               .remove = __devexit_p(drm_cleanup_pci),
+               },
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
+      };
+    </programlisting>
+    <para>
+      In the example above, taken from the i915 DRM driver, the driver
+      sets several flags indicating what core features it supports.
+      We'll go over the individual callbacks in later sections.  Since
+      flags indicate which features your driver supports to the DRM
+      core, you need to set most of them prior to calling drm_init().  Some,
+      like DRIVER_MODESET, can be set later based on user supplied parameters,
+      but that's the exception rather than the rule.
+    </para>
+    <variablelist>
+      <title>Driver flags</title>
+      <varlistentry>
+       <term>DRIVER_USE_AGP</term>
+       <listitem><para>
+           Driver uses AGP interface
+       </para></listitem>
+      </varlistentry>
+      <varlistentry>
+       <term>DRIVER_REQUIRE_AGP</term>
+       <listitem><para>
+           Driver needs AGP interface to function.
+       </para></listitem>
+      </varlistentry>
+      <varlistentry>
+       <term>DRIVER_USE_MTRR</term>
+       <listitem>
+         <para>
+           Driver uses MTRR interface for mapping memory.  Deprecated.
+         </para>
+       </listitem>
+      </varlistentry>
+      <varlistentry>
+       <term>DRIVER_PCI_DMA</term>
+       <listitem><para>
+           Driver is capable of PCI DMA.  Deprecated.
+       </para></listitem>
+      </varlistentry>
+      <varlistentry>
+       <term>DRIVER_SG</term>
+       <listitem><para>
+           Driver can perform scatter/gather DMA.  Deprecated.
+       </para></listitem>
+      </varlistentry>
+      <varlistentry>
+       <term>DRIVER_HAVE_DMA</term>
+       <listitem><para>Driver supports DMA.  Deprecated.</para></listitem>
+      </varlistentry>
+      <varlistentry>
+       <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
+       <listitem>
+         <para>
+           DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
+           handler; DRIVER_IRQ_SHARED indicates whether the device &amp;
+           handler support shared IRQs (note that this is required of
+           PCI drivers).
+         </para>
+       </listitem>
+      </varlistentry>
+      <varlistentry>
+       <term>DRIVER_DMA_QUEUE</term>
+       <listitem>
+         <para>
+           If the driver queues DMA requests and completes them
+           asynchronously, this flag should be set.  Deprecated.
+         </para>
+       </listitem>
+      </varlistentry>
+      <varlistentry>
+       <term>DRIVER_FB_DMA</term>
+       <listitem>
+         <para>
+           Driver supports DMA to/from the framebuffer.  Deprecated.
+         </para>
+       </listitem>
+      </varlistentry>
+      <varlistentry>
+       <term>DRIVER_MODESET</term>
+       <listitem>
+         <para>
+           Driver supports mode setting interfaces.
+         </para>
+       </listitem>
+      </varlistentry>
+    </variablelist>
+    <para>
+      In this specific case, the driver requires AGP and supports
+      IRQs.  DMA, as we'll see, is handled by device specific ioctls.
+      It also supports the kernel mode setting APIs, though
+      unlike in the actual i915 driver source, this example unconditionally
+      exports KMS capability.
+    </para>
+  </sect1>
+
+  <!-- Internals: driver load -->
+
+  <sect1>
+    <title>Driver load</title>
+    <para>
+      In the previous section, we saw what a typical drm_driver
+      structure might look like.  One of the more important fields in
+      the structure is the hook for the load function.
+    </para>
+    <programlisting>
+      static struct drm_driver driver = {
+       ...
+       .load = i915_driver_load,
+        ...
+      };
+    </programlisting>
+    <para>
+      The load function has many responsibilities: allocating a driver
+      private structure, specifying supported performance counters,
+      configuring the device (e.g. mapping registers &amp; command
+      buffers), initializing the memory manager, and setting up the
+      initial output configuration.
+    </para>
+    <para>
+      Note that the tasks performed at driver load time must not
+      conflict with DRM client requirements.  For instance, if user
+      level mode setting drivers are in use, it would be problematic
+      to perform output discovery &amp; configuration at load time.
+      Likewise, if pre-memory management aware user level drivers are
+      in use, memory management and command buffer setup may need to
+      be omitted.  These requirements are driver specific, and care
+      needs to be taken to keep both old and new applications and
+      libraries working.  The i915 driver supports the "modeset"
+      module parameter to control whether advanced features are
+      enabled at load time or in legacy fashion.  If compatibility is
+      a concern (e.g. with drivers converted over to the new interfaces
+      from the old ones), care must be taken to prevent incompatible
+      device initialization and control with the currently active
+      userspace drivers.
+    </para>
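+    <para>
+      As a rough sketch of how such a parameter can be declared (the
+      variable name and default value below are illustrative, not the
+      actual i915 code), the standard module parameter macros from
+      linux/moduleparam.h are used:
+    </para>
+    <programlisting>
+      /* illustrative only; the default and description are assumptions */
+      static int modeset = -1;  /* -1 selects a per-chipset default */
+      module_param(modeset, int, 0400);
+      MODULE_PARM_DESC(modeset, "Enable kernel mode setting (0=off, 1=on)");
+    </programlisting>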
+
+    <sect2>
+      <title>Driver private &amp; performance counters</title>
+      <para>
+       The driver private hangs off the main drm_device structure and
+       can be used for tracking various device specific bits of
+       information, like register offsets, command buffer status,
+       register state for suspend/resume, etc.  At load time, a
+       driver can simply allocate one and set drm_device.dev_priv
+       appropriately; at unload the driver can free it and set
+       drm_device.dev_priv to NULL.
+      </para>
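+      <para>
+       As a minimal sketch (the my_ prefixed names and the contents of
+       the private structure are purely illustrative):
+      </para>
+      <programlisting>
+       struct my_private {
+               void __iomem *regs;     /* e.g. an MMIO mapping */
+       };
+
+       static int my_load(struct drm_device *dev, unsigned long flags)
+       {
+               struct my_private *dev_priv;
+
+               dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+               if (!dev_priv)
+                       return -ENOMEM;
+               dev->dev_priv = dev_priv;
+               return 0;
+       }
+
+       static int my_unload(struct drm_device *dev)
+       {
+               kfree(dev->dev_priv);
+               dev->dev_priv = NULL;
+               return 0;
+       }
+      </programlisting>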
+      <para>
+       The DRM supports several counters which can be used for rough
+       performance characterization.  Note that the DRM stat counter
+       system is not often used by applications, and supporting
+       additional counters is completely optional.
+      </para>
+      <para>
+       These interfaces are deprecated and should not be used.  If performance
+       monitoring is desired, the developer should investigate and
+       potentially enhance the kernel perf and tracing infrastructure to export
+       GPU related performance information to performance monitoring
+       tools and applications.
+      </para>
+    </sect2>
+
+    <sect2>
+      <title>Configuring the device</title>
+      <para>
+       Obviously, device configuration will be device specific.
+       However, there are several common operations: finding a
+       device's PCI resources, mapping them, and potentially setting
+       up an IRQ handler.
+      </para>
+      <para>
+       Finding &amp; mapping resources is fairly straightforward.  The
+       DRM wrapper functions, drm_get_resource_start() and
+       drm_get_resource_len() can be used to find BARs on the given
+       drm_device struct.  Once those values have been retrieved, the
+       driver load function can call drm_addmap() to create a new
+       mapping for the BAR in question.  Note you'll probably want a
+       drm_local_map_t in your driver private structure to track any
+       mappings you create.
+<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
+<!-- !Finclude/drm/drmP.h drm_local_map_t -->
+      </para>
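+      <para>
+       For example, assuming the device's registers live in BAR 0 (a
+       driver specific detail), the lookup and mapping might be done
+       as in the following sketch:
+      </para>
+      <programlisting>
+       drm_local_map_t *mmio;
+       unsigned long base, size;
+       int ret;
+
+       base = drm_get_resource_start(dev, 0);  /* BAR 0: an assumption */
+       size = drm_get_resource_len(dev, 0);
+       ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+                        _DRM_KERNEL | _DRM_DRIVER, &amp;mmio);
+       if (ret != 0)
+               return ret;
+      </programlisting>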
+      <para>
+       If compatibility with other operating systems isn't a concern
+       (DRM drivers can run under various BSD variants and OpenSolaris),
+       native Linux calls can be used for the above, e.g. pci_resource_*
+       and iomap*/iounmap.  See the Linux device driver book for more
+       info.
+      </para>
+      <para>
+       Once you have a register map, you can use the DRM_READn() and
+       DRM_WRITEn() macros to access the registers on your device, or
+       use driver specific versions to offset into your MMIO space
+       relative to a driver specific base pointer (see I915_READ for
+       example).
+      </para>
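+      <para>
+       For instance, a read-modify-write of a 32-bit register using the
+       map created above might look like this (the register offset and
+       bit definition are hypothetical):
+      </para>
+      <programlisting>
+       u32 tmp;
+
+       tmp = DRM_READ32(mmio, MY_CONTROL_REG);
+       DRM_WRITE32(mmio, MY_CONTROL_REG, tmp | MY_ENABLE_BIT);
+      </programlisting>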
+      <para>
+       If your device supports interrupt generation, you may want to
+       setup an interrupt handler at driver load time as well.  This
+       is done using the drm_irq_install() function.  If your device
+       supports vertical blank interrupts, it should call
+       drm_vblank_init() to initialize the core vblank handling code before
+       enabling interrupts on your device.  This ensures the vblank related
+       structures are allocated and allows the core to handle vblank events.
+      </para>
+<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
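+      <para>
+       A sketch of the load time sequence, assuming a device with two
+       CRTCs:
+      </para>
+      <programlisting>
+       int ret;
+
+       ret = drm_vblank_init(dev, 2);  /* 2 == number of CRTCs */
+       if (ret)
+               return ret;
+       ret = drm_irq_install(dev);
+       if (ret)
+               return ret;
+      </programlisting>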
+      <para>
+       Once your interrupt handler is registered (it'll use your
+       drm_driver.irq_handler as the actual interrupt handling
+       function), you can safely enable interrupts on your device,
+       assuming any other state your interrupt handler uses is also
+       initialized.
+      </para>
+      <para>
+       Another task that may be necessary during configuration is
+       mapping the video BIOS.  On many devices, the VBIOS describes
+       device configuration, LCD panel timings (if any), and contains
+       flags indicating device state.  Mapping the BIOS can be done
+       using the pci_map_rom() call, a convenience function that
+       takes care of mapping the actual ROM, whether it has been
+       shadowed into memory (typically at address 0xc0000) or exists
+       on the PCI device in the ROM BAR.  Note that once you've
+       mapped the ROM and extracted any necessary information, be
+       sure to unmap it; on many devices the ROM address decoder is
+       shared with other BARs, so leaving it mapped can cause
+       undesired behavior like hangs or memory corruption.
+<!--!Fdrivers/pci/rom.c pci_map_rom-->
+      </para>
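+      <para>
+       A sketch of the ROM mapping sequence:
+      </para>
+      <programlisting>
+       size_t size;
+       void __iomem *rom;
+
+       rom = pci_map_rom(dev->pdev, &amp;size);
+       if (rom) {
+               /* ... extract the needed VBIOS tables here ... */
+               pci_unmap_rom(dev->pdev, rom);
+       }
+      </programlisting>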
+    </sect2>
+
+    <sect2>
+      <title>Memory manager initialization</title>
+      <para>
+       In order to allocate command buffers, cursor memory, scanout
+       buffers, etc., as well as support the latest features provided
+       by packages like Mesa and the X.Org X server, your driver
+       should support a memory manager.
+      </para>
+      <para>
+       If your driver supports memory management (it should!), you'll
+       need to set that up at load time as well.  How you initialize
+       it depends on which memory manager you're using, TTM or GEM.
+      </para>
+      <sect3>
+       <title>TTM initialization</title>
+       <para>
+         TTM (for Translation Table Manager) manages video memory and
+         aperture space for graphics devices. TTM supports both UMA devices
+         and devices with dedicated video RAM (VRAM), i.e. most discrete
+         graphics devices.  If your device has dedicated RAM, supporting
+         TTM is desirable.  TTM also integrates tightly with your
+         driver specific buffer execution function.  See the radeon
+         driver for examples.
+       </para>
+       <para>
+         The core TTM structure is the ttm_bo_driver struct.  It contains
+         several fields with function pointers for initializing the TTM,
+         allocating and freeing memory, waiting for command completion
+         and fence synchronization, and memory migration.  See the
+         radeon_ttm.c file for an example of usage.
+       </para>
+       <para>
+         The ttm_global_reference structure is made up of several fields:
+       </para>
+       <programlisting>
+         struct ttm_global_reference {
+               enum ttm_global_types global_type;
+               size_t size;
+               void *object;
+               int (*init) (struct ttm_global_reference *);
+               void (*release) (struct ttm_global_reference *);
+         };
+       </programlisting>
+       <para>
+         There should be one global reference structure for your memory
+         manager as a whole, and there will be others for each object
+         created by the memory manager at runtime.  Your global TTM should
+         have a type of TTM_GLOBAL_TTM_MEM.  The size field for the global
+         object should be sizeof(struct ttm_mem_global), and the init and
+         release hooks should point at your driver specific init and
+         release routines, which will probably eventually call
+         ttm_mem_global_init and ttm_mem_global_release respectively.
+       </para>
+       <para>
+         Once your global TTM accounting structure is set up and initialized
+         (done by calling ttm_global_item_ref on the global object you
+         just created), you'll need to create a buffer object TTM to
+         provide a pool for buffer object allocation by clients and the
+         kernel itself.  The type of this object should be TTM_GLOBAL_TTM_BO,
+         and its size should be sizeof(struct ttm_bo_global).  Again,
+         driver specific init and release functions can be provided,
+         likely eventually calling ttm_bo_global_init and
+         ttm_bo_global_release, respectively.  Also like the previous
+         object, ttm_global_item_ref is used to create an initial reference
+         count for the TTM, which will call your initialization function.
+       </para>
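+       <para>
+         Putting this together, a driver's global TTM setup might look
+         like the sketch below, loosely modeled on the radeon driver
+         (the my_ prefixed names and the mem_global_ref field in the
+         driver private are illustrative):
+       </para>
+       <programlisting>
+         static int my_ttm_mem_global_init(struct ttm_global_reference *ref)
+         {
+               return ttm_mem_global_init(ref->object);
+         }
+
+         static void my_ttm_mem_global_release(struct ttm_global_reference *ref)
+         {
+               ttm_mem_global_release(ref->object);
+         }
+
+         /* at load time: */
+         struct ttm_global_reference *global_ref;
+         int ret;
+
+         global_ref = &amp;dev_priv->mem_global_ref;
+         global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+         global_ref->size = sizeof(struct ttm_mem_global);
+         global_ref->init = &amp;my_ttm_mem_global_init;
+         global_ref->release = &amp;my_ttm_mem_global_release;
+         ret = ttm_global_item_ref(global_ref);
+       </programlisting>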
+      </sect3>
+      <sect3>
+       <title>GEM initialization</title>
+       <para>
+         GEM is an alternative to TTM, designed specifically for UMA
+         devices.  It has simpler initialization and execution requirements
+         than TTM, but has no VRAM management capability.  Core GEM
+         initialization consists of a basic drm_mm_init call to create
+         a GTT DRM MM object, which provides an address space pool for
+         object allocation.  In a KMS configuration, the driver will
+         need to allocate and initialize a command ring buffer following
+         basic GEM initialization.  Most UMA devices have a so-called
+         "stolen" memory region, which provides space for the initial
+         framebuffer and large, contiguous memory regions required by the
+         device.  This space is not typically managed by GEM, and must
+         be initialized separately into its own DRM MM object.
+       </para>
+       <para>
+         Initialization will be driver specific, and will depend on
+         the architecture of the device.  In the case of Intel
+         integrated graphics chips like 965GM, GEM initialization can
+         be done by calling the internal GEM init function,
+         i915_gem_do_init().  Since the 965GM is a UMA device
+         (i.e. it doesn't have dedicated VRAM), GEM will manage
+         making regular RAM available for GPU operations.  Memory set
+         aside by the BIOS (called "stolen" memory by the i915
+         driver) will be managed by the DRM memrange allocator; the
+         rest of the aperture will be managed by GEM.
+         <programlisting>
+           /* Basic memrange allocator for stolen space (aka vram) */
+           drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
+           /* Let GEM Manage from end of prealloc space to end of aperture */
+           i915_gem_do_init(dev, prealloc_size, agp_size);
+         </programlisting>
+<!--!Edrivers/char/drm/drm_memrange.c-->
+       </para>
+       <para>
+         Once the memory manager has been set up, we can allocate the
+         command buffer.  In the i915 case, this is also done with a
+         GEM function, i915_gem_init_ringbuffer().
+       </para>
+      </sect3>
+    </sect2>
+
+    <sect2>
+      <title>Output configuration</title>
+      <para>
+       The final initialization task is output configuration.  This involves
+       finding and initializing the CRTCs, encoders and connectors
+       for your device, creating an initial configuration and
+       registering a framebuffer console driver.
+      </para>
+      <sect3>
+       <title>Output discovery and initialization</title>
+       <para>
+         Several core functions exist to create CRTCs, encoders and
+         connectors, namely drm_crtc_init(), drm_connector_init() and
+         drm_encoder_init(), along with several "helper" functions to
+         perform common tasks.
+       </para>
+       <para>
+         Connectors should be registered with sysfs once they've been
+         detected and initialized, using the
+         drm_sysfs_connector_add() function.  Likewise, when they're
+         removed from the system, they should be destroyed with
+         drm_sysfs_connector_remove().
+       </para>
+       <programlisting>
+<![CDATA[
+void intel_crt_init(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+       struct intel_output *intel_output;
+
+       intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+       if (!intel_output)
+               return;
+
+       connector = &intel_output->base;
+       drm_connector_init(dev, &intel_output->base,
+                          &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+       drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+                        DRM_MODE_ENCODER_DAC);
+
+       drm_mode_connector_attach_encoder(&intel_output->base,
+                                         &intel_output->enc);
+
+       /* Set up the DDC bus. */
+       intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+       if (!intel_output->ddc_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+                          "failed.\n");
+               return;
+       }
+
+       intel_output->type = INTEL_OUTPUT_ANALOG;
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+
+       drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+       drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+       drm_sysfs_connector_add(connector);
+}
+]]>
+       </programlisting>
+       <para>
+         In the example above (again, taken from the i915 driver), a
+         CRT connector and encoder combination is created.  A device
+         specific i2c bus is also created, for fetching EDID data and
+         performing monitor detection.  Once the process is complete,
+         the new connector is registered with sysfs, to make its
+         properties available to applications.
+       </para>
+       <sect4>
+         <title>Helper functions and core functions</title>
+         <para>
+           Since many PC-class graphics devices have similar display output
+           designs, the DRM provides a set of helper functions to make
+           output management easier.  The core helper routines handle
+           encoder re-routing and disabling of unused functions following
+           mode set.  Using the helpers is optional, but recommended for
+           devices with PC-style architectures (i.e. a set of display planes
+           for feeding pixels to encoders which are in turn routed to
+           connectors).  Devices with more complex requirements needing
+           finer grained management can opt to use the core callbacks
+           directly.
+         </para>
+         <para>
+           [Insert typical diagram here.]  [Insert OMAP style config here.]
+         </para>
+       </sect4>
+       <para>
+         For each encoder, CRTC and connector, several functions must
+         be provided, depending on the object type.  Encoder objects
+         should provide a DPMS (basically on/off) function, mode fixup
+         (for converting requested modes into native hardware timings),
+         and prepare, set and commit functions for use by the core DRM
+         helper functions.  Connector helpers need to provide mode fetch and
+         validity functions as well as an encoder matching function for
+         returning an ideal encoder for a given connector.  The core
+         connector functions include a DPMS callback, (deprecated)
+         save/restore routines, detection, mode probing, property handling,
+         and cleanup functions.
+       </para>
+<!--!Edrivers/char/drm/drm_crtc.h-->
+<!--!Edrivers/char/drm/drm_crtc.c-->
+<!--!Edrivers/char/drm/drm_crtc_helper.c-->
+      </sect3>
+    </sect2>
+  </sect1>
+
+  <!-- Internals: vblank handling -->
+
+  <sect1>
+    <title>VBlank event handling</title>
+    <para>
+      The DRM core exposes two vertical blank related ioctls:
+      DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL.
+<!--!Edrivers/char/drm/drm_irq.c-->
+    </para>
+    <para>
+      DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure
+      as its argument, and is used to block or request a signal when a
+      specified vblank event occurs.
+    </para>
+    <para>
+      DRM_IOCTL_MODESET_CTL should be called by application level
+      drivers before and after mode setting, since on many devices the
+      vertical blank counter will be reset at that time.  Internally,
+      the DRM snapshots the last vblank count when the ioctl is called
+      with the _DRM_PRE_MODESET command so that the counter won't go
+      backwards (which is dealt with when _DRM_POST_MODESET is used).
+    </para>
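+    <para>
+      From an application level driver the calling sequence might be
+      sketched as follows (assuming fd is an open DRM device file
+      descriptor):
+    </para>
+    <programlisting>
+      struct drm_modeset_ctl ctl = {
+             .crtc = 0,
+             .cmd  = _DRM_PRE_MODESET,
+      };
+
+      ioctl(fd, DRM_IOCTL_MODESET_CTL, &amp;ctl);
+      /* ... perform the mode set ... */
+      ctl.cmd = _DRM_POST_MODESET;
+      ioctl(fd, DRM_IOCTL_MODESET_CTL, &amp;ctl);
+    </programlisting>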
+    <para>
+      To support the functions above, the DRM core provides several
+      helper functions for tracking vertical blank counters, and
+      requires drivers to provide several callbacks:
+      get_vblank_counter(), enable_vblank() and disable_vblank().  The
+      core uses get_vblank_counter() to keep the counter accurate
+      across interrupt disable periods.  It should return the current
+      vertical blank event count, which is often tracked in a device
+      register.  The enable and disable vblank callbacks should enable
+      and disable vertical blank interrupts, respectively.  In the
+      absence of DRM clients waiting on vblank events, the core DRM
+      code will use the disable_vblank() function to disable
+      interrupts, which saves power.  They'll be re-enabled again when
+      a client calls the vblank wait ioctl above.
+    </para>
+    <para>
+      Devices that don't provide a count register can simply use an
+      internal atomic counter incremented on every vertical blank
+      interrupt, and can make their enable and disable vblank
+      functions into no-ops.
+    </para>
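+    <para>
+      A sketch of that fallback follows; a real driver would keep the
+      counter in its device private structure rather than in a global,
+      and the my_ prefixed names are illustrative:
+    </para>
+    <programlisting>
+      static atomic_t my_vblank_count = ATOMIC_INIT(0);
+
+      static u32 my_get_vblank_counter(struct drm_device *dev, int crtc)
+      {
+             return atomic_read(&amp;my_vblank_count);
+      }
+
+      static int my_enable_vblank(struct drm_device *dev, int crtc)
+      {
+             return 0;  /* the vblank interrupt is simply left enabled */
+      }
+
+      static void my_disable_vblank(struct drm_device *dev, int crtc)
+      {
+      }
+
+      /* in the interrupt handler, on each vertical blank: */
+      atomic_inc(&amp;my_vblank_count);
+      drm_handle_vblank(dev, crtc);
+    </programlisting>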
+  </sect1>
+
+  <sect1>
+    <title>Memory management</title>
+    <para>
+      The memory manager lies at the heart of many DRM operations, and
+      is also required to support advanced client features like OpenGL
+      pbuffers.  The DRM currently contains two memory managers, TTM
+      and GEM.
+    </para>
+
+    <sect2>
+      <title>The Translation Table Manager (TTM)</title>
+      <para>
+       TTM was developed by Tungsten Graphics, primarily by Thomas
+       Hellström, and is intended to be a flexible, high performance
+       graphics memory manager.
+      </para>
+      <para>
+       Drivers wishing to support TTM must fill out a drm_bo_driver
+       structure.
+      </para>
+      <para>
+       TTM design background and information belongs here.
+      </para>
+    </sect2>
+
+    <sect2>
+      <title>The Graphics Execution Manager (GEM)</title>
+      <para>
+       GEM is an Intel project, authored by Eric Anholt and Keith
+       Packard.  It provides simpler interfaces than TTM, and is well
+       suited for UMA devices.
+      </para>
+      <para>
+       GEM-enabled drivers must provide gem_init_object() and
+       gem_free_object() callbacks to support the core memory
+       allocation routines.  They should also provide several driver
+       specific ioctls to support command execution, pinning, buffer
+       read &amp; write, mapping, and domain ownership transfers.
+      </para>
+      <para>
+       On a fundamental level, GEM involves several operations: memory
+       allocation and freeing, command execution, and aperture management
+       at command execution time.  Buffer object allocation is relatively
+       straightforward and largely provided by Linux's shmem layer, which
+       provides memory to back each object.  When mapped into the GTT
+       or used in a command buffer, the backing pages for an object are
+       flushed to memory and marked write combined so as to be coherent
+       with the GPU.  Likewise, when the GPU finishes rendering to an object,
+       if the CPU accesses it, it must be made coherent with the CPU's view
+       of memory, usually involving GPU cache flushing of various kinds.
+       This core CPU&lt;-&gt;GPU coherency management is provided by the GEM
+       set domain function, which evaluates an object's current domain and
+       performs any necessary flushing or synchronization to put the object
+       into the desired coherency domain (note that the object may be busy,
+       i.e. an active render target; in that case the set domain function
+       will block the client and wait for rendering to complete before
+       performing any necessary flushing operations).
+      </para>
+      <para>
+       Perhaps the most important GEM function is providing a command
+       execution interface to clients.  Client programs construct command
+       buffers containing references to previously allocated memory objects
+       and submit them to GEM.  At that point, GEM will take care to bind
+       all the objects into the GTT, execute the buffer, and provide
+       necessary synchronization between clients accessing the same buffers.
+       This often involves evicting some objects from the GTT and re-binding
+       others (a fairly expensive operation), and providing relocation
+       support which hides fixed GTT offsets from clients.  Clients must
+       take care not to submit command buffers that reference more objects
+       than can fit in the GTT or GEM will reject them and no rendering
+       will occur.  Similarly, if several objects in the buffer require
+       fence registers to be allocated for correct rendering (e.g. 2D blits
+       on pre-965 chips), care must be taken not to require more fence
+       registers than are available to the client.  Such resource management
+       should be abstracted from the client in libdrm.
+      </para>
+    </sect2>
+
+  </sect1>
+
+  <!-- Output management -->
+  <sect1>
+    <title>Output management</title>
+    <para>
+      At the core of the DRM output management code is a set of
+      structures representing CRTCs, encoders and connectors.
+    </para>
+    <para>
+      A CRTC is an abstraction representing a part of the chip that
+      contains a pointer to a scanout buffer.  Therefore, the number
+      of CRTCs available determines how many independent scanout
+      buffers can be active at any given time.  The CRTC structure
+      contains several fields to support this: a pointer to some video
+      memory, a display mode, and an (x, y) offset into the video
+      memory to support panning or configurations where one piece of
+      video memory spans multiple CRTCs.
+    </para>
+    <para>
+      An encoder takes pixel data from a CRTC and converts it to a
+      format suitable for any attached connectors.  On some devices,
+      it may be possible to have a CRTC send data to more than one
+      encoder.  In that case, both encoders would receive data from
+      the same scanout buffer, resulting in a "cloned" display
+      configuration across the connectors attached to each encoder.
+    </para>
+    <para>
+      A connector is the final destination for pixel data on a device,
+      and usually connects directly to an external display device like
+      a monitor or laptop panel.  A connector can only be attached to
+      one encoder at a time.  The connector is also the structure
+      where information about the attached display is kept, so it
+      contains fields for display data, EDID data, DPMS &amp;
+      connection status, and information about modes supported on the
+      attached displays.
+    </para>
+<!--!Edrivers/char/drm/drm_crtc.c-->
+  </sect1>
+
+  <sect1>
+    <title>Framebuffer management</title>
+    <para>
+      In order to set a mode on a given CRTC, encoder and connector
+      configuration, clients need to provide a framebuffer object which
+      will provide a source of pixels for the CRTC to deliver to the encoder(s)
+      and ultimately the connector(s) in the configuration.  A framebuffer
+      is fundamentally a driver specific memory object, made into an opaque
+      handle by the DRM addfb function.  Once an fb has been created this
+      way it can be passed to the KMS mode setting routines for use in
+      a configuration.
+    </para>
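+    <para>
+      As an illustration, a KMS client might create a framebuffer with
+      the addfb ioctl roughly as follows (a 1280x800, 32 bpp example;
+      bo_handle stands for a previously created, driver specific buffer
+      handle):
+    </para>
+    <programlisting>
+      struct drm_mode_fb_cmd fb = {
+             .width  = 1280,
+             .height = 800,
+             .pitch  = 1280 * 4,
+             .bpp    = 32,
+             .depth  = 24,
+             .handle = bo_handle,
+      };
+
+      ioctl(fd, DRM_IOCTL_MODE_ADDFB, &amp;fb);
+      /* on success, fb.fb_id names the new framebuffer */
+    </programlisting>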
+  </sect1>
+
+  <sect1>
+    <title>Command submission &amp; fencing</title>
+    <para>
+      This should cover a few device specific command submission
+      implementations.
+    </para>
+  </sect1>
+
+  <sect1>
+    <title>Suspend/resume</title>
+    <para>
+      The DRM core provides some suspend/resume code, but drivers
+      wanting full suspend/resume support should provide save() and
+      restore() functions.  These will be called at suspend,
+      hibernate, or resume time, and should perform any state save or
+      restore required by your device across suspend or hibernate
+      states.
+    </para>
+  </sect1>
+
+  <sect1>
+    <title>DMA services</title>
+    <para>
+      This should cover how DMA mapping etc. is supported by the core.
+      These functions are deprecated and should not be used.
+    </para>
+  </sect1>
+  </chapter>
+
+  <!-- External interfaces -->
+
+  <chapter id="drmExternals">
+    <title>Userland interfaces</title>
+    <para>
+      The DRM core exports several interfaces to applications,
+      generally intended to be used through corresponding libdrm
+      wrapper functions.  In addition, drivers export device specific
+      interfaces for use by userspace drivers &amp; device aware
+      applications through ioctls and sysfs files.
+    </para>
+    <para>
+      External interfaces include: memory mapping, context management,
+      DMA operations, AGP management, vblank control, fence
+      management, memory management, and output management.
+    </para>
+    <para>
+      Cover generic ioctls and sysfs layout here.  Only need high
+      level info, since man pages will cover the rest.
+    </para>
+  </chapter>
+
+  <!-- API reference -->
+
+  <appendix id="drmDriverApi">
+    <title>DRM Driver API</title>
+    <para>
+      Include auto-generated API reference here (need to reference it
+      from paragraphs above too).
+    </para>
+  </appendix>
+
+</book>
index 5cff41a..55f12ac 100644
@@ -4,7 +4,7 @@
 
 <book id="kgdbOnLinux">
  <bookinfo>
-  <title>Using kgdb and the kgdb Internals</title>
+  <title>Using kgdb, kdb and the kernel debugger internals</title>
 
   <authorgroup>
    <author>
     </affiliation>
    </author>
   </authorgroup>
-
-  <authorgroup>
-   <author>
-    <firstname>Tom</firstname>
-    <surname>Rini</surname>
-    <affiliation>
-     <address>
-      <email>trini@kernel.crashing.org</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <authorgroup>
-   <author>
-    <firstname>Amit S.</firstname>
-    <surname>Kale</surname>
-    <affiliation>
-     <address>
-      <email>amitkale@linsyssoft.com</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
   <copyright>
-   <year>2008</year>
+   <year>2008,2010</year>
    <holder>Wind River Systems, Inc.</holder>
   </copyright>
   <copyright>
   <chapter id="Introduction">
     <title>Introduction</title>
     <para>
-    kgdb is a source level debugger for linux kernel. It is used along
-    with gdb to debug a linux kernel.  The expectation is that gdb can
-    be used to "break in" to the kernel to inspect memory, variables
-    and look through call stack information similar to what an
-    application developer would use gdb for.  It is possible to place
-    breakpoints in kernel code and perform some limited execution
-    stepping.
+    The kernel has two different debugger front ends (kdb and kgdb)
+    which interface to the debug core.  It is possible to use either
+    of the debugger front ends and dynamically transition between them
+    if you configure the kernel properly at compile and runtime.
+    </para>
+    <para>
+    Kdb is a simplistic shell-style interface which you can use on a
+    system console with a keyboard or serial console.  You can use it
+    to inspect memory, registers, process lists, dmesg, and even set
+    breakpoints to stop in a certain location.  Kdb is not a source
+    level debugger, although you can set breakpoints and execute some
+    basic kernel run control.  Kdb is mainly aimed at doing some
+    analysis to aid in development or diagnosing kernel problems.  You
+    can access some symbols by name in kernel built-ins or in kernel
+    modules if the code was built
+    with <symbol>CONFIG_KALLSYMS</symbol>.
+    </para>
+    <para>
+    Kgdb is intended to be used as a source level debugger for the
+    Linux kernel. It is used along with gdb to debug a Linux kernel.
+    The expectation is that gdb can be used to "break in" to the
+    kernel to inspect memory, variables and look through call stack
+    information similar to the way an application developer would use
+    gdb to debug an application.  It is possible to place breakpoints
+    in kernel code and perform some limited execution stepping.
     </para>
     <para>
-    Two machines are required for using kgdb. One of these machines is a
-    development machine and the other is a test machine.  The kernel
-    to be debugged runs on the test machine. The development machine
-    runs an instance of gdb against the vmlinux file which contains
-    the symbols (not boot image such as bzImage, zImage, uImage...).
-    In gdb the developer specifies the connection parameters and
-    connects to kgdb.  The type of connection a developer makes with
-    gdb depends on the availability of kgdb I/O modules compiled as
-    builtin's or kernel modules in the test machine's kernel.
+    Two machines are required for using kgdb. One of these machines is
+    a development machine and the other is the target machine.  The
+    kernel to be debugged runs on the target machine. The development
+    machine runs an instance of gdb against the vmlinux file which
+    contains the symbols (not boot image such as bzImage, zImage,
+    uImage...).  In gdb the developer specifies the connection
+    parameters and connects to kgdb.  The type of connection a
+    developer makes with gdb depends on the availability of kgdb I/O
+    modules compiled as built-ins or loadable kernel modules in the target
+    machine's kernel.
     </para>
   </chapter>
   <chapter id="CompilingAKernel">
-    <title>Compiling a kernel</title>
+  <title>Compiling a kernel</title>
+  <para>
+  <itemizedlist>
+  <listitem><para>In order to enable compilation of kdb, you must first enable kgdb.</para></listitem>
+  <listitem><para>The kgdb test compile options are described in the kgdb test suite chapter.</para></listitem>
+  </itemizedlist>
+  </para>
+  <sect1 id="CompileKGDB">
+    <title>Kernel config options for kgdb</title>
     <para>
     To enable <symbol>CONFIG_KGDB</symbol> you should first turn on
     "Prompt for development and/or incomplete code/drivers"
     (CONFIG_EXPERIMENTAL) in  "General setup", then under the
-    "Kernel debugging" select "KGDB: kernel debugging with remote gdb".
+    "Kernel debugging" select "KGDB: kernel debugger".
+    </para>
+    <para>
+    While it is not a hard requirement that you have symbols in your
+    vmlinux file, gdb tends not to be very useful without the symbolic
+    data, so you will want to turn
+    on <symbol>CONFIG_DEBUG_INFO</symbol> which is called "Compile the
+    kernel with debug info" in the config menu.
     </para>
     <para>
    It is advised, but not required, that you turn on the
-    CONFIG_FRAME_POINTER kernel option.  This option inserts code to
-    into the compiled executable which saves the frame information in
-    registers or on the stack at different points which will allow a
-    debugger such as gdb to more accurately construct stack back traces
-    while debugging the kernel.
+    <symbol>CONFIG_FRAME_POINTER</symbol> kernel option which is called "Compile the
+    kernel with frame pointers" in the config menu.  This option
+    inserts code into the compiled executable which saves the frame
+    information in registers or on the stack at different points which
+    allows a debugger such as gdb to more accurately construct
+    stack back traces while debugging the kernel.
     </para>
     <para>
     If the architecture that you are using supports the kernel option
     this option.
     </para>
     <para>
-    Next you should choose one of more I/O drivers to interconnect debugging
-    host and debugged target.  Early boot debugging requires a KGDB
-    I/O driver that supports early debugging and the driver must be
-    built into the kernel directly. Kgdb I/O driver configuration
-    takes place via kernel or module parameters, see following
-    chapter.
+    Next you should choose one or more I/O drivers to interconnect
+    debugging host and debugged target.  Early boot debugging requires
+    a KGDB I/O driver that supports early debugging and the driver
+    must be built into the kernel directly. Kgdb I/O driver
+    configuration takes place via kernel or module parameters which
+    you can learn more about in the section that describes the
+    parameter "kgdboc".
     </para>
-    <para>
-    The kgdb test compile options are described in the kgdb test suite chapter.
+    <para>Here is an example set of .config symbols to enable or
+    disable for kgdb:
+    <itemizedlist>
+    <listitem><para># CONFIG_DEBUG_RODATA is not set</para></listitem>
+    <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
+    <listitem><para>CONFIG_KGDB=y</para></listitem>
+    <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
+    </itemizedlist>
     </para>
-
+  </sect1>
+  <sect1 id="CompileKDB">
+    <title>Kernel config options for kdb</title>
+    <para>Kdb is quite a bit more complex than the simple gdbstub
+    sitting on top of the kernel's debug core.  Kdb must implement a
+    shell, and also adds some helper functions in other parts of the
+    kernel, responsible for printing out interesting data such as what
+    you would see if you ran "lsmod" or "ps".  In order to build kdb
+    into the kernel you follow the same steps as you would for kgdb.
+    </para>
+    <para>The main config option for kdb
+    is <symbol>CONFIG_KGDB_KDB</symbol> which is called "KGDB_KDB:
+    include kdb frontend for kgdb" in the config menu.  In theory you
+    would have already also selected an I/O driver such as the
+    CONFIG_KGDB_SERIAL_CONSOLE interface if you plan on using kdb on a
+    serial port, when you were configuring kgdb.
+    </para>
+    <para>If you want to use a PS/2-style keyboard with kdb, you would
+    select CONFIG_KDB_KEYBOARD which is called "KGDB_KDB: keyboard as
+    input device" in the config menu.  The CONFIG_KDB_KEYBOARD option
+    is not used for anything in the gdb interface to kgdb.  The
+    CONFIG_KDB_KEYBOARD option only works with kdb.
+    </para>
+    <para>Here is an example set of .config symbols to enable/disable kdb:
+    <itemizedlist>
+    <listitem><para># CONFIG_DEBUG_RODATA is not set</para></listitem>
+    <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
+    <listitem><para>CONFIG_KGDB=y</para></listitem>
+    <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
+    <listitem><para>CONFIG_KGDB_KDB=y</para></listitem>
+    <listitem><para>CONFIG_KDB_KEYBOARD=y</para></listitem>
+    </itemizedlist>
+    </para>
+  </sect1>
   </chapter>
-  <chapter id="EnableKGDB">
-   <title>Enable kgdb for debugging</title>
-   <para>
-   In order to use kgdb you must activate it by passing configuration
-   information to one of the kgdb I/O drivers.  If you do not pass any
-   configuration information kgdb will not do anything at all.  Kgdb
-   will only actively hook up to the kernel trap hooks if a kgdb I/O
-   driver is loaded and configured.  If you unconfigure a kgdb I/O
-   driver, kgdb will unregister all the kernel hook points.
+  <chapter id="kgdbKernelArgs">
+  <title>Kernel Debugger Boot Arguments</title>
+  <para>This section describes the various runtime kernel
+  parameters that affect the configuration of the kernel debugger.
+  The following chapter covers using kdb and kgdb as well as
+  providing some examples of the configuration parameters.</para>
+   <sect1 id="kgdboc">
+   <title>Kernel parameter: kgdboc</title>
+   <para>The name of the kgdboc driver was originally an
+   abbreviation meant to stand for "kgdb over console".  Today it is
+   the primary mechanism
+   to configure how to communicate from gdb to kgdb as well as the
+   devices you want to use to interact with the kdb shell.
+   </para>
+   <para>For kgdb/gdb, kgdboc is designed to work with a single serial
+   port. It is intended to cover the circumstance where you want to
+   use a serial console as your primary console as well as using it to
+   perform kernel debugging.  It is also possible to use kgdb on a
+   serial port which is not designated as a system console.  Kgdboc
+   may be configured as a kernel built-in or a kernel loadable module.
+   You can only make use of <constant>kgdbwait</constant> and early
+   debugging if you build kgdboc into the kernel as a built-in.
    </para>
+   <sect2 id="kgdbocArgs">
+   <title>kgdboc arguments</title>
+   <para>Usage: <constant>kgdboc=[kbd][[,]serial_device][,baud]</constant></para>
+   <sect3 id="kgdbocArgs1">
+   <title>Using loadable module or built-in</title>
    <para>
-   All drivers can be reconfigured at run time, if
-   <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
-   are enabled, by echo'ing a new config string to
-   <constant>/sys/module/&lt;driver&gt;/parameter/&lt;option&gt;</constant>.
-   The driver can be unconfigured by passing an empty string.  You cannot
-   change the configuration while the debugger is attached.  Make sure
-   to detach the debugger with the <constant>detach</constant> command
-   prior to trying unconfigure a kgdb I/O driver.
+   <orderedlist>
+   <listitem><para>As a kernel built-in:</para>
+   <para>Use the kernel boot argument: <constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para></listitem>
+   <listitem>
+   <para>As a kernel loadable module:</para>
+   <para>Use the command: <constant>modprobe kgdboc kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
+   <para>Here are two examples of how you might format the kgdboc
+   string. The first is for an x86 target using the first serial port.
+   The second example is for the ARM Versatile AB using the second
+   serial port.
+   <orderedlist>
+   <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
+   <listitem><para><constant>kgdboc=ttyAMA1,115200</constant></para></listitem>
+   </orderedlist>
    </para>
+   </listitem>
+   </orderedlist></para>
+   </sect3>
+   <sect3 id="kgdbocArgs2">
+   <title>Configure kgdboc at runtime with sysfs</title>
+   <para>At run time you can enable or disable kgdboc by echoing
+   parameters into sysfs.  Here are two examples:</para>
+   <orderedlist>
+   <listitem><para>Enable kgdboc on ttyS0</para>
+   <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+   <listitem><para>Disable kgdboc</para>
+   <para><constant>echo "" &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+   </orderedlist>
+   <para>NOTE: You do not need to specify the baud if you are
+   configuring the console on a tty which is already configured or
+   open.</para>
+   </sect3>
+   <sect3 id="kgdbocArgs3">
+   <title>More examples</title>
+   <para>You can configure kgdboc to use the keyboard, and/or a serial
+   device, depending on whether you are using kdb and/or kgdb, in one of the
+   following scenarios.
+   <orderedlist>
+   <listitem><para>kdb and kgdb over only a serial port</para>
+   <para><constant>kgdboc=&lt;serial_device&gt;[,baud]</constant></para>
+   <para>Example: <constant>kgdboc=ttyS0,115200</constant></para>
+   </listitem>
+   <listitem><para>kdb and kgdb with keyboard and a serial port</para>
+   <para><constant>kgdboc=kbd,&lt;serial_device&gt;[,baud]</constant></para>
+   <para>Example: <constant>kgdboc=kbd,ttyS0,115200</constant></para>
+   </listitem>
+   <listitem><para>kdb with a keyboard</para>
+   <para><constant>kgdboc=kbd</constant></para>
+   </listitem>
+   </orderedlist>
+   </para>
+   </sect3>
+   <para>NOTE: Kgdboc does not support interrupting the target via the
+   gdb remote protocol.  You must manually send a sysrq-g unless you
+   have a proxy that splits console output to a terminal program.
+   A console proxy has a separate TCP port for the debugger and a separate
+   TCP port for the "human" console.  The proxy can take care of sending
+   the sysrq-g for you.
+   </para>
+   <para>When using kgdboc with no debugger proxy, you can end up
+    connecting the debugger at one of two entry points.  If an
+    exception occurs after you have loaded kgdboc, a message should
+    print on the console stating it is waiting for the debugger.  In
+    this case you disconnect your terminal program and then connect the
+    debugger in its place.  If you want to interrupt the target system
+    and forcibly enter a debug session you have to issue a Sysrq
+    sequence and then type the letter <constant>g</constant>.  Then
+    you disconnect the terminal session and connect gdb.  If you
+    don't like this, your options are to hack gdb to send the sysrq-g
+    for you on the initial connect as well, or to use a debugger
+    proxy that allows an unmodified gdb to do the debugging.
+   </para>
+   </sect2>
+   </sect1>
    <sect1 id="kgdbwait">
    <title>Kernel parameter: kgdbwait</title>
    <para>
    </para>
    <para>
    The kernel will stop and wait as early as the I/O driver and
-   architecture will allow when you use this option.  If you build the
-   kgdb I/O driver as a kernel module kgdbwait will not do anything.
+   architecture allow when you use this option.  If you build the
+   kgdb I/O driver as a loadable kernel module kgdbwait will not do
+   anything.
    </para>
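+   <para>
+   For example, assuming a built-in kgdboc on the first serial port,
+   you might boot with the
+   arguments: <constant>kgdboc=ttyS0,115200 kgdbwait</constant>
+   </para>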
    </sect1>
-  <sect1 id="kgdboc">
-  <title>Kernel parameter: kgdboc</title>
-  <para>
-  The kgdboc driver was originally an abbreviation meant to stand for
-  "kgdb over console".  Kgdboc is designed to work with a single
-  serial port. It was meant to cover the circumstance
-  where you wanted to use a serial console as your primary console as
-  well as using it to perform kernel debugging.  Of course you can
-  also use kgdboc without assigning a console to the same port.
+   <sect1 id="kgdbcon">
+   <title>Kernel parameter: kgdbcon</title>
+   <para> The kgdbcon feature allows you to see printk() messages
+   inside gdb while gdb is connected to the kernel.  Kdb does not make
+    use of the kgdbcon feature.
+   </para>
+   <para>Kgdb supports using the gdb serial protocol to send console
+   messages to the debugger when the debugger is connected and running.
+   There are two ways to activate this feature.
+   <orderedlist>
+   <listitem><para>Activate with the kernel command line option:</para>
+   <para><constant>kgdbcon</constant></para>
+   </listitem>
+   <listitem><para>Use sysfs before configuring an I/O driver</para>
+   <para>
+   <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
+   </para>
+   <para>
+   NOTE: If you do this after you configure the kgdb I/O driver, the
+   setting will not take effect until the next time the I/O is
+   reconfigured.
+   </para>
+   </listitem>
+   </orderedlist>
+   <para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
+   active system console.  An example of incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
+   </para>
+   <para>It is possible to use this option with kgdboc on a tty that is not a system console.
+   </para>
   </para>
-  <sect2 id="UsingKgdboc">
-  <title>Using kgdboc</title>
-  <para>
-  You can configure kgdboc via sysfs or a module or kernel boot line
-  parameter depending on if you build with CONFIG_KGDBOC as a module
-  or built-in.
-  <orderedlist>
-  <listitem><para>From the module load or build-in</para>
-  <para><constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
+  </sect1>
+  </chapter>
+  <chapter id="usingKDB">
+  <title>Using kdb</title>
   <para>
-  The example here would be if your console port was typically ttyS0, you would use something like <constant>kgdboc=ttyS0,115200</constant> or on the ARM Versatile AB you would likely use <constant>kgdboc=ttyAMA0,115200</constant>
+  </para>
+  <sect1 id="quickKDBserial">
+  <title>Quick start for kdb on a serial port</title>
+  <para>This is a quick example of how to use kdb.</para>
+  <para><orderedlist>
+  <listitem><para>Boot kernel with arguments:
+  <itemizedlist>
+  <listitem><para><constant>console=ttyS0,115200 kgdboc=ttyS0,115200</constant></para></listitem>
+  </itemizedlist></para>
+  <para>OR</para>
+  <para>Configure kgdboc after the kernel has booted, assuming you are using a serial port console:
+  <itemizedlist>
+  <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+  </itemizedlist>
   </para>
   </listitem>
-  <listitem><para>From sysfs</para>
-  <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para>
+  <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault.  There are several ways you can enter the kernel debugger manually; all involve using sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
+  <itemizedlist>
+  <listitem><para>When logged in as root or with a super user session you can run:</para>
+   <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+  <listitem><para>Example using minicom 2.2</para>
+  <para>Press: <constant>Control-a</constant></para>
+  <para>Press: <constant>f</constant></para>
+  <para>Press: <constant>g</constant></para>
   </listitem>
-  </orderedlist>
-  </para>
-  <para>
-  NOTE: Kgdboc does not support interrupting the target via the
-  gdb remote protocol.  You must manually send a sysrq-g unless you
-  have a proxy that splits console output to a terminal problem and
-  has a separate port for the debugger to connect to that sends the
-  sysrq-g for you.
+  <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
+  <para>Press: <constant>Control-]</constant></para>
+  <para>Type in: <constant>send break</constant></para>
+  <para>Press: <constant>Enter</constant></para>
+  <para>Press: <constant>g</constant></para>
+  </listitem>
+  </itemizedlist>
+  </listitem>
+  <listitem><para>From the kdb prompt you can run the "help" command to see a complete list of the commands that are available.</para>
+  <para>Some useful commands in kdb include:
+  <itemizedlist>
+  <listitem><para>lsmod  -- Shows where kernel modules are loaded</para></listitem>
+  <listitem><para>ps -- Displays only the active processes</para></listitem>
+  <listitem><para>ps A -- Shows all the processes</para></listitem>
+  <listitem><para>summary -- Shows kernel version info and memory usage</para></listitem>
+  <listitem><para>bt -- Get a backtrace of the current process using dump_stack()</para></listitem>
+  <listitem><para>dmesg -- View the kernel syslog buffer</para></listitem>
+  <listitem><para>go -- Continue the system</para></listitem>
+  </itemizedlist>
   </para>
-  <para>When using kgdboc with no debugger proxy, you can end up
-  connecting the debugger for one of two entry points.  If an
-  exception occurs after you have loaded kgdboc a message should print
-  on the console stating it is waiting for the debugger.  In case you
-  disconnect your terminal program and then connect the debugger in
-  its place.  If you want to interrupt the target system and forcibly
-  enter a debug session you have to issue a Sysrq sequence and then
-  type the letter <constant>g</constant>.  Then you disconnect the
-  terminal session and connect gdb.  Your options if you don't like
-  this are to hack gdb to send the sysrq-g for you as well as on the
-  initial connect, or to use a debugger proxy that allows an
-  unmodified gdb to do the debugging.
+  </listitem>
+  <listitem>
+  <para>When you are done using kdb you need to consider rebooting the
+  system or using the "go" command to resuming normal kernel
+  execution.  If you have paused the kernel for a lengthy period of
+  time, applications that rely on timely networking or anything to do
+  with real wall clock time could be adversely affected, so you
+  should take this into consideration when using the kernel
+  debugger.</para>
+  </listitem>
+  </orderedlist></para>
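+  <para>Below is a sketch of what a short kdb session might look
+  like.  The banner text and addresses are illustrative and will
+  vary by kernel version and system.
+  <programlisting>
+  Entering kdb (current=0xc0b3a020, pid 1) due to Keyboard Entry
+  kdb> dmesg
+  kdb> bt
+  kdb> go
+  </programlisting>
+  </para>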
+  </sect1>
+  <sect1 id="quickKDBkeyboard">
+  <title>Quick start for kdb using a keyboard connected console</title>
+  <para>This is a quick example of how to use kdb with a keyboard.</para>
+  <para><orderedlist>
+  <listitem><para>Boot kernel with arguments:
+  <itemizedlist>
+  <listitem><para><constant>kgdboc=kbd</constant></para></listitem>
+  </itemizedlist></para>
+  <para>OR</para>
+  <para>Configure kgdboc after the kernel has booted:
+  <itemizedlist>
+  <listitem><para><constant>echo kbd &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+  </itemizedlist>
   </para>
-  </sect2>
+  </listitem>
+  <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault.  There are several ways you can enter the kernel debugger manually; all involve using sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
+  <itemizedlist>
+  <listitem><para>When logged in as root or with a super user session you can run:</para>
+   <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+  <listitem><para>Example using a laptop keyboard</para>
+  <para>Press and hold down: <constant>Alt</constant></para>
+  <para>Press and hold down: <constant>Fn</constant></para>
+  <para>Press and release the key with the label: <constant>SysRq</constant></para>
+  <para>Release: <constant>Fn</constant></para>
+  <para>Press and release: <constant>g</constant></para>
+  <para>Release: <constant>Alt</constant></para>
+  </listitem>
+  <listitem><para>Example using a PS/2 101-key keyboard</para>
+  <para>Press and hold down: <constant>Alt</constant></para>
+  <para>Press and release the key with the label: <constant>SysRq</constant></para>
+  <para>Press and release: <constant>g</constant></para>
+  <para>Release: <constant>Alt</constant></para>
+  </listitem>
+  </itemizedlist>
+  </listitem>
+  <listitem>
+  <para>Now type in a kdb command such as "help", "dmesg", "bt" or "go" to continue kernel execution.</para>
+  </listitem>
+  </orderedlist></para>
   </sect1>
-  <sect1 id="kgdbcon">
-  <title>Kernel parameter: kgdbcon</title>
-  <para>
-  Kgdb supports using the gdb serial protocol to send console messages
-  to the debugger when the debugger is connected and running.  There
-  are two ways to activate this feature.
+  </chapter>
+  <chapter id="EnableKGDB">
+   <title>Using kgdb / gdb</title>
+   <para>In order to use kgdb you must activate it by passing
+   configuration information to one of the kgdb I/O drivers.  If you
+   do not pass any configuration information kgdb will not do anything
+   at all.  Kgdb will only actively hook up to the kernel trap hooks
+   if a kgdb I/O driver is loaded and configured.  If you unconfigure
+   a kgdb I/O driver, kgdb will unregister all the kernel hook points.
+   </para>
+   <para> All kgdb I/O drivers can be reconfigured at run time, if
+   <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
+   are enabled, by echo'ing a new config string to
+   <constant>/sys/module/&lt;driver&gt;/parameter/&lt;option&gt;</constant>.
+   The driver can be unconfigured by passing an empty string.  You cannot
+   change the configuration while the debugger is attached.  Make sure
+   to detach the debugger with the <constant>detach</constant> command
+   prior to trying to unconfigure a kgdb I/O driver.
+   </para>
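+   <para>
+   A minimal sketch of detaching and then unconfiguring kgdboc (the
+   tty name is illustrative):
+   </para>
+   <programlisting>
+   (gdb) detach
+   % echo "" &gt; /sys/module/kgdboc/parameters/kgdboc
+   </programlisting>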
+  <sect1 id="ConnectingGDB">
+  <title>Connecting with gdb to a serial port</title>
   <orderedlist>
-  <listitem><para>Activate with the kernel command line option:</para>
-  <para><constant>kgdbcon</constant></para>
+  <listitem><para>Configure kgdboc</para>
+   <para>Boot kernel with arguments:
+   <itemizedlist>
+    <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
+   </itemizedlist></para>
+   <para>OR</para>
+   <para>Configure kgdboc after the kernel has booted:
+   <itemizedlist>
+    <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+   </itemizedlist></para>
   </listitem>
-  <listitem><para>Use sysfs before configuring an io driver</para>
-  <para>
-  <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
-  </para>
-  <para>
-  NOTE: If you do this after you configure the kgdb I/O driver, the
-  setting will not take effect until the next point the I/O is
-  reconfigured.
-  </para>
+  <listitem>
+  <para>Stop kernel execution (break into the debugger)</para>
+  <para>In order to connect to gdb via kgdboc, the kernel must
+  first be stopped.  There are several ways to stop the kernel, which
+  include using kgdbwait as a boot argument, issuing a sysrq-g, or
+  running the kernel until it takes an exception where it waits for
+  the debugger to attach.
+  <itemizedlist>
+  <listitem><para>When logged in as root or with a super user session you can run:</para>
+   <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+  <listitem><para>Example using minicom 2.2</para>
+  <para>Press: <constant>Control-a</constant></para>
+  <para>Press: <constant>f</constant></para>
+  <para>Press: <constant>g</constant></para>
   </listitem>
-  </orderedlist>
-  </para>
-  <para>
-  IMPORTANT NOTE: Using this option with kgdb over the console
-  (kgdboc) is not supported.
+  <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
+  <para>Press: <constant>Control-]</constant></para>
+  <para>Type in: <constant>send break</constant></para>
+  <para>Press: <constant>Enter</constant></para>
+  <para>Press: <constant>g</constant></para>
+  </listitem>
+  </itemizedlist>
   </para>
-  </sect1>
-  </chapter>
-  <chapter id="ConnectingGDB">
-  <title>Connecting gdb</title>
-    <para>
-    If you are using kgdboc, you need to have used kgdbwait as a boot
-    argument, issued a sysrq-g, or the system you are going to debug
-    has already taken an exception and is waiting for the debugger to
-    attach before you can connect gdb.
-    </para>
-    <para>
-    If you are not using different kgdb I/O driver other than kgdboc,
-    you should be able to connect and the target will automatically
-    respond.
-    </para>
+  </listitem>
+  <listitem>
+    <para>Connect from gdb</para>
     <para>
-    Example (using a serial port):
+    Example (using a directly connected port):
     </para>
     <programlisting>
     % gdb ./vmlinux
     (gdb) target remote /dev/ttyS0
     </programlisting>
     <para>
-    Example (kgdb to a terminal server on tcp port 2012):
+    Example (kgdb to a terminal server on TCP port 2012):
     </para>
     <programlisting>
     % gdb ./vmlinux
     communications.  You do this prior to issuing the <constant>target
     remote</constant> command by typing in: <constant>set debug remote 1</constant>
     </para>
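+    <para>
+    A sketch of what this might look like in a gdb session (the
+    device name is illustrative):
+    </para>
+    <programlisting>
+    % gdb ./vmlinux
+    (gdb) set debug remote 1
+    (gdb) target remote /dev/ttyS0
+    </programlisting>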
+  </listitem>
+  </orderedlist>
+  <para>Remember, if you continue in gdb and need to "break in" again,
+  you need to issue another sysrq-g.  It is easy to create a simple
+  entry point by putting a breakpoint at <constant>sys_sync</constant>
+  and then you can run "sync" from a shell or script to break into the
+  debugger.</para>
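+  <para>A minimal sketch of the sys_sync entry point described
+  above:
+  <programlisting>
+  (gdb) break sys_sync
+  (gdb) continue
+  </programlisting>
+  Then run "sync" from a shell on the target system to drop back
+  into the debugger.</para>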
+  </sect1>
+  </chapter>
+  <chapter id="switchKdbKgdb">
+  <title>kgdb and kdb interoperability</title>
+  <para>It is possible to transition between kdb and kgdb dynamically.
+  The debug core will remember which you used the last time and
+  automatically start in the same mode.</para>
+  <sect1>
+  <title>Switching between kdb and kgdb</title>
+  <sect2>
+  <title>Switching from kgdb to kdb</title>
+  <para>
+  There are two ways to switch from kgdb to kdb: you can use gdb to
+  issue a maintenance packet, or you can blindly type the command $3#33.
+  Whenever the kernel debugger stops in kgdb mode it will print the
+  message <constant>KGDB or $3#33 for KDB</constant>.  It is important
+  to note that you have to type the sequence correctly in one pass.
+  You cannot type a backspace or delete because kgdb will interpret
+  that as part of the debug stream.
+  <orderedlist>
+  <listitem><para>Change from kgdb to kdb by blindly typing:</para>
+  <para><constant>$3#33</constant></para></listitem>
+  <listitem><para>Change from kgdb to kdb with gdb</para>
+  <para><constant>maintenance packet 3</constant></para>
+  <para>NOTE: Now you must kill gdb. Typically you press control-z and
+  issue the command: kill -9 %</para></listitem>
+  </orderedlist>
+  </para>
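+  <para>A sketch of the gdb side of the switch, including killing
+  gdb afterwards as noted above (^Z denotes pressing control-z):
+  <programlisting>
+  (gdb) maintenance packet 3
+  ^Z
+  % kill -9 %
+  </programlisting>
+  </para>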
+  </sect2>
+  <sect2>
+  <title>Switching from kdb to kgdb</title>
+  <para>There are two ways you can change from kdb to kgdb.  You can
+  manually enter kgdb mode by issuing the kgdb command from the kdb
+  shell prompt, or you can connect gdb while the kdb shell prompt is
+  active.  The kdb shell looks for the typical first commands that gdb
+  would issue with the gdb remote protocol and if it sees one of those
+  commands it automatically changes into kgdb mode.</para>
+  <orderedlist>
+  <listitem><para>From kdb issue the command:</para>
+  <para><constant>kgdb</constant></para>
+  <para>Now disconnect your terminal program and connect gdb in its place</para></listitem>
+  <listitem><para>At the kdb prompt, disconnect the terminal program and connect gdb in its place.</para></listitem>
+  </orderedlist>
+  </sect2>
+  </sect1>
+  <sect1>
+  <title>Running kdb commands from gdb</title>
+  <para>It is possible to run a limited set of kdb commands from gdb,
+  using the gdb monitor command.  You don't want to execute any of the
+  run control or breakpoint operations, because it can disrupt the
+  state of the kernel debugger.  You should be using gdb for
+  breakpoints and run control operations if you have gdb connected.
+  The more useful commands to run are things like lsmod, dmesg, ps or
+  possibly some of the memory information commands.  To see all the kdb
+  commands you can run <constant>monitor help</constant>.</para>
+  <para>Example:
+  <informalexample><programlisting>
+(gdb) monitor ps
+1 idle process (state I) and
+27 sleeping system daemon (state M) processes suppressed,
+use 'ps A' to see all.
+Task Addr       Pid   Parent [*] cpu State Thread     Command
+
+0xc78291d0        1        0  0    0   S  0xc7829404  init
+0xc7954150      942        1  0    0   S  0xc7954384  dropbear
+0xc78789c0      944        1  0    0   S  0xc7878bf4  sh
+(gdb)
+  </programlisting></informalexample>
+  </para>
+  </sect1>
   </chapter>
   <chapter id="KGDBTestSuite">
     <title>kgdb Test Suite</title>
     </para>
   </chapter>
   <chapter id="CommonBackEndReq">
-  <title>KGDB Internals</title>
+  <title>Kernel Debugger Internals</title>
   <sect1 id="kgdbArchitecture">
     <title>Architecture Specifics</title>
       <para>
-      Kgdb is organized into three basic components:
+      The kernel debugger is organized into a number of components:
       <orderedlist>
-      <listitem><para>kgdb core</para>
+      <listitem><para>The debug core</para>
       <para>
-      The kgdb core is found in kernel/kgdb.c.  It contains:
+      The debug core is found in kernel/debug/debug_core.c.  It contains:
       <itemizedlist>
-      <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
-      <listitem><para>A generic OS exception handler which includes sync'ing the processors into a stopped state on an multi cpu system.</para></listitem>
+      <listitem><para>A generic OS exception handler which includes
+      sync'ing the processors into a stopped state on a multi-CPU
+      system.</para></listitem>
       <listitem><para>The API to talk to the kgdb I/O drivers</para></listitem>
-      <listitem><para>The API to make calls to the arch specific kgdb implementation</para></listitem>
+      <listitem><para>The API to make calls to the arch-specific kgdb implementation</para></listitem>
       <listitem><para>The logic to perform safe memory reads and writes to memory while using the debugger</para></listitem>
       <listitem><para>A full implementation for software breakpoints unless overridden by the arch</para></listitem>
+      <listitem><para>The API to invoke either the kdb or kgdb frontend to the debug core.</para></listitem>
       </itemizedlist>
       </para>
       </listitem>
-      <listitem><para>kgdb arch specific implementation</para>
+      <listitem><para>kgdb arch-specific implementation</para>
       <para>
       This implementation is generally found in arch/*/kernel/kgdb.c.
       As an example, arch/x86/kernel/kgdb.c contains the specifics to
       implement HW breakpoint as well as the initialization to
       dynamically register and unregister for the trap handlers on
-      this architecture.  The arch specific portion implements:
+      this architecture.  The arch-specific portion implements:
       <itemizedlist>
-      <listitem><para>contains an arch specific trap catcher which
+      <listitem><para>an arch-specific trap catcher which
      invokes kgdb_handle_exception() to start kgdb doing its
       work</para></listitem>
      <listitem><para>translation between the gdb-specific packet format and pt_regs</para></listitem>
       </itemizedlist>
       </para>
       </listitem>
+      <listitem><para>gdbstub frontend (aka kgdb)</para>
+      <para>The gdbstub is located in kernel/debug/gdbstub.c. It contains:</para>
+      <itemizedlist>
+        <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
+      </itemizedlist>
+      </listitem>
+      <listitem><para>kdb frontend</para>
+      <para>The kdb debugger shell is broken down into a number of
+      components.  The kdb core is located in kernel/debug/kdb.  There
+      are a number of helper functions in some of the other kernel
+      components to make it possible for kdb to examine and report
+      information about the kernel without taking locks that could
+      cause a kernel deadlock.  The kdb core implements the following functionality.</para>
+      <itemizedlist>
+        <listitem><para>A simple shell</para></listitem>
+        <listitem><para>The kdb core command set</para></listitem>
+        <listitem><para>A registration API to register additional kdb shell commands (see the sketch after this list).</para>
+        <para>A good example of a self-contained kdb module is the "ftdump" command for dumping the ftrace buffer.  See: kernel/trace/trace_kdb.c</para></listitem>
+        <listitem><para>The implementation for kdb_printf() which
+        emits messages directly to I/O drivers, bypassing the kernel
+        log.</para></listitem>
+        <listitem><para>SW / HW breakpoint management for the kdb shell</para></listitem>
+      </itemizedlist>
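+      <para>A hypothetical, minimal sketch of using the registration
+      API; the "hello" command, its function, and the initcall are
+      made up for illustration:
+      <programlisting>
+#include &lt;linux/kdb.h&gt;
+#include &lt;linux/init.h&gt;
+
+/* A made-up kdb command that just prints a message. */
+static int kdb_hello(int argc, const char **argv)
+{
+        kdb_printf("hello from kdb\n");
+        return 0;
+}
+
+/* Register it as the "hello" kdb shell command. */
+static int __init kdb_hello_init(void)
+{
+        return kdb_register("hello", kdb_hello, "", "Print a greeting", 0);
+}
+late_initcall(kdb_hello_init);
+      </programlisting>
+      </para>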
+      </listitem>
       <listitem><para>kgdb I/O driver</para>
       <para>
-      Each kgdb I/O driver has to provide an implemenation for the following:
+      Each kgdb I/O driver has to provide an implementation for the following:
       <itemizedlist>
-      <listitem><para>configuration via builtin or module</para></listitem>
+      <listitem><para>configuration via built-in or module</para></listitem>
       <listitem><para>dynamic configuration and kgdb hook registration calls</para></listitem>
       <listitem><para>read and write character interface</para></listitem>
       <listitem><para>A cleanup handler for unconfiguring from the kgdb core</para></listitem>
  underlying low-level hardware driver having "polling hooks" to
  which the tty driver is attached.  In the initial
  implementation of kgdboc the serial_core was changed to expose a
-  low level uart hook for doing polled mode reading and writing of a
+  low level UART hook for doing polled mode reading and writing of a
   single character while in an atomic context.  When kgdb makes an I/O
   request to the debugger, kgdboc invokes a call back in the serial
-  core which in turn uses the call back in the uart driver.  It is
-  certainly possible to extend kgdboc to work with non-uart based
+  core which in turn uses the call back in the UART driver.  It is
+  certainly possible to extend kgdboc to work with non-UART based
   consoles in the future.
   </para>
   <para>
-  When using kgdboc with a uart, the uart driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/8250.c:<programlisting>
+  When using kgdboc with a UART, the UART driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/serial/8250.c:<programlisting>
 #ifdef CONFIG_CONSOLE_POLL
        .poll_get_char = serial8250_get_poll_char,
        .poll_put_char = serial8250_put_poll_char,
   <constant>#ifdef CONFIG_CONSOLE_POLL</constant>, as shown above.
   Keep in mind that polling hooks have to be implemented in such a way
   that they can be called from an atomic context and have to restore
-  the state of the uart chip on return such that the system can return
+  the state of the UART chip on return such that the system can return
   to normal when the debugger detaches.  You need to be very careful
  with any kind of lock you consider, because failing here is most
  likely going to mean pressing the reset button.
                <itemizedlist>
                <listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
                </itemizedlist>
+                In Jan 2010 this document was updated to include kdb.
+               <itemizedlist>
+               <listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
+               </itemizedlist>
        </para>
   </chapter>
 </book>
index b12bacd..f5fce48 100644 (file)
@@ -58,6 +58,7 @@ parameter is applicable:
        ISAPNP  ISA PnP code is enabled.
        ISDN    Appropriate ISDN support is enabled.
        JOY     Appropriate joystick support is enabled.
+       KGDB    Kernel debugger support is enabled.
        KVM     Kernel Virtual Machine support is enabled.
        LIBATA  Libata driver is enabled
        LP      Printer support is enabled.
@@ -712,6 +713,12 @@ and is between 256 and 4096 characters. It is defined in the file
                        The VGA output is eventually overwritten by the real
                        console.
 
+       ekgdboc=        [X86,KGDB] Allow early kernel console debugging
+                       ekgdboc=kbd
+
+                       This is designed to be used in conjunction with
+                       the boot argument: earlyprintk=vga
+
        eata=           [HW,SCSI]
 
        edd=            [EDD]
@@ -1120,10 +1127,26 @@ and is between 256 and 4096 characters. It is defined in the file
                        use the HighMem zone if it exists, and the Normal
                        zone if it does not.
 
-       kgdboc=         [HW] kgdb over consoles.
-                       Requires a tty driver that supports console polling.
-                       (only serial supported for now)
-                       Format: <serial_device>[,baud]
+       kgdbdbgp=       [KGDB,HW] kgdb over EHCI USB debug port.
+                       Format: <Controller#>[,poll interval]
+                       The controller # is the number of the EHCI USB debug
+                       port as it is probed via PCI.  The poll interval is
+                       optional and is the number of seconds between
+                       each poll cycle to the debug port in case you need
+                       the functionality for interrupting the kernel with
+                       gdb or control-c on the dbgp connection.  When
+                       not using this parameter you use sysrq-g to break into
+                       the kernel debugger.
+
+       kgdboc=         [KGDB,HW] kgdb over consoles.
+                       Requires a tty driver that supports console polling,
+                       or a supported polling keyboard driver (non-usb).
+                       Serial only format: <serial_device>[,baud]
+                       Keyboard only format: kbd
+                       Keyboard and serial format: kbd,<serial_device>[,baud]
+
+       kgdbwait        [KGDB] Stop kernel execution and enter the
+                       kernel debugger at the earliest opportunity.
 
        kmac=           [MIPS] korina ethernet MAC address.
                        Configure the RouterBoard 532 series on-chip
diff --git a/Documentation/powerpc/dts-bindings/4xx/reboot.txt b/Documentation/powerpc/dts-bindings/4xx/reboot.txt
new file mode 100644 (file)
index 0000000..d721726
--- /dev/null
@@ -0,0 +1,18 @@
+Reboot property to control system reboot on PPC4xx systems:
+
+By setting "reset_type" to one of the following values, the default
+software reset mechanism may be overidden. Here the possible values of
+"reset_type":
+
+      1 - PPC4xx core reset
+      2 - PPC4xx chip reset
+      3 - PPC4xx system reset (default)
+
+Example:
+
+               cpu@0 {
+                       device_type = "cpu";
+                       model = "PowerPC,440SPe";
+                       ...
+                       reset-type = <2>;       /* Use chip-reset */
+               };
index d015dce..b0019eb 100644 (file)
@@ -11,7 +11,7 @@ Required properties:
   83xx, "fsl,mpc8572-gpio" for 85xx and "fsl,mpc8610-gpio" for 86xx.
 - #gpio-cells : Should be two. The first cell is the pin number and the
   second cell is used to specify optional parameters (currently unused).
- - interrupts : Interrupt mapping for GPIO IRQ (currently unused).
+ - interrupts : Interrupt mapping for GPIO IRQ.
  - interrupt-parent : Phandle for the interrupt controller that
    services interrupts for this device.
 - gpio-controller : Marks the port as GPIO controller.
@@ -38,3 +38,23 @@ Example of gpio-controller nodes for a MPC8347 SoC:
 
 See booting-without-of.txt for details of how to specify GPIO
 information for devices.
+
+To use GPIO pins as interrupt sources for peripherals, specify the
+GPIO controller as the interrupt parent and define GPIO number +
+trigger mode using the interrupts property, which is defined like
+this:
+
+interrupts = <number trigger>, where:
+ - number: GPIO pin (0..31)
+ - trigger: trigger mode:
+       2 = trigger on falling edge
+       3 = trigger on both edges
+
+An example of a device using this is:
+
+       funkyfpga@0 {
+               compatible = "funky-fpga";
+               ...
+               interrupts = <4 3>;
+               interrupt-parent = <&gpio1>;
+       };
index a31a717..a8fe9b4 100644 (file)
@@ -3319,15 +3319,17 @@ F:      include/linux/key-type.h
 F:     include/keys/
 F:     security/keys/
 
-KGDB
+KGDB / KDB / debug_core
 M:     Jason Wessel <jason.wessel@windriver.com>
+W:     http://kgdb.wiki.kernel.org/
 L:     kgdb-bugreport@lists.sourceforge.net
 S:     Maintained
 F:     Documentation/DocBook/kgdb.tmpl
 F:     drivers/misc/kgdbts.c
 F:     drivers/serial/kgdboc.c
+F:     include/linux/kdb.h
 F:     include/linux/kgdb.h
-F:     kernel/kgdb.c
+F:     kernel/debug/
 
 KMEMCHECK
 M:     Vegard Nossum <vegardno@ifi.uio.no>
index 232f8ee..e4f4fb5 100644 (file)
@@ -774,7 +774,57 @@ CONFIG_SSB_POSSIBLE=y
 #
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+CONFIG_PANEL_SHARP_LQ043T1DG01=y
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
index a6dd6d1..b02e371 100644 (file)
@@ -911,7 +911,56 @@ CONFIG_DAB=y
 #
 # CONFIG_VGASTATE is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=m
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4
+CONFIG_FB_OMAP2=y
+# CONFIG_FB_OMAP2_DEBUG_SUPPORT is not set
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+# CONFIG_PANEL_SAMSUNG_LTE430WQ_F0C is not set
+CONFIG_PANEL_SHARP_LS037V7DW01=y
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
index 473f9e1..56d4928 100644 (file)
@@ -784,6 +784,7 @@ CONFIG_INPUT_KEYBOARD=y
 # CONFIG_KEYBOARD_NEWTON is not set
 # CONFIG_KEYBOARD_STOWAWAY is not set
 CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_TWL4030=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
@@ -809,6 +810,7 @@ CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
 # CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_TWL4030_PWRBUTTON=y
 CONFIG_INPUT_UINPUT=m
 
 #
@@ -1110,7 +1112,40 @@ CONFIG_RADIO_ADAPTERS=y
 #
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+
+# Frame buffer hardware drivers
+#
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=0
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+# CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set
+# CONFIG_OMAP2_DSS_DPI is not set
+# CONFIG_OMAP2_DSS_RFBI is not set
+# CONFIG_OMAP2_DSS_VENC is not set
+CONFIG_OMAP2_DSS_SDI=y
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+# CONFIG_PANEL_GENERIC is not set
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
+CONFIG_PANEL_ACX565AKM=y
+
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
@@ -1127,6 +1162,8 @@ CONFIG_DISPLAY_SUPPORT=y
 #
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
 CONFIG_SOUND=y
 # CONFIG_SOUND_OSS_CORE is not set
 CONFIG_SND=y
index c4b2ea3..e51b1e8 100644 (file)
@@ -20,6 +20,7 @@ enum km_type {
        KM_SOFTIRQ1,
        KM_L1_CACHE,
        KM_L2_CACHE,
+       KM_KDB,
        KM_TYPE_NR
 };
 
index a5b846b..c868a88 100644 (file)
@@ -98,6 +98,11 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
        gdb_regs[_CPSR]         = thread_regs->ARM_cpsr;
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+       regs->ARM_pc = pc;
+}
+
 static int compiled_break;
 
 int kgdb_arch_handle_exception(int exception_vector, int signo,
index 3d725ae..d029d1f 100644 (file)
@@ -69,6 +69,8 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
                        writel(DMOV_CONFIG_IRQ_EN, DMOV_CONFIG(id));
                }
 #endif
+               if (cmd->execute_func)
+                       cmd->execute_func(cmd);
                PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n", id, status);
                list_add_tail(&cmd->list, &active_commands[id]);
                if (!channel_active)
@@ -116,6 +118,7 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
 
        cmd.dmov_cmd.cmdptr = cmdptr;
        cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
+       cmd.dmov_cmd.execute_func = NULL;
        cmd.id = id;
        init_completion(&cmd.complete);
 
@@ -221,6 +224,8 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
                                cmd = list_entry(ready_commands[id].next, typeof(*cmd), list);
                                list_del(&cmd->list);
                                list_add_tail(&cmd->list, &active_commands[id]);
+                               if (cmd->execute_func)
+                                       cmd->execute_func(cmd);
                                PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
                                writel(cmd->cmdptr, DMOV_CMD_PTR(id));
                        }
index 04c51cc..00f9bbf 100644 (file)
@@ -28,6 +28,8 @@ struct msm_dmov_cmd {
        void (*complete_func)(struct msm_dmov_cmd *cmd,
                              unsigned int result,
                              struct msm_dmov_errdata *err);
+       void (*execute_func)(struct msm_dmov_cmd *cmd);
+       void *data;
 };
 
 void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd);
index d28e9e5..ea52b03 100644 (file)
@@ -119,6 +119,7 @@ obj-$(CONFIG_MACH_NOKIA_N8X0)               += board-n8x0.o
 obj-$(CONFIG_MACH_NOKIA_RX51)          += board-rx51.o \
                                           board-rx51-sdram.o \
                                           board-rx51-peripherals.o \
+                                          board-rx51-video.o \
                                           hsmmc.o
 obj-$(CONFIG_MACH_OMAP_ZOOM2)          += board-zoom2.o \
                                           board-zoom-peripherals.o \
index 962d377..69b154c 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <plat/board.h>
 #include <plat/common.h>
+#include <plat/display.h>
 #include <plat/gpmc.h>
 #include <plat/nand.h>
 #include <plat/usb.h>
@@ -106,6 +107,77 @@ static struct platform_device omap3beagle_nand_device = {
        .resource       = &omap3beagle_nand_resource,
 };
 
+/* DSS */
+
+static int beagle_enable_dvi(struct omap_dss_device *dssdev)
+{
+       if (gpio_is_valid(dssdev->reset_gpio))
+               gpio_set_value(dssdev->reset_gpio, 1);
+
+       return 0;
+}
+
+static void beagle_disable_dvi(struct omap_dss_device *dssdev)
+{
+       if (gpio_is_valid(dssdev->reset_gpio))
+               gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+static struct omap_dss_device beagle_dvi_device = {
+       .type = OMAP_DISPLAY_TYPE_DPI,
+       .name = "dvi",
+       .driver_name = "generic_panel",
+       .phy.dpi.data_lines = 24,
+       .reset_gpio = 170,
+       .platform_enable = beagle_enable_dvi,
+       .platform_disable = beagle_disable_dvi,
+};
+
+static struct omap_dss_device beagle_tv_device = {
+       .name = "tv",
+       .driver_name = "venc",
+       .type = OMAP_DISPLAY_TYPE_VENC,
+       .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+static struct omap_dss_device *beagle_dss_devices[] = {
+       &beagle_dvi_device,
+       &beagle_tv_device,
+};
+
+static struct omap_dss_board_info beagle_dss_data = {
+       .num_devices = ARRAY_SIZE(beagle_dss_devices),
+       .devices = beagle_dss_devices,
+       .default_device = &beagle_dvi_device,
+};
+
+static struct platform_device beagle_dss_device = {
+       .name          = "omapdss",
+       .id            = -1,
+       .dev            = {
+               .platform_data = &beagle_dss_data,
+       },
+};
+
+static struct regulator_consumer_supply beagle_vdac_supply =
+       REGULATOR_SUPPLY("vdda_dac", "omapdss");
+
+static struct regulator_consumer_supply beagle_vdvi_supply =
+       REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+
+static void __init beagle_display_init(void)
+{
+       int r;
+
+       r = gpio_request(beagle_dvi_device.reset_gpio, "DVI reset");
+       if (r < 0) {
+               printk(KERN_ERR "Unable to get DVI reset GPIO\n");
+               return;
+       }
+
+       gpio_direction_output(beagle_dvi_device.reset_gpio, 0);
+}
+
 #include "sdram-micron-mt46h32m32lf-6.h"
 
 static struct omap2_hsmmc_info mmc[] = {
@@ -117,15 +189,6 @@ static struct omap2_hsmmc_info mmc[] = {
        {}      /* Terminator */
 };
 
-static struct platform_device omap3_beagle_lcd_device = {
-       .name           = "omap3beagle_lcd",
-       .id             = -1,
-};
-
-static struct omap_lcd_config omap3_beagle_lcd_config __initdata = {
-       .ctrl_name      = "internal",
-};
-
 static struct regulator_consumer_supply beagle_vmmc1_supply = {
        .supply                 = "vmmc",
 };
@@ -181,16 +244,6 @@ static struct twl4030_gpio_platform_data beagle_gpio_data = {
        .setup          = beagle_twl_gpio_setup,
 };
 
-static struct regulator_consumer_supply beagle_vdac_supply = {
-       .supply         = "vdac",
-       .dev            = &omap3_beagle_lcd_device.dev,
-};
-
-static struct regulator_consumer_supply beagle_vdvi_supply = {
-       .supply         = "vdvi",
-       .dev            = &omap3_beagle_lcd_device.dev,
-};
-
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
 static struct regulator_init_data beagle_vmmc1 = {
        .constraints = {
@@ -349,14 +402,8 @@ static struct platform_device keys_gpio = {
        },
 };
 
-static struct omap_board_config_kernel omap3_beagle_config[] __initdata = {
-       { OMAP_TAG_LCD,         &omap3_beagle_lcd_config },
-};
-
 static void __init omap3_beagle_init_irq(void)
 {
-       omap_board_config = omap3_beagle_config;
-       omap_board_config_size = ARRAY_SIZE(omap3_beagle_config);
        omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
                             mt46h32m32lf6_sdrc_params);
        omap_init_irq();
@@ -367,9 +414,9 @@ static void __init omap3_beagle_init_irq(void)
 }
 
 static struct platform_device *omap3_beagle_devices[] __initdata = {
-       &omap3_beagle_lcd_device,
        &leds_gpio,
        &keys_gpio,
+       &beagle_dss_device,
 };
 
 static void __init omap3beagle_flash_init(void)
@@ -456,6 +503,8 @@ static void __init omap3_beagle_init(void)
        /* Ensure SDRC pins are mux'd for self-refresh */
        omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
        omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
+
+       beagle_display_init();
 }
 
 static void __init omap3_beagle_map_io(void)
index 966f5f8..abdf321 100644 (file)
@@ -45,6 +45,8 @@
 /* list all spi devices here */
 enum {
        RX51_SPI_WL1251,
+       RX51_SPI_MIPID,         /* LCD panel */
+       RX51_SPI_TSC2005,       /* Touch Controller */
 };
 
 static struct wl12xx_platform_data wl1251_pdata;
@@ -54,6 +56,16 @@ static struct omap2_mcspi_device_config wl1251_mcspi_config = {
        .single_channel = 1,
 };
 
+static struct omap2_mcspi_device_config mipid_mcspi_config = {
+       .turbo_mode     = 0,
+       .single_channel = 1,
+};
+
+static struct omap2_mcspi_device_config tsc2005_mcspi_config = {
+       .turbo_mode     = 0,
+       .single_channel = 1,
+};
+
 static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
        [RX51_SPI_WL1251] = {
                .modalias               = "wl1251",
@@ -64,6 +76,22 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
                .controller_data        = &wl1251_mcspi_config,
                .platform_data          = &wl1251_pdata,
        },
+       [RX51_SPI_MIPID] = {
+               .modalias               = "acx565akm",
+               .bus_num                = 1,
+               .chip_select            = 2,
+               .max_speed_hz           = 6000000,
+               .controller_data        = &mipid_mcspi_config,
+       },
+       [RX51_SPI_TSC2005] = {
+               .modalias               = "tsc2005",
+               .bus_num                = 1,
+               .chip_select            = 0,
+               /* .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),*/
+               .max_speed_hz           = 6000000,
+               .controller_data        = &tsc2005_mcspi_config,
+               /* .platform_data = &tsc2005_config,*/
+       },
 };
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
new file mode 100644 (file)
index 0000000..b743a4f
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * linux/arch/arm/mach-omap2/board-rx51-video.c
+ *
+ * Copyright (C) 2010 Nokia
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/mm.h>
+
+#include <asm/mach-types.h>
+#include <plat/mux.h>
+#include <plat/display.h>
+#include <plat/vram.h>
+#include <plat/mcspi.h>
+
+#include "mux.h"
+
+#define RX51_LCD_RESET_GPIO    90
+
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+
+static int rx51_lcd_enable(struct omap_dss_device *dssdev)
+{
+       gpio_set_value(dssdev->reset_gpio, 1);
+       return 0;
+}
+
+static void rx51_lcd_disable(struct omap_dss_device *dssdev)
+{
+       gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+static struct omap_dss_device rx51_lcd_device = {
+       .name                   = "lcd",
+       .driver_name            = "panel-acx565akm",
+       .type                   = OMAP_DISPLAY_TYPE_SDI,
+       .phy.sdi.datapairs      = 2,
+       .reset_gpio             = RX51_LCD_RESET_GPIO,
+       .platform_enable        = rx51_lcd_enable,
+       .platform_disable       = rx51_lcd_disable,
+};
+
+static struct omap_dss_device *rx51_dss_devices[] = {
+       &rx51_lcd_device,
+};
+
+static struct omap_dss_board_info rx51_dss_board_info = {
+       .num_devices    = ARRAY_SIZE(rx51_dss_devices),
+       .devices        = rx51_dss_devices,
+       .default_device = &rx51_lcd_device,
+};
+
+struct platform_device rx51_display_device = {
+       .name   = "omapdss",
+       .id     = -1,
+       .dev    = {
+               .platform_data = &rx51_dss_board_info,
+       },
+};
+
+static struct platform_device *rx51_video_devices[] __initdata = {
+       &rx51_display_device,
+};
+
+static int __init rx51_video_init(void)
+{
+       if (!machine_is_nokia_rx51())
+               return 0;
+
+       if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) {
+               pr_err("%s cannot configure MUX for LCD RESET\n", __func__);
+               return 0;
+       }
+
+       if (gpio_request(RX51_LCD_RESET_GPIO, "LCD ACX565AKM reset")) {
+               pr_err("%s failed to get LCD Reset GPIO\n", __func__);
+               return 0;
+       }
+
+       gpio_direction_output(RX51_LCD_RESET_GPIO, 1);
+
+       platform_add_devices(rx51_video_devices,
+                               ARRAY_SIZE(rx51_video_devices));
+       return 0;
+}
+
+subsys_initcall(rx51_video_init);
+
+void __init rx51_video_mem_init(void)
+{
+       /*
+        * GFX 864x480x32bpp
+        * VID1/2 1280x720x32bpp double buffered
+        */
+       omap_vram_set_sdram_vram(PAGE_ALIGN(864 * 480 * 4) +
+                       2 * PAGE_ALIGN(1280 * 720 * 4 * 2), 0);
+}
+
+#else
+void __init rx51_video_mem_init(void) { }
+#endif /* defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) */
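The reservation made by rx51_video_mem_init() works out as follows (assuming the usual 4 KiB PAGE_SIZE, under which both terms happen to be page-aligned already):

    /* GFX plane:    864 *  480 * 4     = 1,658,880 bytes (exactly 405 pages)
     * VID overlay: 1280 *  720 * 4 * 2 = 7,372,800 bytes each, double buffered
     * total: 1,658,880 + 2 * 7,372,800 = 16,404,480 bytes (~15.6 MiB) of SDRAM
     * handed to the VRAM allocator before the kernel's bootmem claims it */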
index b155c36..1b86b5b 100644 (file)
@@ -36,6 +36,7 @@
 #define RX51_GPIO_SLEEP_IND 162
 
 struct omap_sdrc_params *rx51_get_sdram_timings(void);
+extern void rx51_video_mem_init(void);
 
 static struct gpio_led gpio_leds[] = {
        {
@@ -143,6 +144,7 @@ static void __init rx51_init(void)
 static void __init rx51_map_io(void)
 {
        omap2_set_globals_343x();
+       rx51_video_mem_init();
        omap34xx_map_common_io();
 }
 
index 23bc981..37d65d6 100644 (file)
@@ -1836,7 +1836,7 @@ static struct omap_clk omap2420_clks[] = {
        CLK(NULL,       "vlynq_ick",    &vlynq_ick,     CK_242X),
        CLK(NULL,       "vlynq_fck",    &vlynq_fck,     CK_242X),
        CLK(NULL,       "des_ick",      &des_ick,       CK_242X),
-       CLK(NULL,       "sha_ick",      &sha_ick,       CK_242X),
+       CLK("omap-sham",        "ick",  &sha_ick,       CK_242X),
        CLK("omap_rng", "ick",          &rng_ick,       CK_242X),
        CLK(NULL,       "aes_ick",      &aes_ick,       CK_242X),
        CLK(NULL,       "pka_ick",      &pka_ick,       CK_242X),
index 2df50d9..b33118f 100644 (file)
@@ -1924,7 +1924,7 @@ static struct omap_clk omap2430_clks[] = {
        CLK(NULL,       "sdma_ick",     &sdma_ick,      CK_243X),
        CLK(NULL,       "sdrc_ick",     &sdrc_ick,      CK_243X),
        CLK(NULL,       "des_ick",      &des_ick,       CK_243X),
-       CLK(NULL,       "sha_ick",      &sha_ick,       CK_243X),
+       CLK("omap-sham",        "ick",  &sha_ick,       CK_243X),
        CLK("omap_rng", "ick",          &rng_ick,       CK_243X),
        CLK(NULL,       "aes_ick",      &aes_ick,       CK_243X),
        CLK(NULL,       "pka_ick",      &pka_ick,       CK_243X),
index 833be48..41b155a 100644 (file)
@@ -3284,7 +3284,7 @@ static struct omap_clk omap3xxx_clks[] = {
        CLK("mmci-omap-hs.2",   "ick",  &mmchs3_ick,    CK_3430ES2 | CK_AM35XX),
        CLK(NULL,       "icr_ick",      &icr_ick,       CK_343X),
        CLK(NULL,       "aes2_ick",     &aes2_ick,      CK_343X),
-       CLK(NULL,       "sha12_ick",    &sha12_ick,     CK_343X),
+       CLK("omap-sham",        "ick",  &sha12_ick,     CK_343X),
        CLK(NULL,       "des2_ick",     &des2_ick,      CK_343X),
        CLK("mmci-omap-hs.1",   "ick",  &mmchs2_ick,    CK_3XXX),
        CLK("mmci-omap-hs.0",   "ick",  &mmchs1_ick,    CK_3XXX),
index 705a7a3..03e6c9e 100644 (file)
@@ -28,6 +28,7 @@
 #include <plat/mux.h>
 #include <mach/gpio.h>
 #include <plat/mmc.h>
+#include <plat/dma.h>
 
 #include "mux.h"
 
@@ -486,8 +487,10 @@ static void omap_init_pmu(void)
 }
 
 
-#ifdef CONFIG_OMAP_SHA1_MD5
-static struct resource sha1_md5_resources[] = {
+#if defined(CONFIG_CRYPTO_DEV_OMAP_SHAM) || defined(CONFIG_CRYPTO_DEV_OMAP_SHAM_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP2
+static struct resource omap2_sham_resources[] = {
        {
                .start  = OMAP24XX_SEC_SHA1MD5_BASE,
                .end    = OMAP24XX_SEC_SHA1MD5_BASE + 0x64,
@@ -498,20 +501,55 @@ static struct resource sha1_md5_resources[] = {
                .flags  = IORESOURCE_IRQ,
        }
 };
+static int omap2_sham_resources_sz = ARRAY_SIZE(omap2_sham_resources);
+#else
+#define omap2_sham_resources           NULL
+#define omap2_sham_resources_sz                0
+#endif
 
-static struct platform_device sha1_md5_device = {
-       .name           = "OMAP SHA1/MD5",
+#ifdef CONFIG_ARCH_OMAP3
+static struct resource omap3_sham_resources[] = {
+       {
+               .start  = OMAP34XX_SEC_SHA1MD5_BASE,
+               .end    = OMAP34XX_SEC_SHA1MD5_BASE + 0x64,
+               .flags  = IORESOURCE_MEM,
+       },
+       {
+               .start  = INT_34XX_SHA1MD52_IRQ,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = OMAP34XX_DMA_SHA1MD5_RX,
+               .flags  = IORESOURCE_DMA,
+       }
+};
+static int omap3_sham_resources_sz = ARRAY_SIZE(omap3_sham_resources);
+#else
+#define omap3_sham_resources           NULL
+#define omap3_sham_resources_sz                0
+#endif
+
+static struct platform_device sham_device = {
+       .name           = "omap-sham",
        .id             = -1,
-       .num_resources  = ARRAY_SIZE(sha1_md5_resources),
-       .resource       = sha1_md5_resources,
 };
 
-static void omap_init_sha1_md5(void)
+static void omap_init_sham(void)
 {
-       platform_device_register(&sha1_md5_device);
+       if (cpu_is_omap24xx()) {
+               sham_device.resource = omap2_sham_resources;
+               sham_device.num_resources = omap2_sham_resources_sz;
+       } else if (cpu_is_omap34xx()) {
+               sham_device.resource = omap3_sham_resources;
+               sham_device.num_resources = omap3_sham_resources_sz;
+       } else {
+               pr_err("%s: platform not supported\n", __func__);
+               return;
+       }
+       platform_device_register(&sham_device);
 }
 #else
-static inline void omap_init_sha1_md5(void) { }
+static inline void omap_init_sham(void) { }
 #endif
 
 /*-------------------------------------------------------------------------*/
@@ -869,7 +907,7 @@ static int __init omap2_init_devices(void)
        omap_init_pmu();
        omap_hdq_init();
        omap_init_sti();
-       omap_init_sha1_md5();
+       omap_init_sham();
        omap_init_vout();
 
        return 0;
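omap_init_sham() installs whichever resource table matches the running SoC before registering the single "omap-sham" device, which keeps the driver itself SoC-agnostic. A sketch of the consumer side under that assumption (hypothetical function name; platform_get_resource()/platform_get_irq() are the real APIs):

    static int sham_probe_sketch(struct platform_device *pdev)
    {
            struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            int irq = platform_get_irq(pdev, 0);

            if (!mem || irq < 0)
                    return -ENODEV;
            /* on OMAP3 the DMA request line added above is also available
             * via platform_get_resource(pdev, IORESOURCE_DMA, 0) */
            return 0;
    }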
index 2845fdc..98fc8b4 100644 (file)
 
 #define OMAP34XX_MAILBOX_BASE          (L4_34XX_BASE + 0x94000)
 
+/* Security */
+#define OMAP34XX_SEC_BASE      (L4_34XX_BASE + 0xA0000)
+#define OMAP34XX_SEC_SHA1MD5_BASE      (OMAP34XX_SEC_BASE + 0x23000)
+#define OMAP34XX_SEC_AES_BASE  (OMAP34XX_SEC_BASE + 0x25000)
+
 #endif /* __ASM_ARCH_OMAP3_H */
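Worked addresses for the new defines, assuming the OMAP3 L4-Core base L4_34XX_BASE = 0x48000000 (the base itself is defined elsewhere in this header family):

    /* OMAP34XX_SEC_BASE         = 0x48000000 + 0xA0000 = 0x480A0000
     * OMAP34XX_SEC_SHA1MD5_BASE = 0x480A0000 + 0x23000 = 0x480C3000
     * OMAP34XX_SEC_AES_BASE     = 0x480A0000 + 0x25000 = 0x480C5000 */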
 
index 2c501ce..7367aea 100644 (file)
@@ -439,6 +439,11 @@ int kgdb_validate_break_address(unsigned long addr)
        return -EFAULT;
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+       regs->retx = ip;
+}
+
 int kgdb_arch_init(void)
 {
        kgdb_single_step = 0;
index 48223b0..19002d6 100644 (file)
@@ -38,6 +38,8 @@ extern int kgdb_early_setup;
 extern void *saved_vectors[32];
 extern void handle_exception(struct pt_regs *regs);
 extern void breakinst(void);
+extern int kgdb_ll_trap(int cmd, const char *str,
+                       struct pt_regs *regs, long err, int trap, int sig);
 
 #endif                         /* __KERNEL__ */
 
index 50c9bb8..9b78ff6 100644 (file)
@@ -180,6 +180,11 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
        *(ptr++) = regs->cp0_epc;
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+       regs->cp0_epc = pc;
+}
+
 /*
  * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
  * then try to fall into the debugger
@@ -198,7 +203,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        if (atomic_read(&kgdb_active) != -1)
                kgdb_nmicallback(smp_processor_id(), regs);
 
-       if (kgdb_handle_exception(trap, compute_signal(trap), 0, regs))
+       if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
                return NOTIFY_DONE;
 
        if (atomic_read(&kgdb_setting_breakpoint))
@@ -212,6 +217,26 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        return NOTIFY_STOP;
 }
 
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+                struct pt_regs *regs, long err, int trap, int sig)
+{
+       struct die_args args = {
+               .regs   = regs,
+               .str    = str,
+               .err    = err,
+               .trapnr = trap,
+               .signr  = sig,
+
+       };
+
+       if (!kgdb_io_module_registered)
+               return NOTIFY_DONE;
+
+       return kgdb_mips_notify(NULL, cmd, &args);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
 static struct notifier_block kgdb_notifier = {
        .notifier_call = kgdb_mips_notify,
 };
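Two things land in this file: kgdb_arch_set_pc(), which the generic stub uses when the debugger resumes at an explicit address, and kgdb_ll_trap(), a direct entry point that lets a trap reach KGDB without a trip through the die-notifier chain. A sketch of the set_pc caller, assuming the generic gdbstub's handling of "c <addr>"/"s <addr>" packets (buffer and variable names are illustrative):

    /* sketch: generic stub side; kgdb_hex2long() parses an optional
     * resume address out of the packet and, if present, redirects the PC */
    ptr = &remcom_in_buffer[1];
    if (kgdb_hex2long(&ptr, &addr))
            kgdb_arch_set_pc(linux_regs, addr);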
index 7e5e38c..8bdd6a6 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
 #include <linux/notifier.h>
+#include <linux/kdb.h>
 
 #include <asm/bootinfo.h>
 #include <asm/branch.h>
@@ -185,6 +186,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
                        regs.regs[29] = task->thread.reg29;
                        regs.regs[31] = 0;
                        regs.cp0_epc = task->thread.reg31;
+#ifdef CONFIG_KGDB_KDB
+               } else if (atomic_read(&kgdb_active) != -1 &&
+                          kdb_current_regs) {
+                       memcpy(&regs, kdb_current_regs, sizeof(regs));
+#endif /* CONFIG_KGDB_KDB */
                } else {
                        prepare_frametrace(&regs);
                }
@@ -360,6 +366,8 @@ void __noreturn die(const char * str, struct pt_regs * regs)
        unsigned long dvpret = dvpe();
 #endif /* CONFIG_MIPS_MT_SMTC */
 
+       notify_die(DIE_OOPS, str, (struct pt_regs *)regs, SIGSEGV, 0, 0);
+
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
@@ -704,6 +712,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
        siginfo_t info;
        char b[40];
 
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+       if (kgdb_ll_trap(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
+               return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
        if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
                return;
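The ordering here matters: do_trap_or_bp() offers the trap to kgdb_ll_trap() first, then to the ordinary die chain, and either consumer returns NOTIFY_STOP to say "handled, do not signal the task". For reference, a minimal die-chain consumer looks like this (illustrative handler; register_die_notifier() and the NOTIFY_* values are the standard <linux/kdebug.h>/<linux/notifier.h> API):

    static int my_trap_notify(struct notifier_block *nb,
                              unsigned long cmd, void *data)
    {
            if (cmd != DIE_TRAP)
                    return NOTIFY_DONE;     /* not ours, keep walking the chain */
            /* ... inspect ((struct die_args *)data)->regs ... */
            return NOTIFY_STOP;             /* claimed: suppress the signal */
    }

    static struct notifier_block my_trap_nb = {
            .notifier_call = my_trap_notify,
    };
    /* registered once with: register_die_notifier(&my_trap_nb); */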
 
index 2e19500..c4c4549 100644 (file)
@@ -140,6 +140,7 @@ config PPC
        select HAVE_SYSCALL_WRAPPERS if PPC64
        select GENERIC_ATOMIC64 if PPC32
        select HAVE_PERF_EVENTS
+       select HAVE_REGS_AND_STACK_ACCESS_API
 
 config EARLY_PRINTK
        bool
index 5cdd7ed..53696da 100644 (file)
@@ -44,6 +44,18 @@ config DEBUG_STACK_USAGE
 
          This option will slow down process creation somewhat.
 
+config DEBUG_PER_CPU_MAPS
+       bool "Debug access to per_cpu maps"
+       depends on DEBUG_KERNEL
+       depends on SMP
+       default n
+       ---help---
+         Say Y to verify that the per_cpu map being accessed has
+         been set up.  Adds a fair amount of code to kernel memory
+         and decreases performance.
+
+         Say N if unsure.
+
 config HCALL_STATS
        bool "Hypervisor call instrumentation"
        depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS
index bb2465b..ad0df7d 100644 (file)
@@ -44,6 +44,7 @@ $(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
 $(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
 $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
 $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
+$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
 $(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
 
 
@@ -77,7 +78,7 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c
                cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
                virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
                cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \
-               gamecube-head.S gamecube.c wii-head.S wii.c
+               gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c
 src-boot := $(src-wlib) $(src-plat) empty.c
 
 src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -169,7 +170,7 @@ quiet_cmd_wrap      = WRAP    $@
                $(if $3, -s $3)$(if $4, -d $4)$(if $5, -i $5) vmlinux
 
 image-$(CONFIG_PPC_PSERIES)            += zImage.pseries
-image-$(CONFIG_PPC_MAPLE)              += zImage.pseries
+image-$(CONFIG_PPC_MAPLE)              += zImage.maple
 image-$(CONFIG_PPC_IBM_CELL_BLADE)     += zImage.pseries
 image-$(CONFIG_PPC_PS3)                        += dtbImage.ps3
 image-$(CONFIG_PPC_CELLEB)             += zImage.pseries
@@ -206,6 +207,8 @@ image-$(CONFIG_TAISHAN)                     += cuImage.taishan
 image-$(CONFIG_KATMAI)                 += cuImage.katmai
 image-$(CONFIG_WARP)                   += cuImage.warp
 image-$(CONFIG_YOSEMITE)               += cuImage.yosemite
+image-$(CONFIG_ISS4xx)                 += treeImage.iss4xx \
+                                          treeImage.iss4xx-mpic
 
 # Board ports in arch/powerpc/platform/8xx/Kconfig
 image-$(CONFIG_MPC86XADS)              += cuImage.mpc866ads
@@ -351,7 +354,7 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
 clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
        zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
        zImage.iseries zImage.miboot zImage.pmac zImage.pseries \
-       simpleImage.* otheros.bld *.dtb
+       zImage.maple simpleImage.* otheros.bld *.dtb
 
 # clean up files cached by wrapper
 clean-kernel := vmlinux.strip vmlinux.bin
diff --git a/arch/powerpc/boot/dts/iss4xx-mpic.dts b/arch/powerpc/boot/dts/iss4xx-mpic.dts
new file mode 100644 (file)
index 0000000..23e9d9b
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ *     Copyright (c) 2006, 2007 IBM Corp.
+ *     Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x01f00000 0x00100000;
+
+/ {
+       #address-cells = <2>;
+       #size-cells = <1>;
+       model = "ibm,iss-4xx";
+       compatible = "ibm,iss-4xx";
+       dcr-parent = <&{/cpus/cpu@0}>;
+
+       aliases {
+               serial0 = &UART0;
+       };
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       model = "PowerPC,4xx"; // real CPU changed in sim
+                       reg = <0>;
+                       clock-frequency = <100000000>; // 100MHz :-)
+                       timebase-frequency = <100000000>;
+                       i-cache-line-size = <32>;
+                       d-cache-line-size = <32>;
+                       i-cache-size = <32768>;
+                       d-cache-size = <32768>;
+                       dcr-controller;
+                       dcr-access-method = "native";
+                       status = "ok";
+               };
+               cpu@1 {
+                       device_type = "cpu";
+                       model = "PowerPC,4xx"; // real CPU changed in sim
+                       reg = <1>;
+                       clock-frequency = <100000000>; // 100MHz :-)
+                       timebase-frequency = <100000000>;
+                       i-cache-line-size = <32>;
+                       d-cache-line-size = <32>;
+                       i-cache-size = <32768>;
+                       d-cache-size = <32768>;
+                       dcr-controller;
+                       dcr-access-method = "native";
+                       status = "disabled";
+                       enable-method = "spin-table";
+                       cpu-release-addr = <0 0x01f00100>;
+               };
+               cpu@2 {
+                       device_type = "cpu";
+                       model = "PowerPC,4xx"; // real CPU changed in sim
+                       reg = <2>;
+                       clock-frequency = <100000000>; // 100MHz :-)
+                       timebase-frequency = <100000000>;
+                       i-cache-line-size = <32>;
+                       d-cache-line-size = <32>;
+                       i-cache-size = <32768>;
+                       d-cache-size = <32768>;
+                       dcr-controller;
+                       dcr-access-method = "native";
+                       status = "disabled";
+                       enable-method = "spin-table";
+                       cpu-release-addr = <0 0x01f00200>;
+               };
+               cpu@3 {
+                       device_type = "cpu";
+                       model = "PowerPC,4xx"; // real CPU changed in sim
+                       reg = <3>;
+                       clock-frequency = <100000000>; // 100MHz :-)
+                       timebase-frequency = <100000000>;
+                       i-cache-line-size = <32>;
+                       d-cache-line-size = <32>;
+                       i-cache-size = <32768>;
+                       d-cache-size = <32768>;
+                       dcr-controller;
+                       dcr-access-method = "native";
+                       status = "disabled";
+                       enable-method = "spin-table";
+                       cpu-release-addr = <0 0x01f00300>;
+               };
+       };
+
+       memory {
+               device_type = "memory";
+               reg =  <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
+
+       };
+
+       MPIC: interrupt-controller {
+               compatible = "chrp,open-pic";
+               interrupt-controller;
+               dcr-reg = <0xffc00000 0x00030000>;
+               #address-cells = <0>;
+               #size-cells = <0>;
+               #interrupt-cells = <2>;
+
+       };
+
+       plb {
+               compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges;
+               clock-frequency = <0>; // Filled in by zImage
+
+               POB0: opb {
+                       compatible = "ibm,opb-4xx", "ibm,opb";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       /* Wish there was a nicer way of specifying a full 32-bit
+                          range */
+                       ranges = <0x00000000 0x00000001 0x00000000 0x80000000
+                                 0x80000000 0x00000001 0x80000000 0x80000000>;
+                       clock-frequency = <0>; // Filled in by zImage
+                       UART0: serial@40000200 {
+                               device_type = "serial";
+                               compatible = "ns16550a";
+                               reg = <0x40000200 0x00000008>;
+                               virtual-reg = <0xe0000200>;
+                               clock-frequency = <11059200>;
+                               current-speed = <115200>;
+                               interrupt-parent = <&MPIC>;
+                               interrupts = <0x0 0x2>;
+                       };
+               };
+       };
+
+       nvrtc {
+               compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
+               reg = <0 0xEF703000 0x2000>;
+       };
+       iss-block {
+               compatible = "ibm,iss-sim-block-device";
+               reg = <0 0xEF701000 0x1000>;
+       };
+
+       chosen {
+               linux,stdout-path = "/plb/opb/serial@40000200";
+       };
+};
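Note how the pieces of this tree fit together: the /memreserve/ at the top covers the secondary CPUs' spin-table slots, and each non-boot CPU stays disabled until released through its cpu-release-addr:

    /* reserved: [0x01f00000, 0x01f00000 + 0x00100000) = [0x01f00000, 0x02000000)
     * cpu-release-addr slots: 0x01f00100, 0x01f00200, 0x01f00300 -- all inside,
     * so bootmem never hands the spin loops' memory to the kernel allocator */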
diff --git a/arch/powerpc/boot/dts/iss4xx.dts b/arch/powerpc/boot/dts/iss4xx.dts
new file mode 100644 (file)
index 0000000..4ff6555
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ *    Copyright (c) 2006, 2007 IBM Corp.
+ *    Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/ {
+       #address-cells = <2>;
+       #size-cells = <1>;
+       model = "ibm,iss-4xx";
+       compatible = "ibm,iss-4xx";
+       dcr-parent = <&{/cpus/cpu@0}>;
+
+       aliases {
+               serial0 = &UART0;
+       };
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       model = "PowerPC,4xx"; // real CPU changed in sim
+                       reg = <0x00000000>;
+                       clock-frequency = <100000000>; // 100MHz :-)
+                       timebase-frequency = <100000000>;
+                       i-cache-line-size = <32>; // may need fixup in sim
+                       d-cache-line-size = <32>; // may need fixup in sim
+                       i-cache-size = <32768>; /* may need fixup in sim */
+                       d-cache-size = <32768>; /* may need fixup in sim */
+                       dcr-controller;
+                       dcr-access-method = "native";
+               };
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
+       };
+
+       UIC0: interrupt-controller0 {
+               compatible = "ibm,uic-4xx", "ibm,uic";
+               interrupt-controller;
+               cell-index = <0>;
+               dcr-reg = <0x0c0 0x009>;
+               #address-cells = <0>;
+               #size-cells = <0>;
+               #interrupt-cells = <2>;
+
+       };
+
+       UIC1: interrupt-controller1 {
+               compatible = "ibm,uic-4xx", "ibm,uic";
+               interrupt-controller;
+               cell-index = <1>;
+               dcr-reg = <0x0d0 0x009>;
+               #address-cells = <0>;
+               #size-cells = <0>;
+               #interrupt-cells = <2>;
+               interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
+               interrupt-parent = <&UIC0>;
+       };
+
+       plb {
+               compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges;
+               clock-frequency = <0>; // Filled in by zImage
+
+               POB0: opb {
+                       compatible = "ibm,opb-4xx", "ibm,opb";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       /* Wish there was a nicer way of specifying a full 32-bit
+                          range */
+                       ranges = <0x00000000 0x00000001 0x00000000 0x80000000
+                                 0x80000000 0x00000001 0x80000000 0x80000000>;
+                       clock-frequency = <0>; // Filled in by zImage
+                       UART0: serial@40000200 {
+                               device_type = "serial";
+                               compatible = "ns16550a";
+                               reg = <0x40000200 0x00000008>;
+                               virtual-reg = <0xe0000200>;
+                               clock-frequency = <11059200>;
+                               current-speed = <115200>;
+                               interrupt-parent = <&UIC0>;
+                               interrupts = <0x0 0x4>;
+                       };
+               };
+       };
+
+       nvrtc {
+               compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
+               reg = <0 0xEF703000 0x2000>;
+       };
+       iss-block {
+               compatible = "ibm,iss-sim-block-device";
+               reg = <0 0xEF701000 0x1000>;
+       };
+
+       chosen {
+               linux,stdout-path = "/plb/opb/serial@40000200";
+       };
+};
index 8a3a4f3..4dd08c3 100644 (file)
                        fsl,num-channels = <4>;
                        fsl,channel-fifo-len = <24>;
                        fsl,exec-units-mask = <0x97c>;
-                       fsl,descriptor-types-mask = <0x3ab0abf>;
+                       fsl,descriptor-types-mask = <0x3a30abf>;
                };
 
                sata@18000 {
                                  0 0x00800000>;
                };
        };
+
+       leds {
+               compatible = "gpio-leds";
+
+               pwr {
+                       gpios = <&mcu_pio 0 0>;
+                       default-state = "on";
+               };
+
+               hdd {
+                       gpios = <&mcu_pio 1 0>;
+                       linux,default-trigger = "ide-disk";
+               };
+       };
 };
index 9e2264b..dbc1b98 100644 (file)
                                  0 0x00800000>;
                };
        };
+
+       leds {
+               compatible = "gpio-leds";
+
+               pwr {
+                       gpios = <&mcu_pio 0 0>;
+                       default-state = "on";
+               };
+
+               hdd {
+                       gpios = <&mcu_pio 1 0>;
+                       linux,default-trigger = "ide-disk";
+               };
+       };
 };
index 4e6a1a4..3447eb9 100644 (file)
                                  0 0x00800000>;
                };
        };
+
+       leds {
+               compatible = "gpio-leds";
+
+               pwr {
+                       gpios = <&mcu_pio 0 0>;
+                       default-state = "on";
+               };
+
+               hdd {
+                       gpios = <&mcu_pio 1 0>;
+                       linux,default-trigger = "ide-disk";
+               };
+       };
 };
index 72336d5..15560c6 100644 (file)
                compatible = "fsl,mpc8349-pci";
                device_type = "pci";
        };
+
+       leds {
+               compatible = "gpio-leds";
+
+               pwr {
+                       gpios = <&mcu_pio 0 0>;
+                       default-state = "on";
+               };
+
+               hdd {
+                       gpios = <&mcu_pio 1 0>;
+                       linux,default-trigger = "ide-disk";
+               };
+       };
 };
index df52690..22f64b6 100644 (file)
@@ -19,6 +19,9 @@
        aliases {
                serial0 = &serial0;
                serial1 = &serial1;
+               ethernet0 = &enet0;
+               ethernet1 = &enet1;
+               ethernet2 = &enet2;
                pci0 = &pci0;
                pci1 = &pci1;
        };
                        };
                };
 
+               mdio@24000 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "fsl,etsec2-mdio";
+                       reg = <0x24000 0x1000 0xb0030 0x4>;
+
+                       phy0: ethernet-phy@0 {
+                               interrupt-parent = <&mpic>;
+                               interrupts = <3 1>;
+                               reg = <0x0>;
+                       };
+
+                       phy1: ethernet-phy@1 {
+                               interrupt-parent = <&mpic>;
+                               interrupts = <2 1>;
+                               reg = <0x1>;
+                       };
+               };
+
+               mdio@25000 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "fsl,etsec2-tbi";
+                       reg = <0x25000 0x1000 0xb1030 0x4>;
+
+                       tbi0: tbi-phy@11 {
+                               reg = <0x11>;
+                               device_type = "tbi-phy";
+                       };
+               };
+
+               enet0: ethernet@b0000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       device_type = "network";
+                       model = "eTSEC";
+                       compatible = "fsl,etsec2";
+                       fsl,num_rx_queues = <0x8>;
+                       fsl,num_tx_queues = <0x8>;
+                       local-mac-address = [ 00 00 00 00 00 00 ];
+                       interrupt-parent = <&mpic>;
+                       fixed-link = <1 1 1000 0 0>;
+                       phy-connection-type = "rgmii-id";
+
+                       queue-group@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xb0000 0x1000>;
+                               interrupts = <29 2 30 2 34 2>;
+                       };
+
+                       queue-group@1 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xb4000 0x1000>;
+                               interrupts = <17 2 18 2 24 2>;
+                       };
+               };
+
+               enet1: ethernet@b1000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       device_type = "network";
+                       model = "eTSEC";
+                       compatible = "fsl,etsec2";
+                       fsl,num_rx_queues = <0x8>;
+                       fsl,num_tx_queues = <0x8>;
+                       local-mac-address = [ 00 00 00 00 00 00 ];
+                       interrupt-parent = <&mpic>;
+                       phy-handle = <&phy0>;
+                       tbi-handle = <&tbi0>;
+                       phy-connection-type = "sgmii";
+
+                       queue-group@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xb1000 0x1000>;
+                               interrupts = <35 2 36 2 40 2>;
+                       };
+
+                       queue-group@1 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xb5000 0x1000>;
+                               interrupts = <51 2 52 2 67 2>;
+                       };
+               };
+
+               enet2: ethernet@b2000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       device_type = "network";
+                       model = "eTSEC";
+                       compatible = "fsl,etsec2";
+                       fsl,num_rx_queues = <0x8>;
+                       fsl,num_tx_queues = <0x8>;
+                       local-mac-address = [ 00 00 00 00 00 00 ];
+                       interrupt-parent = <&mpic>;
+                       phy-handle = <&phy1>;
+                       phy-connection-type = "rgmii-id";
+
+                       queue-group@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xb2000 0x1000>;
+                               interrupts = <31 2 32 2 33 2>;
+                       };
+
+                       queue-group@1 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0xb6000 0x1000>;
+                               interrupts = <25 2 26 2 27 2>;
+                       };
+               };
+
                usb@22000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        phy_type = "ulpi";
                };
 
+               /* USB2 is shared with localbus, so it must be disabled
+                  by default. We can't put 'status = "disabled";' here
+                  since U-Boot doesn't clear the status property when
+                  it enables USB2. OTOH, U-Boot does create a new node
+                  when there isn't any. So, just comment it out.
                usb@23000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        interrupts = <46 0x2>;
                        phy_type = "ulpi";
                };
+               */
 
                sdhci@2e000 {
                        compatible = "fsl,p1020-esdhc", "fsl,esdhc";
diff --git a/arch/powerpc/boot/treeboot-iss4xx.c b/arch/powerpc/boot/treeboot-iss4xx.c
new file mode 100644 (file)
index 0000000..fcc4495
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2010 Ben. Herrenschmidt, IBM Corporation.
+ *
+ * Based on earlier code:
+ *   Copyright (C) Paul Mackerras 1997.
+ *
+ *   Matt Porter <mporter@kernel.crashing.org>
+ *   Copyright 2002-2005 MontaVista Software Inc.
+ *
+ *   Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *   Copyright (c) 2003, 2004 Zultys Technologies
+ *
+ *    Copyright 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "elf.h"
+#include "string.h"
+#include "stdio.h"
+#include "page.h"
+#include "ops.h"
+#include "reg.h"
+#include "io.h"
+#include "dcr.h"
+#include "4xx.h"
+#include "44x.h"
+#include "libfdt.h"
+
+BSS_STACK(4096);
+
+static void iss_4xx_fixups(void)
+{
+       ibm4xx_sdram_fixup_memsize();
+}
+
+#define SPRN_PIR       0x11E   /* Processor Identification Register */
+void platform_init(void)
+{
+       unsigned long end_of_ram = 0x08000000;
+       unsigned long avail_ram = end_of_ram - (unsigned long)_end;
+       u32 pir_reg;
+
+       simple_alloc_init(_end, avail_ram, 128, 64);
+       platform_ops.fixups = iss_4xx_fixups;
+       platform_ops.exit = ibm44x_dbcr_reset;
+       pir_reg = mfspr(SPRN_PIR);
+       fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
+       fdt_init(_dtb_start);
+       serial_console_init();
+}
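platform_init() reads the Processor Identification Register so the device tree records which core actually entered the boot wrapper. In libfdt terms the effect is (sketch; fdt_boot_cpuid_phys() is the standard accessor for the header field being set):

    /* after fdt_set_boot_cpuid_phys(_dtb_start, pir_reg): */
    u32 boot_cpu = fdt_boot_cpuid_phys(_dtb_start);
    /* boot_cpu == mfspr(SPRN_PIR), i.e. the reg value of the entering CPU */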
index f4594ed..cb97e75 100755 (executable)
@@ -149,6 +149,10 @@ pseries)
     platformo=$object/of.o
     link_address='0x4000000'
     ;;
+maple)
+    platformo=$object/of.o
+    link_address='0x400000'
+    ;;
 pmac|chrp)
     platformo=$object/of.o
     ;;
@@ -237,6 +241,9 @@ gamecube|wii)
     link_address='0x600000'
     platformo="$object/$platform-head.o $object/$platform.o"
     ;;
+treeboot-iss4xx-mpic)
+    platformo="$object/treeboot-iss4xx.o"
+    ;;
 esac
 
 vmz="$tmpdir/`basename \"$kernel\"`.$ext"
@@ -321,7 +328,7 @@ fi
 
 # post-processing needed for some platforms
 case "$platform" in
-pseries|chrp)
+pseries|chrp|maple)
     $objbin/addnote "$ofile"
     ;;
 coff)
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
new file mode 100644 (file)
index 0000000..8683cbc
--- /dev/null
@@ -0,0 +1,1026 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.33
+# Thu Mar  4 11:50:12 2010
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_PPC_BOOK3S_32 is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+CONFIG_44x=y
+# CONFIG_E200 is not set
+CONFIG_PPC_FPU=y
+CONFIG_4xx=y
+CONFIG_BOOKE=y
+CONFIG_PTE_64BIT=y
+CONFIG_PHYS_64BIT=y
+CONFIG_PPC_MMU_NOHASH=y
+CONFIG_PPC_MMU_NOHASH_32=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_NOT_COHERENT_CACHE is not set
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+CONFIG_PPC_UDBG_16550=y
+CONFIG_GENERIC_TBSYNC=y
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DTC=y
+# CONFIG_DEFAULT_UIMAGE is not set
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_PPC_DCR_NATIVE=y
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_PPC_DCR=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
+
+#
+# Platform support
+#
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+CONFIG_PPC_47x=y
+# CONFIG_BAMBOO is not set
+# CONFIG_EBONY is not set
+# CONFIG_SAM440EP is not set
+# CONFIG_SEQUOIA is not set
+# CONFIG_TAISHAN is not set
+# CONFIG_KATMAI is not set
+# CONFIG_RAINIER is not set
+# CONFIG_WARP is not set
+# CONFIG_ARCHES is not set
+# CONFIG_CANYONLANDS is not set
+# CONFIG_GLACIER is not set
+# CONFIG_REDWOOD is not set
+# CONFIG_EIGER is not set
+# CONFIG_YOSEMITE is not set
+CONFIG_ISS4xx=y
+# CONFIG_XILINX_VIRTEX440_GENERIC_BOARD is not set
+# CONFIG_PPC44x_SIMPLE is not set
+# CONFIG_PPC4xx_GPIO is not set
+# CONFIG_IPIC is not set
+CONFIG_MPIC=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_OF_RTC=y
+# CONFIG_SIMPLE_GPIO is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_MATH_EMULATION=y
+# CONFIG_IOMMU_HELPER is not set
+# CONFIG_SWIOTLB is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_MAX_ACTIVE_REGIONS=32
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_STDBINUTILS=y
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_PPC_16K_PAGES is not set
+# CONFIG_PPC_64K_PAGES is not set
+# CONFIG_PPC_256K_PAGES is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="root=/dev/issblk0"
+CONFIG_EXTRA_TARGETS=""
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_4xx_SOC=y
+CONFIG_PPC_PCI_CHOICE=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_OF_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_PHYSMAP_OF=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=35000
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+# CONFIG_NETDEVICES is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+# CONFIG_SERIAL_8250_RSA is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL is not set
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_HVC_UDBG is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_PPC_DISABLE_WERROR is not set
+CONFIG_PPC_WERROR=y
+CONFIG_PRINT_STACK_DEPTH=64
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_PPC_EMULATED_STATS is not set
+# CONFIG_CODE_PATCHING_SELFTEST is not set
+# CONFIG_FTR_FIXUP_SELFTEST is not set
+# CONFIG_MSI_BITMAP_SELFTEST is not set
+# CONFIG_XMON is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_VIRQ_DEBUG is not set
+# CONFIG_BDI_SWITCH is not set
+CONFIG_PPC_EARLY_DEBUG=y
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
+# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
+CONFIG_PPC_EARLY_DEBUG_44x=y
+# CONFIG_PPC_EARLY_DEBUG_40x is not set
+# CONFIG_PPC_EARLY_DEBUG_CPM is not set
+# CONFIG_PPC_EARLY_DEBUG_USBGECKO is not set
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0x40000200
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x1
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_PPC_CLOCK is not set
+# CONFIG_VIRTUALIZATION is not set
index 12980d5..dad617e 100644
@@ -988,7 +988,7 @@ CONFIG_ACENIC=m
 CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
-CONFIG_E1000E=m
+CONFIG_E1000E=y
 # CONFIG_IP1000 is not set
 # CONFIG_IGB is not set
 # CONFIG_NS83820 is not set
index 41de3dd..16a1458 100644
@@ -804,7 +804,7 @@ CONFIG_ACENIC=m
 CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
-CONFIG_E1000E=m
+CONFIG_E1000E=y
 # CONFIG_IP1000 is not set
 # CONFIG_IGB is not set
 # CONFIG_NS83820 is not set
index 81de6eb..725634f 100644
 #define L1_CACHE_SHIFT         6
 #define MAX_COPY_PREFETCH      4
 #elif defined(CONFIG_PPC32)
-#define L1_CACHE_SHIFT         5
 #define MAX_COPY_PREFETCH      4
+#if defined(CONFIG_PPC_47x)
+#define L1_CACHE_SHIFT         7
+#else
+#define L1_CACHE_SHIFT         5
+#endif
 #else /* CONFIG_PPC64 */
 #define L1_CACHE_SHIFT         7
 #endif
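For context: elsewhere in cache.h (untouched by this hunk) the line size in bytes is derived from the shift, so 47x kernels get 128-byte cache lines while other PPC32 builds keep 32 bytes. A minimal sketch of that derivation, assuming the usual kernel convention:

	#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)	/* 128 on 47x, 32 on other PPC32 */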
index abb833b..e3cba4e 100644
@@ -72,6 +72,7 @@ extern int machine_check_4xx(struct pt_regs *regs);
 extern int machine_check_440A(struct pt_regs *regs);
 extern int machine_check_e500(struct pt_regs *regs);
 extern int machine_check_e200(struct pt_regs *regs);
+extern int machine_check_47x(struct pt_regs *regs);
 
 /* NOTE WELL: Update identify_cpu() if fields are added or removed! */
 struct cpu_spec {
@@ -365,6 +366,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_44X   (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
            CPU_FTR_INDEXED_DCR)
+#define CPU_FTRS_47X   (CPU_FTRS_440x6)
 #define CPU_FTRS_E200  (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
            CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
            CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
@@ -453,6 +455,9 @@ enum {
 #ifdef CONFIG_44x
            CPU_FTRS_44X | CPU_FTRS_440x6 |
 #endif
+#ifdef CONFIG_PPC_47x
+           CPU_FTRS_47X |
+#endif
 #ifdef CONFIG_E200
            CPU_FTRS_E200 |
 #endif
index f027581..5119b7d 100644
 #define H_JOIN                 0x298
 #define H_VASI_STATE            0x2A4
 #define H_ENABLE_CRQ           0x2B0
+#define H_GET_EM_PARMS         0x2B8
 #define H_SET_MPP              0x2D0
 #define H_GET_MPP              0x2D4
 #define MAX_HCALL_OPCODE       H_GET_MPP
@@ -281,6 +282,7 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
  */
 #define PLPAR_HCALL9_BUFSIZE 9
 long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
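A hypothetical caller sketch (not part of this patch) showing the nine-return-word convention that plpar_hcall9() and the new plpar_hcall9_raw() share; presumably, like the existing _raw variants, the raw version bypasses the per-CPU hcall statistics:

	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];	/* room for nine return words */
	long rc = plpar_hcall9(H_GET_MPP, retbuf);	/* results land in retbuf[0..8] */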
 
 /* For hcall instrumentation.  One structure per-hcall, per-CPU */
 struct hcall_stats {
index 7e06b43..a6ca6da 100644
 #define KEXEC_ARCH KEXEC_ARCH_PPC
 #endif
 
+#define KEXEC_STATE_NONE 0
+#define KEXEC_STATE_IRQS_OFF 1
+#define KEXEC_STATE_REAL_MODE 2
+
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 #include <asm/reg.h>
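The three states form an ordered progression, which is what lets the crash path later in this series poll paca[i].kexec_state with a plain `<` comparison. A sketch of the producer side, under the assumption that each CPU going down advances its own paca entry:

	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;	/* after hard-disabling interrupts */
	/* ... low-level code switches translation off, then: */
	get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;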
index 9163695..bca8fdc 100644
@@ -26,6 +26,7 @@ enum km_type {
        KM_SOFTIRQ1,
        KM_PPC_SYNC_PAGE,
        KM_PPC_SYNC_ICACHE,
+       KM_KDB,
        KM_TYPE_NR
 };
 
index 0372669..bf52d70 100644
@@ -40,7 +40,7 @@
 #define PPC44x_TLB_I           0x00000400      /* Caching is inhibited */
 #define PPC44x_TLB_M           0x00000200      /* Memory is coherent */
 #define PPC44x_TLB_G           0x00000100      /* Memory is guarded */
-#define PPC44x_TLB_E           0x00000080      /* Memory is guarded */
+#define PPC44x_TLB_E           0x00000080      /* Memory is little endian */
 
 #define PPC44x_TLB_PERM_MASK   0x0000003f
 #define PPC44x_TLB_UX          0x00000020      /* User execution */
 /* Number of TLB entries */
 #define PPC44x_TLB_SIZE                64
 
+/* 47x bits */
+#define PPC47x_MMUCR_TID       0x0000ffff
+#define PPC47x_MMUCR_STS       0x00010000
+
+/* Page identification fields */
+#define PPC47x_TLB0_EPN_MASK   0xfffff000      /* Effective Page Number */
+#define PPC47x_TLB0_VALID      0x00000800      /* Valid flag */
+#define PPC47x_TLB0_TS         0x00000400      /* Translation address space */
+#define PPC47x_TLB0_4K         0x00000000
+#define PPC47x_TLB0_16K                0x00000010
+#define PPC47x_TLB0_64K                0x00000030
+#define PPC47x_TLB0_1M         0x00000070
+#define PPC47x_TLB0_16M                0x000000f0
+#define PPC47x_TLB0_256M       0x000001f0
+#define PPC47x_TLB0_1G         0x000003f0
+#define PPC47x_TLB0_BOLTED_R   0x00000008      /* tlbre only */
+
+/* Translation fields */
+#define PPC47x_TLB1_RPN_MASK   0xfffff000      /* Real Page Number */
+#define PPC47x_TLB1_ERPN_MASK  0x000003ff
+
+/* Storage attribute and access control fields */
+#define PPC47x_TLB2_ATTR_MASK  0x0003ff80
+#define PPC47x_TLB2_IL1I       0x00020000      /* Memory is guarded */
+#define PPC47x_TLB2_IL1D       0x00010000      /* Memory is guarded */
+#define PPC47x_TLB2_U0         0x00008000      /* User 0 */
+#define PPC47x_TLB2_U1         0x00004000      /* User 1 */
+#define PPC47x_TLB2_U2         0x00002000      /* User 2 */
+#define PPC47x_TLB2_U3         0x00001000      /* User 3 */
+#define PPC47x_TLB2_W          0x00000800      /* Caching is write-through */
+#define PPC47x_TLB2_I          0x00000400      /* Caching is inhibited */
+#define PPC47x_TLB2_M          0x00000200      /* Memory is coherent */
+#define PPC47x_TLB2_G          0x00000100      /* Memory is guarded */
+#define PPC47x_TLB2_E          0x00000080      /* Memory is little endian */
+#define PPC47x_TLB2_PERM_MASK  0x0000003f
+#define PPC47x_TLB2_UX         0x00000020      /* User execution */
+#define PPC47x_TLB2_UW         0x00000010      /* User write */
+#define PPC47x_TLB2_UR         0x00000008      /* User read */
+#define PPC47x_TLB2_SX         0x00000004      /* Super execution */
+#define PPC47x_TLB2_SW         0x00000002      /* Super write */
+#define PPC47x_TLB2_SR         0x00000001      /* Super read */
+#define PPC47x_TLB2_U_RWX      (PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR)
+#define PPC47x_TLB2_S_RWX      (PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR)
+#define PPC47x_TLB2_S_RW       (PPC47x_TLB2_SW | PPC47x_TLB2_SR)
+#define PPC47x_TLB2_IMG                (PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G)
+
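Unlike the one-word-per-field 44x PAGEID/XLAT/ATTRIB layout above, a 47x UTLB entry is written as three words: word 0 identifies the page, word 1 carries the translation, word 2 the attributes and permissions. A hypothetical illustration of word 0 for a valid 4K mapping of effective address `ea` (this mirrors what the 47x miss handlers added later in this patch build with li/rlwimi):

	u32 w0 = (ea & PPC47x_TLB0_EPN_MASK) | PPC47x_TLB0_VALID | PPC47x_TLB0_4K;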
 #ifndef __ASSEMBLY__
 
 extern unsigned int tlb_44x_hwater;
@@ -79,12 +125,15 @@ typedef struct {
 
 #if (PAGE_SHIFT == 12)
 #define PPC44x_TLBE_SIZE       PPC44x_TLB_4K
+#define PPC47x_TLBE_SIZE       PPC47x_TLB0_4K
 #define mmu_virtual_psize      MMU_PAGE_4K
 #elif (PAGE_SHIFT == 14)
 #define PPC44x_TLBE_SIZE       PPC44x_TLB_16K
+#define PPC47x_TLBE_SIZE       PPC47x_TLB0_16K
 #define mmu_virtual_psize      MMU_PAGE_16K
 #elif (PAGE_SHIFT == 16)
 #define PPC44x_TLBE_SIZE       PPC44x_TLB_64K
+#define PPC47x_TLBE_SIZE       PPC47x_TLB0_64K
 #define mmu_virtual_psize      MMU_PAGE_64K
 #elif (PAGE_SHIFT == 18)
 #define PPC44x_TLBE_SIZE       PPC44x_TLB_256K
index 7ffbb65..7ebf42e 100644
@@ -18,6 +18,7 @@
 #define MMU_FTR_TYPE_44x               ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E             ASM_CONST(0x00000010)
 #define MMU_FTR_TYPE_3E                        ASM_CONST(0x00000020)
+#define MMU_FTR_TYPE_47x               ASM_CONST(0x00000040)
 
 /*
  * This is individual features
index 35acac9..aac87cb 100644
@@ -30,7 +30,7 @@ extern struct pglist_data *node_data[];
  */
 
 extern int numa_cpu_lookup_table[];
-extern cpumask_t numa_cpumask_lookup_table[];
+extern cpumask_var_t node_to_cpumask_map[];
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern unsigned long max_pfn;
 #endif
index 61913d9..e000cce 100644
@@ -463,9 +463,6 @@ extern void mpic_cpu_set_priority(int prio);
 /* Request IPIs on primary mpic */
 extern void mpic_request_ipis(void);
 
-/* Send an IPI (non offseted number 0..3) */
-extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
-
 /* Send a message (IPI) to a given target (cpu number or MSG_*) */
 void smp_mpic_message_pass(int target, int msg);
 
index a011603..971dfa4 100644
@@ -82,6 +82,7 @@ struct paca_struct {
        s16 hw_cpu_id;                  /* Physical processor number */
        u8 cpu_start;                   /* At startup, processor spins until */
                                        /* this becomes non-zero. */
+       u8 kexec_state;         /* kexec shutdown progress (KEXEC_STATE_*) */
 #ifdef CONFIG_PPC_STD_MMU_64
        struct slb_shadow *slb_shadow_ptr;
 
index 94942d6..1ca1102 100644
@@ -19,6 +19,8 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
        u32 io1, io2;
        int propsize;
        int count = 0;
+       int virq;
+
        for (np = NULL; (np = of_find_compatible_node(np,
                                                      "parallel",
                                                      "pnpPNP,400")) != NULL;) {
@@ -26,10 +28,13 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
                if (!prop || propsize > 6*sizeof(u32))
                        continue;
                io1 = prop[1]; io2 = prop[2];
-               prop = of_get_property(np, "interrupts", NULL);
-               if (!prop)
+
+               virq = irq_of_parse_and_map(np, 0);
+               if (virq == NO_IRQ)
                        continue;
-               if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL)
+
+               if (parport_pc_probe_port(io1, io2, virq, autodma, NULL, 0)
+                               != NULL)
                        count++;
        }
        return count;
index 605f5c5..292725c 100644
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
+struct vmemmap_backing {
+       struct vmemmap_backing *list;
+       unsigned long phys;
+       unsigned long virt_addr;
+};
+
 /*
  * Functions that deal with pagetables that could be at any level of
  * the table need to be passed an "index_size" so they know how to
index 55646ad..a7db96f 100644
@@ -287,7 +287,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 #define pmd_page_vaddr(pmd)    \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 #define pmd_page(pmd)          \
-       (mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
+       pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 #else
 #define pmd_page_vaddr(pmd)    \
        ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
index 9e2d84c..5d8be04 100644
@@ -89,6 +89,7 @@ struct pt_regs {
 
 #define instruction_pointer(regs) ((regs)->nip)
 #define user_stack_pointer(regs) ((regs)->gpr[1])
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
 #define regs_return_value(regs) ((regs)->gpr[3])
 
 #ifdef CONFIG_SMP
@@ -141,6 +142,69 @@ do {                                                                             \
 #define arch_has_block_step()  (!cpu_has_feature(CPU_FTR_601))
 #define ARCH_HAS_USER_SINGLE_STEP_INFO
 
+/*
+ * kprobe-based event tracer support
+ */
+
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:      pt_regs from which the register value is read
+ * @offset:    byte offset of the register within struct pt_regs
+ *
+ * regs_get_register() returns the value of the register located at @offset
+ * within @regs. If @offset is bigger than MAX_REG_OFFSET, this returns 0
+ * rather than reading past the end of the structure.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+                                               unsigned int offset)
+{
+       if (unlikely(offset > MAX_REG_OFFSET))
+               return 0;
+       return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @addr:      address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr lies within the kernel
+ * stack page(s): it returns true if so, and false otherwise.
+ */
+
+static inline bool regs_within_kernel_stack(struct pt_regs *regs,
+                                               unsigned long addr)
+{
+       return ((addr & ~(THREAD_SIZE - 1))  ==
+               (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @n:         stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
+ * specified by @regs. If the @n-th entry is NOT within the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+                                                     unsigned int n)
+{
+       unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+       addr += n;
+       if (regs_within_kernel_stack(regs, (unsigned long)addr))
+               return *addr;
+       else
+               return 0;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
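A hypothetical consumer of the new helpers (not part of this patch), e.g. from a kprobe-based event tracer; gpr[3] and the zeroth stack word are arbitrary examples:

	unsigned long r3, top;
	r3  = regs_get_register(regs, offsetof(struct pt_regs, gpr[3]));	/* GPR3 */
	top = regs_get_kernel_stack_nth(regs, 0);	/* word at the stack pointer */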
index 5572e86..b68f025 100644
 #define PVR_403GC      0x00200200
 #define PVR_403GCX     0x00201400
 #define PVR_405GP      0x40110000
+#define PVR_476                0x11a52000
 #define PVR_STB03XXX   0x40310000
 #define PVR_NP405H     0x41410000
 #define PVR_NP405L     0x41610000
 #define PVR_8245       0x80811014
 #define PVR_8260       PVR_8240
 
+/* 476 Simulator seems to currently have the PVR of the 602... */
+#define PVR_476_ISS    0x00052000
+
 /* 64-bit processors */
 /* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
 #define PV_NORTHSTAR   0x0033
index 414d434..5304a37 100644
 #define MCSR_DCFP      0x01000000 /* D-Cache Flush Parity Error */
 #define MCSR_IMPE      0x00800000 /* Imprecise Machine Check Exception */
 
+#define PPC47x_MCSR_GPR        0x01000000 /* GPR parity error */
+#define PPC47x_MCSR_FPR        0x00800000 /* FPR parity error */
+#define PPC47x_MCSR_IPR        0x00400000 /* Imprecise Machine Check Exception */
+
 #ifdef CONFIG_E500
 #define MCSR_MCP       0x80000000UL /* Machine Check Input Pin */
 #define MCSR_ICPERR    0x40000000UL /* I-Cache Parity Error */
 #define DBCR_JOI       0x00000002      /* JTAG Serial Outbound Int. Enable */
 #define DBCR_JII       0x00000001      /* JTAG Serial Inbound Int. Enable */
 #endif /* 403GCX */
+
+/* Some 476 specific registers */
+#define SPRN_SSPCR             830
+#define SPRN_USPCR             831
+#define SPRN_ISPCR             829
+#define SPRN_MMUBE0            820
+#define MMUBE0_IBE0_SHIFT      24
+#define MMUBE0_IBE1_SHIFT      16
+#define MMUBE0_IBE2_SHIFT      8
+#define MMUBE0_VBE0            0x00000004
+#define MMUBE0_VBE1            0x00000002
+#define MMUBE0_VBE2            0x00000001
+#define SPRN_MMUBE1            821
+#define MMUBE1_IBE3_SHIFT      24
+#define MMUBE1_IBE4_SHIFT      16
+#define MMUBE1_IBE5_SHIFT      8
+#define MMUBE1_VBE3            0x00000004
+#define MMUBE1_VBE4            0x00000002
+#define MMUBE1_VBE5            0x00000001
+
 #endif /* __ASM_POWERPC_REG_BOOKE_H__ */
 #endif /* __KERNEL__ */
index 1d3b270..66e237b 100644
@@ -40,7 +40,7 @@ extern void smp_message_recv(int);
 DECLARE_PER_CPU(unsigned int, cpu_pvr);
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(const struct cpumask *map);
 int generic_cpu_disable(void);
 int generic_cpu_enable(unsigned int cpu);
 void generic_cpu_die(unsigned int cpu);
@@ -68,8 +68,19 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
 }
 #endif
 
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+       return per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+       return per_cpu(cpu_core_map, cpu);
+}
+
 extern int cpu_to_core_id(int cpu);
 
 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
@@ -93,7 +104,6 @@ void smp_init_pSeries(void);
 void smp_init_cell(void);
 void smp_init_celleb(void);
 void smp_setup_cpu_maps(void);
-void smp_setup_cpu_sibling_map(void);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
index 8eaec31..32adf72 100644
@@ -8,6 +8,16 @@ struct device_node;
 
 #ifdef CONFIG_NUMA
 
+/*
+ * Before going off node we want the VM to try and reclaim from the local
+ * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
+ * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
+ * 20, we never reclaim and go off node straight away.
+ *
+ * To fix this we choose a smaller value of RECLAIM_DISTANCE.
+ */
+#define RECLAIM_DISTANCE 10
+
 #include <asm/mmzone.h>
 
 static inline int cpu_to_node(int cpu)
@@ -19,7 +39,7 @@ static inline int cpu_to_node(int cpu)
 
 #define cpumask_of_node(node) ((node) == -1 ?                          \
                               cpu_all_mask :                           \
-                              &numa_cpumask_lookup_table[node])
+                              node_to_cpumask_map[node])
 
 int of_node_to_nid(struct device_node *device);
 
@@ -102,8 +122,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
-#define topology_thread_cpumask(cpu)   (&per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu)     (&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)   (per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)     (per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)          (cpu_to_core_id(cpu))
 #endif
 #endif
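The cpumask_of_node() interface is unchanged for its callers; only the backing storage moves from a static cpumask_t array to the per-node cpumask_var_t map. A usage sketch, with `nid` a hypothetical node id:

	int cpu;
	for_each_cpu(cpu, cpumask_of_node(nid))
		pr_info("node %d contains CPU%d\n", nid, cpu);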
index c09138d..28a686f 100644
@@ -183,6 +183,7 @@ int main(void)
 #endif /* CONFIG_PPC_STD_MMU_64 */
        DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
        DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+       DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
        DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
        DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
        DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
@@ -447,6 +448,14 @@ int main(void)
        DEFINE(PGD_T_LOG2, PGD_T_LOG2);
        DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
+#ifdef CONFIG_FSL_BOOKE
+       DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+       DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
+       DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
+       DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
+       DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
+       DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
+#endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
        DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
index 8af4949..9556be9 100644
@@ -1701,6 +1701,34 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .machine_check          = machine_check_440A,
                .platform               = "ppc440",
        },
+       { /* 476 core */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x11a50000,
+               .cpu_name               = "476",
+               .cpu_features           = CPU_FTRS_47X,
+               .cpu_user_features      = COMMON_USER_BOOKE |
+                       PPC_FEATURE_HAS_FPU,
+               .mmu_features           = MMU_FTR_TYPE_47x |
+                       MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+               .icache_bsize           = 32,
+               .dcache_bsize           = 128,
+               .machine_check          = machine_check_47x,
+               .platform               = "ppc470",
+       },
+       { /* 476 iss */
+               .pvr_mask               = 0xffff0000,
+               .pvr_value              = 0x00050000,
+               .cpu_name               = "476",
+               .cpu_features           = CPU_FTRS_47X,
+               .cpu_user_features      = COMMON_USER_BOOKE |
+                       PPC_FEATURE_HAS_FPU,
+               .mmu_features           = MMU_FTR_TYPE_47x |
+                       MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+               .icache_bsize           = 32,
+               .dcache_bsize           = 128,
+               .machine_check          = machine_check_47x,
+               .platform               = "ppc470",
+       },
        {       /* default match */
                .pvr_mask               = 0x00000000,
                .pvr_value              = 0x00000000,
index 6f4613d..8c066d6 100644
@@ -162,6 +162,32 @@ static void crash_kexec_prepare_cpus(int cpu)
        /* Leave the IPI callback set */
 }
 
+/* wait for all the CPUs to hit real mode but timeout if they don't come in */
+static void crash_kexec_wait_realmode(int cpu)
+{
+       unsigned int msecs;
+       int i;
+
+       msecs = 10000;
+       for (i = 0; i < NR_CPUS && msecs > 0; i++) {
+               if (i == cpu)
+                       continue;
+
+               while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+                       barrier();
+                       /*
+                        * Give up on CPUs that are not present or online,
+                        * and stop waiting once the overall timeout budget
+                        * (~10s of mdelay(1) ticks) has been spent.
+                        */
+                       if (!cpu_possible(i) || !cpu_online(i) || !msecs)
+                       msecs--;
+                       mdelay(1);
+               }
+       }
+       mb();
+}
+
 /*
  * This function will be called by secondary cpus or by kexec cpu
  * if soft-reset is activated to stop some CPUs.
@@ -347,10 +373,12 @@ int crash_shutdown_unregister(crash_shutdown_t handler)
 EXPORT_SYMBOL(crash_shutdown_unregister);
 
 static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
+static int crash_shutdown_cpu = -1;
 
 static int handle_fault(struct pt_regs *regs)
 {
-       longjmp(crash_shutdown_buf, 1);
+       if (crash_shutdown_cpu == smp_processor_id())
+               longjmp(crash_shutdown_buf, 1);
        return 0;
 }
 
@@ -375,11 +403,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
        for_each_irq(i) {
                struct irq_desc *desc = irq_to_desc(i);
 
+               if (!desc || !desc->chip || !desc->chip->eoi)
+                       continue;
+
                if (desc->status & IRQ_INPROGRESS)
                        desc->chip->eoi(i);
 
                if (!(desc->status & IRQ_DISABLED))
-                       desc->chip->disable(i);
+                       desc->chip->shutdown(i);
        }
 
        /*
@@ -388,6 +419,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
         */
        old_handler = __debugger_fault_handler;
        __debugger_fault_handler = handle_fault;
+       crash_shutdown_cpu = smp_processor_id();
        for (i = 0; crash_shutdown_handles[i]; i++) {
                if (setjmp(crash_shutdown_buf) == 0) {
                        /*
@@ -401,6 +433,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
                        asm volatile("sync; isync");
                }
        }
+       crash_shutdown_cpu = -1;
        __debugger_fault_handler = old_handler;
 
        /*
@@ -412,6 +445,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
        crash_kexec_prepare_cpus(crashing_cpu);
        cpu_set(crashing_cpu, cpus_in_crash);
        crash_kexec_stop_spus();
+       crash_kexec_wait_realmode(crashing_cpu);
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(1, 0);
 }
index 1175a85..ed4aeb9 100644
@@ -373,11 +373,13 @@ syscall_exit_cont:
        bnel-   load_dbcr0
 #endif
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        bne-    2f
 1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 #endif /* CONFIG_44x */
 BEGIN_FTR_SECTION
        lwarx   r7,0,r1
@@ -848,6 +850,9 @@ resume_kernel:
        /* interrupts are hard-disabled at this point */
 restore:
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+       b       1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
index e3be98f..3e423fb 100644
@@ -735,8 +735,11 @@ _STATIC(do_hash_page)
        std     r3,_DAR(r1)
        std     r4,_DSISR(r1)
 
-       andis.  r0,r4,0xa450            /* weird error? */
+       andis.  r0,r4,0xa410            /* weird error? */
        bne-    handle_page_fault       /* if not, try to insert a HPTE */
+       andis.  r0,r4,DSISR_DABRMATCH@h
+       bne-    handle_dabr_fault
+
 BEGIN_FTR_SECTION
        andis.  r0,r4,0x0020            /* Is it a segment table fault? */
        bne-    do_ste_alloc            /* If so handle it */
@@ -823,6 +826,14 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
        bl      .raw_local_irq_restore
        b       11f
 
+/* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+       ld      r4,_DAR(r1)
+       ld      r5,_DSISR(r1)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_dabr
+       b       .ret_from_except_lite
+
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
        ENABLE_INTS
index 711368b..5ab484e 100644
@@ -37,6 +37,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/synch.h>
 #include "head_booke.h"
 
 
@@ -69,165 +70,7 @@ _ENTRY(_start);
        mr      r27,r7
        li      r24,0           /* CPU number */
 
-/*
- * In case the firmware didn't do it, we apply some workarounds
- * that are good for all 440 core variants here
- */
-       mfspr   r3,SPRN_CCR0
-       rlwinm  r3,r3,0,0,27    /* disable icache prefetch */
-       isync
-       mtspr   SPRN_CCR0,r3
-       isync
-       sync
-
-/*
- * Set up the initial MMU state
- *
- * We are still executing code at the virtual address
- * mappings set by the firmware for the base of RAM.
- *
- * We first invalidate all TLB entries but the one
- * we are running from.  We then load the KERNELBASE
- * mappings so we can begin to use kernel addresses
- * natively and so the interrupt vector locations are
- * permanently pinned (necessary since Book E
- * implementations always have translation enabled).
- *
- * TODO: Use the known TLB entry we are running from to
- *      determine which physical region we are located
- *      in.  This can be used to determine where in RAM
- *      (on a shared CPU system) or PCI memory space
- *      (on a DRAMless system) we are located.
- *       For now, we assume a perfect world which means
- *      we are located at the base of DRAM (physical 0).
- */
-
-/*
- * Search TLB for entry that we are currently using.
- * Invalidate all entries but the one we are using.
- */
-       /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
-       mfspr   r3,SPRN_PID                     /* Get PID */
-       mfmsr   r4                              /* Get MSR */
-       andi.   r4,r4,MSR_IS@l                  /* TS=1? */
-       beq     wmmucr                          /* If not, leave STS=0 */
-       oris    r3,r3,PPC44x_MMUCR_STS@h        /* Set STS=1 */
-wmmucr:        mtspr   SPRN_MMUCR,r3                   /* Put MMUCR */
-       sync
-
-       bl      invstr                          /* Find our address */
-invstr:        mflr    r5                              /* Make it accessible */
-       tlbsx   r23,0,r5                        /* Find entry we are in */
-       li      r4,0                            /* Start at TLB entry 0 */
-       li      r3,0                            /* Set PAGEID inval value */
-1:     cmpw    r23,r4                          /* Is this our entry? */
-       beq     skpinv                          /* If so, skip the inval */
-       tlbwe   r3,r4,PPC44x_TLB_PAGEID         /* If not, inval the entry */
-skpinv:        addi    r4,r4,1                         /* Increment */
-       cmpwi   r4,64                           /* Are we done? */
-       bne     1b                              /* If not, repeat */
-       isync                                   /* If so, context change */
-
-/*
- * Configure and load pinned entry into TLB slot 63.
- */
-
-       lis     r3,PAGE_OFFSET@h
-       ori     r3,r3,PAGE_OFFSET@l
-
-       /* Kernel is at the base of RAM */
-       li r4, 0                        /* Load the kernel physical address */
-
-       /* Load the kernel PID = 0 */
-       li      r0,0
-       mtspr   SPRN_PID,r0
-       sync
-
-       /* Initialize MMUCR */
-       li      r5,0
-       mtspr   SPRN_MMUCR,r5
-       sync
-
-       /* pageid fields */
-       clrrwi  r3,r3,10                /* Mask off the effective page number */
-       ori     r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
-
-       /* xlat fields */
-       clrrwi  r4,r4,10                /* Mask off the real page number */
-                                       /* ERPN is 0 for first 4GB page */
-
-       /* attrib fields */
-       /* Added guarded bit to protect against speculative loads/stores */
-       li      r5,0
-       ori     r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
-
-        li      r0,63                    /* TLB slot 63 */
-
-       tlbwe   r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
-       tlbwe   r4,r0,PPC44x_TLB_XLAT   /* Load the translation fields */
-       tlbwe   r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
-
-       /* Force context change */
-       mfmsr   r0
-       mtspr   SPRN_SRR1, r0
-       lis     r0,3f@h
-       ori     r0,r0,3f@l
-       mtspr   SPRN_SRR0,r0
-       sync
-       rfi
-
-       /* If necessary, invalidate original entry we used */
-3:     cmpwi   r23,63
-       beq     4f
-       li      r6,0
-       tlbwe   r6,r23,PPC44x_TLB_PAGEID
-       isync
-
-4:
-#ifdef CONFIG_PPC_EARLY_DEBUG_44x
-       /* Add UART mapping for early debug. */
-
-       /* pageid fields */
-       lis     r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
-       ori     r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
-
-       /* xlat fields */
-       lis     r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
-       ori     r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
-
-       /* attrib fields */
-       li      r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
-        li      r0,62                    /* TLB slot 0 */
-
-       tlbwe   r3,r0,PPC44x_TLB_PAGEID
-       tlbwe   r4,r0,PPC44x_TLB_XLAT
-       tlbwe   r5,r0,PPC44x_TLB_ATTRIB
-
-       /* Force context change */
-       isync
-#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
-
-       /* Establish the interrupt vector offsets */
-       SET_IVOR(0,  CriticalInput);
-       SET_IVOR(1,  MachineCheck);
-       SET_IVOR(2,  DataStorage);
-       SET_IVOR(3,  InstructionStorage);
-       SET_IVOR(4,  ExternalInput);
-       SET_IVOR(5,  Alignment);
-       SET_IVOR(6,  Program);
-       SET_IVOR(7,  FloatingPointUnavailable);
-       SET_IVOR(8,  SystemCall);
-       SET_IVOR(9,  AuxillaryProcessorUnavailable);
-       SET_IVOR(10, Decrementer);
-       SET_IVOR(11, FixedIntervalTimer);
-       SET_IVOR(12, WatchdogTimer);
-       SET_IVOR(13, DataTLBError);
-       SET_IVOR(14, InstructionTLBError);
-       SET_IVOR(15, DebugCrit);
-
-       /* Establish the interrupt vector base */
-       lis     r4,interrupt_base@h     /* IVPR only uses the high 16-bits */
-       mtspr   SPRN_IVPR,r4
+       bl      init_cpu_state
 
        /*
         * This is where the main kernel code starts.
@@ -349,7 +192,7 @@ interrupt_base:
 #endif
 
        /* Data TLB Error Interrupt */
-       START_EXCEPTION(DataTLBError)
+       START_EXCEPTION(DataTLBError44x)
        mtspr   SPRN_SPRG_WSCRATCH0, r10                /* Save some working registers */
        mtspr   SPRN_SPRG_WSCRATCH1, r11
        mtspr   SPRN_SPRG_WSCRATCH2, r12
@@ -440,7 +283,7 @@ tlb_44x_patch_hwater_D:
        mfspr   r10,SPRN_DEAR
 
         /* Jump to common tlb load */
-       b       finish_tlb_load
+       b       finish_tlb_load_44x
 
 2:
        /* The bailout.  Restore registers to pre-exception conditions
@@ -460,7 +303,7 @@ tlb_44x_patch_hwater_D:
         * information from different registers and bailout
         * to a different point.
         */
-       START_EXCEPTION(InstructionTLBError)
+       START_EXCEPTION(InstructionTLBError44x)
        mtspr   SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
        mtspr   SPRN_SPRG_WSCRATCH1, r11
        mtspr   SPRN_SPRG_WSCRATCH2, r12
@@ -536,7 +379,7 @@ tlb_44x_patch_hwater_I:
        mfspr   r10,SPRN_SRR0
 
        /* Jump to common TLB load point */
-       b       finish_tlb_load
+       b       finish_tlb_load_44x
 
 2:
        /* The bailout.  Restore registers to pre-exception conditions
@@ -550,15 +393,7 @@ tlb_44x_patch_hwater_I:
        mfspr   r10, SPRN_SPRG_RSCRATCH0
        b       InstructionStorage
 
-       /* Debug Interrupt */
-       DEBUG_CRIT_EXCEPTION
-
-/*
- * Local functions
-  */
-
 /*
-
  * Both the instruction and data TLB miss get to this
  * point to load the TLB.
  *     r10 - EA of fault
@@ -568,7 +403,7 @@ tlb_44x_patch_hwater_I:
  *     MMUCR - loaded with proper value when we get here
  *     Upon exit, we reload everything and RFI.
  */
-finish_tlb_load:
+finish_tlb_load_44x:
        /* Combine RPN & ERPN and write WS 0 */
        rlwimi  r11,r12,0,0,31-PAGE_SHIFT
        tlbwe   r11,r13,PPC44x_TLB_XLAT
@@ -601,73 +436,722 @@ finish_tlb_load:
        mfspr   r10, SPRN_SPRG_RSCRATCH0
        rfi                                     /* Force context change */
 
-/*
- * Global functions
+/* TLB error interrupts for 476
  */
+#ifdef CONFIG_PPC_47x
+       START_EXCEPTION(DataTLBError47x)
+       mtspr   SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
+       mtspr   SPRN_SPRG_WSCRATCH1,r11
+       mtspr   SPRN_SPRG_WSCRATCH2,r12
+       mtspr   SPRN_SPRG_WSCRATCH3,r13
+       mfcr    r11
+       mtspr   SPRN_SPRG_WSCRATCH4,r11
+       mfspr   r10,SPRN_DEAR           /* Get faulting address */
 
-/*
- * Adjust the machine check IVOR on 440A cores
- */
-_GLOBAL(__fixup_440A_mcheck)
-       li      r3,MachineCheckA@l
-       mtspr   SPRN_IVOR1,r3
-       sync
-       blr
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       lis     r11,PAGE_OFFSET@h
+       cmplw   cr0,r10,r11
+       blt+    3f
+       lis     r11,swapper_pg_dir@h
+       ori     r11,r11, swapper_pg_dir@l
+       li      r12,0                   /* MMUCR = 0 */
+       b       4f
 
-/*
- * extern void giveup_altivec(struct task_struct *prev)
- *
- * The 44x core does not have an AltiVec unit.
- */
-_GLOBAL(giveup_altivec)
-       blr
+       /* Get the PGD for the current thread and setup MMUCR */
+3:     mfspr   r11,SPRN_SPRG3
+       lwz     r11,PGDIR(r11)
+       mfspr   r12,SPRN_PID            /* Get PID */
+4:     mtspr   SPRN_MMUCR,r12          /* Set MMUCR */
 
-/*
- * extern void giveup_fpu(struct task_struct *prev)
- *
- * The 44x core does not have an FPU.
- */
-#ifndef CONFIG_PPC_FPU
-_GLOBAL(giveup_fpu)
-       blr
+       /* Mask of required permission bits. Note that while we
+        * do copy ESR:ST to _PAGE_RW position as trying to write
+        * to an RO page is pretty common, we don't do it with
+        * _PAGE_DIRTY. We could do it, but it's a fairly rare
+        * event so I'd rather take the overhead when it happens
+        * rather than adding an instruction here. We should measure
+        * whether the whole thing is worth it in the first place
+        * as we could avoid loading SPRN_ESR completely in the first
+        * place...
+        *
+        * TODO: Is it worth doing that mfspr & rlwimi in the first
+        *       place or can we save a couple of instructions here ?
+        */
+       mfspr   r12,SPRN_ESR
+       li      r13,_PAGE_PRESENT|_PAGE_ACCESSED
+       rlwimi  r13,r12,10,30,30
+
+       /* Load the PTE */
+       /* Compute pgdir/pmd offset */
+       rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+       lwzx    r11,r12,r11             /* Get pgd/pmd entry */
+
+       /* Word 0 is EPN,V,TS,DSIZ */
+       li      r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+       rlwimi  r10,r12,0,32-PAGE_SHIFT,31      /* Insert valid and page size*/
+       li      r12,0
+       tlbwe   r10,r12,0
+
+       /* XXX can we do better ? Need to make sure tlbwe has established
+        * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+       isync
 #endif
 
-_GLOBAL(set_context)
+       rlwinm. r12,r11,0,0,20          /* Extract pt base address */
+       /* Compute pte address */
+       rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+       beq     2f                      /* Bail if no table */
+       lwz     r11,0(r12)              /* Get high word of pte entry */
 
-#ifdef CONFIG_BDI_SWITCH
-       /* Context switch the PTE pointer for the Abatron BDI2000.
-        * The PGDIR is the second parameter.
+       /* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+        * bottom of r12 to create a data dependency... We can also use r10
+        * as destination nowadays
         */
-       lis     r5, abatron_pteptrs@h
-       ori     r5, r5, abatron_pteptrs@l
-       stw     r4, 0x4(r5)
+#ifdef CONFIG_SMP
+       lwsync
 #endif
-       mtspr   SPRN_PID,r3
-       isync                   /* Force context change */
-       blr
+       lwz     r12,4(r12)              /* Get low word of pte entry */
 
-/*
- * We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
-       .data
-       .align  PAGE_SHIFT
-       .globl  sdata
-sdata:
-       .globl  empty_zero_page
-empty_zero_page:
-       .space  PAGE_SIZE
+       andc.   r13,r13,r12             /* Check permission */
 
-/*
- * To support >32-bit physical addresses, we use an 8KB pgdir.
- */
-       .globl  swapper_pg_dir
-swapper_pg_dir:
-       .space  PGD_TABLE_SIZE
+        /* Jump to common tlb load */
+       beq     finish_tlb_load_47x
 
-/*
- * Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
-       .space  8
+2:     /* The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+       mfspr   r11,SPRN_SPRG_RSCRATCH4
+       mtcr    r11
+       mfspr   r13,SPRN_SPRG_RSCRATCH3
+       mfspr   r12,SPRN_SPRG_RSCRATCH2
+       mfspr   r11,SPRN_SPRG_RSCRATCH1
+       mfspr   r10,SPRN_SPRG_RSCRATCH0
+       b       DataStorage
+
+       /* Instruction TLB Error Interrupt */
+       /*
+        * Nearly the same as above, except we get our
+        * information from different registers and bailout
+        * to a different point.
+        */
+       START_EXCEPTION(InstructionTLBError47x)
+       mtspr   SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
+       mtspr   SPRN_SPRG_WSCRATCH1,r11
+       mtspr   SPRN_SPRG_WSCRATCH2,r12
+       mtspr   SPRN_SPRG_WSCRATCH3,r13
+       mfcr    r11
+       mtspr   SPRN_SPRG_WSCRATCH4,r11
+       mfspr   r10,SPRN_SRR0           /* Get faulting address */
+
+       /* If we are faulting a kernel address, we have to use the
+        * kernel page tables.
+        */
+       lis     r11,PAGE_OFFSET@h
+       cmplw   cr0,r10,r11
+       blt+    3f
+       lis     r11,swapper_pg_dir@h
+       ori     r11,r11, swapper_pg_dir@l
+       li      r12,0                   /* MMUCR = 0 */
+       b       4f
+
+       /* Get the PGD for the current thread and setup MMUCR */
+3:     mfspr   r11,SPRN_SPRG_THREAD
+       lwz     r11,PGDIR(r11)
+       mfspr   r12,SPRN_PID            /* Get PID */
+4:     mtspr   SPRN_MMUCR,r12          /* Set MMUCR */
+
+       /* Make up the required permissions */
+       li      r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+
+       /* Load PTE */
+       /* Compute pgdir/pmd offset */
+       rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+       lwzx    r11,r12,r11             /* Get pgd/pmd entry */
+
+       /* Word 0 is EPN,V,TS,DSIZ */
+       li      r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+       rlwimi  r10,r12,0,32-PAGE_SHIFT,31      /* Insert valid and page size*/
+       li      r12,0
+       tlbwe   r10,r12,0
+
+       /* XXX can we do better ? Need to make sure tlbwe has established
+        * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+       isync
+#endif
+
+       rlwinm. r12,r11,0,0,20          /* Extract pt base address */
+       /* Compute pte address */
+       rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+       beq     2f                      /* Bail if no table */
+
+       lwz     r11,0(r12)              /* Get high word of pte entry */
+       /* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+        * bottom of r12 to create a data dependency... We can also use r10
+        * as destination nowadays
+        */
+#ifdef CONFIG_SMP
+       lwsync
+#endif
+       lwz     r12,4(r12)              /* Get low word of pte entry */
+
+       andc.   r13,r13,r12             /* Check permission */
+
+       /* Jump to common TLB load point */
+       beq     finish_tlb_load_47x
+
+2:     /* The bailout.  Restore registers to pre-exception conditions
+        * and call the heavyweights to help us out.
+        */
+       mfspr   r11, SPRN_SPRG_RSCRATCH4
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG_RSCRATCH3
+       mfspr   r12, SPRN_SPRG_RSCRATCH2
+       mfspr   r11, SPRN_SPRG_RSCRATCH1
+       mfspr   r10, SPRN_SPRG_RSCRATCH0
+       b       InstructionStorage
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ *     r10 - free to use
+ *     r11 - PTE high word value
+ *     r12 - PTE low word value
+ *      r13 - free to use
+ *     MMUCR - loaded with proper value when we get here
+ *     Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load_47x:
+       /* Combine RPN & ERPN and write WS 1 */
+       rlwimi  r11,r12,0,0,31-PAGE_SHIFT
+       tlbwe   r11,r13,1
+
+       /* And make up word 2 */
+       li      r10,0xf85                       /* Mask to apply from PTE */
+       rlwimi  r10,r12,29,30,30                /* DIRTY -> SW position */
+       and     r11,r12,r10                     /* Mask PTE bits to keep */
+       andi.   r10,r12,_PAGE_USER              /* User page ? */
+       beq     1f                              /* nope, leave U bits empty */
+       rlwimi  r11,r11,3,26,28                 /* yes, copy S bits to U */
+1:     tlbwe   r11,r13,2
+
+       /* Done...restore registers and get out of here.
+       */
+       mfspr   r11, SPRN_SPRG_RSCRATCH4
+       mtcr    r11
+       mfspr   r13, SPRN_SPRG_RSCRATCH3
+       mfspr   r12, SPRN_SPRG_RSCRATCH2
+       mfspr   r11, SPRN_SPRG_RSCRATCH1
+       mfspr   r10, SPRN_SPRG_RSCRATCH0
+       rfi
+
+#endif /* CONFIG_PPC_47x */
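To summarize the three tlbwe writes the 47x handlers above perform, here is a rough C model (illustration only; the _PAGE_DIRTY-to-SW handling done via the rlwimi on the 0xf85 mask is omitted for brevity):

	u32 w0 = (ea & PPC47x_TLB0_EPN_MASK) | PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE;
	u32 w1 = pte_high;		/* RPN | ERPN, merged by the rlwimi above */
	u32 w2 = pte_low & 0xf85;	/* WIMGE plus supervisor perms taken from the PTE */
	if (pte_low & _PAGE_USER)
		w2 |= (w2 & PPC47x_TLB2_S_RWX) << 3;	/* mirror S perms into the U perms */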
+
+       /* Debug Interrupt */
+       /*
+        * This statement needs to exist at the end of the IVPR
+        * definition just in case you end up taking a debug
+        * exception within another exception.
+        */
+       DEBUG_CRIT_EXCEPTION
+
+/*
+ * Global functions
+ */
+
+/*
+ * Adjust the machine check IVOR on 440A cores
+ */
+_GLOBAL(__fixup_440A_mcheck)
+       li      r3,MachineCheckA@l
+       mtspr   SPRN_IVOR1,r3
+       sync
+       blr
+
+/*
+ * extern void giveup_altivec(struct task_struct *prev)
+ *
+ * The 44x core does not have an AltiVec unit.
+ */
+_GLOBAL(giveup_altivec)
+       blr
+
+/*
+ * extern void giveup_fpu(struct task_struct *prev)
+ *
+ * The 44x core does not have an FPU.
+ */
+#ifndef CONFIG_PPC_FPU
+_GLOBAL(giveup_fpu)
+       blr
+#endif
+
+_GLOBAL(set_context)
+
+#ifdef CONFIG_BDI_SWITCH
+       /* Context switch the PTE pointer for the Abatron BDI2000.
+        * The PGDIR is the second parameter.
+        */
+       lis     r5, abatron_pteptrs@h
+       ori     r5, r5, abatron_pteptrs@l
+       stw     r4, 0x4(r5)
+#endif
+       mtspr   SPRN_PID,r3
+       isync                   /* Force context change */
+       blr
+
+/*
+ * Init CPU state. This is called at boot time or for secondary CPUs
+ * to setup initial TLB entries, setup IVORs, etc...
+ *
+ */
+_GLOBAL(init_cpu_state)
+       mflr    r22
+#ifdef CONFIG_PPC_47x
+       /* We use the PVR to differentiate 44x cores from 476 */
+       mfspr   r3,SPRN_PVR
+       srwi    r3,r3,16
+       cmplwi  cr0,r3,PVR_476@h
+       beq     head_start_47x
+       cmplwi  cr0,r3,PVR_476_ISS@h
+       beq     head_start_47x
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * In case the firmware didn't do it, we apply some workarounds
+ * that are good for all 440 core variants here
+ */
+       mfspr   r3,SPRN_CCR0
+       rlwinm  r3,r3,0,0,27    /* disable icache prefetch */
+       isync
+       mtspr   SPRN_CCR0,r3
+       isync
+       sync
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ *
+ * We first invalidate all TLB entries but the one
+ * we are running from.  We then load the KERNELBASE
+ * mappings so we can begin to use kernel addresses
+ * natively and so the interrupt vector locations are
+ * permanently pinned (necessary since Book E
+ * implementations always have translation enabled).
+ *
+ * TODO: Use the known TLB entry we are running from to
+ *      determine which physical region we are located
+ *      in.  This can be used to determine where in RAM
+ *      (on a shared CPU system) or PCI memory space
+ *      (on a DRAMless system) we are located.
+ *       For now, we assume a perfect world which means
+ *      we are located at the base of DRAM (physical 0).
+ */
+
+/*
+ * Search TLB for entry that we are currently using.
+ * Invalidate all entries but the one we are using.
+ */
+       /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+       mfspr   r3,SPRN_PID                     /* Get PID */
+       mfmsr   r4                              /* Get MSR */
+       andi.   r4,r4,MSR_IS@l                  /* TS=1? */
+       beq     wmmucr                          /* If not, leave STS=0 */
+       oris    r3,r3,PPC44x_MMUCR_STS@h        /* Set STS=1 */
+wmmucr:        mtspr   SPRN_MMUCR,r3                   /* Put MMUCR */
+       sync
+
+       bl      invstr                          /* Find our address */
+invstr:        mflr    r5                              /* Make it accessible */
+       tlbsx   r23,0,r5                        /* Find entry we are in */
+       li      r4,0                            /* Start at TLB entry 0 */
+       li      r3,0                            /* Set PAGEID inval value */
+1:     cmpw    r23,r4                          /* Is this our entry? */
+       beq     skpinv                          /* If so, skip the inval */
+       tlbwe   r3,r4,PPC44x_TLB_PAGEID         /* If not, inval the entry */
+skpinv:        addi    r4,r4,1                         /* Increment */
+       cmpwi   r4,64                           /* Are we done? */
+       bne     1b                              /* If not, repeat */
+       isync                                   /* If so, context change */
+
+/*
+ * Configure and load pinned entry into TLB slot 63.
+ */
+
+       lis     r3,PAGE_OFFSET@h
+       ori     r3,r3,PAGE_OFFSET@l
+
+       /* Kernel is at the base of RAM */
+       li r4, 0                        /* Load the kernel physical address */
+
+       /* Load the kernel PID = 0 */
+       li      r0,0
+       mtspr   SPRN_PID,r0
+       sync
+
+       /* Initialize MMUCR */
+       li      r5,0
+       mtspr   SPRN_MMUCR,r5
+       sync
+
+       /* pageid fields */
+       clrrwi  r3,r3,10                /* Mask off the effective page number */
+       ori     r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
+
+       /* xlat fields */
+       clrrwi  r4,r4,10                /* Mask off the real page number */
+                                       /* ERPN is 0 for first 4GB page */
+
+       /* attrib fields */
+       /* Added guarded bit to protect against speculative loads/stores */
+       li      r5,0
+       ori     r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+        li      r0,63                    /* TLB slot 63 */
+
+       tlbwe   r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
+       tlbwe   r4,r0,PPC44x_TLB_XLAT   /* Load the translation fields */
+       tlbwe   r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
+
+       /* Force context change */
+       mfmsr   r0
+       mtspr   SPRN_SRR1, r0
+       lis     r0,3f@h
+       ori     r0,r0,3f@l
+       mtspr   SPRN_SRR0,r0
+       sync
+       rfi
+
+       /* If necessary, invalidate original entry we used */
+3:     cmpwi   r23,63
+       beq     4f
+       li      r6,0
+       tlbwe   r6,r23,PPC44x_TLB_PAGEID
+       isync
+
+4:
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+       /* Add UART mapping for early debug. */
+
+       /* pageid fields */
+       lis     r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+       ori     r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
+
+       /* xlat fields */
+       lis     r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+       ori     r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+       /* attrib fields */
+       li      r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
+        li      r0,62                    /* TLB slot 62 */
+
+       tlbwe   r3,r0,PPC44x_TLB_PAGEID
+       tlbwe   r4,r0,PPC44x_TLB_XLAT
+       tlbwe   r5,r0,PPC44x_TLB_ATTRIB
+
+       /* Force context change */
+       isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+       /* Establish the interrupt vector offsets */
+       SET_IVOR(0,  CriticalInput);
+       SET_IVOR(1,  MachineCheck);
+       SET_IVOR(2,  DataStorage);
+       SET_IVOR(3,  InstructionStorage);
+       SET_IVOR(4,  ExternalInput);
+       SET_IVOR(5,  Alignment);
+       SET_IVOR(6,  Program);
+       SET_IVOR(7,  FloatingPointUnavailable);
+       SET_IVOR(8,  SystemCall);
+       SET_IVOR(9,  AuxillaryProcessorUnavailable);
+       SET_IVOR(10, Decrementer);
+       SET_IVOR(11, FixedIntervalTimer);
+       SET_IVOR(12, WatchdogTimer);
+       SET_IVOR(13, DataTLBError44x);
+       SET_IVOR(14, InstructionTLBError44x);
+       SET_IVOR(15, DebugCrit);
+
+       b       head_start_common
+
+
+#ifdef CONFIG_PPC_47x
+
+#ifdef CONFIG_SMP
+
+/* Entry point for secondary 47x processors */
+_GLOBAL(start_secondary_47x)
+        mr      r24,r3          /* CPU number */
+
+       bl      init_cpu_state
+
+       /* Now we need to bolt the rest of kernel memory which
+        * is done in C code. We must be careful because our task
+        * struct or our stack can (and probably will) be out
+        * of reach of the initial 256M TLB entry, so we use a
+        * small temporary stack in .bss for that. This works
+        * because only one CPU at a time can be in this code
+        */
+       lis     r1,temp_boot_stack@h
+       ori     r1,r1,temp_boot_stack@l
+       addi    r1,r1,1024-STACK_FRAME_OVERHEAD
+       li      r0,0
+       stw     r0,0(r1)
+       bl      mmu_init_secondary
+
+       /* Now we can get our task struct and real stack pointer */
+
+       /* Get current_thread_info and current */
+       lis     r1,secondary_ti@ha
+       lwz     r1,secondary_ti@l(r1)
+       lwz     r2,TI_TASK(r1)
+
+       /* Current stack pointer */
+       addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+       li      r0,0
+       stw     r0,0(r1)
+
+       /* Kernel stack for exception entry in SPRG3 */
+       addi    r4,r2,THREAD    /* init task's THREAD */
+       mtspr   SPRN_SPRG3,r4
+
+       b       start_secondary
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Set up the initial MMU state for 47x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ */
+
+head_start_47x:
+       /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+       mfspr   r3,SPRN_PID                     /* Get PID */
+       mfmsr   r4                              /* Get MSR */
+       andi.   r4,r4,MSR_IS@l                  /* TS=1? */
+       beq     1f                              /* If not, leave STS=0 */
+       oris    r3,r3,PPC47x_MMUCR_STS@h        /* Set STS=1 */
+1:     mtspr   SPRN_MMUCR,r3                   /* Put MMUCR */
+       sync
+
+       /* Find the entry we are running from */
+       bl      1f
+1:     mflr    r23
+       tlbsx   r23,0,r23
+       tlbre   r24,r23,0
+       tlbre   r25,r23,1
+       tlbre   r26,r23,2
+
+/*
+ * Cleanup time
+ */
+
+       /* Initialize MMUCR */
+       li      r5,0
+       mtspr   SPRN_MMUCR,r5
+       sync
+
+clear_all_utlb_entries:
+
+       #; Set initial values.
+
+       addis           r3,0,0x8000
+       addi            r4,0,0
+       addi            r5,0,0
+       b               clear_utlb_entry
+
+       #; Align the loop to speed things up.
+
+       .align          6
+
+clear_utlb_entry:
+
+       tlbwe           r4,r3,0
+       tlbwe           r5,r3,1
+       tlbwe           r5,r3,2
+       addis           r3,r3,0x2000
+       cmpwi           r3,0
+       bne             clear_utlb_entry
+       addis           r3,0,0x8000
+       addis           r4,r4,0x100
+       cmpwi           r4,0
+       bne             clear_utlb_entry
+
+       #; Restore original entry.
+
+       oris    r23,r23,0x8000  /* specify the way */
+       tlbwe           r24,r23,0
+       tlbwe           r25,r23,1
+       tlbwe           r26,r23,2
+
+/*
+ * Configure and load pinned entry into TLB for the kernel core
+ */
+
+       lis     r3,PAGE_OFFSET@h
+       ori     r3,r3,PAGE_OFFSET@l
+
+       /* Kernel is at the base of RAM */
+       li r4, 0                        /* Load the kernel physical address */
+
+       /* Load the kernel PID = 0 */
+       li      r0,0
+       mtspr   SPRN_PID,r0
+       sync
+
+       /* Word 0 */
+       clrrwi  r3,r3,12                /* Mask off the effective page number */
+       ori     r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
+
+       /* Word 1 */
+       clrrwi  r4,r4,12                /* Mask off the real page number */
+                                       /* ERPN is 0 for first 4GB page */
+       /* Word 2 */
+       li      r5,0
+       ori     r5,r5,PPC47x_TLB2_S_RWX
+#ifdef CONFIG_SMP
+       ori     r5,r5,PPC47x_TLB2_M
+#endif
+
+       /* We write to way 0 and bolted 0 */
+       lis     r0,0x8800
+       tlbwe   r3,r0,0
+       tlbwe   r4,r0,1
+       tlbwe   r5,r0,2
+
+/*
+ * Configure SSPCR, ISPCR and USPCR for now to search everything; we can fix
+ * them up later
+ */
+       LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
+       mtspr   SPRN_SSPCR,r3
+       mtspr   SPRN_USPCR,r3
+       LOAD_REG_IMMEDIATE(r3, 0x12345670)
+       mtspr   SPRN_ISPCR,r3
+
+       /* Force context change */
+       mfmsr   r0
+       mtspr   SPRN_SRR1, r0
+       lis     r0,3f@h
+       ori     r0,r0,3f@l
+       mtspr   SPRN_SRR0,r0
+       sync
+       rfi
+
+       /* Invalidate original entry we used */
+3:
+       rlwinm  r24,r24,0,21,19 /* clear the "valid" bit */
+       tlbwe   r24,r23,0
+       addi    r24,0,0
+       tlbwe   r24,r23,1
+       tlbwe   r24,r23,2
+       isync                   /* Clear out the shadow TLB entries */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+       /* Add UART mapping for early debug. */
+
+       /* Word 0 */
+       lis     r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+       ori     r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
+
+       /* Word 1 */
+       lis     r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+       ori     r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+       /* Word 2 */
+       li      r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
+
+       /* Bolted in way 0, bolt slot 5. We -hope- we don't hit the same
+        * congruence class as the kernel; we need to make sure of it at
+        * some point
+        */
+        lis    r0,0x8d00
+       tlbwe   r3,r0,0
+       tlbwe   r4,r0,1
+       tlbwe   r5,r0,2
+
+       /* Force context change */
+       isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+       /* Establish the interrupt vector offsets */
+       SET_IVOR(0,  CriticalInput);
+       SET_IVOR(1,  MachineCheckA);
+       SET_IVOR(2,  DataStorage);
+       SET_IVOR(3,  InstructionStorage);
+       SET_IVOR(4,  ExternalInput);
+       SET_IVOR(5,  Alignment);
+       SET_IVOR(6,  Program);
+       SET_IVOR(7,  FloatingPointUnavailable);
+       SET_IVOR(8,  SystemCall);
+       SET_IVOR(9,  AuxillaryProcessorUnavailable);
+       SET_IVOR(10, Decrementer);
+       SET_IVOR(11, FixedIntervalTimer);
+       SET_IVOR(12, WatchdogTimer);
+       SET_IVOR(13, DataTLBError47x);
+       SET_IVOR(14, InstructionTLBError47x);
+       SET_IVOR(15, DebugCrit);
+
+       /* We configure icbi to invalidate 128 bytes at a time since the
+        * current 32-bit kernel code isn't too happy with icache != dcache
+        * block size
+        */
+       mfspr   r3,SPRN_CCR0
+       oris    r3,r3,0x0020
+       mtspr   SPRN_CCR0,r3
+       isync
+
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * Here we are back to code that is common between 44x and 47x
+ *
+ * We proceed to further kernel initialization and return to the
+ * main kernel entry
+ */
+head_start_common:
+       /* Establish the interrupt vector base */
+       lis     r4,interrupt_base@h     /* IVPR only uses the high 16-bits */
+       mtspr   SPRN_IVPR,r4
+
+       addis   r22,r22,KERNELBASE@h
+       mtlr    r22
+       isync
+       blr
+
+/*
+ * We put a few things here that have to be page-aligned. This stuff
+ * goes at the beginning of the data segment, which is page-aligned.
+ */
+       .data
+       .align  PAGE_SHIFT
+       .globl  sdata
+sdata:
+       .globl  empty_zero_page
+empty_zero_page:
+       .space  PAGE_SIZE
+
+/*
+ * To support >32-bit physical addresses, we use an 8KB pgdir.
+ */
+       .globl  swapper_pg_dir
+swapper_pg_dir:
+       .space  PGD_TABLE_SIZE
+
+/*
+ * Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page tables.
+ */
+abatron_pteptrs:
+       .space  8
+
+#ifdef CONFIG_SMP
+       .align  12
+temp_boot_stack:
+       .space  1024
+#endif /* CONFIG_SMP */
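
A C-level sketch of what the pinned-entry code above composes: the three
44x TLB words for the 256MB kernel mapping. Illustrative only; the
PPC44x_TLB_* names are the ones used in the assembly, the ~0x3ff masking
mirrors the clrrwi ..,10 instructions, and this helper exists nowhere in
the patch.

/* Compose pageid/xlat/attrib words for a pinned 256MB entry. */
static inline void sketch_44x_pinned_words(unsigned long virt,
					   unsigned long phys,
					   unsigned long w[3])
{
	/* pageid: EPN plus the valid bit and 256M page size */
	w[0] = (virt & ~0x3ffUL) | PPC44x_TLB_VALID | PPC44x_TLB_256M;
	/* xlat: RPN; ERPN stays 0 for the first 4GB */
	w[1] = phys & ~0x3ffUL;
	/* attrib: supervisor RWX plus guarded */
	w[2] = PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G;
}
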
index 3ef743f..1f1a04b 100644 (file)
@@ -71,9 +71,6 @@ _ENTRY(_start);
  * in the first level table, but that would require many changes to the
  * Linux page directory/table functions that I don't want to do right now.
  *
- * I used to use SPRG2 for a temporary register in the TLB handler, but it
- * has since been put to other uses.  I now use a hack to save a register
- * and the CCR at memory location 0.....Someday I'll fix this.....
  *     -- Dan
  */
        .globl  __start
@@ -302,8 +299,13 @@ InstructionTLBMiss:
        DO_8xx_CPU6(0x3f80, r3)
        mtspr   SPRN_M_TW, r10  /* Save a couple of working registers */
        mfcr    r10
+#ifdef CONFIG_8xx_CPU6
        stw     r10, 0(r0)
        stw     r11, 4(r0)
+#else
+       mtspr   SPRN_DAR, r10
+       mtspr   SPRN_SPRG2, r11
+#endif
        mfspr   r10, SPRN_SRR0  /* Get effective address of fault */
 #ifdef CONFIG_8xx_CPU15
        addi    r11, r10, 0x1000
@@ -318,12 +320,16 @@ InstructionTLBMiss:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
+#ifdef CONFIG_MODULES
+       /* Only modules will cause ITLB Misses as we always
+        * pin the first 8MB of kernel memory */
        andi.   r11, r10, 0x0800        /* Address >= 0x80000000 */
        beq     3f
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l
        rlwimi  r10, r11, 0, 2, 19
 3:
+#endif
        lwz     r11, 0(r10)     /* Get the level 1 entry */
        rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
        beq     2f              /* If zero, don't try to find a pte */
@@ -339,31 +345,35 @@ InstructionTLBMiss:
        mfspr   r11, SPRN_MD_TWC        /* ....and get the pte address */
        lwz     r10, 0(r11)     /* Get the pte */
 
+#ifdef CONFIG_SWAP
        andi.   r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
        cmpwi   cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
        bne-    cr0, 2f
-
-       /* Clear PP lsb, 0x400 */
-       rlwinm  r10, r10, 0, 22, 20
-
+#endif
        /* The Linux PTE won't go exactly into the MMU TLB.
-        * Software indicator bits 22 and 28 must be clear.
+        * Software indicator bits 21 and 28 must be clear.
         * Software indicator bits 24, 25, 26, and 27 must be
         * set.  All other Linux PTE bits control the behavior
         * of the MMU.
         */
        li      r11, 0x00f0
-       rlwimi  r10, r11, 0, 24, 28     /* Set 24-27, clear 28 */
+       rlwimi  r10, r11, 0, 0x07f8     /* Set 24-27, clear 21-23,28 */
        DO_8xx_CPU6(0x2d80, r3)
        mtspr   SPRN_MI_RPN, r10        /* Update TLB entry */
 
-       mfspr   r10, SPRN_M_TW  /* Restore registers */
+       /* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+       mfspr   r10, SPRN_DAR
+       mtcr    r10
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
+       mfspr   r11, SPRN_SPRG2
+#else
        lwz     r11, 0(r0)
        mtcr    r11
        lwz     r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
        lwz     r3, 8(r0)
 #endif
+       mfspr   r10, SPRN_M_TW
        rfi
 2:
        mfspr   r11, SPRN_SRR1
@@ -373,13 +383,20 @@ InstructionTLBMiss:
        rlwinm  r11, r11, 0, 0xffff
        mtspr   SPRN_SRR1, r11
 
-       mfspr   r10, SPRN_M_TW  /* Restore registers */
+       /* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+       mfspr   r10, SPRN_DAR
+       mtcr    r10
+       li      r11, 0x00f0
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
+       mfspr   r11, SPRN_SPRG2
+#else
        lwz     r11, 0(r0)
        mtcr    r11
        lwz     r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
        lwz     r3, 8(r0)
 #endif
+       mfspr   r10, SPRN_M_TW
        b       InstructionAccess
 
        . = 0x1200
@@ -390,8 +407,13 @@ DataStoreTLBMiss:
        DO_8xx_CPU6(0x3f80, r3)
        mtspr   SPRN_M_TW, r10  /* Save a couple of working registers */
        mfcr    r10
+#ifdef CONFIG_8xx_CPU6
        stw     r10, 0(r0)
        stw     r11, 4(r0)
+#else
+       mtspr   SPRN_DAR, r10
+       mtspr   SPRN_SPRG2, r11
+#endif
        mfspr   r10, SPRN_M_TWB /* Get level 1 table entry address */
 
        /* If we are faulting a kernel address, we have to use the
@@ -438,15 +460,14 @@ DataStoreTLBMiss:
         * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
         * r10 = (r10 & ~PRESENT) | r11;
         */
+#ifdef CONFIG_SWAP
        rlwinm  r11, r10, 32-5, _PAGE_PRESENT
        and     r11, r11, r10
        rlwimi  r10, r11, 0, _PAGE_PRESENT
-
+#endif
        /* Honour kernel RO, User NA */
        /* 0x200 == Extended encoding, bit 22 */
-       /* r11 =  (r10 & _PAGE_USER) >> 2 */
-       rlwinm  r11, r10, 32-2, 0x200
-       or      r10, r11, r10
+       rlwimi  r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */
        /* r11 =  (r10 & _PAGE_RW) >> 1 */
        rlwinm  r11, r10, 32-1, 0x200
        or      r10, r11, r10
@@ -460,18 +481,24 @@ DataStoreTLBMiss:
         * of the MMU.
         */
 2:     li      r11, 0x00f0
-       mtspr   SPRN_DAR,r11    /* Tag DAR */
        rlwimi  r10, r11, 0, 24, 28     /* Set 24-27, clear 28 */
        DO_8xx_CPU6(0x3d80, r3)
        mtspr   SPRN_MD_RPN, r10        /* Update TLB entry */
 
-       mfspr   r10, SPRN_M_TW  /* Restore registers */
+       /* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+       mfspr   r10, SPRN_DAR
+       mtcr    r10
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
+       mfspr   r11, SPRN_SPRG2
+#else
+       mtspr   SPRN_DAR, r11   /* Tag DAR */
        lwz     r11, 0(r0)
        mtcr    r11
        lwz     r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
        lwz     r3, 8(r0)
 #endif
+       mfspr   r10, SPRN_M_TW
        rfi
 
 /* This is an instruction TLB error on the MPC8xx.  This could be due
@@ -683,9 +710,6 @@ start_here:
        tophys(r4,r2)
        addi    r4,r4,THREAD    /* init task's THREAD */
        mtspr   SPRN_SPRG_THREAD,r4
-       li      r3,0
-       /* XXX What is that for ? SPRG2 appears otherwise unused on 8xx */
-       mtspr   SPRN_SPRG2,r3   /* 0 => r1 has kernel sp */
 
        /* stack */
        lis     r1,init_thread_union@ha
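
The TLB-miss rework above leans on the mask form of rlwimi. A C sketch
(not patch code) of what "rlwimi r10, r11, 0, 0x07f8" does with
r11 = 0x00f0, following the rotate-and-insert semantics: set PTE bits
24-27 and clear bits 21-23 and 28.

/* rlwimi rA,rS,0,mask: insert rS under mask, keep the rest of rA. */
static inline unsigned int rlwimi0(unsigned int ra, unsigned int rs,
				   unsigned int mask)
{
	return (ra & ~mask) | (rs & mask);
}

/* pte = rlwimi0(pte, 0x00f0, 0x07f8);  sets 24-27, clears 21-23,28 */
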
index 50504ae..a0bf158 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __HEAD_BOOKE_H__
 #define __HEAD_BOOKE_H__
 
+#include <asm/ptrace.h>        /* for STACK_FRAME_REGS_MARKER */
 /*
  * Macros used for common Book-e exception handling
  */
@@ -48,6 +49,9 @@
        stw     r10,0(r11);                                                  \
        rlwinm  r9,r9,0,14,12;          /* clear MSR_WE (necessary?)       */\
        stw     r0,GPR0(r11);                                                \
+       lis     r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
+       addi    r10, r10, STACK_FRAME_REGS_MARKER@l;                         \
+       stw     r10, 8(r11);                                                 \
        SAVE_4GPRS(3, r11);                                                  \
        SAVE_2GPRS(7, r11)
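
The STACK_FRAME_REGS_MARKER stored at offset 8 above lets a stack walker
recognize an exception frame. A hypothetical check (a sketch, not the
in-tree unwinder), taking offset 8 as the third 32-bit word:

static int is_exception_frame(const unsigned long *sp)
{
	return sp[2] == STACK_FRAME_REGS_MARKER;	/* word at sp + 8 */
}
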
 
index 7255265..edd4a57 100644 (file)
@@ -639,6 +639,13 @@ interrupt_base:
        rlwinm  r12,r12,0,16,1
        mtspr   SPRN_MAS1,r12
 
+       /* Make up the required permissions for kernel code */
+#ifdef CONFIG_PTE_64BIT
+       li      r13,_PAGE_PRESENT | _PAGE_BAP_SX
+       oris    r13,r13,_PAGE_ACCESSED@h
+#else
+       li      r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+#endif
        b       4f
 
        /* Get the PGD for the current thread */
@@ -646,15 +653,15 @@ interrupt_base:
        mfspr   r11,SPRN_SPRG_THREAD
        lwz     r11,PGDIR(r11)
 
-4:
-       /* Make up the required permissions */
+       /* Make up the required permissions for user code */
 #ifdef CONFIG_PTE_64BIT
-       li      r13,_PAGE_PRESENT | _PAGE_EXEC
+       li      r13,_PAGE_PRESENT | _PAGE_BAP_UX
        oris    r13,r13,_PAGE_ACCESSED@h
 #else
        li      r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 #endif
 
+4:
        FIND_PTE
        andc.   r13,r13,r11             /* Check permission */
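
The required-permission word built above is tested against the PTE with
"andc. r13,r13,r11": any required bit absent from the PTE leaves a
nonzero result, which sends the miss to the fault path. In C terms:

/* Nonzero means some required permission bit is missing from the PTE. */
static inline unsigned long perm_missing(unsigned long required,
					 unsigned long pte)
{
	return required & ~pte;
}
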
 
index ec94f90..d583917 100644 (file)
 #define DBG(...)
 
 static int novmerge;
-static int protect4gb = 1;
 
 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
 
-static int __init setup_protect4gb(char *str)
-{
-       if (strcmp(str, "on") == 0)
-               protect4gb = 1;
-       else if (strcmp(str, "off") == 0)
-               protect4gb = 0;
-
-       return 1;
-}
-
 static int __init setup_iommu(char *str)
 {
        if (!strcmp(str, "novmerge"))
@@ -66,7 +55,6 @@ static int __init setup_iommu(char *str)
        return 1;
 }
 
-__setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
 static unsigned long iommu_range_alloc(struct device *dev,
index 066bd31..30817d9 100644 (file)
@@ -284,30 +284,33 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(cpumask_t map)
+void fixup_irqs(const struct cpumask *map)
 {
        struct irq_desc *desc;
        unsigned int irq;
        static int warned;
+       cpumask_var_t mask;
 
-       for_each_irq(irq) {
-               cpumask_t mask;
+       alloc_cpumask_var(&mask, GFP_KERNEL);
 
+       for_each_irq(irq) {
                desc = irq_to_desc(irq);
                if (desc && desc->status & IRQ_PER_CPU)
                        continue;
 
-               cpumask_and(&mask, desc->affinity, &map);
-               if (any_online_cpu(mask) == NR_CPUS) {
+               cpumask_and(mask, desc->affinity, map);
+               if (cpumask_any(mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
-                       mask = map;
+                       cpumask_copy(mask, map);
                }
                if (desc->chip->set_affinity)
-                       desc->chip->set_affinity(irq, &mask);
+                       desc->chip->set_affinity(irq, mask);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }
 
+       free_cpumask_var(mask);
+
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
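
fixup_irqs() now takes a const struct cpumask * and keeps its scratch
mask off-stack, avoiding a large cpumask_t stack frame on high-NR_CPUS
builds. A minimal sketch of the pattern, with the allocation-failure
check that the hunk itself leaves out:

static void mask_sketch(const struct cpumask *affinity,
			const struct cpumask *map)
{
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;				/* off-stack alloc can fail */
	cpumask_and(tmp, affinity, map);
	if (cpumask_any(tmp) >= nr_cpu_ids)	/* empty intersection */
		cpumask_copy(tmp, map);
	/* ... apply tmp ... */
	free_cpumask_var(tmp);
}
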
index 41bada0..82a7b22 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/smp.h>
 #include <linux/signal.h>
 #include <linux/ptrace.h>
+#include <linux/kdebug.h>
 #include <asm/current.h>
 #include <asm/processor.h>
 #include <asm/machdep.h>
@@ -115,7 +116,8 @@ void kgdb_roundup_cpus(unsigned long flags)
 /* KGDB functions to use existing PowerPC64 hooks. */
 static int kgdb_debugger(struct pt_regs *regs)
 {
-       return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+       return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
+                                     DIE_OOPS, regs);
 }
 
 static int kgdb_handle_breakpoint(struct pt_regs *regs)
@@ -123,7 +125,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
        if (user_mode(regs))
                return 0;
 
-       if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+       if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
                return 0;
 
        if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
@@ -309,6 +311,11 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
               (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+       regs->nip = pc;
+}
+
 /*
  * This function does PowerPC specific processing for interfacing to gdb.
  */
index b36f074..c533525 100644 (file)
@@ -114,6 +114,9 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
        regs->msr &= ~MSR_CE;
        mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+#ifdef CONFIG_PPC_47x
+       isync();
+#endif
 #endif
 
        /*
index c2c70e1..50362b6 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/vio.h>
 #include <asm/mmu.h>
 
-#define MODULE_VERS "1.8"
+#define MODULE_VERS "1.9"
 #define MODULE_NAME "lparcfg"
 
 /* #define LPARCFG_DEBUG */
@@ -487,6 +487,14 @@ static void splpar_dispatch_data(struct seq_file *m)
        seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
 }
 
+static void parse_em_data(struct seq_file *m)
+{
+       unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+       if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+               seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
+}
+
 static int pseries_lparcfg_data(struct seq_file *m, void *v)
 {
        int partition_potential_processors;
@@ -541,6 +549,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 
        seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 
+       parse_em_data(m);
+
        return 0;
 }
 
index 040bd1d..26f9900 100644 (file)
@@ -155,33 +155,38 @@ void kexec_copy_flush(struct kimage *image)
 
 #ifdef CONFIG_SMP
 
-/* FIXME: we should schedule this function to be called on all cpus based
- * on calling the interrupts, but we would like to call it off irq level
- * so that the interrupt controller is clean.
- */
+static int kexec_all_irq_disabled = 0;
+
 static void kexec_smp_down(void *arg)
 {
+       local_irq_disable();
+       mb(); /* make sure our irqs are disabled before we say they are */
+       get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+       while (kexec_all_irq_disabled == 0)
+               cpu_relax();
+       mb(); /* make sure all irqs are disabled before this */
+       /*
+        * Now that every CPU has IRQs off, we can clear out any pending
+        * IPIs and be sure that no more will come in after this.
+        */
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(0, 1);
 
-       local_irq_disable();
        kexec_smp_wait();
        /* NOTREACHED */
 }
 
-static void kexec_prepare_cpus(void)
+static void kexec_prepare_cpus_wait(int wait_state)
 {
        int my_cpu, i, notified=-1;
 
-       smp_call_function(kexec_smp_down, NULL, /* wait */0);
        my_cpu = get_cpu();
-
-       /* check the others cpus are now down (via paca hw cpu id == -1) */
+       /* Make sure each CPU has at least made it to the state we need */
        for (i=0; i < NR_CPUS; i++) {
                if (i == my_cpu)
                        continue;
 
-               while (paca[i].hw_cpu_id != -1) {
+               while (paca[i].kexec_state < wait_state) {
                        barrier();
                        if (!cpu_possible(i)) {
                                printk("kexec: cpu %d hw_cpu_id %d is not"
@@ -201,20 +206,35 @@ static void kexec_prepare_cpus(void)
                        }
                        if (i != notified) {
                                printk( "kexec: waiting for cpu %d (physical"
-                                               " %d) to go down\n",
-                                               i, paca[i].hw_cpu_id);
+                                               " %d) to enter %i state\n",
+                                       i, paca[i].hw_cpu_id, wait_state);
                                notified = i;
                        }
                }
        }
+       mb();
+}
+
+static void kexec_prepare_cpus(void)
+{
+
+       smp_call_function(kexec_smp_down, NULL, /* wait */0);
+       local_irq_disable();
+       mb(); /* make sure IRQs are disabled before we say they are */
+       get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+
+       kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
+       /* we are sure every CPU has IRQs off at this point */
+       kexec_all_irq_disabled = 1;
 
        /* after we tell the others to go down */
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(0, 0);
 
-       put_cpu();
+       /* Before removing MMU mappings, make sure all CPUs have entered real mode */
+       kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
 
-       local_irq_disable();
+       put_cpu();
 }
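
The rewritten shutdown is a two-stage rendezvous on the new per-paca
kexec_state field: every CPU first reaches KEXEC_STATE_IRQS_OFF, then
KEXEC_STATE_REAL_MODE, with the master spinning on each stage. The wait
loop above, reduced to its shape (illustrative):

static void wait_for_state(int my_cpu, int wait_state)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (i == my_cpu)
			continue;
		while (paca[i].kexec_state < wait_state)
			barrier();	/* re-poll until the CPU advances */
	}
	mb();	/* all states observed before the caller proceeds */
}
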
 
 #else /* ! SMP */
index 8649f53..8043d1b 100644 (file)
@@ -441,7 +441,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
        addi    r3,r3,L1_CACHE_BYTES
        bdnz    0b
        sync
-#ifndef CONFIG_44x
+#ifdef CONFIG_44x
        /* We don't flush the icache on 44x. Those have a virtual icache
         * and we don't have access to the virtual address here (it's
         * not the page vaddr but where it's mapped in user space). The
@@ -449,15 +449,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
         * a change in the address space occurs, before returning to
         * user space
         */
+BEGIN_MMU_FTR_SECTION
+       blr
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
+#endif /* CONFIG_44x */
        mtctr   r4
 1:     icbi    0,r6
        addi    r6,r6,L1_CACHE_BYTES
        bdnz    1b
        sync
        isync
-#endif /* CONFIG_44x */
        blr
 
+#ifndef CONFIG_BOOKE
 /*
  * Flush a particular page from the data cache to RAM, identified
  * by its physical address.  We turn off the MMU so we can just use
@@ -490,6 +494,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
        mtmsr   r10                             /* restore DR */
        isync
        blr
+#endif /* CONFIG_BOOKE */
 
 /*
  * Clear pages using the dcbz instruction, which doesn't cause any
index a5cf9c1..a2b18df 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
+#include <asm/kexec.h>
 
        .text
 
@@ -471,6 +472,10 @@ _GLOBAL(kexec_wait)
 1:     mflr    r5
        addi    r5,r5,kexec_flag-1b
 
+       li      r4,KEXEC_STATE_REAL_MODE
+       stb     r4,PACAKEXECSTATE(r13)
+       SYNC
+
 99:    HMT_LOW
 #ifdef CONFIG_KEXEC            /* use no memory without kexec */
        lwz     r4,0(r5)
@@ -494,14 +499,11 @@ kexec_flag:
  * note: this is a terminal routine, it does not save lr
  *
  * get phys id from paca
- * set paca id to -1 to say we got here
  * switch to real mode
  * join other cpus in kexec_wait(phys_id)
  */
 _GLOBAL(kexec_smp_wait)
        lhz     r3,PACAHWCPUID(r13)
-       li      r4,-1
-       sth     r4,PACAHWCPUID(r13)     /* let others know we left */
        bl      real_mode
        b       .kexec_wait
 
index 0c40c6f..f88acf0 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/pgtable.h>
 #include <asm/iseries/lpar_map.h>
 #include <asm/iseries/hv_types.h>
+#include <asm/kexec.h>
 
 /* This symbol is provided by the linker - let it fill in the paca
  * field correctly */
@@ -97,6 +98,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
        new_paca->kernelbase = (unsigned long) _stext;
        new_paca->kernel_msr = MSR_KERNEL;
        new_paca->hw_cpu_id = 0xffff;
+       new_paca->kexec_state = KEXEC_STATE_NONE;
        new_paca->__current = &init_task;
 #ifdef CONFIG_PPC_STD_MMU_64
        new_paca->slb_shadow_ptr = &slb_shadow[cpu];
index cd11d5c..6ddb795 100644 (file)
@@ -310,6 +310,8 @@ static void __devinit __of_scan_bus(struct device_node *node,
        /* Scan direct children */
        for_each_child_of_node(node, child) {
                pr_debug("  * %s\n", child->full_name);
+               if (!of_device_is_available(child))
+                       continue;
                reg = of_get_property(child, "reg", &reglen);
                if (reg == NULL || reglen < 20)
                        continue;
index e4d71ce..9d255b4 100644 (file)
@@ -371,6 +371,9 @@ int set_dabr(unsigned long dabr)
        /* XXX should we have a CPU_FTR_HAS_DABR ? */
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
        mtspr(SPRN_DAC1, dabr);
+#ifdef CONFIG_PPC_47x
+       isync();
+#endif
 #elif defined(CONFIG_PPC_BOOK3S)
        mtspr(SPRN_DABR, dabr);
 #endif
index ed2cfe1..7a0c019 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
+/*
+ * The parameter save area on the stack is used to store arguments being passed
+ * to callee function and is located at fixed offset from stack pointer.
+ */
+#ifdef CONFIG_PPC32
+#define PARAMETER_SAVE_AREA_OFFSET     24  /* bytes */
+#else /* CONFIG_PPC32 */
+#define PARAMETER_SAVE_AREA_OFFSET     48  /* bytes */
+#endif
+
+struct pt_regs_offset {
+       const char *name;
+       int offset;
+};
+
+#define STR(s) #s                      /* convert to string */
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define GPR_OFFSET_NAME(num)   \
+       {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+       GPR_OFFSET_NAME(0),
+       GPR_OFFSET_NAME(1),
+       GPR_OFFSET_NAME(2),
+       GPR_OFFSET_NAME(3),
+       GPR_OFFSET_NAME(4),
+       GPR_OFFSET_NAME(5),
+       GPR_OFFSET_NAME(6),
+       GPR_OFFSET_NAME(7),
+       GPR_OFFSET_NAME(8),
+       GPR_OFFSET_NAME(9),
+       GPR_OFFSET_NAME(10),
+       GPR_OFFSET_NAME(11),
+       GPR_OFFSET_NAME(12),
+       GPR_OFFSET_NAME(13),
+       GPR_OFFSET_NAME(14),
+       GPR_OFFSET_NAME(15),
+       GPR_OFFSET_NAME(16),
+       GPR_OFFSET_NAME(17),
+       GPR_OFFSET_NAME(18),
+       GPR_OFFSET_NAME(19),
+       GPR_OFFSET_NAME(20),
+       GPR_OFFSET_NAME(21),
+       GPR_OFFSET_NAME(22),
+       GPR_OFFSET_NAME(23),
+       GPR_OFFSET_NAME(24),
+       GPR_OFFSET_NAME(25),
+       GPR_OFFSET_NAME(26),
+       GPR_OFFSET_NAME(27),
+       GPR_OFFSET_NAME(28),
+       GPR_OFFSET_NAME(29),
+       GPR_OFFSET_NAME(30),
+       GPR_OFFSET_NAME(31),
+       REG_OFFSET_NAME(nip),
+       REG_OFFSET_NAME(msr),
+       REG_OFFSET_NAME(ctr),
+       REG_OFFSET_NAME(link),
+       REG_OFFSET_NAME(xer),
+       REG_OFFSET_NAME(ccr),
+#ifdef CONFIG_PPC64
+       REG_OFFSET_NAME(softe),
+#else
+       REG_OFFSET_NAME(mq),
+#endif
+       REG_OFFSET_NAME(trap),
+       REG_OFFSET_NAME(dar),
+       REG_OFFSET_NAME(dsisr),
+       REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:      the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+       const struct pt_regs_offset *roff;
+       for (roff = regoffset_table; roff->name != NULL; roff++)
+               if (!strcmp(roff->name, name))
+                       return roff->offset;
+       return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset:    the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+       const struct pt_regs_offset *roff;
+       for (roff = regoffset_table; roff->name != NULL; roff++)
+               if (roff->offset == offset)
+                       return roff->name;
+       return NULL;
+}
+
 /*
  * does not yet catch signals sent when the child dies.
  * in exit.c or in signal.c.
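
How a consumer (kprobes or tracing code, say) would use the two lookups
added above; the calls are from the patch, the example values are only
illustrations:

static void regs_lookup_example(void)
{
	int off = regs_query_register_offset("gpr3");
	const char *name =
		regs_query_register_name(offsetof(struct pt_regs, nip));

	/* off == offsetof(struct pt_regs, gpr[3]) and name == "nip" */
}
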
index 7436784..0e1ec6f 100644 (file)
@@ -691,10 +691,14 @@ void rtas_os_term(char *str)
 {
        int status;
 
-       if (panic_timeout)
-               return;
-
-       if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term"))
+       /*
+        * Firmware with the ibm,extended-os-term property is guaranteed
+        * to always return from an ibm,os-term call. Earlier versions without
+        * this property may terminate the partition which we want to avoid
+        * since it interferes with panic_timeout.
+        */
+       if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
+           RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
                return;
 
        snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
@@ -705,8 +709,7 @@ void rtas_os_term(char *str)
        } while (rtas_busy_delay(status));
 
        if (status != 0)
-               printk(KERN_EMERG "ibm,os-term call failed %d\n",
-                              status);
+               printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
 }
 
 static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
index 4190eae..638883e 100644 (file)
@@ -411,9 +411,9 @@ static void rtas_event_scan(struct work_struct *w)
 
        get_online_cpus();
 
-       cpu = next_cpu(smp_processor_id(), cpu_online_map);
-       if (cpu == NR_CPUS) {
-               cpu = first_cpu(cpu_online_map);
+       cpu = cpumask_next(smp_processor_id(), cpu_online_mask);
+        if (cpu >= nr_cpu_ids) {
+               cpu = cpumask_first(cpu_online_mask);
 
                if (first_pass) {
                        first_pass = 0;
@@ -466,8 +466,8 @@ static void start_event_scan(void)
        /* Retrieve errors from nvram, if any */
        retreive_nvram_error_log();
 
-       schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work,
-                                event_scan_delay);
+       schedule_delayed_work_on(cpumask_first(cpu_online_mask),
+                                &event_scan_work, event_scan_delay);
 }
 
 static int __init rtas_init(void)
@@ -490,6 +490,12 @@ static int __init rtas_init(void)
                return -ENODEV;
        }
 
+       if (!rtas_event_scan_rate) {
+               /* Broken firmware: take a rate of zero to mean don't scan */
+               printk(KERN_DEBUG "rtasd: scan rate is 0, not scanning\n");
+               return 0;
+       }
+
        /* Make room for the sequence number */
        rtas_error_log_max = rtas_get_error_log_max();
        rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);
index 48f0a00..5e4d852 100644 (file)
@@ -161,45 +161,44 @@ extern u32 cpu_temp_both(unsigned long cpu);
 DEFINE_PER_CPU(unsigned int, cpu_pvr);
 #endif
 
-static int show_cpuinfo(struct seq_file *m, void *v)
+static void show_cpuinfo_summary(struct seq_file *m)
 {
-       unsigned long cpu_id = (unsigned long)v - 1;
-       unsigned int pvr;
-       unsigned short maj;
-       unsigned short min;
-
-       if (cpu_id == NR_CPUS) {
-               struct device_node *root;
-               const char *model = NULL;
+       struct device_node *root;
+       const char *model = NULL;
 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
-               unsigned long bogosum = 0;
-               int i;
-               for_each_online_cpu(i)
-                       bogosum += loops_per_jiffy;
-               seq_printf(m, "total bogomips\t: %lu.%02lu\n",
-                          bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
+       unsigned long bogosum = 0;
+       int i;
+       for_each_online_cpu(i)
+               bogosum += loops_per_jiffy;
+       seq_printf(m, "total bogomips\t: %lu.%02lu\n",
+                  bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
 #endif /* CONFIG_SMP && CONFIG_PPC32 */
-               seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
-               if (ppc_md.name)
-                       seq_printf(m, "platform\t: %s\n", ppc_md.name);
-               root = of_find_node_by_path("/");
-               if (root)
-                       model = of_get_property(root, "model", NULL);
-               if (model)
-                       seq_printf(m, "model\t\t: %s\n", model);
-               of_node_put(root);
-
-               if (ppc_md.show_cpuinfo != NULL)
-                       ppc_md.show_cpuinfo(m);
+       seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
+       if (ppc_md.name)
+               seq_printf(m, "platform\t: %s\n", ppc_md.name);
+       root = of_find_node_by_path("/");
+       if (root)
+               model = of_get_property(root, "model", NULL);
+       if (model)
+               seq_printf(m, "model\t\t: %s\n", model);
+       of_node_put(root);
+
+       if (ppc_md.show_cpuinfo != NULL)
+               ppc_md.show_cpuinfo(m);
 
 #ifdef CONFIG_PPC32
-               /* Display the amount of memory */
-               seq_printf(m, "Memory\t\t: %d MB\n",
-                          (unsigned int)(total_memory / (1024 * 1024)));
+       /* Display the amount of memory */
+       seq_printf(m, "Memory\t\t: %d MB\n",
+                  (unsigned int)(total_memory / (1024 * 1024)));
 #endif
+}
 
-               return 0;
-       }
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+       unsigned long cpu_id = (unsigned long)v - 1;
+       unsigned int pvr;
+       unsigned short maj;
+       unsigned short min;
 
        /* We only show online cpus: disable preempt (overzealous, I
         * know) to prevent cpu going down. */
@@ -308,19 +307,28 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #endif
 
        preempt_enable();
+
+       /* If this is the last cpu, print the summary */
+       if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
+               show_cpuinfo_summary(m);
+
        return 0;
 }
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-       unsigned long i = *pos;
-
-       return i <= NR_CPUS ? (void *)(i + 1) : NULL;
+       if (*pos == 0)  /* just in case, cpu 0 is not the first */
+               *pos = cpumask_first(cpu_online_mask);
+       else
+               *pos = cpumask_next(*pos - 1, cpu_online_mask);
+       if ((*pos) < nr_cpu_ids)
+               return (void *)(unsigned long)(*pos + 1);
+       return NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       ++*pos;
+       (*pos)++;
        return c_start(m, pos);
 }
 
@@ -386,14 +394,14 @@ static void __init cpu_init_thread_core_maps(int tpc)
 
 /**
  * setup_cpu_maps - initialize the following cpu maps:
- *                  cpu_possible_map
- *                  cpu_present_map
+ *                  cpu_possible_mask
+ *                  cpu_present_mask
  *
  * Having the possible map set up early allows us to restrict allocations
  * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
  *
  * We do not initialize the online map here; cpus set their own bits in
- * cpu_online_map as they come up.
+ * cpu_online_mask as they come up.
  *
  * This function is valid only for Open Firmware systems.  finish_device_tree
  * must be called before using this.
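
The /proc/cpuinfo rework relies on the cpumask iterator convention that
cpumask_next() returns a value >= nr_cpu_ids once past the last set bit;
that is how show_cpuinfo() knows it has just printed the last online CPU
and should append the summary. The test, isolated:

/* Mirrors the "last cpu" check in show_cpuinfo() above. */
static inline int is_last_online_cpu(unsigned long cpu)
{
	return cpumask_next(cpu, cpu_online_mask) >= nr_cpu_ids;
}
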
index 9143891..f3fb5a7 100644 (file)
@@ -424,9 +424,18 @@ void __init setup_system(void)
        DBG(" <- setup_system()\n");
 }
 
+static u64 slb0_limit(void)
+{
+       if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+               return 1UL << SID_SHIFT_1T;
+       }
+       return 1UL << SID_SHIFT;
+}
+
 #ifdef CONFIG_IRQSTACKS
 static void __init irqstack_early_init(void)
 {
+       u64 limit = slb0_limit();
        unsigned int i;
 
        /*
@@ -436,10 +445,10 @@ static void __init irqstack_early_init(void)
        for_each_possible_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
                        __va(lmb_alloc_base(THREAD_SIZE,
-                                           THREAD_SIZE, 0x10000000));
+                                           THREAD_SIZE, limit));
                hardirq_ctx[i] = (struct thread_info *)
                        __va(lmb_alloc_base(THREAD_SIZE,
-                                           THREAD_SIZE, 0x10000000));
+                                           THREAD_SIZE, limit));
        }
 }
 #else
@@ -470,7 +479,7 @@ static void __init exc_lvl_early_init(void)
  */
 static void __init emergency_stack_init(void)
 {
-       unsigned long limit;
+       u64 limit;
        unsigned int i;
 
        /*
@@ -482,7 +491,7 @@ static void __init emergency_stack_init(void)
         * bringup, we need to get at them in real mode. This means they
         * must also be within the RMO region.
         */
-       limit = min(0x10000000ULL, lmb.rmo_size);
+       limit = min(slb0_limit(), lmb.rmo_size);
 
        for_each_possible_cpu(i) {
                unsigned long sp;
@@ -573,12 +582,6 @@ void ppc64_boot_msg(unsigned int src, const char *msg)
        printk("[boot]%04x %s\n", src, msg);
 }
 
-void cpu_die(void)
-{
-       if (ppc_md.cpu_die)
-               ppc_md.cpu_die();
-}
-
 #ifdef CONFIG_SMP
 #define PCPU_DYN_SIZE          ()
 
index c2ee144..5c196d1 100644 (file)
@@ -59,8 +59,8 @@
 
 struct thread_info *secondary_ti;
 
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
-DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@@ -271,6 +271,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;
 
+       for_each_possible_cpu(cpu) {
+               zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
+                                       GFP_KERNEL, cpu_to_node(cpu));
+               zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
+                                       GFP_KERNEL, cpu_to_node(cpu));
+       }
+
+       cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+       cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+
        if (smp_ops)
                if (smp_ops->probe)
                        max_cpus = smp_ops->probe();
@@ -289,10 +299,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 void __devinit smp_prepare_boot_cpu(void)
 {
        BUG_ON(smp_processor_id() != boot_cpuid);
-
-       set_cpu_online(boot_cpuid, true);
-       cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
-       cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
 #endif
@@ -313,7 +319,7 @@ int generic_cpu_disable(void)
        set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
        vdso_data->processorCount--;
-       fixup_irqs(cpu_online_map);
+       fixup_irqs(cpu_online_mask);
 #endif
        return 0;
 }
@@ -333,7 +339,7 @@ int generic_cpu_enable(unsigned int cpu)
                cpu_relax();
 
 #ifdef CONFIG_PPC64
-       fixup_irqs(cpu_online_map);
+       fixup_irqs(cpu_online_mask);
        /* counter the irq disable in fixup_irqs */
        local_irq_enable();
 #endif
@@ -462,7 +468,7 @@ out:
        return id;
 }
 
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -495,6 +501,14 @@ int __devinit start_secondary(void *unused)
        current->active_mm = &init_mm;
 
        smp_store_cpu_info(cpu);
+
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+       /* Clear any pending timer interrupts */
+       mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+
+       /* Enable decrementer interrupt */
+       mtspr(SPRN_TCR, TCR_DIE);
+#endif
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;
@@ -517,15 +531,15 @@ int __devinit start_secondary(void *unused)
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i))
                        continue;
-               cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
-               cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+               cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
+               cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
 
                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
-               cpu_set(cpu, per_cpu(cpu_core_map, base +i));
-               cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+               cpumask_set_cpu(cpu, cpu_core_mask(base + i));
+               cpumask_set_cpu(base + i, cpu_core_mask(cpu));
        }
        l2_cache = cpu_to_l2cache(cpu);
        for_each_online_cpu(i) {
@@ -533,8 +547,8 @@ int __devinit start_secondary(void *unused)
                if (!np)
                        continue;
                if (np == l2_cache) {
-                       cpu_set(cpu, per_cpu(cpu_core_map, i));
-                       cpu_set(i, per_cpu(cpu_core_map, cpu));
+                       cpumask_set_cpu(cpu, cpu_core_mask(i));
+                       cpumask_set_cpu(i, cpu_core_mask(cpu));
                }
                of_node_put(np);
        }
@@ -554,19 +568,22 @@ int setup_profiling_timer(unsigned int multiplier)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-       cpumask_t old_mask;
+       cpumask_var_t old_mask;
 
        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the meantime
         * so we pin ourselves to CPU 0 for a short while
         */
-       old_mask = current->cpus_allowed;
-       set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
+       alloc_cpumask_var(&old_mask, GFP_NOWAIT);
+       cpumask_copy(old_mask, &current->cpus_allowed);
+       set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
        
        if (smp_ops && smp_ops->setup_cpu)
                smp_ops->setup_cpu(boot_cpuid);
 
-       set_cpus_allowed(current, old_mask);
+       set_cpus_allowed_ptr(current, old_mask);
+
+       free_cpumask_var(old_mask);
 
        snapshot_timebases();
 
@@ -591,10 +608,10 @@ int __cpu_disable(void)
        /* Update sibling maps */
        base = cpu_first_thread_in_core(cpu);
        for (i = 0; i < threads_per_core; i++) {
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
-               cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
-               cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
-               cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+               cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
+               cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
+               cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
+               cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
        }
 
        l2_cache = cpu_to_l2cache(cpu);
@@ -603,8 +620,8 @@ int __cpu_disable(void)
                if (!np)
                        continue;
                if (np == l2_cache) {
-                       cpu_clear(cpu, per_cpu(cpu_core_map, i));
-                       cpu_clear(i, per_cpu(cpu_core_map, cpu));
+                       cpumask_clear_cpu(cpu, cpu_core_mask(i));
+                       cpumask_clear_cpu(i, cpu_core_mask(cpu));
                }
                of_node_put(np);
        }
@@ -631,4 +648,10 @@ void cpu_hotplug_driver_unlock()
 {
        mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
 }
+
+void cpu_die(void)
+{
+       if (ppc_md.cpu_die)
+               ppc_md.cpu_die();
+}
 #endif
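
The conversions above go through cpu_sibling_mask()/cpu_core_mask()
accessors rather than touching per_cpu(cpu_sibling_map, cpu) directly,
hiding the switch to cpumask_var_t from callers. Assuming asm/smp.h
defines them in the obvious way (a sketch, not quoted from the patch):

static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return per_cpu(cpu_sibling_map, cpu);
}
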
index e235e52..c0d8c20 100644 (file)
@@ -35,7 +35,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
 #ifdef CONFIG_PPC64
 
 /* Time in microseconds we delay before sleeping in the idle loop */
-DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 };
+DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
 
 static ssize_t store_smt_snooze_delay(struct sys_device *dev,
                                      struct sysdev_attribute *attr,
@@ -44,9 +44,9 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev,
 {
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);
        ssize_t ret;
-       unsigned long snooze;
+       long snooze;
 
-       ret = sscanf(buf, "%lu", &snooze);
+       ret = sscanf(buf, "%ld", &snooze);
        if (ret != 1)
                return -EINVAL;
 
@@ -61,53 +61,23 @@ static ssize_t show_smt_snooze_delay(struct sys_device *dev,
 {
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);
 
-       return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
+       return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
 }
 
 static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
                   store_smt_snooze_delay);
 
-/* Only parse OF options if the matching cmdline option was not specified */
-static int smt_snooze_cmdline;
-
-static int __init smt_setup(void)
-{
-       struct device_node *options;
-       const unsigned int *val;
-       unsigned int cpu;
-
-       if (!cpu_has_feature(CPU_FTR_SMT))
-               return -ENODEV;
-
-       options = of_find_node_by_path("/options");
-       if (!options)
-               return -ENODEV;
-
-       val = of_get_property(options, "ibm,smt-snooze-delay", NULL);
-       if (!smt_snooze_cmdline && val) {
-               for_each_possible_cpu(cpu)
-                       per_cpu(smt_snooze_delay, cpu) = *val;
-       }
-
-       of_node_put(options);
-       return 0;
-}
-__initcall(smt_setup);
-
 static int __init setup_smt_snooze_delay(char *str)
 {
        unsigned int cpu;
-       int snooze;
+       long snooze;
 
        if (!cpu_has_feature(CPU_FTR_SMT))
                return 1;
 
-       smt_snooze_cmdline = 1;
-
-       if (get_option(&str, &snooze)) {
-               for_each_possible_cpu(cpu)
-                       per_cpu(smt_snooze_delay, cpu) = snooze;
-       }
+       snooze = simple_strtol(str, NULL, 10);
+       for_each_possible_cpu(cpu)
+               per_cpu(smt_snooze_delay, cpu) = snooze;
 
        return 1;
 }
index 29d128e..3031fc7 100644 (file)
@@ -380,6 +380,46 @@ int machine_check_440A(struct pt_regs *regs)
        }
        return 0;
 }
+
+int machine_check_47x(struct pt_regs *regs)
+{
+       unsigned long reason = get_mc_reason(regs);
+       u32 mcsr;
+
+       printk(KERN_ERR "Machine check in kernel mode.\n");
+       if (reason & ESR_IMCP) {
+               printk(KERN_ERR
+                      "Instruction Synchronous Machine Check exception\n");
+               mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+               return 0;
+       }
+       mcsr = mfspr(SPRN_MCSR);
+       if (mcsr & MCSR_IB)
+               printk(KERN_ERR "Instruction Read PLB Error\n");
+       if (mcsr & MCSR_DRB)
+               printk(KERN_ERR "Data Read PLB Error\n");
+       if (mcsr & MCSR_DWB)
+               printk(KERN_ERR "Data Write PLB Error\n");
+       if (mcsr & MCSR_TLBP)
+               printk(KERN_ERR "TLB Parity Error\n");
+       if (mcsr & MCSR_ICP) {
+               flush_instruction_cache();
+               printk(KERN_ERR "I-Cache Parity Error\n");
+       }
+       if (mcsr & MCSR_DCSP)
+               printk(KERN_ERR "D-Cache Search Parity Error\n");
+       if (mcsr & PPC47x_MCSR_GPR)
+               printk(KERN_ERR "GPR Parity Error\n");
+       if (mcsr & PPC47x_MCSR_FPR)
+               printk(KERN_ERR "FPR Parity Error\n");
+       if (mcsr & PPC47x_MCSR_IPR)
+               printk(KERN_ERR "Machine Check exception is imprecise\n");
+
+       /* Clear MCSR */
+       mtspr(SPRN_MCSR, mcsr);
+
+       return 0;
+}
 #elif defined(CONFIG_E500)
 int machine_check_e500(struct pt_regs *regs)
 {
@@ -815,12 +855,15 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                return;
        }
        if (reason & REASON_TRAP) {
+               /* Debugger is first in line to stop recursive faults in
+                * rcu_lock, notify_die, or atomic_notifier_call_chain */
+               if (debugger_bpt(regs))
+                       return;
+
                /* trap exception */
                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
                                == NOTIFY_STOP)
                        return;
-               if (debugger_bpt(regs))
-                       return;
 
                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
index 8223717..9ce7b62 100644 (file)
@@ -645,8 +645,10 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
                        found = 1;
                        break;
                }
-       if (!found)
+       if (!found) {
+               spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return;
+       }
 
        /* Increase/decrease in desired device entitlement */
        if (desired >= viodev->cmo.desired) {
@@ -958,9 +960,12 @@ viodev_cmo_rd_attr(allocated);
 
 static ssize_t name_show(struct device *, struct device_attribute *, char *);
 static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+                            char *buf);
 static struct device_attribute vio_cmo_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
+       __ATTR_RO(modalias),
        __ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_desired_show, viodev_cmo_desired_set),
        __ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,      NULL),
@@ -1320,9 +1325,27 @@ static ssize_t devspec_show(struct device *dev,
        return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
 }
 
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       const struct vio_dev *vio_dev = to_vio_dev(dev);
+       struct device_node *dn;
+       const char *cp;
+
+       dn = dev->archdata.of_node;
+       if (!dn)
+               return -ENODEV;
+       cp = of_get_property(dn, "compatible", NULL);
+       if (!cp)
+               return -ENODEV;
+
+       return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
+}
+
 static struct device_attribute vio_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
+       __ATTR_RO(modalias),
        __ATTR_NULL
 };
 
@@ -1365,6 +1388,7 @@ static struct bus_type vio_bus_type = {
        .match = vio_bus_match,
        .probe = vio_bus_probe,
        .remove = vio_bus_remove,
+       .pm = GENERIC_SUBSYS_PM_OPS,
 };
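
The new modalias attribute emits "vio:T<type>S<compatible>". For a
hypothetical device of type "vscsi" whose node carries
compatible = "IBM,v-scsi" (names illustrative, not from the patch),
reading the attribute would give:

	vio:TvscsiSIBM,v-scsi

which is the form udev and modprobe match against module alias tables.
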
 
 /**
index 64e2e49..455881a 100644 (file)
@@ -28,7 +28,7 @@ _GLOBAL(strcpy)
 /* This clears out any unused part of the destination buffer,
    just as the libc version does.  -- paulus */
 _GLOBAL(strncpy)
-       cmpwi   0,r5,0
+       PPC_LCMPI 0,r5,0
        beqlr
        mtctr   r5
        addi    r6,r3,-1
@@ -39,7 +39,7 @@ _GLOBAL(strncpy)
        bdnzf   2,1b            /* dec ctr, branch if ctr != 0 && !cr0.eq */
        bnelr                   /* if we didn't hit a null char, we're done */
        mfctr   r5
-       cmpwi   0,r5,0          /* any space left in destination buffer? */
+       PPC_LCMPI 0,r5,0        /* any space left in destination buffer? */
        beqlr                   /* we know r0 == 0 here */
 2:     stbu    r0,1(r6)        /* clear it out if so */
        bdnz    2b
@@ -70,8 +70,8 @@ _GLOBAL(strcmp)
        blr
 
 _GLOBAL(strncmp)
-       PPC_LCMPI r5,0
-       beqlr
+       PPC_LCMPI 0,r5,0
+       beq-    2f
        mtctr   r5
        addi    r5,r3,-1
        addi    r4,r4,-1
@@ -82,6 +82,8 @@ _GLOBAL(strncmp)
        beqlr   1
        bdnzt   eq,1b
        blr
+2:     li      r3,0
+       blr
 
 _GLOBAL(strlen)
        addi    r4,r3,-1
@@ -92,8 +94,8 @@ _GLOBAL(strlen)
        blr
 
 _GLOBAL(memcmp)
-       cmpwi   0,r5,0
-       ble-    2f
+       PPC_LCMPI 0,r5,0
+       beq-    2f
        mtctr   r5
        addi    r6,r3,-1
        addi    r4,r4,-1
@@ -106,8 +108,8 @@ _GLOBAL(memcmp)
        blr
 
 _GLOBAL(memchr)
-       cmpwi   0,r5,0
-       ble-    2f
+       PPC_LCMPI 0,r5,0
+       beq-    2f
        mtctr   r5
        addi    r3,r3,-1
 1:     lbzu    r0,1(r3)
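
The string.S fixes switch the length compares to PPC_LCMPI (cmpdi on
64-bit builds, cmpwi on 32-bit) and take the early exit only on an exact
zero, so a size_t count is no longer truncated or sign-tested as a
32-bit value. The contract memchr now honors, sketched in C:

void *memchr_sketch(const void *s, int c, size_t n)
{
	const unsigned char *p = s;

	for (; n != 0; n--, p++)	/* n == 0: no match, return NULL */
		if (*p == (unsigned char)c)
			return (void *)p;
	return NULL;
}
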
index 3986264..d8c6efb 100644 (file)
@@ -38,7 +38,9 @@ unsigned int tlb_44x_index; /* = 0 */
 unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
 int icache_44x_need_flush;
 
-static void __init ppc44x_update_tlb_hwater(void)
+unsigned long tlb_47x_boltmap[1024/8];
+
+static void __cpuinit ppc44x_update_tlb_hwater(void)
 {
        extern unsigned int tlb_44x_patch_hwater_D[];
        extern unsigned int tlb_44x_patch_hwater_I[];
@@ -59,7 +61,7 @@ static void __init ppc44x_update_tlb_hwater(void)
 }
 
 /*
- * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
  */
 static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
 {
@@ -67,12 +69,18 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
 
        ppc44x_update_tlb_hwater();
 
+       mtspr(SPRN_MMUCR, 0);
+
        __asm__ __volatile__(
                "tlbwe  %2,%3,%4\n"
                "tlbwe  %1,%3,%5\n"
                "tlbwe  %0,%3,%6\n"
        :
+#ifdef CONFIG_PPC47x
+       : "r" (PPC47x_TLB2_S_RWX),
+#else
        : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+#endif
          "r" (phys),
          "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
          "r" (entry),
@@ -81,8 +89,93 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
          "i" (PPC44x_TLB_ATTRIB));
 }
 
+static int __init ppc47x_find_free_bolted(void)
+{
+       unsigned int mmube0 = mfspr(SPRN_MMUBE0);
+       unsigned int mmube1 = mfspr(SPRN_MMUBE1);
+
+       if (!(mmube0 & MMUBE0_VBE0))
+               return 0;
+       if (!(mmube0 & MMUBE0_VBE1))
+               return 1;
+       if (!(mmube0 & MMUBE0_VBE2))
+               return 2;
+       if (!(mmube1 & MMUBE1_VBE3))
+               return 3;
+       if (!(mmube1 & MMUBE1_VBE4))
+               return 4;
+       if (!(mmube1 & MMUBE1_VBE5))
+               return 5;
+       return -1;
+}
+
+static void __init ppc47x_update_boltmap(void)
+{
+       unsigned int mmube0 = mfspr(SPRN_MMUBE0);
+       unsigned int mmube1 = mfspr(SPRN_MMUBE1);
+
+       if (mmube0 & MMUBE0_VBE0)
+               __set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
+                         tlb_47x_boltmap);
+       if (mmube0 & MMUBE0_VBE1)
+               __set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
+                         tlb_47x_boltmap);
+       if (mmube0 & MMUBE0_VBE2)
+               __set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
+                         tlb_47x_boltmap);
+       if (mmube1 & MMUBE1_VBE3)
+               __set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
+                         tlb_47x_boltmap);
+       if (mmube1 & MMUBE1_VBE4)
+               __set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
+                         tlb_47x_boltmap);
+       if (mmube1 & MMUBE1_VBE5)
+               __set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
+                         tlb_47x_boltmap);
+}
+
+/*
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
+ */
+static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
+{
+       unsigned int rA;
+       int bolted;
+
+       /* Base rA is HW way select, way 0, bolted bit set */
+       rA = 0x88000000;
+
+       /* Look for a bolted entry slot */
+       bolted = ppc47x_find_free_bolted();
+       BUG_ON(bolted < 0);
+
+       /* Insert bolted slot number */
+       rA |= bolted << 24;
+
+       pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
+                virt, phys, bolted);
+
+       mtspr(SPRN_MMUCR, 0);
+
+       __asm__ __volatile__(
+               "tlbwe  %2,%3,0\n"
+               "tlbwe  %1,%3,1\n"
+               "tlbwe  %0,%3,2\n"
+               :
+               : "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
+                      PPC47x_TLB2_SX
+#ifdef CONFIG_SMP
+                      | PPC47x_TLB2_M
+#endif
+                      ),
+                 "r" (phys),
+                 "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
+                 "r" (rA));
+}
+
 void __init MMU_init_hw(void)
 {
+       /* This is not useful on 47x but won't hurt either */
        ppc44x_update_tlb_hwater();
 
        flush_instruction_cache();
@@ -95,8 +188,51 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
        /* Pin in enough TLBs to cover any lowmem not covered by the
         * initial 256M mapping established in head_44x.S */
        for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
-            addr += PPC_PIN_SIZE)
-               ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+            addr += PPC_PIN_SIZE) {
+               if (mmu_has_feature(MMU_FTR_TYPE_47x))
+                       ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+               else
+                       ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+       }
+       if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+               ppc47x_update_boltmap();
 
+#ifdef DEBUG
+               {
+                       int i;
+
+                       printk(KERN_DEBUG "bolted entries: ");
+                       for (i = 0; i < 255; i++) {
+                               if (test_bit(i, tlb_47x_boltmap))
+                                       printk("%d ", i);
+                       }
+                       printk("\n");
+               }
+#endif /* DEBUG */
+       }
        return total_lowmem;
 }
+
+#ifdef CONFIG_SMP
+void __cpuinit mmu_init_secondary(int cpu)
+{
+       unsigned long addr;
+
+       /* Pin in enough TLBs to cover any lowmem not covered by the
+        * initial 256M mapping established in head_44x.S
+        *
+        * WARNING: This is called with only the first 256M of the
+        * linear mapping in the TLB and we can't take faults yet
+        * so beware of what this code uses. It runs off a temporary
+        * stack. current (r2) isn't initialized, smp_processor_id()
+        * will not work, current thread info isn't accessible, ...
+        */
+       for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
+            addr += PPC_PIN_SIZE) {
+               if (mmu_has_feature(MMU_FTR_TYPE_47x))
+                       ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+               else
+                       ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+       }
+}
+#endif /* CONFIG_SMP */
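On 47x the pinned entries are bolted in hardware via MMUBE0/MMUBE1 and mirrored into tlb_47x_boltmap so that later full invalidations can leave them alone. Roughly what a consumer of the map does (the real loop is the 47x asm in tlb_nohash_low.S further down; invalidate_entry() here is a hypothetical stand-in):

    int set, way;

    for (set = 0; set < 256; set++)
            for (way = 0; way < 4; way++) {
                    /* bolted entries always live in way 0 */
                    if (way == 0 && test_bit(set, tlb_47x_boltmap))
                            continue;
                    invalidate_entry(set, way);
            }
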
index 26fb6b9..1bd712c 100644 (file)
@@ -151,13 +151,14 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        if (!user_mode(regs) && (address >= TASK_SIZE))
                return SIGSEGV;
 
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
+                            defined(CONFIG_PPC_BOOK3S_64))
        if (error_code & DSISR_DABRMATCH) {
                /* DABR match */
                do_dabr(regs, address, error_code);
                return 0;
        }
-#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
+#endif
 
        if (in_atomic() || mm == NULL) {
                if (!user_mode(regs))
@@ -307,7 +308,6 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
- survive:
        ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(ret & VM_FAULT_ERROR)) {
                if (ret & VM_FAULT_OOM)
@@ -359,15 +359,10 @@ bad_area_nosemaphore:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (is_global_init(current)) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
-       printk("VM: killing process %s\n", current->comm);
-       if (user_mode(regs))
-               do_group_exit(SIGKILL);
-       return SIGKILL;
+       if (!user_mode(regs))
+               return SIGKILL;
+       pagefault_out_of_memory();
+       return 0;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
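The removed survive:/yield() path special-cased init and retried the fault by hand; the replacement defers to the core VM, which may OOM-kill a task (possibly the faulting one) and lets the hardware simply retry the access. The resulting arch contract, sketched in general form:

    /* generic pattern adopted here, simplified */
    if (!user_mode(regs))
            return SIGKILL;            /* kernel faults cannot wait for OOM */
    pagefault_out_of_memory();         /* core VM picks and kills a victim */
    return 0;                          /* the user access is retried after */
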
index 1ed6b52..cdc7526 100644 (file)
@@ -2,7 +2,7 @@
  * Modifications by Kumar Gala (galak@kernel.crashing.org) to support
  * E500 Book E processors.
  *
- * Copyright 2004 Freescale Semiconductor, Inc
+ * Copyright 2004,2010 Freescale Semiconductor, Inc.
  *
  * This file contains the routines for initializing the MMU
  * on the 4xx series of chips.
 
 unsigned int tlbcam_index;
 
 #define NUM_TLBCAMS    (64)
 
 #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
 #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
 #endif
 
-struct tlbcam {
-       u32     MAS0;
-       u32     MAS1;
-       unsigned long   MAS2;
-       u32     MAS3;
-       u32     MAS7;
-} TLBCAM[NUM_TLBCAMS];
+struct tlbcam TLBCAM[NUM_TLBCAMS];
 
 struct tlbcamrange {
        unsigned long start;
@@ -109,19 +103,6 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
        return 0;
 }
 
-void loadcam_entry(int idx)
-{
-       mtspr(SPRN_MAS0, TLBCAM[idx].MAS0);
-       mtspr(SPRN_MAS1, TLBCAM[idx].MAS1);
-       mtspr(SPRN_MAS2, TLBCAM[idx].MAS2);
-       mtspr(SPRN_MAS3, TLBCAM[idx].MAS3);
-
-       if (mmu_has_feature(MMU_FTR_BIG_PHYS))
-               mtspr(SPRN_MAS7, TLBCAM[idx].MAS7);
-
-       asm volatile("isync;tlbwe;isync" : : : "memory");
-}
-
 /*
  * Set up one of the I/D BAT (block address translation) register pairs.
  * The parameters are not checked; in particular size must be a power
index d7fa50b..e267f22 100644 (file)
@@ -252,6 +252,47 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
 }
 #endif /* CONFIG_PPC_BOOK3E */
 
+struct vmemmap_backing *vmemmap_list;
+
+static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
+{
+       static struct vmemmap_backing *next;
+       static int num_left;
+
+       /* allocate a page when required and hand out chunks */
+       if (!next || !num_left) {
+               next = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (unlikely(!next)) {
+                       WARN_ON(1);
+                       return NULL;
+               }
+               num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
+       }
+
+       num_left--;
+
+       return next++;
+}
+
+static __meminit void vmemmap_list_populate(unsigned long phys,
+                                           unsigned long start,
+                                           int node)
+{
+       struct vmemmap_backing *vmem_back;
+
+       vmem_back = vmemmap_list_alloc(node);
+       if (unlikely(!vmem_back)) {
+               WARN_ON(1);
+               return;
+       }
+
+       vmem_back->phys = phys;
+       vmem_back->virt_addr = start;
+       vmem_back->list = vmemmap_list;
+
+       vmemmap_list = vmem_back;
+}
+
 int __meminit vmemmap_populate(struct page *start_page,
                               unsigned long nr_pages, int node)
 {
@@ -276,6 +317,8 @@ int __meminit vmemmap_populate(struct page *start_page,
                if (!p)
                        return -ENOMEM;
 
+               vmemmap_list_populate(__pa(p), start, node);
+
                pr_debug("      * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);
 
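vmemmap_list records, for each populated vmemmap chunk, which physical block backs it; this patch only builds the list, but a consumer would walk it along these lines (helper name illustrative):

    static unsigned long vmemmap_lookup_phys(unsigned long vaddr)
    {
            struct vmemmap_backing *vb;

            for (vb = vmemmap_list; vb; vb = vb->list)
                    if (vb->virt_addr == vaddr)
                            return vb->phys;
            return 0;       /* not a populated vmemmap address */
    }
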
index 1f2d9ff..ddfd7ad 100644 (file)
@@ -395,10 +395,18 @@ void __init mmu_context_init(void)
         * the PID/TID comparison is disabled, so we can use a TID of zero
         * to represent all kernel pages as shared among all contexts.
         *      -- Dan
+        *
+        * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
+        * should normally never have to steal, though the facility is
+        * present if needed.
+        *      -- BenH
         */
        if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
                first_context = 0;
                last_context = 15;
+       } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+               first_context = 1;
+               last_context = 65535;
        } else {
                first_context = 1;
                last_context = 255;
index d49a775..63b84a0 100644 (file)
@@ -69,12 +69,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
 }
 #endif /* CONFIG_8xx */
 
-/*
- * As of today, we don't support tlbivax broadcast on any
- * implementation. When that becomes the case, this will be
- * an extern.
- */
-#ifdef CONFIG_PPC_BOOK3E
+#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
 extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
                           unsigned int tsize, unsigned int ind);
 #else
@@ -149,7 +144,15 @@ extern unsigned long mmu_mapin_ram(unsigned long top);
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(unsigned long top);
 extern void adjust_total_lowmem(void);
-
+extern void loadcam_entry(unsigned int index);
+
+struct tlbcam {
+       u32     MAS0;
+       u32     MAS1;
+       unsigned long   MAS2;
+       u32     MAS3;
+       u32     MAS7;
+};
 #elif defined(CONFIG_PPC32)
 /* anything 32-bit except 4xx or 8xx */
 extern void MMU_init_hw(void);
index eaa7633..80d1106 100644 (file)
@@ -33,16 +33,41 @@ static int numa_debug;
 #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
 
 int numa_cpu_lookup_table[NR_CPUS];
-cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 struct pglist_data *node_data[MAX_NUMNODES];
 
 EXPORT_SYMBOL(numa_cpu_lookup_table);
-EXPORT_SYMBOL(numa_cpumask_lookup_table);
+EXPORT_SYMBOL(node_to_cpumask_map);
 EXPORT_SYMBOL(node_data);
 
 static int min_common_depth;
 static int n_mem_addr_cells, n_mem_size_cells;
 
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ */
+static void __init setup_node_to_cpumask_map(void)
+{
+       unsigned int node, num = 0;
+
+       /* setup nr_node_ids if not done yet */
+       if (nr_node_ids == MAX_NUMNODES) {
+               for_each_node_mask(node, node_possible_map)
+                       num = node;
+               nr_node_ids = num + 1;
+       }
+
+       /* allocate the map */
+       for (node = 0; node < nr_node_ids; node++)
+               alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
+
+       /* cpumask_of_node() will now work */
+       dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
+}
+
 static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
                                                unsigned int *nid)
 {
@@ -138,8 +163,8 @@ static void __cpuinit map_cpu_to_node(int cpu, int node)
 
        dbg("adding cpu %d to node %d\n", cpu, node);
 
-       if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
-               cpu_set(cpu, numa_cpumask_lookup_table[node]);
+       if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
+               cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -149,8 +174,8 @@ static void unmap_cpu_from_node(unsigned long cpu)
 
        dbg("removing cpu %lu from node %d\n", cpu, node);
 
-       if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
-               cpu_clear(cpu, numa_cpumask_lookup_table[node]);
+       if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
+               cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
@@ -246,7 +271,8 @@ static int __init find_min_common_depth(void)
        const unsigned int *ref_points;
        struct device_node *rtas_root;
        unsigned int len;
-       struct device_node *options;
+       struct device_node *chosen;
+       const char *vec5;
 
        rtas_root = of_find_node_by_path("/rtas");
 
@@ -264,14 +290,17 @@ static int __init find_min_common_depth(void)
                        "ibm,associativity-reference-points", &len);
 
        /*
-        * For type 1 affinity information we want the first field
+        * For form 1 affinity information we want the first field
         */
-       options = of_find_node_by_path("/options");
-       if (options) {
-               const char *str;
-               str = of_get_property(options, "ibm,associativity-form", NULL);
-               if (str && !strcmp(str, "1"))
-                        index = 0;
+#define VEC5_AFFINITY_BYTE     5
+#define VEC5_AFFINITY          0x80
+       chosen = of_find_node_by_path("/chosen");
+       if (chosen) {
+               vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
+               if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
+                       dbg("Using form 1 affinity\n");
+                       index = 0;
+               }
        }
 
        if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
@@ -750,8 +779,9 @@ void __init dump_numa_cpu_topology(void)
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
-               for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                       if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
+               for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+                       if (cpumask_test_cpu(cpu,
+                                       node_to_cpumask_map[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
@@ -763,7 +793,7 @@ void __init dump_numa_cpu_topology(void)
                }
 
                if (count > 1)
-                       printk("-%u", NR_CPUS - 1);
+                       printk("-%u", nr_cpu_ids - 1);
                printk("\n");
        }
 }
@@ -939,10 +969,6 @@ void __init do_init_bootmem(void)
        else
                dump_numa_memory_topology();
 
-       register_cpu_notifier(&ppc64_numa_nb);
-       cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
-                         (void *)(unsigned long)boot_cpuid);
-
        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                void *bootmem_vaddr;
@@ -996,6 +1022,16 @@ void __init do_init_bootmem(void)
        }
 
        init_bootmem_done = 1;
+
+       /*
+        * Now bootmem is initialised we can create the node to cpumask
+        * lookup tables and setup the cpu callback to populate them.
+        */
+       setup_node_to_cpumask_map();
+
+       register_cpu_notifier(&ppc64_numa_nb);
+       cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
+                         (void *)(unsigned long)boot_cpuid);
 }
 
 void __init paging_init(void)
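numa_cpumask_lookup_table was a static array of full cpumask_t; node_to_cpumask_map holds cpumask_var_t entries allocated at boot, which is what CONFIG_CPUMASK_OFFSTACK configurations with large NR_CPUS want. The general lifecycle of the new API, for reference:

    cpumask_var_t mask;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))     /* allocated zeroed */
            return -ENOMEM;
    cpumask_set_cpu(cpu, mask);                     /* was cpu_set() */
    if (cpumask_test_cpu(cpu, mask))                /* was cpu_isset() */
            pr_debug("cpu %d set\n", cpu);
    free_cpumask_var(mask);
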
index b9243e7..9fc02dc 100644 (file)
@@ -146,6 +146,14 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
        /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
        flags &= ~(_PAGE_USER | _PAGE_EXEC);
 
+#ifdef _PAGE_BAP_SR
+       /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+        * which means that we just cleared supervisor access... oops ;-) This
+        * restores it
+        */
+       flags |= _PAGE_BAP_SR;
+#endif
+
        return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_flags);
@@ -385,11 +393,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
                return -EINVAL;
        __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
        wmb();
-#ifdef CONFIG_PPC_STD_MMU
-       flush_hash_pages(0, address, pmd_val(*kpmd), 1);
-#else
        flush_tlb_page(NULL, address);
-#endif
        pte_unmap(kpte);
 
        return 0;
index d95679a..d050fc8 100644 (file)
@@ -265,6 +265,14 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
        /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
        flags &= ~(_PAGE_USER | _PAGE_EXEC);
 
+#ifdef _PAGE_BAP_SR
+       /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+        * which means that we just cleared supervisor access... oops ;-) This
+        * restores it
+        */
+       flags |= _PAGE_BAP_SR;
+#endif
+
        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
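Both ioremap_flags() variants get the same fix: on Book3E's PTE format the supervisor-read bit _PAGE_BAP_SR is part of the _PAGE_USER mask (as the comment notes), so stripping user permissions also stripped kernel read access. Schematically:

    flags &= ~(_PAGE_USER | _PAGE_EXEC);    /* drops _PAGE_BAP_SR too */
    #ifdef _PAGE_BAP_SR
    flags |= _PAGE_BAP_SR;                  /* give supervisor read back */
    #endif
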
index bbdc5b5..cfa7682 100644 (file)
@@ -10,7 +10,7 @@
  *     - tlbil_va
  *     - tlbil_pid
  *     - tlbil_all
- *     - tlbivax_bcast (not yet)
+ *     - tlbivax_bcast
  *
  * Code mostly moved over from misc_32.S
  *
@@ -33,6 +33,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
+#include <asm/bug.h>
 
 #if defined(CONFIG_40x)
 
@@ -65,7 +66,7 @@ _GLOBAL(__tlbil_va)
  * Nothing to do for 8xx, everything is inline
  */
 
-#elif defined(CONFIG_44x)
+#elif defined(CONFIG_44x) /* Includes 47x */
 
 /*
  * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
@@ -73,7 +74,13 @@ _GLOBAL(__tlbil_va)
  */
 _GLOBAL(__tlbil_va)
        mfspr   r5,SPRN_MMUCR
-       rlwimi  r5,r4,0,24,31                   /* Set TID */
+       mfmsr   r10
+
+       /*
+        * We write 16 bits of STID since 47x supports that much; we
+        * will never be passed out-of-bounds values on 440 (hopefully).
+        */
+       rlwimi  r5,r4,0,16,31
 
        /* We have to run the search with interrupts disabled, otherwise
         * an interrupt which causes a TLB miss can clobber the MMUCR
@@ -83,24 +90,41 @@ _GLOBAL(__tlbil_va)
         * and restoring MMUCR, so only normal interrupts have to be
         * taken care of.
         */
-       mfmsr   r4
        wrteei  0
        mtspr   SPRN_MMUCR,r5
-       tlbsx.  r3, 0, r3
-       wrtee   r4
-       bne     1f
+       tlbsx.  r6,0,r3
+       bne     10f
        sync
-       /* There are only 64 TLB entries, so r3 < 64,
-        * which means bit 22, is clear.  Since 22 is
-        * the V bit in the TLB_PAGEID, loading this
+BEGIN_MMU_FTR_SECTION
+       b       2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+       /* On 440 there are only 64 TLB entries, so r6 < 64, which means bit
+        * 22 is clear.  Since 22 is the V bit in the TLB_PAGEID, loading this
         * value will invalidate the TLB entry.
         */
-       tlbwe   r3, r3, PPC44x_TLB_PAGEID
+       tlbwe   r6,r6,PPC44x_TLB_PAGEID
        isync
-1:     blr
+10:    wrtee   r10
+       blr
+2:
+#ifdef CONFIG_PPC_47x
+       oris    r7,r6,0x8000    /* specify way explicitly */
+       clrrwi  r4,r3,12        /* get an EPN for the hashing with V = 0 */
+       ori     r4,r4,PPC47x_TLBE_SIZE
+       tlbwe   r4,r7,0         /* write it */
+       isync
+       wrtee   r10
+       blr
+#else /* CONFIG_PPC_47x */
+1:     trap
+       EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
 
 _GLOBAL(_tlbil_all)
 _GLOBAL(_tlbil_pid)
+BEGIN_MMU_FTR_SECTION
+       b       2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
        li      r3,0
        sync
 
@@ -115,6 +139,76 @@ _GLOBAL(_tlbil_pid)
 
        isync
        blr
+2:
+#ifdef CONFIG_PPC_47x
+       /* 476 variant. There's no simple way to do this; hopefully we'll
+        * try to limit the number of such full invalidates
+        */
+       mfmsr   r11             /* Interrupts off */
+       wrteei  0
+       li      r3,-1           /* Current set */
+       lis     r10,tlb_47x_boltmap@h
+       ori     r10,r10,tlb_47x_boltmap@l
+       lis     r7,0x8000       /* Specify way explicitly */
+
+       b       9f              /* For each set */
+
+1:     li      r9,4            /* Number of ways */
+       li      r4,0            /* Current way */
+       li      r6,0            /* Default entry value 0 */
+       andi.   r0,r8,1         /* Check if way 0 is bolted */
+       mtctr   r9              /* Load way counter */
+       bne-    3f              /* Bolted, skip loading it */
+
+2:     /* For each way */
+       or      r5,r3,r4        /* Make way|index for tlbre */
+       rlwimi  r5,r5,16,8,15   /* Copy index into position */
+       tlbre   r6,r5,0         /* Read entry */
+3:     addis   r4,r4,0x2000    /* Next way */
+       andi.   r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */
+       beq     4f              /* Nope, skip it */
+       rlwimi  r7,r5,0,1,2     /* Insert way number */
+       rlwinm  r6,r6,0,21,19   /* Clear V */
+       tlbwe   r6,r7,0         /* Write it */
+4:     bdnz    2b              /* Loop for each way */
+       srwi    r8,r8,1         /* Next boltmap bit */
+9:     cmpwi   cr1,r3,255      /* Last set done ? */
+       addi    r3,r3,1         /* Next set */
+       beq     cr1,1f          /* End of loop */
+       andi.   r0,r3,0x1f      /* Need to load a new boltmap word ? */
+       bne     1b              /* No, loop */
+       lwz     r8,0(r10)       /* Load boltmap entry */
+       addi    r10,r10,4       /* Next word */
+       b       1b              /* Then loop */
+1:     isync                   /* Sync shadows */
+       wrtee   r11
+#else /* CONFIG_PPC_47x */
+1:     trap
+       EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
+       blr
+
+#ifdef CONFIG_PPC_47x
+/*
+ * _tlbivax_bcast is only on 47x. We don't bother doing a runtime
+ * check though, it will blow up soon enough if we mistakenly try
+ * to use it on a 440.
+ */
+_GLOBAL(_tlbivax_bcast)
+       mfspr   r5,SPRN_MMUCR
+       mfmsr   r10
+       rlwimi  r5,r4,0,16,31
+       wrteei  0
+       mtspr   SPRN_MMUCR,r5
+/*     tlbivax 0,r3 - use .long to avoid binutils deps */
+       .long 0x7c000624 | (r3 << 11)
+       isync
+       eieio
+       tlbsync
+       sync
+       wrtee   r10
+       blr
+#endif /* CONFIG_PPC_47x */
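The .long works around assemblers that lack the tlbivax mnemonic. Decoded, assuming the standard Power ISA layout (primary opcode 31, extended opcode 786, RA spliced in at shift 16, RB at shift 11):

    /* 0x7c000624 == tlbivax 0,0; insert RA/RB by hand */
    #define TLBIVAX(ra, rb) (0x7c000624 | ((ra) << 16) | ((rb) << 11))

so ".long 0x7c000624 | (r3 << 11)" assembles tlbivax 0,r3.
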
 
 #elif defined(CONFIG_FSL_BOOKE)
 /*
@@ -271,3 +365,31 @@ _GLOBAL(set_context)
 #else
 #error Unsupported processor type !
 #endif
+
+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * extern void loadcam_entry(unsigned int index)
+ *
+ * Load TLBCAM[index] entry in to the L2 CAM MMU
+ */
+_GLOBAL(loadcam_entry)
+       LOAD_REG_ADDR(r4, TLBCAM)
+       mulli   r5,r3,TLBCAM_SIZE
+       add     r3,r5,r4
+       lwz     r4,TLBCAM_MAS0(r3)
+       mtspr   SPRN_MAS0,r4
+       lwz     r4,TLBCAM_MAS1(r3)
+       mtspr   SPRN_MAS1,r4
+       PPC_LL  r4,TLBCAM_MAS2(r3)
+       mtspr   SPRN_MAS2,r4
+       lwz     r4,TLBCAM_MAS3(r3)
+       mtspr   SPRN_MAS3,r4
+BEGIN_MMU_FTR_SECTION
+       lwz     r4,TLBCAM_MAS7(r3)
+       mtspr   SPRN_MAS7,r4
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
+       isync
+       tlbwe
+       isync
+       blr
+#endif
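Moving loadcam_entry to assembly keeps compiler- and ftrace-generated prologue code, which may touch the stack, out of a path that can run without a usable stack mapping. The TLBCAM_* offsets it consumes are presumably generated in arch/powerpc/kernel/asm-offsets.c from the C struct, along these lines (companion change, not shown in this excerpt):

    DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
    DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
    DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
    DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
    DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
    DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
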
index 7486bff..eeba0a7 100644 (file)
@@ -1,3 +1,12 @@
+config PPC_47x
+       bool "Support for 47x variant"
+       depends on 44x
+       default n
+       select MPIC
+       help
+         This option enables support for the 47x family of processors and is
+         not currently compatible with other 44x or 46x variants.
+
 config BAMBOO
        bool "Bamboo"
        depends on 44x
@@ -151,6 +160,17 @@ config YOSEMITE
        help
          This option enables support for the AMCC PPC440EP evaluation board.
 
+config ISS4xx
+       bool "ISS 4xx Simulator"
+       depends on (44x || 40x)
+       default n
+       select 405GP if 40x
+       select 440GP if 44x && !PPC_47x
+       select PPC_FPU
+       select OF_RTC
+       help
+         This option enables support for the IBM ISS simulation environment
+
 #config LUAN
 #      bool "Luan"
 #      depends on 44x
index ee6185a..82ff326 100644 (file)
@@ -5,3 +5,4 @@ obj-$(CONFIG_SAM440EP)  += sam440ep.o
 obj-$(CONFIG_WARP)     += warp.o
 obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
 obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
+obj-$(CONFIG_ISS4xx)   += iss4xx.o
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
new file mode 100644 (file)
index 0000000..aa46e9d
--- /dev/null
@@ -0,0 +1,167 @@
+/*
+ * PPC476 board specific routines
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ *    Matt Porter <mporter@kernel.crashing.org>
+ *    Copyright 2002-2005 MontaVista Software Inc.
+ *
+ *    Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *    Copyright (c) 2003-2005 Zultys Technologies
+ *
+ *    Rewritten and ported to the merged powerpc tree:
+ *    Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/time.h>
+#include <asm/uic.h>
+#include <asm/ppc4xx.h>
+#include <asm/mpic.h>
+#include <asm/mmu.h>
+
+static __initdata struct of_device_id iss4xx_of_bus[] = {
+       { .compatible = "ibm,plb4", },
+       { .compatible = "ibm,plb6", },
+       { .compatible = "ibm,opb", },
+       { .compatible = "ibm,ebc", },
+       {},
+};
+
+static int __init iss4xx_device_probe(void)
+{
+       of_platform_bus_probe(NULL, iss4xx_of_bus, NULL);
+       of_instantiate_rtc();
+
+       return 0;
+}
+machine_device_initcall(iss4xx, iss4xx_device_probe);
+
+/* We can have either UICs or MPICs */
+static void __init iss4xx_init_irq(void)
+{
+       struct device_node *np;
+
+       /* Find top level interrupt controller */
+       for_each_node_with_property(np, "interrupt-controller") {
+               if (of_get_property(np, "interrupts", NULL) == NULL)
+                       break;
+       }
+       if (np == NULL)
+               panic("Can't find top level interrupt controller");
+
+       /* Check type and do appropriate initialization */
+       if (of_device_is_compatible(np, "ibm,uic")) {
+               uic_init_tree();
+               ppc_md.get_irq = uic_get_irq;
+#ifdef CONFIG_MPIC
+       } else if (of_device_is_compatible(np, "chrp,open-pic")) {
+               /* The MPIC driver will get everything it needs from the
+                * device-tree, just pass 0 to all arguments
+                */
+               struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0,
+                                              " MPIC     ");
+               BUG_ON(mpic == NULL);
+               mpic_init(mpic);
+               ppc_md.get_irq = mpic_get_irq;
+#endif
+       } else
+               panic("Unrecognized top level interrupt controller");
+}
+
+#ifdef CONFIG_SMP
+static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
+{
+       mpic_setup_this_cpu();
+}
+
+static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
+{
+       struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
+       const u64 *spin_table_addr_prop;
+       u32 *spin_table;
+       extern void start_secondary_47x(void);
+
+       BUG_ON(cpunode == NULL);
+
+       /* Assume spin table. We could test for the enable-method in
+        * the device-tree but currently there's little point as it's
+        * our only supported method
+        */
+       spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr",
+                                              NULL);
+       if (spin_table_addr_prop == NULL) {
+               pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
+               return;
+       }
+
+       /* Assume it's mapped as part of the linear mapping. This is a bit
+        * fishy but will work fine for now
+        */
+       spin_table = (u32 *)__va(*spin_table_addr_prop);
+       pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
+
+       spin_table[3] = cpu;
+       smp_wmb();
+       spin_table[1] = __pa(start_secondary_47x);
+       mb();
+}
+
+static struct smp_ops_t iss_smp_ops = {
+       .probe          = smp_mpic_probe,
+       .message_pass   = smp_mpic_message_pass,
+       .setup_cpu      = smp_iss4xx_setup_cpu,
+       .kick_cpu       = smp_iss4xx_kick_cpu,
+       .give_timebase  = smp_generic_give_timebase,
+       .take_timebase  = smp_generic_take_timebase,
+};
+
+static void __init iss4xx_smp_init(void)
+{
+       if (mmu_has_feature(MMU_FTR_TYPE_47x))
+               smp_ops = &iss_smp_ops;
+}
+
+#else /* CONFIG_SMP */
+static void __init iss4xx_smp_init(void) { }
+#endif /* CONFIG_SMP */
+
+static void __init iss4xx_setup_arch(void)
+{
+       iss4xx_smp_init();
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init iss4xx_probe(void)
+{
+       unsigned long root = of_get_flat_dt_root();
+
+       if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx"))
+               return 0;
+
+       return 1;
+}
+
+define_machine(iss4xx) {
+       .name                   = "ISS-4xx",
+       .probe                  = iss4xx_probe,
+       .progress               = udbg_progress,
+       .init_IRQ               = iss4xx_init_irq,
+       .setup_arch             = iss4xx_setup_arch,
+       .restart                = ppc4xx_reset_system,
+       .calibrate_decr         = generic_calibrate_decr,
+};
index 0b4f883..ae525e4 100644 (file)
@@ -74,6 +74,7 @@ static int __init mpc831x_rdb_probe(void)
 static struct of_device_id __initdata of_bus_ids[] = {
        { .compatible = "simple-bus" },
        { .compatible = "gianfar" },
+       { .compatible = "gpio-leds", },
        {},
 };
 
index a1908d2..e00801c 100644 (file)
@@ -72,6 +72,7 @@ static struct of_device_id mpc837x_ids[] = {
        { .compatible = "soc", },
        { .compatible = "simple-bus", },
        { .compatible = "gianfar", },
+       { .compatible = "gpio-leds", },
        {},
 };
 
index 5abe137..018cc67 100644 (file)
@@ -83,7 +83,8 @@ static struct of_device_id __initdata mpc8610_ids[] = {
        { .compatible = "fsl,mpc8610-immr", },
        { .compatible = "fsl,mpc8610-guts", },
        { .compatible = "simple-bus", },
-       { .compatible = "gianfar", },
+       /* So that the DMA channel nodes can be probed individually: */
+       { .compatible = "fsl,eloplus-dma", },
        {}
 };
 
index a8aae0b..d361f81 100644 (file)
@@ -43,7 +43,7 @@ config 40x
        select PPC_PCI_CHOICE
 
 config 44x
-       bool "AMCC 44x"
+       bool "AMCC 44x, 46x or 47x"
        select PPC_DCR_NATIVE
        select PPC_UDBG_16550
        select 4xx_SOC
@@ -294,7 +294,7 @@ config PPC_PERF_CTRS
          This enables the powerpc-specific perf_event back-end.
 
 config SMP
-       depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
+       depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
        bool "Symmetric multi-processing support"
        ---help---
          This enables support for systems with more than one CPU. If you have
@@ -322,6 +322,7 @@ config NR_CPUS
 config NOT_COHERENT_CACHE
        bool
        depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
+       default n if PPC_47x
        default y
 
 config CHECK_CACHE_COHERENCY
index e6506cd..bfa2c0c 100644 (file)
@@ -118,7 +118,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
        policy->cur = cbe_freqs[cur_pmode].frequency;
 
 #ifdef CONFIG_SMP
-       cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+       cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
        cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
index fba5bf9..32a56c6 100644 (file)
@@ -252,8 +252,8 @@ decrementer_iSeries_masked:
        li      r11,1
        ld      r12,PACALPPACAPTR(r13)
        stb     r11,LPPACADECRINT(r12)
-       LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
-       lwz     r12,0(r12)
+       li      r12,-1
+       clrldi  r12,r12,33      /* set DEC to 0x7fffffff */
        mtspr   SPRN_DEC,r12
        /* fall through */
 
index b841c9a..3fc2e64 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/of.h>
+#include <linux/ratelimit.h>
 
 #include <asm/types.h>
 #include <asm/io.h>
@@ -584,14 +585,9 @@ static inline struct device_node *xlate_iomm_address(
 
        orig_addr = (unsigned long __force)addr;
        if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
-               static unsigned long last_jiffies;
-               static int num_printed;
+               static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);
 
-               if (time_after(jiffies, last_jiffies + 60 * HZ)) {
-                       last_jiffies = jiffies;
-                       num_printed = 0;
-               }
-               if (num_printed++ < 10)
+               if (__ratelimit(&ratelimit))
                        printk(KERN_ERR
                                "iSeries_%s: invalid access at IO address %p\n",
                                func, addr);
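The jiffies/num_printed pair being removed is exactly the pattern linux/ratelimit.h generalizes: DEFINE_RATELIMIT_STATE(name, interval, burst) keeps the window state, and __ratelimit() returns nonzero while the caller is under the limit. The general shape:

    static DEFINE_RATELIMIT_STATE(rs, 60 * HZ, 10); /* 10 msgs per minute */

    if (__ratelimit(&rs))
            printk(KERN_ERR "noisy condition hit\n");
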
index 722335e..6590850 100644 (file)
@@ -83,7 +83,7 @@ static void smp_iSeries_message_pass(int target, int msg)
 
 static int smp_iSeries_probe(void)
 {
-       return cpus_weight(cpu_possible_map);
+       return cpumask_weight(cpu_possible_mask);
 }
 
 static void smp_iSeries_kick_cpu(int nr)
index d35e052..c16537b 100644 (file)
@@ -213,7 +213,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
        pr_debug("current astate is at %d\n",cur_astate);
 
        policy->cur = pas_freqs[cur_astate].frequency;
-       cpumask_copy(policy->cpus, &cpu_online_map);
+       cpumask_copy(policy->cpus, cpu_online_mask);
 
        ppc_proc_freq = policy->cur * 1000ul;
 
index 3ca09d3..9650c60 100644 (file)
@@ -362,7 +362,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
        /* secondary CPUs are tied to the primary one by the
         * cpufreq core if in the secondary policy we tell it that
         * it actually must be one policy together with all others. */
-       cpumask_copy(policy->cpus, &cpu_online_map);
+       cpumask_copy(policy->cpus, cpu_online_mask);
        cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
 
        return cpufreq_frequency_table_cpuinfo(policy,
index f45331a..06a137c 100644 (file)
@@ -592,7 +592,7 @@ static void __init kw_i2c_probe(void)
        /* Probe keywest-i2c busses */
        for_each_compatible_node(np, "i2c","keywest-i2c") {
                struct pmac_i2c_host_kw *host;
-               int multibus, chans, i;
+               int multibus;
 
                /* Found one, init a host structure */
                host = kw_i2c_host_init(np);
@@ -614,6 +614,8 @@ static void __init kw_i2c_probe(void)
                 * parent type
                 */
                if (multibus) {
+                       int chans, i;
+
                        parent = of_get_parent(np);
                        if (parent == NULL)
                                continue;
@@ -1258,8 +1260,7 @@ static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
        if (inst == NULL)
                return;
        pmac_i2c_close(inst->bus);
-       if (inst)
-               kfree(inst);
+       kfree(inst);
 }
 
 static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
index 3362e78..f0bc08f 100644 (file)
@@ -33,6 +33,8 @@ extern void pmac_setup_pci_dma(void);
 extern void pmac_check_ht_link(void);
 
 extern void pmac_setup_smp(void);
+extern void pmac32_cpu_die(void);
+extern void low_cpu_die(void) __attribute__((noreturn));
 
 extern int pmac_nvram_init(void);
 extern void pmac_pic_init(void);
index 15c2241..f1d0132 100644 (file)
@@ -480,7 +480,7 @@ static void __init pmac_init_early(void)
 #endif
 
        /* SMP Init has to be done early as we need to patch up
-        * cpu_possible_map before interrupt stacks are allocated
+        * cpu_possible_mask before interrupt stacks are allocated
         * or kaboom...
         */
 #ifdef CONFIG_SMP
@@ -646,7 +646,7 @@ static int pmac_pci_probe_mode(struct pci_bus *bus)
 /* access per cpu vars from generic smp.c */
 DECLARE_PER_CPU(int, cpu_state);
 
-static void pmac_cpu_die(void)
+static void pmac64_cpu_die(void)
 {
        /*
         * turn off as much as possible, we'll be
@@ -717,8 +717,13 @@ define_machine(powermac) {
        .pcibios_after_init     = pmac_pcibios_after_init,
        .phys_mem_access_prot   = pci_phys_mem_access_prot,
 #endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
-       .cpu_die                = pmac_cpu_die,
+#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PPC64
+       .cpu_die                = pmac64_cpu_die,
+#endif
+#ifdef CONFIG_PPC32
+       .cpu_die                = pmac32_cpu_die,
+#endif
 #endif
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
        .cpu_die                = generic_mach_cpu_die,
index 6898e82..c95215f 100644 (file)
@@ -53,6 +53,8 @@
 #include <asm/pmac_low_i2c.h>
 #include <asm/pmac_pfunc.h>
 
+#include "pmac.h"
+
 #undef DEBUG
 
 #ifdef DEBUG
@@ -315,7 +317,7 @@ static int __init smp_psurge_probe(void)
        /* This is necessary because OF doesn't know about the
         * secondary cpu(s), and thus there aren't nodes in the
         * device tree for them, and smp_setup_cpu_maps hasn't
-        * set their bits in cpu_present_map.
+        * set their bits in cpu_present_mask.
         */
        if (ncpus > NR_CPUS)
                ncpus = NR_CPUS;
@@ -878,10 +880,9 @@ int smp_core99_cpu_disable(void)
        return 0;
 }
 
-extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
 static int cpu_dead[NR_CPUS];
 
-void cpu_die(void)
+void pmac32_cpu_die(void)
 {
        local_irq_disable();
        cpu_dead[smp_processor_id()] = 1;
@@ -944,7 +945,7 @@ void __init pmac_setup_smp(void)
        }
 #ifdef CONFIG_PPC32
        else {
-               /* We have to set bits in cpu_possible_map here since the
+               /* We have to set bits in cpu_possible_mask here since the
                 * secondary CPU(s) aren't in the device tree. Various
                 * things won't be initialized for CPUs not in the possible
                 * map, so we really need to fix it up here.
index 0ff5174..3dbef30 100644 (file)
@@ -7,7 +7,7 @@ EXTRA_CFLAGS            += -DDEBUG
 endif
 
 obj-y                  := lpar.o hvCall.o nvram.o reconfig.o \
-                          setup.o iommu.o ras.o \
+                          setup.o iommu.o event_sources.o ras.o \
                           firmware.o power.o dlpar.o
 obj-$(CONFIG_SMP)      += smp.o
 obj-$(CONFIG_XICS)     += xics.o
index e1682bc..d71e585 100644 (file)
@@ -79,13 +79,12 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
         * prepend this to the full_name.
         */
        name = (char *)ccwa + ccwa->name_offset;
-       dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL);
+       dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
        if (!dn->full_name) {
                kfree(dn);
                return NULL;
        }
 
-       sprintf(dn->full_name, "/%s", name);
        return dn;
 }
 
@@ -410,15 +409,13 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
         * directory of the device tree.  CPUs actually live in the
         * cpus directory so we need to fixup the full_name.
         */
-       cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
-                          GFP_KERNEL);
+       cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
        if (!cpu_name) {
                dlpar_free_cc_nodes(dn);
                rc = -ENOMEM;
                goto out;
        }
 
-       sprintf(cpu_name, "/cpus%s", dn->full_name);
        kfree(dn->full_name);
        dn->full_name = cpu_name;
 
@@ -433,6 +430,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
        if (rc) {
                dlpar_release_drc(drc_index);
                dlpar_free_cc_nodes(dn);
+               goto out;
        }
 
        rc = dlpar_online_cpu(dn);
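Both dlpar.c hunks replace a kmalloc(strlen() + N) plus sprintf() pair with kasprintf(), which measures, allocates, and formats in one call and so removes the easy off-by-one in a hand-computed length. The pattern:

    char *path = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);

    if (!path)
            return -ENOMEM;
    /* ... use path, then ... */
    kfree(path);
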
index 7df7fbb..34b7dc1 100644 (file)
@@ -749,7 +749,7 @@ static void __rtas_set_slot_reset(struct pci_dn *pdn)
        /* Determine type of EEH reset required by device,
         * default hot reset or fundamental reset
         */
-       if (dev->needs_freset)
+       if (dev && dev->needs_freset)
                rtas_pci_slot_reset(pdn, 3);
        else
                rtas_pci_slot_reset(pdn, 1);
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
new file mode 100644 (file)
index 0000000..e889c9d
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2001 Dave Engebretsen IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <asm/prom.h>
+
+#include "pseries.h"
+
+void request_event_sources_irqs(struct device_node *np,
+                               irq_handler_t handler,
+                               const char *name)
+{
+       int i, index, count = 0;
+       struct of_irq oirq;
+       const u32 *opicprop;
+       unsigned int opicplen;
+       unsigned int virqs[16];
+
+       /* Check for obsolete "open-pic-interrupt" property. If present, then
+        * map those interrupts using the default interrupt host and default
+        * trigger
+        */
+       opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
+       if (opicprop) {
+               opicplen /= sizeof(u32);
+               for (i = 0; i < opicplen; i++) {
+                       if (count > 15)
+                               break;
+                       virqs[count] = irq_create_mapping(NULL, *(opicprop++));
+                       if (virqs[count] == NO_IRQ)
+                               printk(KERN_ERR "Unable to allocate interrupt "
+                                      "number for %s\n", np->full_name);
+                       else
+                               count++;
+
+               }
+       }
+       /* Else use normal interrupt tree parsing */
+       else {
+               /* First try to do a proper OF tree parsing */
+               for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+                    index++) {
+                       if (count > 15)
+                               break;
+                       virqs[count] = irq_create_of_mapping(oirq.controller,
+                                                           oirq.specifier,
+                                                           oirq.size);
+                       if (virqs[count] == NO_IRQ)
+                               printk(KERN_ERR "Unable to allocate interrupt "
+                                      "number for %s\n", np->full_name);
+                       else
+                               count++;
+               }
+       }
+
+       /* Now request them */
+       for (i = 0; i < count; i++) {
+               if (request_irq(virqs[i], handler, 0, name, NULL)) {
+                       printk(KERN_ERR "Unable to request interrupt %d for "
+                              "%s\n", virqs[i], np->full_name);
+                       return;
+               }
+       }
+}
+
index a8e1d5d..8f85f39 100644 (file)
@@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
        for(;;);
 }
 
-static int qcss_tok;   /* query-cpu-stopped-state token */
-
-/* Get state of physical CPU.
- * Return codes:
- *     0       - The processor is in the RTAS stopped state
- *     1       - stop-self is in progress
- *     2       - The processor is not in the RTAS stopped state
- *     -1      - Hardware Error
- *     -2      - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
-       int cpu_status, status;
-
-       status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
-       if (status != 0) {
-               printk(KERN_ERR
-                      "RTAS query-cpu-stopped-state failed: %i\n", status);
-               return status;
-       }
-
-       return cpu_status;
-}
-
 static int pseries_cpu_disable(void)
 {
        int cpu = smp_processor_id();
@@ -187,7 +163,7 @@ static int pseries_cpu_disable(void)
 
        /*fix boot_cpuid here*/
        if (cpu == boot_cpuid)
-               boot_cpuid = any_online_cpu(cpu_online_map);
+               boot_cpuid = cpumask_any(cpu_online_mask);
 
        /* FIXME: abstract this to not be platform specific later on */
        xics_migrate_irqs_away();
@@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
        } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
 
                for (tries = 0; tries < 25; tries++) {
-                       cpu_status = query_cpu_stopped(pcpu);
-                       if (cpu_status == 0 || cpu_status == -1)
+                       cpu_status = smp_query_cpu_stopped(pcpu);
+                       if (cpu_status == QCSS_STOPPED ||
+                           cpu_status == QCSS_HARDWARE_ERROR)
                                break;
                        cpu_relax();
                }
@@ -245,7 +222,7 @@ static void pseries_cpu_die(unsigned int cpu)
 }
 
 /*
- * Update cpu_present_map and paca(s) for a new cpu node.  The wrinkle
+ * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
  * here is that a cpu device node may represent up to two logical cpus
  * in the SMT case.  We must honor the assumption in other code that
  * the logical ids for sibling SMT threads x and y are adjacent, such
@@ -254,7 +231,7 @@ static void pseries_cpu_die(unsigned int cpu)
 static int pseries_add_processor(struct device_node *np)
 {
        unsigned int cpu;
-       cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+       cpumask_var_t candidate_mask, tmp;
        int err = -ENOSPC, len, nthreads, i;
        const u32 *intserv;
 
@@ -262,48 +239,53 @@ static int pseries_add_processor(struct device_node *np)
        if (!intserv)
                return 0;
 
+       zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
+       zalloc_cpumask_var(&tmp, GFP_KERNEL);
+
        nthreads = len / sizeof(u32);
        for (i = 0; i < nthreads; i++)
-               cpu_set(i, tmp);
+               cpumask_set_cpu(i, tmp);
 
        cpu_maps_update_begin();
 
-       BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+       BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
 
        /* Get a bitmap of unoccupied slots. */
-       cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
-       if (cpus_empty(candidate_map)) {
+       cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
+       if (cpumask_empty(candidate_mask)) {
                /* If we get here, it most likely means that NR_CPUS is
                 * less than the partition's max processors setting.
                 */
                printk(KERN_ERR "Cannot add cpu %s; this system configuration"
                       " supports %d logical cpus.\n", np->full_name,
-                      cpus_weight(cpu_possible_map));
+                      cpumask_weight(cpu_possible_mask));
                goto out_unlock;
        }
 
-       while (!cpus_empty(tmp))
-               if (cpus_subset(tmp, candidate_map))
+       while (!cpumask_empty(tmp))
+               if (cpumask_subset(tmp, candidate_mask))
                        /* Found a range where we can insert the new cpu(s) */
                        break;
                else
-                       cpus_shift_left(tmp, tmp, nthreads);
+                       cpumask_shift_left(tmp, tmp, nthreads);
 
-       if (cpus_empty(tmp)) {
-               printk(KERN_ERR "Unable to find space in cpu_present_map for"
+       if (cpumask_empty(tmp)) {
+               printk(KERN_ERR "Unable to find space in cpu_present_mask for"
                       " processor %s with %d thread(s)\n", np->name,
                       nthreads);
                goto out_unlock;
        }
 
-       for_each_cpu_mask(cpu, tmp) {
-               BUG_ON(cpu_isset(cpu, cpu_present_map));
+       for_each_cpu(cpu, tmp) {
+               BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
                set_cpu_present(cpu, true);
                set_hard_smp_processor_id(cpu, *intserv++);
        }
        err = 0;
 out_unlock:
        cpu_maps_update_done();
+       free_cpumask_var(candidate_mask);
+       free_cpumask_var(tmp);
        return err;
 }
 
@@ -334,7 +316,7 @@ static void pseries_remove_processor(struct device_node *np)
                        set_hard_smp_processor_id(cpu, -1);
                        break;
                }
-               if (cpu == NR_CPUS)
+               if (cpu >= nr_cpu_ids)
                        printk(KERN_WARNING "Could not find cpu to remove "
                               "with physical id 0x%x\n", intserv[i]);
        }
@@ -388,6 +370,7 @@ static int __init pseries_cpu_hotplug_init(void)
        struct device_node *np;
        const char *typep;
        int cpu;
+       int qcss_tok;
 
        for_each_node_by_name(np, "interrupt-controller") {
                typep = of_get_property(np, "compatible", NULL);
index 383a5d0..48d2057 100644 (file)
@@ -228,3 +228,41 @@ _GLOBAL(plpar_hcall9)
        mtcrf   0xff,r0
 
        blr                             /* return r3 = status */
+
+/* See plpar_hcall_raw to see why this is needed */
+_GLOBAL(plpar_hcall9_raw)
+       HMT_MEDIUM
+
+       mfcr    r0
+       stw     r0,8(r1)
+
+       std     r4,STK_PARM(r4)(r1)     /* Save ret buffer */
+
+       mr      r4,r5
+       mr      r5,r6
+       mr      r6,r7
+       mr      r7,r8
+       mr      r8,r9
+       mr      r9,r10
+       ld      r10,STK_PARM(r11)(r1)    /* put arg7 in R10 */
+       ld      r11,STK_PARM(r12)(r1)    /* put arg8 in R11 */
+       ld      r12,STK_PARM(r13)(r1)    /* put arg9 in R12 */
+
+       HVSC                            /* invoke the hypervisor */
+
+       mr      r0,r12
+       ld      r12,STK_PARM(r4)(r1)
+       std     r4,  0(r12)
+       std     r5,  8(r12)
+       std     r6, 16(r12)
+       std     r7, 24(r12)
+       std     r8, 32(r12)
+       std     r9, 40(r12)
+       std     r10,48(r12)
+       std     r11,56(r12)
+       std     r0, 64(r12)
+
+       lwz     r0,8(r1)
+       mtcrf   0xff,r0
+
+       blr                             /* return r3 = status */
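As with the existing plpar_hcall_raw, the _raw suffix means no tracing or statistics wrappers, so the call is usable from real mode; the 9-return-value variant exists for H_READ with the H_READ_4 flag, which hands back four HPTEs (eight doublewords) per call. Call shape, mirroring plpar_pte_read_4_raw() below:

    unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
    long rc;

    rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
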
index 0707653..cf79b46 100644 (file)
@@ -367,21 +367,28 @@ static void pSeries_lpar_hptab_clear(void)
 {
        unsigned long size_bytes = 1UL << ppc64_pft_size;
        unsigned long hpte_count = size_bytes >> 4;
-       unsigned long dummy1, dummy2, dword0;
+       struct {
+               unsigned long pteh;
+               unsigned long ptel;
+       } ptes[4];
        long lpar_rc;
-       int i;
+       int i, j;
 
-       /* TODO: Use bulk call */
-       for (i = 0; i < hpte_count; i++) {
-               /* dont remove HPTEs with VRMA mappings */
-               lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG,
-                                               &dummy1, &dummy2);
-               if (lpar_rc == H_NOT_FOUND) {
-                       lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1);
-                       if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK)
-                               != HPTE_V_VRMA_MASK))
-                               /* Can be hpte for 1TB Seg. So remove it */
-                               plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
+       /* Read in batches of 4; invalidate only valid entries that are
+        * not in the VRMA. hpte_count will be a multiple of 4.
+        */
+       for (i = 0; i < hpte_count; i += 4) {
+               lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
+               if (lpar_rc != H_SUCCESS)
+                       continue;
+               for (j = 0; j < 4; j++) {
+                       if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
+                               HPTE_V_VRMA_MASK)
+                               continue;
+                       if (ptes[j].pteh & HPTE_V_VALID)
+                               plpar_pte_remove_raw(0, i + j, 0,
+                                       &(ptes[j].pteh), &(ptes[j].ptel));
                }
        }
 }
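Worked numbers for why the batching matters, assuming a 256 MB hash table:

    unsigned long size_bytes = 1UL << 28;       /* ppc64_pft_size = 28 */
    unsigned long hpte_count = size_bytes >> 4; /* 16-byte HPTEs: 16M  */
    unsigned long reads = hpte_count / 4;       /* H_READ_4 batches: 4M */

versus one H_REMOVE probe per HPTE (16M hypervisor calls, plus re-reads on miss) in the old loop.
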
index a05f8d4..d980111 100644 (file)
@@ -4,6 +4,14 @@
 #include <asm/hvcall.h>
 #include <asm/page.h>
 
+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
 static inline long poll_pending(void)
 {
        return plpar_hcall_norets(H_POLL_PENDING);
@@ -183,6 +191,24 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
        return rc;
 }
 
+/*
+ * plpar_pte_read_4_raw can be called in real mode.
+ * The ptes buffer must hold 8 * sizeof(unsigned long) bytes.
+ */
+static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
+                                       unsigned long *ptes)
+{
+       long rc;
+       unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+       rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
+
+       memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+       return rc;
+}
+
 static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
                unsigned long avpn)
 {
index 9e17c0d..40c93ca 100644 (file)
 #ifndef _PSERIES_PSERIES_H
 #define _PSERIES_PSERIES_H
 
+#include <linux/interrupt.h>
+
+struct device_node;
+
+extern void request_event_sources_irqs(struct device_node *np,
+                                      irq_handler_t handler, const char *name);
+
 extern void __init fw_feature_init(const char *hypertas, unsigned long len);
 
 struct pt_regs;
index db940d2..41a3e9a 100644 (file)
@@ -67,63 +67,6 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
 
-static void request_ras_irqs(struct device_node *np,
-                       irq_handler_t handler,
-                       const char *name)
-{
-       int i, index, count = 0;
-       struct of_irq oirq;
-       const u32 *opicprop;
-       unsigned int opicplen;
-       unsigned int virqs[16];
-
-       /* Check for obsolete "open-pic-interrupt" property. If present, then
-        * map those interrupts using the default interrupt host and default
-        * trigger
-        */
-       opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
-       if (opicprop) {
-               opicplen /= sizeof(u32);
-               for (i = 0; i < opicplen; i++) {
-                       if (count > 15)
-                               break;
-                       virqs[count] = irq_create_mapping(NULL, *(opicprop++));
-                       if (virqs[count] == NO_IRQ)
-                               printk(KERN_ERR "Unable to allocate interrupt "
-                                      "number for %s\n", np->full_name);
-                       else
-                               count++;
-
-               }
-       }
-       /* Else use normal interrupt tree parsing */
-       else {
-               /* First try to do a proper OF tree parsing */
-               for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
-                    index++) {
-                       if (count > 15)
-                               break;
-                       virqs[count] = irq_create_of_mapping(oirq.controller,
-                                                           oirq.specifier,
-                                                           oirq.size);
-                       if (virqs[count] == NO_IRQ)
-                               printk(KERN_ERR "Unable to allocate interrupt "
-                                      "number for %s\n", np->full_name);
-                       else
-                               count++;
-               }
-       }
-
-       /* Now request them */
-       for (i = 0; i < count; i++) {
-               if (request_irq(virqs[i], handler, 0, name, NULL)) {
-                       printk(KERN_ERR "Unable to request interrupt %d for "
-                              "%s\n", virqs[i], np->full_name);
-                       return;
-               }
-       }
-}
-
 /*
  * Initialize handlers for the set of interrupts caused by hardware errors
  * and power system events.
@@ -138,14 +81,15 @@ static int __init init_ras_IRQ(void)
        /* Internal Errors */
        np = of_find_node_by_path("/event-sources/internal-errors");
        if (np != NULL) {
-               request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
+               request_event_sources_irqs(np, ras_error_interrupt,
+                                          "RAS_ERROR");
                of_node_put(np);
        }
 
        /* EPOW Events */
        np = of_find_node_by_path("/event-sources/epow-events");
        if (np != NULL) {
-               request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW");
+               request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW");
                of_node_put(np);
        }
 
index 6710761..a6d19e3 100644 (file)
@@ -496,13 +496,14 @@ static int __init pSeries_probe(void)
 }
 
 
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
+DECLARE_PER_CPU(long, smt_snooze_delay);
 
 static void pseries_dedicated_idle_sleep(void)
 { 
        unsigned int cpu = smp_processor_id();
        unsigned long start_snooze;
        unsigned long in_purr, out_purr;
+       long snooze = __get_cpu_var(smt_snooze_delay);
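+       /* A negative snooze delay means poll indefinitely rather than cede */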
 
        /*
         * Indicate to the HV that we are idle. Now would be
@@ -517,13 +518,12 @@ static void pseries_dedicated_idle_sleep(void)
         * has been checked recently.  If we should poll for a little
         * while, do so.
         */
-       if (__get_cpu_var(smt_snooze_delay)) {
-               start_snooze = get_tb() +
-                       __get_cpu_var(smt_snooze_delay) * tb_ticks_per_usec;
+       if (snooze) {
+               start_snooze = get_tb() + snooze * tb_ticks_per_usec;
                local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);
 
-               while (get_tb() < start_snooze) {
+               while ((snooze < 0) || (get_tb() < start_snooze)) {
                        if (need_resched() || cpu_is_offline(cpu))
                                goto out;
                        ppc64_runlatch_off();
index 4e7f89a..3b1bf61 100644 (file)
  * The Primary thread of each non-boot processor was started from the OF client
  * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
  */
-static cpumask_t of_spin_map;
+static cpumask_var_t of_spin_mask;
+
+/* Query where a cpu is now.  Return codes #defined in plpar_wrappers.h */
+int smp_query_cpu_stopped(unsigned int pcpu)
+{
+       int cpu_status, status;
+       int qcss_tok = rtas_token("query-cpu-stopped-state");
+
+       if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
+               printk(KERN_INFO "Firmware doesn't support "
+                               "query-cpu-stopped-state\n");
+               return QCSS_HARDWARE_ERROR;
+       }
+
+       status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+       if (status != 0) {
+               printk(KERN_ERR
+                      "RTAS query-cpu-stopped-state failed: %i\n", status);
+               return status;
+       }
+
+       return cpu_status;
+}
 
 /**
  * smp_startup_cpu() - start the given cpu
@@ -76,12 +98,18 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
        unsigned int pcpu;
        int start_cpu;
 
-       if (cpu_isset(lcpu, of_spin_map))
+       if (cpumask_test_cpu(lcpu, of_spin_mask))
                /* Already started by OF and sitting in spin loop */
                return 1;
 
        pcpu = get_hard_smp_processor_id(lcpu);
 
+       /* The CPU may already be out of firmware, e.g. after a kexec */
+       if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
+               cpumask_set_cpu(lcpu, of_spin_mask);
+               return 1;
+       }
+
        /* Fixup atomic count: it exited inside IRQ handler. */
        task_thread_info(paca[lcpu].__current)->preempt_count   = 0;
 
@@ -115,7 +143,7 @@ static void __devinit smp_xics_setup_cpu(int cpu)
        if (firmware_has_feature(FW_FEATURE_SPLPAR))
                vpa_init(cpu);
 
-       cpu_clear(cpu, of_spin_map);
+       cpumask_clear_cpu(cpu, of_spin_mask);
        set_cpu_current_state(cpu, CPU_STATE_ONLINE);
        set_default_offline_state(cpu);
 
@@ -186,17 +214,19 @@ static void __init smp_init_pseries(void)
 
        pr_debug(" -> smp_init_pSeries()\n");
 
+       alloc_bootmem_cpumask_var(&of_spin_mask);
+
        /* Mark threads which are still spinning in hold loops. */
        if (cpu_has_feature(CPU_FTR_SMT)) {
                for_each_present_cpu(i) { 
                        if (cpu_thread_in_core(i) == 0)
-                               cpu_set(i, of_spin_map);
+                               cpumask_set_cpu(i, of_spin_mask);
                }
        } else {
-               of_spin_map = cpu_present_map;
+               cpumask_copy(of_spin_mask, cpu_present_mask);
        }
 
-       cpu_clear(boot_cpuid, of_spin_map);
+       cpumask_clear_cpu(boot_cpuid, of_spin_mask);
 
        /* Non-lpar has additional take/give timebase */
        if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
index 1bcedd8..f19d194 100644 (file)
@@ -163,29 +163,37 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
 /* Interface to generic irq subsystem */
 
 #ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq, cpumask_t cpumask,
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ */
+static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
                          unsigned int strict_check)
 {
-       int server;
-       /* For the moment only implement delivery to all cpus or one cpu */
-       cpumask_t tmp = CPU_MASK_NONE;
 
        if (!distribute_irqs)
                return default_server;
 
-       if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
-               cpus_and(tmp, cpu_online_map, cpumask);
-
-               server = first_cpu(tmp);
+       if (!cpumask_equal(cpumask, cpu_all_mask)) {
+               int server = cpumask_first_and(cpu_online_mask, cpumask);
 
-               if (server < NR_CPUS)
+               if (server < nr_cpu_ids)
                        return get_hard_smp_processor_id(server);
 
                if (strict_check)
                        return -1;
        }
 
-       if (cpus_equal(cpu_online_map, cpu_present_map))
+       /*
+        * Workaround issue with some versions of JS20 firmware that
+        * deliver interrupts to cpus which haven't been started. This
+        * happens when using the maxcpus= boot option.
+        */
+       if (cpumask_equal(cpu_online_mask, cpu_present_mask))
                return default_distrib_server;
 
        return default_server;
@@ -207,7 +215,7 @@ static void xics_unmask_irq(unsigned int virq)
        if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
                return;
 
-       server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0);
+       server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0);
 
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
                                DEFAULT_PRIORITY);
@@ -398,11 +406,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
                return -1;
        }
 
-       /*
-        * For the moment only implement delivery to all cpus or one cpu.
-        * Get current irq_server for the given irq
-        */
-       irq_server = get_irq_server(virq, *cpumask, 1);
+       irq_server = get_irq_server(virq, cpumask, 1);
        if (irq_server == -1) {
                char cpulist[128];
                cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
@@ -611,7 +615,7 @@ int __init smp_xics_probe(void)
 {
        xics_request_ipi();
 
-       return cpus_weight(cpu_possible_map);
+       return cpumask_weight(cpu_possible_mask);
 }
 
 #endif /* CONFIG_SMP */
index 6478eb1..83f5196 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/of_gpio.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
+#include <linux/irq.h>
 
 #define MPC8XXX_GPIO_PINS      32
 
@@ -35,6 +36,7 @@ struct mpc8xxx_gpio_chip {
         * open drain mode safely
         */
        u32 data;
+       struct irq_host *irq;
 };
 
 static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
@@ -128,12 +130,136 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
        return 0;
 }
 
+static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+       struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+       struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+
+       if (mpc8xxx_gc->irq && offset < MPC8XXX_GPIO_PINS)
+               return irq_create_mapping(mpc8xxx_gc->irq, offset);
+       else
+               return -ENXIO;
+}
+
+static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
+{
+       struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc);
+       struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+       unsigned int mask;
+
+       mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR);
+       if (mask)
+               generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
+                                                    32 - ffs(mask)));
+}
+
+static void mpc8xxx_irq_unmask(unsigned int virq)
+{
+       struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+       struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+       setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+
+       spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
+static void mpc8xxx_irq_mask(unsigned int virq)
+{
+       struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+       struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+       clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+
+       spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
+static void mpc8xxx_irq_ack(unsigned int virq)
+{
+       struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+       struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+
+       out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+}
+
+static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
+{
+       struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+       struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+       unsigned long flags;
+
+       switch (flow_type) {
+       case IRQ_TYPE_EDGE_FALLING:
+               spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+               setbits32(mm->regs + GPIO_ICR,
+                         mpc8xxx_gpio2mask(virq_to_hw(virq)));
+               spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+               break;
+
+       case IRQ_TYPE_EDGE_BOTH:
+               spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+               clrbits32(mm->regs + GPIO_ICR,
+                         mpc8xxx_gpio2mask(virq_to_hw(virq)));
+               spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static struct irq_chip mpc8xxx_irq_chip = {
+       .name           = "mpc8xxx-gpio",
+       .unmask         = mpc8xxx_irq_unmask,
+       .mask           = mpc8xxx_irq_mask,
+       .ack            = mpc8xxx_irq_ack,
+       .set_type       = mpc8xxx_irq_set_type,
+};
+
+static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
+                               irq_hw_number_t hw)
+{
+       set_irq_chip_data(virq, h->host_data);
+       set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
+       set_irq_type(virq, IRQ_TYPE_NONE);
+
+       return 0;
+}
+
+static int mpc8xxx_gpio_irq_xlate(struct irq_host *h, struct device_node *ct,
+                                 const u32 *intspec, unsigned int intsize,
+                                 irq_hw_number_t *out_hwirq,
+                                 unsigned int *out_flags)
+{
+       /* The interrupt sense values coming from the device tree equal
+        * either IRQ_TYPE_EDGE_FALLING or IRQ_TYPE_EDGE_BOTH.
+        */
+       *out_hwirq = intspec[0];
+       *out_flags = intspec[1];
+
+       return 0;
+}
+
+static struct irq_host_ops mpc8xxx_gpio_irq_ops = {
+       .map    = mpc8xxx_gpio_irq_map,
+       .xlate  = mpc8xxx_gpio_irq_xlate,
+};
+
 static void __init mpc8xxx_add_controller(struct device_node *np)
 {
        struct mpc8xxx_gpio_chip *mpc8xxx_gc;
        struct of_mm_gpio_chip *mm_gc;
        struct of_gpio_chip *of_gc;
        struct gpio_chip *gc;
+       unsigned hwirq;
        int ret;
 
        mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL);
@@ -158,11 +284,32 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
        else
                gc->get = mpc8xxx_gpio_get;
        gc->set = mpc8xxx_gpio_set;
+       gc->to_irq = mpc8xxx_gpio_to_irq;
 
        ret = of_mm_gpiochip_add(np, mm_gc);
        if (ret)
                goto err;
 
+       hwirq = irq_of_parse_and_map(np, 0);
+       if (hwirq == NO_IRQ)
+               goto skip_irq;
+
+       mpc8xxx_gc->irq =
+               irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, MPC8XXX_GPIO_PINS,
+                              &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS);
+       if (!mpc8xxx_gc->irq)
+               goto skip_irq;
+
+       mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
+
+       /* ack and mask all irqs */
+       out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
+       out_be32(mm_gc->regs + GPIO_IMR, 0);
+
+       set_irq_data(hwirq, mpc8xxx_gc);
+       set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
+
+skip_irq:
        return;
 
 err:
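
A hedged consumer-side sketch of the GPIO interrupt support added above; the
pin number, names and handler are placeholders, and error handling is
abbreviated:

	#include <linux/gpio.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_gpio_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int demo_request_gpio_irq(unsigned int gpio)
	{
		/* resolves through gc->to_irq, i.e. mpc8xxx_gpio_to_irq() */
		int irq = gpio_to_irq(gpio);

		if (irq < 0)
			return irq;

		/* only falling-edge or both-edge triggers are accepted,
		 * see mpc8xxx_irq_set_type()
		 */
		return request_irq(irq, demo_gpio_isr, IRQF_TRIGGER_FALLING,
				   "demo-gpio", NULL);
	}
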
index 260295b..2102487 100644 (file)
@@ -568,12 +568,12 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #endif /* CONFIG_MPIC_U3_HT_IRQS */
 
 #ifdef CONFIG_SMP
-static int irq_choose_cpu(const cpumask_t *mask)
+static int irq_choose_cpu(const struct cpumask *mask)
 {
        int cpuid;
 
        if (cpumask_equal(mask, cpu_all_mask)) {
-               static int irq_rover;
+               static int irq_rover = 0;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;
 
@@ -581,15 +581,11 @@ static int irq_choose_cpu(const cpumask_t *mask)
        do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);
 
-               while (!cpu_online(irq_rover)) {
-                       if (++irq_rover >= NR_CPUS)
-                               irq_rover = 0;
-               }
+               irq_rover = cpumask_next(irq_rover, cpu_online_mask);
+               if (irq_rover >= nr_cpu_ids)
+                       irq_rover = cpumask_first(cpu_online_mask);
+
                cpuid = irq_rover;
-               do {
-                       if (++irq_rover >= NR_CPUS)
-                               irq_rover = 0;
-               } while (!cpu_online(irq_rover));
 
                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
@@ -601,7 +597,7 @@ static int irq_choose_cpu(const cpumask_t *mask)
        return get_hard_smp_processor_id(cpuid);
 }
 #else
-static int irq_choose_cpu(const cpumask_t *mask)
+static int irq_choose_cpu(const struct cpumask *mask)
 {
        return hard_smp_processor_id();
 }
@@ -814,12 +810,16 @@ int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
                mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
        } else {
-               cpumask_t tmp;
+               cpumask_var_t tmp;
 
-               cpumask_and(&tmp, cpumask, cpu_online_mask);
+               alloc_cpumask_var(&tmp, GFP_KERNEL);
+
+               cpumask_and(tmp, cpumask, cpu_online_mask);
 
                mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
-                              mpic_physmask(cpus_addr(tmp)[0]));
+                              mpic_physmask(cpumask_bits(tmp)[0]));
+
+               free_cpumask_var(tmp);
        }
 
        return 0;
@@ -1479,21 +1479,6 @@ void mpic_teardown_this_cpu(int secondary)
 }
 
 
-void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
-{
-       struct mpic *mpic = mpic_primary;
-
-       BUG_ON(mpic == NULL);
-
-#ifdef DEBUG_IPI
-       DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
-#endif
-
-       mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
-                      ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
-                      mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
-}
-
 static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
 {
        u32 src;
@@ -1589,8 +1574,25 @@ void mpic_request_ipis(void)
        }
 }
 
+static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask)
+{
+       struct mpic *mpic = mpic_primary;
+
+       BUG_ON(mpic == NULL);
+
+#ifdef DEBUG_IPI
+       DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
+#endif
+
+       mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
+                      ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
+                      mpic_physmask(cpumask_bits(cpu_mask)[0]));
+}
+
 void smp_mpic_message_pass(int target, int msg)
 {
+       cpumask_var_t tmp;
+
        /* make sure we're sending something that translates to an IPI */
        if ((unsigned int)msg > 3) {
                printk("SMP %d: smp_message_pass: unknown msg %d\n",
@@ -1599,13 +1601,17 @@ void smp_mpic_message_pass(int target, int msg)
        }
        switch (target) {
        case MSG_ALL:
-               mpic_send_ipi(msg, 0xffffffff);
+               mpic_send_ipi(msg, cpu_online_mask);
                break;
        case MSG_ALL_BUT_SELF:
-               mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
+               alloc_cpumask_var(&tmp, GFP_NOWAIT);
+               cpumask_andnot(tmp, cpu_online_mask,
+                              cpumask_of(smp_processor_id()));
+               mpic_send_ipi(msg, tmp);
+               free_cpumask_var(tmp);
                break;
        default:
-               mpic_send_ipi(msg, 1 << target);
+               mpic_send_ipi(msg, cpumask_of(target));
                break;
        }
 }
@@ -1616,7 +1622,7 @@ int __init smp_mpic_probe(void)
 
        DBG("smp_mpic_probe()...\n");
 
-       nr_cpus = cpus_weight(cpu_possible_map);
+       nr_cpus = cpumask_weight(cpu_possible_mask);
 
        DBG("nr_cpus: %d\n", nr_cpus);
 
index 5c01435..d3d6ce3 100644 (file)
@@ -191,11 +191,31 @@ static int __init ppc4xx_l2c_probe(void)
 arch_initcall(ppc4xx_l2c_probe);
 
 /*
- * At present, this routine just applies a system reset.
+ * Apply a system reset. Alternatively, a board-specific value may be
+ * provided via the "reset-type" property in the cpu node.
  */
 void ppc4xx_reset_system(char *cmd)
 {
-       mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_RST_SYSTEM);
+       struct device_node *np;
+       u32 reset_type = DBCR0_RST_SYSTEM;
+       const u32 *prop;
+
+       np = of_find_node_by_type(NULL, "cpu");
+       if (np) {
+               prop = of_get_property(np, "reset-type", NULL);
+
+               /*
+                * Check if property exists and if it is in range:
+                * 1 - PPC4xx core reset
+                * 2 - PPC4xx chip reset
+                * 3 - PPC4xx system reset (default)
+                */
+               if (prop && prop[0] >= 1 && prop[0] <= 3)
+                       reset_type = prop[0] << 28;
+       }
+
+       mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type);
+
        while (1)
                ;       /* Just in case the reset doesn't work */
 }
index 70c6965..efb6d39 100644 (file)
@@ -237,6 +237,18 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
        return -1;
 }
 
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+       if (exception == 60)
+               return instruction_pointer(regs) - 2;
+       return instruction_pointer(regs);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+       regs->pc = ip;
+}
+
 /*
  * The primary entry points for the kgdb debug trap table entries.
  */
@@ -247,7 +259,7 @@ BUILD_TRAP_HANDLER(singlestep)
 
        local_irq_save(flags);
        regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
-       kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs);
+       kgdb_handle_exception(0, SIGTRAP, 0, regs);
        local_irq_restore(flags);
 }
 
index 04df4ed..539243b 100644 (file)
@@ -158,6 +158,12 @@ void kgdb_arch_exit(void)
 {
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+       regs->pc = ip;
+       regs->npc = regs->pc + 4;
+}
+
 struct kgdb_arch arch_kgdb_ops = {
        /* Breakpoint instruction: ta 0x7d */
        .gdb_bpt_instr          = { 0x91, 0xd0, 0x20, 0x7d },
index 0a2bd0f..768290a 100644 (file)
@@ -181,6 +181,12 @@ void kgdb_arch_exit(void)
 {
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+       regs->tpc = ip;
+       regs->tnpc = regs->tpc + 4;
+}
+
 struct kgdb_arch arch_kgdb_ops = {
        /* Breakpoint instruction: ta 0x72 */
        .gdb_bpt_instr          = { 0x91, 0xd0, 0x20, 0x72 },
index 20bb0e1..ff16756 100644 (file)
@@ -32,6 +32,9 @@
 #define IN     IN1
 #define KEY    %xmm2
 #define IV     %xmm3
+#define BSWAP_MASK %xmm10
+#define CTR    %xmm11
+#define INC    %xmm12
 
 #define KEYP   %rdi
 #define OUTP   %rsi
@@ -42,6 +45,7 @@
 #define T1     %r10
 #define TKEYP  T1
 #define T2     %r11
+#define TCTR_LOW T2
 
 _key_expansion_128:
 _key_expansion_256a:
@@ -724,3 +728,114 @@ ENTRY(aesni_cbc_dec)
        movups IV, (IVP)
 .Lcbc_dec_just_ret:
        ret
+
+.align 16
+.Lbswap_mask:
+       .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/*
+ * _aesni_inc_init:    internal ABI
+ *     setup registers used by _aesni_inc
+ * input:
+ *     IV
+ * output:
+ *     CTR:    == IV, in little endian
+ *     TCTR_LOW: == lower qword of CTR
+ *     INC:    == 1, in little endian
+ *     BSWAP_MASK == endian swapping mask
+ */
+_aesni_inc_init:
+       movaps .Lbswap_mask, BSWAP_MASK
+       movaps IV, CTR
+       PSHUFB_XMM BSWAP_MASK CTR
+       mov $1, TCTR_LOW
+       MOVQ_R64_XMM TCTR_LOW INC
+       MOVQ_R64_XMM CTR TCTR_LOW
+       ret
+
+/*
+ * _aesni_inc:         internal ABI
+ *     Increase IV by 1, IV is in big endian
+ * input:
+ *     IV
+ *     CTR:    == IV, in little endian
+ *     TCTR_LOW: == lower qword of CTR
+ *     INC:    == 1, in little endian
+ *     BSWAP_MASK == endian swapping mask
+ * output:
+ *     IV:     Increased by 1
+ * changed:
+ *     CTR:    == output IV, in little endian
+ *     TCTR_LOW: == lower qword of CTR
+ */
+_aesni_inc:
+       paddq INC, CTR
+       add $1, TCTR_LOW
+       jnc .Linc_low
+       pslldq $8, INC
+       paddq INC, CTR
+       psrldq $8, INC
+.Linc_low:
+       movaps CTR, IV
+       PSHUFB_XMM BSWAP_MASK IV
+       ret
+
+/*
+ * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ *                   size_t len, u8 *iv)
+ */
+ENTRY(aesni_ctr_enc)
+       cmp $16, LEN
+       jb .Lctr_enc_just_ret
+       mov 480(KEYP), KLEN
+       movups (IVP), IV
+       call _aesni_inc_init
+       cmp $64, LEN
+       jb .Lctr_enc_loop1
+.align 4
+.Lctr_enc_loop4:
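+       /* Keep four independent counter blocks in flight and encrypt
+        * them with a single _aesni_enc4 call so the AES rounds of the
+        * four blocks can pipeline.
+        */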
+       movaps IV, STATE1
+       call _aesni_inc
+       movups (INP), IN1
+       movaps IV, STATE2
+       call _aesni_inc
+       movups 0x10(INP), IN2
+       movaps IV, STATE3
+       call _aesni_inc
+       movups 0x20(INP), IN3
+       movaps IV, STATE4
+       call _aesni_inc
+       movups 0x30(INP), IN4
+       call _aesni_enc4
+       pxor IN1, STATE1
+       movups STATE1, (OUTP)
+       pxor IN2, STATE2
+       movups STATE2, 0x10(OUTP)
+       pxor IN3, STATE3
+       movups STATE3, 0x20(OUTP)
+       pxor IN4, STATE4
+       movups STATE4, 0x30(OUTP)
+       sub $64, LEN
+       add $64, INP
+       add $64, OUTP
+       cmp $64, LEN
+       jge .Lctr_enc_loop4
+       cmp $16, LEN
+       jb .Lctr_enc_ret
+.align 4
+.Lctr_enc_loop1:
+       movaps IV, STATE
+       call _aesni_inc
+       movups (INP), IN
+       call _aesni_enc1
+       pxor IN, STATE
+       movups STATE, (OUTP)
+       sub $16, LEN
+       add $16, INP
+       add $16, OUTP
+       cmp $16, LEN
+       jge .Lctr_enc_loop1
+.Lctr_enc_ret:
+       movups IV, (IVP)
+.Lctr_enc_just_ret:
+       ret
index 49c552c..2cb3dcc 100644 (file)
@@ -18,6 +18,7 @@
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
 #include <crypto/cryptd.h>
+#include <crypto/ctr.h>
 #include <asm/i387.h>
 #include <asm/aes.h>
 
@@ -58,6 +59,8 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len, u8 *iv);
 
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
@@ -321,6 +324,72 @@ static struct crypto_alg blk_cbc_alg = {
        },
 };
 
+static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
+                           struct blkcipher_walk *walk)
+{
+       u8 *ctrblk = walk->iv;
+       u8 keystream[AES_BLOCK_SIZE];
+       u8 *src = walk->src.virt.addr;
+       u8 *dst = walk->dst.virt.addr;
+       unsigned int nbytes = walk->nbytes;
+
+       aesni_enc(ctx, keystream, ctrblk);
+       crypto_xor(keystream, src, nbytes);
+       memcpy(dst, keystream, nbytes);
+       crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
+static int ctr_crypt(struct blkcipher_desc *desc,
+                    struct scatterlist *dst, struct scatterlist *src,
+                    unsigned int nbytes)
+{
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       kernel_fpu_begin();
+       while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+               aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                             nbytes & AES_BLOCK_MASK, walk.iv);
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       if (walk.nbytes) {
+               ctr_crypt_final(ctx, &walk);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       kernel_fpu_end();
+
+       return err;
+}
+
+static struct crypto_alg blk_ctr_alg = {
+       .cra_name               = "__ctr-aes-aesni",
+       .cra_driver_name        = "__driver-ctr-aes-aesni",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .ivsize         = AES_BLOCK_SIZE,
+                       .setkey         = aes_set_key,
+                       .encrypt        = ctr_crypt,
+                       .decrypt        = ctr_crypt,
+               },
+       },
+};
+
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int key_len)
 {
@@ -467,13 +536,11 @@ static struct crypto_alg ablk_cbc_alg = {
        },
 };
 
-#ifdef HAS_CTR
 static int ablk_ctr_init(struct crypto_tfm *tfm)
 {
        struct cryptd_ablkcipher *cryptd_tfm;
 
-       cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))",
-                                            0, 0);
+       cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ablk_init_common(tfm, cryptd_tfm);
@@ -500,11 +567,50 @@ static struct crypto_alg ablk_ctr_alg = {
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
-                       .decrypt        = ablk_decrypt,
+                       .decrypt        = ablk_encrypt,
                        .geniv          = "chainiv",
                },
        },
 };
+
+#ifdef HAS_CTR
+static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
+{
+       struct cryptd_ablkcipher *cryptd_tfm;
+
+       cryptd_tfm = cryptd_alloc_ablkcipher(
+               "rfc3686(__driver-ctr-aes-aesni)", 0, 0);
+       if (IS_ERR(cryptd_tfm))
+               return PTR_ERR(cryptd_tfm);
+       ablk_init_common(tfm, cryptd_tfm);
+       return 0;
+}
+
+static struct crypto_alg ablk_rfc3686_ctr_alg = {
+       .cra_name               = "rfc3686(ctr(aes))",
+       .cra_driver_name        = "rfc3686-ctr-aes-aesni",
+       .cra_priority           = 400,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct async_aes_ctx),
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
+       .cra_init               = ablk_rfc3686_ctr_init,
+       .cra_exit               = ablk_exit,
+       .cra_u = {
+               .ablkcipher = {
+                       .min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
+                       .max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
+                       .ivsize      = CTR_RFC3686_IV_SIZE,
+                       .setkey      = ablk_set_key,
+                       .encrypt     = ablk_encrypt,
+                       .decrypt     = ablk_decrypt,
+                       .geniv       = "seqiv",
+               },
+       },
+};
 #endif
 
 #ifdef HAS_LRW
@@ -640,13 +746,17 @@ static int __init aesni_init(void)
                goto blk_ecb_err;
        if ((err = crypto_register_alg(&blk_cbc_alg)))
                goto blk_cbc_err;
+       if ((err = crypto_register_alg(&blk_ctr_alg)))
+               goto blk_ctr_err;
        if ((err = crypto_register_alg(&ablk_ecb_alg)))
                goto ablk_ecb_err;
        if ((err = crypto_register_alg(&ablk_cbc_alg)))
                goto ablk_cbc_err;
-#ifdef HAS_CTR
        if ((err = crypto_register_alg(&ablk_ctr_alg)))
                goto ablk_ctr_err;
+#ifdef HAS_CTR
+       if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
+               goto ablk_rfc3686_ctr_err;
 #endif
 #ifdef HAS_LRW
        if ((err = crypto_register_alg(&ablk_lrw_alg)))
@@ -675,13 +785,17 @@ ablk_pcbc_err:
 ablk_lrw_err:
 #endif
 #ifdef HAS_CTR
+       crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
+ablk_rfc3686_ctr_err:
+#endif
        crypto_unregister_alg(&ablk_ctr_alg);
 ablk_ctr_err:
-#endif
        crypto_unregister_alg(&ablk_cbc_alg);
 ablk_cbc_err:
        crypto_unregister_alg(&ablk_ecb_alg);
 ablk_ecb_err:
+       crypto_unregister_alg(&blk_ctr_alg);
+blk_ctr_err:
        crypto_unregister_alg(&blk_cbc_alg);
 blk_cbc_err:
        crypto_unregister_alg(&blk_ecb_alg);
@@ -705,10 +819,12 @@ static void __exit aesni_exit(void)
        crypto_unregister_alg(&ablk_lrw_alg);
 #endif
 #ifdef HAS_CTR
-       crypto_unregister_alg(&ablk_ctr_alg);
+       crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 #endif
+       crypto_unregister_alg(&ablk_ctr_alg);
        crypto_unregister_alg(&ablk_cbc_alg);
        crypto_unregister_alg(&ablk_ecb_alg);
+       crypto_unregister_alg(&blk_ctr_alg);
        crypto_unregister_alg(&blk_cbc_alg);
        crypto_unregister_alg(&blk_ecb_alg);
        crypto_unregister_alg(&__aesni_alg);
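
A hedged sketch of driving the "ctr(aes)" algorithm registered above through
the async blkcipher API; the key/IV values are placeholders, allocation-error
handling is abbreviated, and a real caller must complete -EINPROGRESS/-EBUSY
results via the request callback:

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>
	#include <linux/err.h>

	static int demo_ctr_aes(void)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct scatterlist sg;
		static u8 buf[64];
		u8 key[16] = { 0 };	/* placeholder key */
		u8 iv[16] = { 0 };	/* placeholder counter block */
		int ret;

		tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		crypto_ablkcipher_setkey(tfm, key, sizeof(key));

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		sg_init_one(&sg, buf, sizeof(buf));
		ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

		ret = crypto_ablkcipher_encrypt(req);	/* == decrypt for CTR */

		ablkcipher_request_free(req);
		crypto_free_ablkcipher(tfm);
		return ret;
	}
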
index c70068d..63e35ec 100644 (file)
@@ -145,9 +145,11 @@ int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
 int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
 int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
 int set_pages_array_wb(struct page **pages, int addrinarray);
 
 /*
index 14cf526..280bf7f 100644 (file)
@@ -7,7 +7,66 @@
 
 #ifdef __ASSEMBLY__
 
+#define REG_NUM_INVALID                100
+
+#define REG_TYPE_R64           0
+#define REG_TYPE_XMM           1
+#define REG_TYPE_INVALID       100
+
+       .macro R64_NUM opd r64
+       \opd = REG_NUM_INVALID
+       .ifc \r64,%rax
+       \opd = 0
+       .endif
+       .ifc \r64,%rcx
+       \opd = 1
+       .endif
+       .ifc \r64,%rdx
+       \opd = 2
+       .endif
+       .ifc \r64,%rbx
+       \opd = 3
+       .endif
+       .ifc \r64,%rsp
+       \opd = 4
+       .endif
+       .ifc \r64,%rbp
+       \opd = 5
+       .endif
+       .ifc \r64,%rsi
+       \opd = 6
+       .endif
+       .ifc \r64,%rdi
+       \opd = 7
+       .endif
+       .ifc \r64,%r8
+       \opd = 8
+       .endif
+       .ifc \r64,%r9
+       \opd = 9
+       .endif
+       .ifc \r64,%r10
+       \opd = 10
+       .endif
+       .ifc \r64,%r11
+       \opd = 11
+       .endif
+       .ifc \r64,%r12
+       \opd = 12
+       .endif
+       .ifc \r64,%r13
+       \opd = 13
+       .endif
+       .ifc \r64,%r14
+       \opd = 14
+       .endif
+       .ifc \r64,%r15
+       \opd = 15
+       .endif
+       .endm
+
        .macro XMM_NUM opd xmm
+       \opd = REG_NUM_INVALID
        .ifc \xmm,%xmm0
        \opd = 0
        .endif
        .endif
        .endm
 
+       .macro REG_TYPE type reg
+       R64_NUM reg_type_r64 \reg
+       XMM_NUM reg_type_xmm \reg
+       .if reg_type_r64 <> REG_NUM_INVALID
+       \type = REG_TYPE_R64
+       .elseif reg_type_xmm <> REG_NUM_INVALID
+       \type = REG_TYPE_XMM
+       .else
+       \type = REG_TYPE_INVALID
+       .endif
+       .endm
+
        .macro PFX_OPD_SIZE
        .byte 0x66
        .endm
 
-       .macro PFX_REX opd1 opd2
-       .if (\opd1 | \opd2) & 8
-       .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1)
+       .macro PFX_REX opd1 opd2 W=0
+       .if ((\opd1 | \opd2) & 8) || \W
+       .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
        .endif
        .endm
 
        .byte 0x0f, 0x38, 0xdf
        MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
        .endm
+
+       .macro MOVQ_R64_XMM opd1 opd2
+       REG_TYPE movq_r64_xmm_opd1_type \opd1
+       .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+       XMM_NUM movq_r64_xmm_opd1 \opd1
+       R64_NUM movq_r64_xmm_opd2 \opd2
+       .else
+       R64_NUM movq_r64_xmm_opd1 \opd1
+       XMM_NUM movq_r64_xmm_opd2 \opd2
+       .endif
+       PFX_OPD_SIZE
+       PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
+       .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+       .byte 0x0f, 0x7e
+       .else
+       .byte 0x0f, 0x6e
+       .endif
+       MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
+       .endm
 #endif
 
 #endif
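
A worked example, derived by hand from the macros above (treat the exact
bytes as illustrative): MOVQ_R64_XMM %r11 %xmm12, the TCTR_LOW/INC case in
_aesni_inc_init earlier in this series, takes the general-register branch.
It emits 0x66 from PFX_OPD_SIZE, REX 0x4d from PFX_REX (W=1 plus the R and B
bits for the two high register numbers), the 0x0f 0x6e opcode, and ModRM
0xe3 from MODRM 0xc0; together that decodes as movq %r11, %xmm12.
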
index e6c6c80..006da36 100644 (file)
@@ -76,4 +76,7 @@ static inline void arch_kgdb_breakpoint(void)
 #define BREAK_INSTR_SIZE       1
 #define CACHE_FLUSH_IS_SAFE    1
 
+extern int kgdb_ll_trap(int cmd, const char *str,
+                       struct pt_regs *regs, long err, int trap, int sig);
+
 #endif /* _ASM_X86_KGDB_H */
index 5a51379..7e5c6a6 100644 (file)
@@ -789,6 +789,8 @@ static inline void wbinvd_halt(void)
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
+extern void early_trap_init(void);
+
 /* Defined in head.S */
 extern struct desc_ptr         early_gdt_descr;
 
index c1c00d0..cc83a00 100644 (file)
@@ -1084,6 +1084,20 @@ static void clear_all_debug_regs(void)
        }
 }
 
+#ifdef CONFIG_KGDB
+/*
+ * Restore debug regs if using kgdbwait and you have a kernel debugger
+ * connection established.
+ */
+static void dbg_restore_debug_regs(void)
+{
+       if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
+               arch_kgdb_ops.correct_hw_break();
+}
+#else /* ! CONFIG_KGDB */
+#define dbg_restore_debug_regs()
+#endif /* ! CONFIG_KGDB */
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
@@ -1174,18 +1188,8 @@ void __cpuinit cpu_init(void)
        load_TR_desc();
        load_LDT(&init_mm.context);
 
-#ifdef CONFIG_KGDB
-       /*
-        * If the kgdb is connected no debug regs should be altered.  This
-        * is only applicable when KGDB and a KGDB I/O module are built
-        * into the kernel and you are using early debugging with
-        * kgdbwait. KGDB will control the kernel HW breakpoint registers.
-        */
-       if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
-               arch_kgdb_ops.correct_hw_break();
-       else
-#endif
-               clear_all_debug_regs();
+       clear_all_debug_regs();
+       dbg_restore_debug_regs();
 
        fpu_init();
 
@@ -1239,6 +1243,7 @@ void __cpuinit cpu_init(void)
 #endif
 
        clear_all_debug_regs();
+       dbg_restore_debug_regs();
 
        /*
         * Force FPU initialization:
index b9c830c..fa99bae 100644 (file)
@@ -41,6 +41,14 @@ static void early_vga_write(struct console *con, const char *str, unsigned n)
                                writew(0x720, VGABASE + 2*(max_xpos*j + i));
                        current_ypos = max_ypos-1;
                }
+#ifdef CONFIG_KGDB_KDB
+               if (c == '\b') {
+                       if (current_xpos > 0)
+                               current_xpos--;
+               } else if (c == '\r') {
+                       current_xpos = 0;
+               } else
+#endif
                if (c == '\n') {
                        current_xpos = 0;
                        current_ypos++;
index b2258ca..4f4af75 100644 (file)
 #include <asm/debugreg.h>
 #include <asm/apicdef.h>
 #include <asm/system.h>
-
 #include <asm/apic.h>
 
-/*
- * Put the error code here just in case the user cares:
- */
-static int gdb_x86errcode;
-
-/*
- * Likewise, the vector number here (since GDB only gets the signal
- * number through the usual means, and that's not very specific):
- */
-static int gdb_x86vector = -1;
-
 /**
  *     pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
  *     @gdb_regs: A pointer to hold the registers in the order GDB wants.
@@ -211,6 +199,8 @@ static struct hw_breakpoint {
        struct perf_event       **pev;
 } breakinfo[4];
 
+static unsigned long early_dr7;
+
 static void kgdb_correct_hw_break(void)
 {
        int breakno;
@@ -222,6 +212,14 @@ static void kgdb_correct_hw_break(void)
                int cpu = raw_smp_processor_id();
                if (!breakinfo[breakno].enabled)
                        continue;
+               if (dbg_is_early) {
+                       set_debugreg(breakinfo[breakno].addr, breakno);
+                       early_dr7 |= encode_dr7(breakno,
+                                               breakinfo[breakno].len,
+                                               breakinfo[breakno].type);
+                       set_debugreg(early_dr7, 7);
+                       continue;
+               }
                bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
                info = counter_arch_bp(bp);
                if (bp->attr.disabled != 1)
@@ -236,7 +234,8 @@ static void kgdb_correct_hw_break(void)
                if (!val)
                        bp->attr.disabled = 0;
        }
-       hw_breakpoint_restore();
+       if (!dbg_is_early)
+               hw_breakpoint_restore();
 }
 
 static int hw_break_reserve_slot(int breakno)
@@ -245,6 +244,9 @@ static int hw_break_reserve_slot(int breakno)
        int cnt = 0;
        struct perf_event **pevent;
 
+       if (dbg_is_early)
+               return 0;
+
        for_each_online_cpu(cpu) {
                cnt++;
                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
@@ -270,6 +272,9 @@ static int hw_break_release_slot(int breakno)
        struct perf_event **pevent;
        int cpu;
 
+       if (dbg_is_early)
+               return 0;
+
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
                if (dbg_release_bp_slot(*pevent))
@@ -314,7 +319,11 @@ static void kgdb_remove_all_hw_break(void)
                bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
                if (bp->attr.disabled == 1)
                        continue;
-               arch_uninstall_hw_breakpoint(bp);
+               if (dbg_is_early)
+                       early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
+                                                breakinfo[i].type);
+               else
+                       arch_uninstall_hw_breakpoint(bp);
                bp->attr.disabled = 1;
        }
 }
@@ -391,6 +400,11 @@ void kgdb_disable_hw_debug(struct pt_regs *regs)
        for (i = 0; i < 4; i++) {
                if (!breakinfo[i].enabled)
                        continue;
+               if (dbg_is_early) {
+                       early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
+                                                breakinfo[i].type);
+                       continue;
+               }
                bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
                if (bp->attr.disabled == 1)
                        continue;
@@ -399,23 +413,6 @@ void kgdb_disable_hw_debug(struct pt_regs *regs)
        }
 }
 
-/**
- *     kgdb_post_primary_code - Save error vector/code numbers.
- *     @regs: Original pt_regs.
- *     @e_vector: Original error vector.
- *     @err_code: Original error code.
- *
- *     This is needed on architectures which support SMP and KGDB.
- *     This function is called after all the slave cpus have been put
- *     to a know spin state and the primary CPU has control over KGDB.
- */
-void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
-       /* primary processor is completely in the debugger */
-       gdb_x86vector = e_vector;
-       gdb_x86errcode = err_code;
-}
-
 #ifdef CONFIG_SMP
 /**
  *     kgdb_roundup_cpus - Get other CPUs into a holding pattern
@@ -567,7 +564,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
                        return NOTIFY_DONE;
        }
 
-       if (kgdb_handle_exception(args->trapnr, args->signr, args->err, regs))
+       if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
                return NOTIFY_DONE;
 
        /* Must touch watchdog before return to normal operation */
@@ -575,6 +572,26 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
        return NOTIFY_STOP;
 }
 
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
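+/*
+ * Hand-rolled die_args so low-level traps (see the do_int3() hook below)
+ * can reach kgdb without going through the notifier chain.
+ */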
+int kgdb_ll_trap(int cmd, const char *str,
+                struct pt_regs *regs, long err, int trap, int sig)
+{
+       struct die_args args = {
+               .regs   = regs,
+               .str    = str,
+               .err    = err,
+               .trapnr = trap,
+               .signr  = sig,
+       };
+
+       if (!kgdb_io_module_registered)
+               return NOTIFY_DONE;
+
+       return __kgdb_notify(&args, cmd);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
 static int
 kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
 {
@@ -604,15 +621,16 @@ static struct notifier_block kgdb_notifier = {
  *     specific callbacks.
  */
 int kgdb_arch_init(void)
+{
+       return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_late(void)
 {
        int i, cpu;
-       int ret;
        struct perf_event_attr attr;
        struct perf_event **pevent;
 
-       ret = register_die_notifier(&kgdb_notifier);
-       if (ret != 0)
-               return ret;
        /*
        * Pre-allocate the hw breakpoint structures in the non-atomic
        * portion of kgdb because this operation requires mutexes to
@@ -624,12 +642,15 @@ int kgdb_arch_init(void)
        attr.bp_type = HW_BREAKPOINT_W;
        attr.disabled = 1;
        for (i = 0; i < 4; i++) {
+               if (breakinfo[i].pev)
+                       continue;
                breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
                if (IS_ERR(breakinfo[i].pev)) {
-                       printk(KERN_ERR "kgdb: Could not allocate hw breakpoints\n");
+                       printk(KERN_ERR "kgdb: Could not allocate hw"
+                              "breakpoints\nDisabling the kernel debugger\n");
                        breakinfo[i].pev = NULL;
                        kgdb_arch_exit();
-                       return -1;
+                       return;
                }
                for_each_online_cpu(cpu) {
                        pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
@@ -640,7 +661,6 @@ int kgdb_arch_init(void)
                        }
                }
        }
-       return ret;
 }
 
 /**
@@ -690,6 +710,11 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
        return instruction_pointer(regs);
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+       regs->ip = ip;
+}
+
 struct kgdb_arch arch_kgdb_ops = {
        /* Breakpoint instruction: */
        .gdb_bpt_instr          = { 0xcc },
index c4851ef..e802989 100644 (file)
@@ -725,6 +725,7 @@ void __init setup_arch(char **cmdline_p)
        /* VMI may relocate the fixmap; do this before touching ioremap area */
        vmi_init();
 
+       early_trap_init();
        early_cpu_init();
        early_ioremap_init();
 
index 02cfb9b..142d70c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/kgdb.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
@@ -451,6 +452,11 @@ void restart_nmi(void)
 /* May run on IST stack. */
 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 {
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+       if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+                       == NOTIFY_STOP)
+               return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 #ifdef CONFIG_KPROBES
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
@@ -802,6 +808,16 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 }
 #endif
 
+/* Set of traps needed for early debugging. */
+void __init early_trap_init(void)
+{
+       set_intr_gate_ist(1, &debug, DEBUG_STACK);
+       /* int3 can be called from all */
+       set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
+       set_intr_gate(14, &page_fault);
+       load_idt(&idt_descr);
+}
+
 void __init trap_init(void)
 {
        int i;
@@ -815,10 +831,7 @@ void __init trap_init(void)
 #endif
 
        set_intr_gate(0, &divide_error);
-       set_intr_gate_ist(1, &debug, DEBUG_STACK);
        set_intr_gate_ist(2, &nmi, NMI_STACK);
-       /* int3 can be called from all */
-       set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
        /* int4 can be called from all */
        set_system_intr_gate(4, &overflow);
        set_intr_gate(5, &bounds);
@@ -834,7 +847,6 @@ void __init trap_init(void)
        set_intr_gate(11, &segment_not_present);
        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(13, &general_protection);
-       set_intr_gate(14, &page_fault);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
index 28195c3..532e793 100644 (file)
@@ -997,7 +997,8 @@ out_err:
 }
 EXPORT_SYMBOL(set_memory_uc);
 
-int set_memory_array_uc(unsigned long *addr, int addrinarray)
+int _set_memory_array(unsigned long *addr, int addrinarray,
+               unsigned long new_type)
 {
        int i, j;
        int ret;
@@ -1007,13 +1008,19 @@ int set_memory_array_uc(unsigned long *addr, int addrinarray)
         */
        for (i = 0; i < addrinarray; i++) {
                ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
-                                       _PAGE_CACHE_UC_MINUS, NULL);
+                                       new_type, NULL);
                if (ret)
                        goto out_free;
        }
 
        ret = change_page_attr_set(addr, addrinarray,
                                    __pgprot(_PAGE_CACHE_UC_MINUS), 1);
+
+       if (!ret && new_type == _PAGE_CACHE_WC)
+               ret = change_page_attr_set_clr(addr, addrinarray,
+                                              __pgprot(_PAGE_CACHE_WC),
+                                              __pgprot(_PAGE_CACHE_MASK),
+                                              0, CPA_ARRAY, NULL);
        if (ret)
                goto out_free;
 
@@ -1025,8 +1032,19 @@ out_free:
 
        return ret;
 }
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray)
+{
+       return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
 EXPORT_SYMBOL(set_memory_array_uc);
 
+int set_memory_array_wc(unsigned long *addr, int addrinarray)
+{
+       return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_memory_array_wc);
+
 int _set_memory_wc(unsigned long addr, int numpages)
 {
        int ret;
@@ -1153,26 +1171,34 @@ int set_pages_uc(struct page *page, int numpages)
 }
 EXPORT_SYMBOL(set_pages_uc);
 
-int set_pages_array_uc(struct page **pages, int addrinarray)
+static int _set_pages_array(struct page **pages, int addrinarray,
+               unsigned long new_type)
 {
        unsigned long start;
        unsigned long end;
        int i;
        int free_idx;
+       int ret;
 
        for (i = 0; i < addrinarray; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
-               if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
+               if (reserve_memtype(start, end, new_type, NULL))
                        goto err_out;
        }
 
-       if (cpa_set_pages_array(pages, addrinarray,
-                       __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
-               return 0; /* Success */
-       }
+       ret = cpa_set_pages_array(pages, addrinarray,
+                       __pgprot(_PAGE_CACHE_UC_MINUS));
+       if (!ret && new_type == _PAGE_CACHE_WC)
+               ret = change_page_attr_set_clr(NULL, addrinarray,
+                                              __pgprot(_PAGE_CACHE_WC),
+                                              __pgprot(_PAGE_CACHE_MASK),
+                                              0, CPA_PAGES_ARRAY, pages);
+       if (ret)
+               goto err_out;
+       return 0; /* Success */
 err_out:
        free_idx = i;
        for (i = 0; i < free_idx; i++) {
@@ -1184,8 +1210,19 @@ err_out:
        }
        return -EINVAL;
 }
+
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+       return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
 EXPORT_SYMBOL(set_pages_array_uc);
 
+int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+       return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_pages_array_wc);
+
 int set_pages_wb(struct page *page, int numpages)
 {
        unsigned long addr = (unsigned long)page_address(page);
index fe980da..98a6610 100644 (file)
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 
+#include <crypto/scatterwalk.h>
+
 #include "internal.h"
 
 static const char *skcipher_default_geniv __read_mostly;
 
+struct ablkcipher_buffer {
+       struct list_head        entry;
+       struct scatter_walk     dst;
+       unsigned int            len;
+       void                    *data;
+};
+
+enum {
+       ABLKCIPHER_WALK_SLOW = 1 << 0,
+};
+
+static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
+{
+       scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
+}
+
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+       struct ablkcipher_buffer *p, *tmp;
+
+       list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+               ablkcipher_buffer_write(p);
+               list_del(&p->entry);
+               kfree(p);
+       }
+}
+EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
+
+static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
+                                         struct ablkcipher_buffer *p)
+{
+       p->dst = walk->out;
+       list_add_tail(&p->entry, &walk->buffers);
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+{
+       u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+       return max(start, end_page);
+}
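+/* Example for ablkcipher_get_spot(): with 4 KiB pages, a start 0xff8 bytes
+ * into a page with len 16 would straddle the boundary; end_page then points
+ * at the next page base and max() returns it, so the spot lies wholly
+ * within one page.
+ */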
+
+static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+                                               unsigned int bsize)
+{
+       unsigned int n = bsize;
+
+       for (;;) {
+               unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+
+               if (len_this_page > n)
+                       len_this_page = n;
+               scatterwalk_advance(&walk->out, n);
+               if (n == len_this_page)
+                       break;
+               n -= len_this_page;
+               scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+       }
+
+       return bsize;
+}
+
+static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+                                               unsigned int n)
+{
+       scatterwalk_advance(&walk->in, n);
+       scatterwalk_advance(&walk->out, n);
+
+       return n;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+                               struct ablkcipher_walk *walk);
+
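+/*
+ * Finish the current walk step.  A non-negative 'err' is the number of
+ * bytes the cipher left unprocessed; a negative 'err' aborts the walk.
+ */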
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+                        struct ablkcipher_walk *walk, int err)
+{
+       struct crypto_tfm *tfm = req->base.tfm;
+       unsigned int nbytes = 0;
+
+       if (likely(err >= 0)) {
+               unsigned int n = walk->nbytes - err;
+
+               if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+                       n = ablkcipher_done_fast(walk, n);
+               else if (WARN_ON(err)) {
+                       err = -EINVAL;
+                       goto err;
+               } else
+                       n = ablkcipher_done_slow(walk, n);
+
+               nbytes = walk->total - n;
+               err = 0;
+       }
+
+       scatterwalk_done(&walk->in, 0, nbytes);
+       scatterwalk_done(&walk->out, 1, nbytes);
+
+err:
+       walk->total = nbytes;
+       walk->nbytes = nbytes;
+
+       if (nbytes) {
+               crypto_yield(req->base.flags);
+               return ablkcipher_walk_next(req, walk);
+       }
+
+       if (walk->iv != req->info)
+               memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+       kfree(walk->iv_buffer);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+
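+/*
+ * Slow path: the chunk is misaligned or would straddle a page, so it is
+ * staged through an aligned bounce buffer and written back on completion.
+ */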
+static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
+                                      struct ablkcipher_walk *walk,
+                                      unsigned int bsize,
+                                      unsigned int alignmask,
+                                      void **src_p, void **dst_p)
+{
+       unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
+       struct ablkcipher_buffer *p;
+       void *src, *dst, *base;
+       unsigned int n;
+
+       n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
+       n += (aligned_bsize * 3 - (alignmask + 1) +
+             (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
+
+       p = kmalloc(n, GFP_ATOMIC);
+       if (!p)
+               return ablkcipher_walk_done(req, walk, -ENOMEM);
+
+       base = p + 1;
+
+       dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
+       src = dst = ablkcipher_get_spot(dst, bsize);
+
+       p->len = bsize;
+       p->data = dst;
+
+       scatterwalk_copychunks(src, &walk->in, bsize, 0);
+
+       ablkcipher_queue_write(walk, p);
+
+       walk->nbytes = bsize;
+       walk->flags |= ABLKCIPHER_WALK_SLOW;
+
+       *src_p = src;
+       *dst_p = dst;
+
+       return 0;
+}
+
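+/* Copy a misaligned IV into a freshly allocated, properly aligned buffer. */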
+static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
+                                    struct crypto_tfm *tfm,
+                                    unsigned int alignmask)
+{
+       unsigned bs = walk->blocksize;
+       unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
+       unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+       unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+                           (alignmask + 1);
+       u8 *iv;
+
+       size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+       walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
+       if (!walk->iv_buffer)
+               return -ENOMEM;
+
+       iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
+       iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+       iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+       iv = ablkcipher_get_spot(iv, ivsize);
+
+       walk->iv = memcpy(iv, walk->iv, ivsize);
+       return 0;
+}
+
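+/* Fast path: both scatterlists are aligned, so hand out the pages directly. */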
+static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
+                                      struct ablkcipher_walk *walk)
+{
+       walk->src.page = scatterwalk_page(&walk->in);
+       walk->src.offset = offset_in_page(walk->in.offset);
+       walk->dst.page = scatterwalk_page(&walk->out);
+       walk->dst.offset = offset_in_page(walk->out.offset);
+
+       return 0;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+                               struct ablkcipher_walk *walk)
+{
+       struct crypto_tfm *tfm = req->base.tfm;
+       unsigned int alignmask, bsize, n;
+       void *src, *dst;
+       int err;
+
+       alignmask = crypto_tfm_alg_alignmask(tfm);
+       n = walk->total;
+       if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
+               req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+               return ablkcipher_walk_done(req, walk, -EINVAL);
+       }
+
+       walk->flags &= ~ABLKCIPHER_WALK_SLOW;
+       src = dst = NULL;
+
+       bsize = min(walk->blocksize, n);
+       n = scatterwalk_clamp(&walk->in, n);
+       n = scatterwalk_clamp(&walk->out, n);
+
+       if (n < bsize ||
+           !scatterwalk_aligned(&walk->in, alignmask) ||
+           !scatterwalk_aligned(&walk->out, alignmask)) {
+               err = ablkcipher_next_slow(req, walk, bsize, alignmask,
+                                          &src, &dst);
+               goto set_phys_lowmem;
+       }
+
+       walk->nbytes = n;
+
+       return ablkcipher_next_fast(req, walk);
+
+set_phys_lowmem:
+       if (err >= 0) {
+               walk->src.page = virt_to_page(src);
+               walk->dst.page = virt_to_page(dst);
+               walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
+               walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
+       }
+
+       return err;
+}
+
+static int ablkcipher_walk_first(struct ablkcipher_request *req,
+                                struct ablkcipher_walk *walk)
+{
+       struct crypto_tfm *tfm = req->base.tfm;
+       unsigned int alignmask;
+
+       alignmask = crypto_tfm_alg_alignmask(tfm);
+       if (WARN_ON_ONCE(in_irq()))
+               return -EDEADLK;
+
+       walk->nbytes = walk->total;
+       if (unlikely(!walk->total))
+               return 0;
+
+       walk->iv_buffer = NULL;
+       walk->iv = req->info;
+       if (unlikely(((unsigned long)walk->iv & alignmask))) {
+               int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+               if (err)
+                       return err;
+       }
+
+       scatterwalk_start(&walk->in, walk->in.sg);
+       scatterwalk_start(&walk->out, walk->out.sg);
+
+       return ablkcipher_walk_next(req, walk);
+}
+
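+/* Begin a walk that presents each chunk as a page pointer plus offset. */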
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+                        struct ablkcipher_walk *walk)
+{
+       walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
+       return ablkcipher_walk_first(req, walk);
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
+
 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
 {
index 76fae27..c3cf1a6 100644
@@ -544,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
 {
        int err = -EINVAL;
 
-       if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
+       if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
                goto out;
 
        spawn->frontend = frontend;
index 05eb32e..b9884ee 100644
@@ -181,6 +181,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
        struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       unsigned int cryptlen = req->cryptlen;
 
        if (err)
                goto out;
@@ -196,6 +197,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
                goto out;
 
        authsize = crypto_aead_authsize(authenc);
+       cryptlen -= authsize;
        ihash = ahreq->result + authsize;
        scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
                                 authsize, 0);
@@ -209,7 +211,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
        ablkcipher_request_set_callback(abreq, aead_request_flags(req),
                                        req->base.complete, req->base.data);
        ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-                                    req->cryptlen, req->iv);
+                                    cryptlen, req->iv);
 
        err = crypto_ablkcipher_decrypt(abreq);
 
@@ -228,11 +230,13 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
        struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       unsigned int cryptlen = req->cryptlen;
 
        if (err)
                goto out;
 
        authsize = crypto_aead_authsize(authenc);
+       cryptlen -= authsize;
        ihash = ahreq->result + authsize;
        scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
                                 authsize, 0);
@@ -246,7 +250,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
        ablkcipher_request_set_callback(abreq, aead_request_flags(req),
                                        req->base.complete, req->base.data);
        ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-                                    req->cryptlen, req->iv);
+                                    cryptlen, req->iv);
 
        err = crypto_ablkcipher_decrypt(abreq);
 
index 2d22636..d4384b0 100644
@@ -6,7 +6,7 @@
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
index 8020124..247178c 100644
@@ -315,16 +315,13 @@ out_free_inst:
        goto out;
 }
 
-static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
+static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
+                                                u32 type, u32 mask)
 {
        struct crypto_instance *inst;
        struct crypto_alg *alg;
-       struct crypto_attr_type *algt;
-
-       algt = crypto_get_attr_type(tb);
 
-       alg = crypto_get_attr_alg(tb, algt->type,
-                                 (algt->mask & CRYPTO_ALG_TYPE_MASK));
+       alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
        if (IS_ERR(alg))
                return ERR_CAST(alg);
 
@@ -365,7 +362,7 @@ static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
 
        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
-               return pcrypt_alloc_aead(tb);
+               return pcrypt_alloc_aead(tb, algt->type, algt->mask);
        }
 
        return ERR_PTR(-EINVAL);
index 3de89a4..41e529a 100644
@@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
 
 void scatterwalk_done(struct scatter_walk *walk, int out, int more)
 {
-       if (!offset_in_page(walk->offset) || !more)
+       if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
                scatterwalk_pagedone(walk, out, more);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_done);
index 91f7b9d..22fd943 100644
@@ -37,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
        u8 *buffer, *alignbuffer;
        int err;
 
-       absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1));
+       absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
index a351599..3ca68f9 100644
@@ -394,6 +394,17 @@ out:
        return 0;
 }
 
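+/* Initialise the scatterlist over tvmem and fill the pages with 0xff. */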
+static void test_hash_sg_init(struct scatterlist *sg)
+{
+       int i;
+
+       sg_init_table(sg, TVMEMSIZE);
+       for (i = 0; i < TVMEMSIZE; i++) {
+               sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
+               memset(tvmem[i], 0xff, PAGE_SIZE);
+       }
+}
+
 static void test_hash_speed(const char *algo, unsigned int sec,
                            struct hash_speed *speed)
 {
@@ -423,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
                goto out;
        }
 
-       sg_init_table(sg, TVMEMSIZE);
-       for (i = 0; i < TVMEMSIZE; i++) {
-               sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
-               memset(tvmem[i], 0xff, PAGE_SIZE);
-       }
-
+       test_hash_sg_init(sg);
        for (i = 0; speed[i].blen != 0; i++) {
                if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
                        printk(KERN_ERR
@@ -437,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
                        goto out;
                }
 
+               if (speed[i].klen)
+                       crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
+
                printk(KERN_INFO "test%3u "
                       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
                       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -458,6 +467,250 @@ out:
        crypto_free_hash(tfm);
 }
 
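+/* Carries the final status of an async request back to the test loop. */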
+struct tcrypt_result {
+       struct completion completion;
+       int err;
+};
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+       struct tcrypt_result *res = req->data;
+
+       if (err == -EINPROGRESS)
+               return;
+
+       res->err = err;
+       complete(&res->completion);
+}
+
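+/* Run one async hash operation, waiting for completion if it goes async. */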
+static inline int do_one_ahash_op(struct ahash_request *req, int ret)
+{
+       if (ret == -EINPROGRESS || ret == -EBUSY) {
+               struct tcrypt_result *tr = req->base.data;
+
+               ret = wait_for_completion_interruptible(&tr->completion);
+               if (!ret)
+                       ret = tr->err;
+               INIT_COMPLETION(tr->completion);
+       }
+       return ret;
+}
+
+static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
+                                    char *out, int sec)
+{
+       unsigned long start, end;
+       int bcount;
+       int ret;
+
+       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+            time_before(jiffies, end); bcount++) {
+               ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+               if (ret)
+                       return ret;
+       }
+
+       printk("%6u opers/sec, %9lu bytes/sec\n",
+              bcount / sec, ((long)bcount * blen) / sec);
+
+       return 0;
+}
+
+static int test_ahash_jiffies(struct ahash_request *req, int blen,
+                             int plen, char *out, int sec)
+{
+       unsigned long start, end;
+       int bcount, pcount;
+       int ret;
+
+       if (plen == blen)
+               return test_ahash_jiffies_digest(req, blen, out, sec);
+
+       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+            time_before(jiffies, end); bcount++) {
+               ret = crypto_ahash_init(req);
+               if (ret)
+                       return ret;
+               for (pcount = 0; pcount < blen; pcount += plen) {
+                       ret = do_one_ahash_op(req, crypto_ahash_update(req));
+                       if (ret)
+                               return ret;
+               }
+               /* we assume there is enough space in 'out' for the result */
+               ret = do_one_ahash_op(req, crypto_ahash_final(req));
+               if (ret)
+                       return ret;
+       }
+
+       pr_cont("%6u opers/sec, %9lu bytes/sec\n",
+               bcount / sec, ((long)bcount * blen) / sec);
+
+       return 0;
+}
+
+static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
+                                   char *out)
+{
+       unsigned long cycles = 0;
+       int ret, i;
+
+       /* Warm-up run. */
+       for (i = 0; i < 4; i++) {
+               ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+               if (ret)
+                       goto out;
+       }
+
+       /* The real thing. */
+       for (i = 0; i < 8; i++) {
+               cycles_t start, end;
+
+               start = get_cycles();
+
+               ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+               if (ret)
+                       goto out;
+
+               end = get_cycles();
+
+               cycles += end - start;
+       }
+
+out:
+       if (ret)
+               return ret;
+
+       pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+               cycles / 8, cycles / (8 * blen));
+
+       return 0;
+}
+
+static int test_ahash_cycles(struct ahash_request *req, int blen,
+                            int plen, char *out)
+{
+       unsigned long cycles = 0;
+       int i, pcount, ret;
+
+       if (plen == blen)
+               return test_ahash_cycles_digest(req, blen, out);
+
+       /* Warm-up run. */
+       for (i = 0; i < 4; i++) {
+               ret = crypto_ahash_init(req);
+               if (ret)
+                       goto out;
+               for (pcount = 0; pcount < blen; pcount += plen) {
+                       ret = do_one_ahash_op(req, crypto_ahash_update(req));
+                       if (ret)
+                               goto out;
+               }
+               ret = do_one_ahash_op(req, crypto_ahash_final(req));
+               if (ret)
+                       goto out;
+       }
+
+       /* The real thing. */
+       for (i = 0; i < 8; i++) {
+               cycles_t start, end;
+
+               start = get_cycles();
+
+               ret = crypto_ahash_init(req);
+               if (ret)
+                       goto out;
+               for (pcount = 0; pcount < blen; pcount += plen) {
+                       ret = do_one_ahash_op(req, crypto_ahash_update(req));
+                       if (ret)
+                               goto out;
+               }
+               ret = do_one_ahash_op(req, crypto_ahash_final(req));
+               if (ret)
+                       goto out;
+
+               end = get_cycles();
+
+               cycles += end - start;
+       }
+
+out:
+       if (ret)
+               return ret;
+
+       pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+               cycles / 8, cycles / (8 * blen));
+
+       return 0;
+}
+
+static void test_ahash_speed(const char *algo, unsigned int sec,
+                            struct hash_speed *speed)
+{
+       struct scatterlist sg[TVMEMSIZE];
+       struct tcrypt_result tresult;
+       struct ahash_request *req;
+       struct crypto_ahash *tfm;
+       static char output[1024];
+       int i, ret;
+
+       printk(KERN_INFO "\ntesting speed of async %s\n", algo);
+
+       tfm = crypto_alloc_ahash(algo, 0, 0);
+       if (IS_ERR(tfm)) {
+               pr_err("failed to load transform for %s: %ld\n",
+                      algo, PTR_ERR(tfm));
+               return;
+       }
+
+       if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
+               pr_err("digestsize(%u) > outputbuffer(%zu)\n",
+                      crypto_ahash_digestsize(tfm), sizeof(output));
+               goto out;
+       }
+
+       test_hash_sg_init(sg);
+       req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               pr_err("ahash request allocation failure\n");
+               goto out;
+       }
+
+       init_completion(&tresult.completion);
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  tcrypt_complete, &tresult);
+
+       for (i = 0; speed[i].blen != 0; i++) {
+               if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+                       pr_err("template (%u) too big for tvmem (%lu)\n",
+                              speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+                       break;
+               }
+
+               pr_info("test%3u "
+                       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+                       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+
+               ahash_request_set_crypt(req, sg, output, speed[i].plen);
+
+               if (sec)
+                       ret = test_ahash_jiffies(req, speed[i].blen,
+                                                speed[i].plen, output, sec);
+               else
+                       ret = test_ahash_cycles(req, speed[i].blen,
+                                               speed[i].plen, output);
+
+               if (ret) {
+                       pr_err("hashing failed ret=%d\n", ret);
+                       break;
+               }
+       }
+
+       ahash_request_free(req);
+
+out:
+       crypto_free_ahash(tfm);
+}
+
 static void test_available(void)
 {
        char **name = check;
@@ -881,9 +1134,87 @@ static int do_test(int m)
                test_hash_speed("rmd320", sec, generic_hash_speed_template);
                if (mode > 300 && mode < 400) break;
 
+       case 318:
+               test_hash_speed("ghash-generic", sec, hash_speed_template_16);
+               if (mode > 300 && mode < 400) break;
+
        case 399:
                break;
 
+       case 400:
+               /* fall through */
+
+       case 401:
+               test_ahash_speed("md4", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 402:
+               test_ahash_speed("md5", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 403:
+               test_ahash_speed("sha1", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 404:
+               test_ahash_speed("sha256", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 405:
+               test_ahash_speed("sha384", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 406:
+               test_ahash_speed("sha512", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 407:
+               test_ahash_speed("wp256", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 408:
+               test_ahash_speed("wp384", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 409:
+               test_ahash_speed("wp512", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 410:
+               test_ahash_speed("tgr128", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 411:
+               test_ahash_speed("tgr160", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 412:
+               test_ahash_speed("tgr192", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 413:
+               test_ahash_speed("sha224", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 414:
+               test_ahash_speed("rmd128", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 415:
+               test_ahash_speed("rmd160", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 416:
+               test_ahash_speed("rmd256", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 417:
+               test_ahash_speed("rmd320", sec, generic_hash_speed_template);
+               if (mode > 400 && mode < 500) break;
+
+       case 499:
+               break;
+
        case 1000:
                test_available();
                break;
index 966bbfa..10cb925 100644
@@ -25,6 +25,7 @@ struct cipher_speed_template {
 struct hash_speed {
        unsigned int blen;      /* buffer length */
        unsigned int plen;      /* per-update length */
+       unsigned int klen;      /* key length */
 };
 
 /*
@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = {
        {  .blen = 0,   .plen = 0, }
 };
 
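+/* Template for keyed hashes: every entry uses a 16-byte key (e.g. ghash). */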
+static struct hash_speed hash_speed_template_16[] = {
+       { .blen = 16,   .plen = 16,     .klen = 16, },
+       { .blen = 64,   .plen = 16,     .klen = 16, },
+       { .blen = 64,   .plen = 64,     .klen = 16, },
+       { .blen = 256,  .plen = 16,     .klen = 16, },
+       { .blen = 256,  .plen = 64,     .klen = 16, },
+       { .blen = 256,  .plen = 256,    .klen = 16, },
+       { .blen = 1024, .plen = 16,     .klen = 16, },
+       { .blen = 1024, .plen = 256,    .klen = 16, },
+       { .blen = 1024, .plen = 1024,   .klen = 16, },
+       { .blen = 2048, .plen = 16,     .klen = 16, },
+       { .blen = 2048, .plen = 256,    .klen = 16, },
+       { .blen = 2048, .plen = 1024,   .klen = 16, },
+       { .blen = 2048, .plen = 2048,   .klen = 16, },
+       { .blen = 4096, .plen = 16,     .klen = 16, },
+       { .blen = 4096, .plen = 256,    .klen = 16, },
+       { .blen = 4096, .plen = 1024,   .klen = 16, },
+       { .blen = 4096, .plen = 4096,   .klen = 16, },
+       { .blen = 8192, .plen = 16,     .klen = 16, },
+       { .blen = 8192, .plen = 256,    .klen = 16, },
+       { .blen = 8192, .plen = 1024,   .klen = 16, },
+       { .blen = 8192, .plen = 4096,   .klen = 16, },
+       { .blen = 8192, .plen = 8192,   .klen = 16, },
+
+       /* End marker */
+       {  .blen = 0,   .plen = 0,      .klen = 0, }
+};
+
 #endif /* _CRYPTO_TCRYPT_H */
index c494d76..5c8aaa0 100644
@@ -153,8 +153,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
                free_page((unsigned long)buf[i]);
 }
 
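+/* Wait for an async hash operation to finish and return its final status. */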
+static int do_one_async_hash_op(struct ahash_request *req,
+                               struct tcrypt_result *tr,
+                               int ret)
+{
+       if (ret == -EINPROGRESS || ret == -EBUSY) {
+               ret = wait_for_completion_interruptible(&tr->completion);
+               if (!ret)
+                       ret = tr->err;
+               INIT_COMPLETION(tr->completion);
+       }
+       return ret;
+}
+
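+/*
+ * use_digest selects the one-shot digest() path; otherwise the
+ * init/update/final sequence is exercised.
+ */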
 static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
-                    unsigned int tcount)
+                    unsigned int tcount, bool use_digest)
 {
        const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
        unsigned int i, j, k, temp;
@@ -206,23 +219,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
                }
 
                ahash_request_set_crypt(req, sg, result, template[i].psize);
-               ret = crypto_ahash_digest(req);
-               switch (ret) {
-               case 0:
-                       break;
-               case -EINPROGRESS:
-               case -EBUSY:
-                       ret = wait_for_completion_interruptible(
-                               &tresult.completion);
-                       if (!ret && !(ret = tresult.err)) {
-                               INIT_COMPLETION(tresult.completion);
-                               break;
+               if (use_digest) {
+                       ret = do_one_async_hash_op(req, &tresult,
+                                                  crypto_ahash_digest(req));
+                       if (ret) {
+                               pr_err("alg: hash: digest failed on test %d "
+                                      "for %s: ret=%d\n", j, algo, -ret);
+                               goto out;
+                       }
+               } else {
+                       ret = do_one_async_hash_op(req, &tresult,
+                                                  crypto_ahash_init(req));
+                       if (ret) {
+                               pr_err("alt: hash: init failed on test %d "
+                                      "for %s: ret=%d\n", j, algo, -ret);
+                               goto out;
+                       }
+                       ret = do_one_async_hash_op(req, &tresult,
+                                                  crypto_ahash_update(req));
+                       if (ret) {
+                               pr_err("alt: hash: update failed on test %d "
+                                      "for %s: ret=%d\n", j, algo, -ret);
+                               goto out;
+                       }
+                       ret = do_one_async_hash_op(req, &tresult,
+                                                  crypto_ahash_final(req));
+                       if (ret) {
+                               pr_err("alt: hash: final failed on test %d "
+                                      "for %s: ret=%d\n", j, algo, -ret);
+                               goto out;
                        }
-                       /* fall through */
-               default:
-                       printk(KERN_ERR "alg: hash: digest failed on test %d "
-                              "for %s: ret=%d\n", j, algo, -ret);
-                       goto out;
                }
 
                if (memcmp(result, template[i].digest,
@@ -1402,7 +1428,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
                return PTR_ERR(tfm);
        }
 
-       err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count);
+       err = test_hash(tfm, desc->suite.hash.vecs,
+                       desc->suite.hash.count, true);
+       if (!err)
+               err = test_hash(tfm, desc->suite.hash.vecs,
+                               desc->suite.hash.count, false);
 
        crypto_free_ahash(tfm);
        return err;
index fb76517..74e3537 100644
@@ -1669,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
        }
 };
 
-#define VMAC_AES_TEST_VECTORS  1
-static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
+#define VMAC_AES_TEST_VECTORS  8
+static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
                                '\x02', '\x03', '\x02', '\x02',
                                '\x02', '\x04', '\x01', '\x07',
                                '\x04', '\x01', '\x04', '\x03',};
+static char vmac_string2[128] = {'a', 'b', 'c',};
+static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
+                               'a', 'b', 'c', 'a', 'b', 'c',
+                               'a', 'b', 'c', 'a', 'b', 'c',
+                               'a', 'b', 'c', 'a', 'b', 'c',
+                               'a', 'b', 'c', 'a', 'b', 'c',
+                               'a', 'b', 'c', 'a', 'b', 'c',
+                               'a', 'b', 'c', 'a', 'b', 'c',
+                               'a', 'b', 'c', 'a', 'b', 'c',
+                               };
+
 static struct hash_testvec aes_vmac128_tv_template[] = {
        {
+               .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .plaintext = NULL,
+               .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
+               .psize  = 0,
+               .ksize  = 16,
+       }, {
+               .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .plaintext = vmac_string1,
+               .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
+               .psize  = 128,
+               .ksize  = 16,
+       }, {
+               .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .plaintext = vmac_string2,
+               .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
+               .psize  = 128,
+               .ksize  = 16,
+       }, {
                .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-               .plaintext = vmac_string,
-               .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
+               .plaintext = vmac_string3,
+               .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
+               .psize  = 128,
+               .ksize  = 16,
+       }, {
+               .key    = "abcdefghijklmnop",
+               .plaintext = NULL,
+               .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
+               .psize  = 0,
+               .ksize  = 16,
+       }, {
+               .key    = "abcdefghijklmnop",
+               .plaintext = vmac_string1,
+               .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
+               .psize  = 128,
+               .ksize  = 16,
+       }, {
+               .key    = "abcdefghijklmnop",
+               .plaintext = vmac_string2,
+               .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
+               .psize  = 128,
+               .ksize  = 16,
+       }, {
+               .key    = "abcdefghijklmnop",
+               .plaintext = vmac_string3,
+               .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
                .psize  = 128,
                .ksize  = 16,
        },
index 0a9468e..0999274 100644
@@ -43,6 +43,8 @@ const u64 m63   = UINT64_C(0x7fffffffffffffff);  /* 63-bit mask       */
 const u64 m64   = UINT64_C(0xffffffffffffffff);  /* 64-bit mask       */
 const u64 mpoly = UINT64_C(0x1fffffff1fffffff);  /* Poly key mask     */
 
+#define pe64_to_cpup le64_to_cpup              /* Prefer little endian */
+
 #ifdef __LITTLE_ENDIAN
 #define INDEX_HIGH 1
 #define INDEX_LOW 0
@@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff);  /* Poly key mask     */
                int i; u64 th, tl;                                      \
                rh = rl = 0;                                            \
                for (i = 0; i < nw; i += 2) {                           \
-                       MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i],     \
-                               le64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],     \
+                               pe64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
                        ADD128(rh, rl, th, tl);                         \
                }                                                       \
        } while (0)
@@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff);  /* Poly key mask     */
                int i; u64 th, tl;                                      \
                rh1 = rl1 = rh = rl = 0;                                \
                for (i = 0; i < nw; i += 2) {                           \
-                       MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i],     \
-                               le64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],     \
+                               pe64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
                        ADD128(rh, rl, th, tl);                         \
-                       MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2],   \
-                               le64_to_cpup((mp)+i+1)+(kp)[i+3]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],   \
+                               pe64_to_cpup((mp)+i+1)+(kp)[i+3]);      \
                        ADD128(rh1, rl1, th, tl);                       \
                }                                                       \
        } while (0)
@@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff);  /* Poly key mask     */
                int i; u64 th, tl;                                      \
                rh = rl = 0;                                            \
                for (i = 0; i < nw; i += 8) {                           \
-                       MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i],     \
-                               le64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],     \
+                               pe64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
                        ADD128(rh, rl, th, tl);                         \
-                       MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
-                               le64_to_cpup((mp)+i+3)+(kp)[i+3]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+                               pe64_to_cpup((mp)+i+3)+(kp)[i+3]);      \
                        ADD128(rh, rl, th, tl);                         \
-                       MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
-                               le64_to_cpup((mp)+i+5)+(kp)[i+5]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+                               pe64_to_cpup((mp)+i+5)+(kp)[i+5]);      \
                        ADD128(rh, rl, th, tl);                         \
-                       MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
-                               le64_to_cpup((mp)+i+7)+(kp)[i+7]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+                               pe64_to_cpup((mp)+i+7)+(kp)[i+7]);      \
                        ADD128(rh, rl, th, tl);                         \
                }                                                       \
        } while (0)
@@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff);  /* Poly key mask     */
                int i; u64 th, tl;                                      \
                rh1 = rl1 = rh = rl = 0;                                \
                for (i = 0; i < nw; i += 8) {                           \
-                       MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i],     \
-                               le64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],     \
+                               pe64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
                        ADD128(rh, rl, th, tl);                         \
-                       MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2],   \
-                               le64_to_cpup((mp)+i+1)+(kp)[i+3]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],   \
+                               pe64_to_cpup((mp)+i+1)+(kp)[i+3]);      \
                        ADD128(rh1, rl1, th, tl);                       \
-                       MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
-                               le64_to_cpup((mp)+i+3)+(kp)[i+3]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+                               pe64_to_cpup((mp)+i+3)+(kp)[i+3]);      \
                        ADD128(rh, rl, th, tl);                         \
-                       MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
-                               le64_to_cpup((mp)+i+3)+(kp)[i+5]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
+                               pe64_to_cpup((mp)+i+3)+(kp)[i+5]);      \
                        ADD128(rh1, rl1, th, tl);                       \
-                       MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
-                               le64_to_cpup((mp)+i+5)+(kp)[i+5]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+                               pe64_to_cpup((mp)+i+5)+(kp)[i+5]);      \
                        ADD128(rh, rl, th, tl);                         \
-                       MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
-                               le64_to_cpup((mp)+i+5)+(kp)[i+7]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
+                               pe64_to_cpup((mp)+i+5)+(kp)[i+7]);      \
                        ADD128(rh1, rl1, th, tl);                       \
-                       MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
-                               le64_to_cpup((mp)+i+7)+(kp)[i+7]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+                               pe64_to_cpup((mp)+i+7)+(kp)[i+7]);      \
                        ADD128(rh, rl, th, tl);                         \
-                       MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
-                               le64_to_cpup((mp)+i+7)+(kp)[i+9]);      \
+                       MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
+                               pe64_to_cpup((mp)+i+7)+(kp)[i+9]);      \
                        ADD128(rh1, rl1, th, tl);                       \
                }                                                       \
        } while (0)
@@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff);  /* Poly key mask     */
                int i;                                                  \
                rh = rl = t = 0;                                        \
                for (i = 0; i < nw; i += 2)  {                          \
-                       t1 = le64_to_cpup(mp+i) + kp[i];                \
-                       t2 = le64_to_cpup(mp+i+1) + kp[i+1];            \
+                       t1 = pe64_to_cpup(mp+i) + kp[i];                \
+                       t2 = pe64_to_cpup(mp+i+1) + kp[i+1];            \
                        m2 = MUL32(t1 >> 32, t2);                       \
                        m1 = MUL32(t1, t2 >> 32);                       \
                        ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),       \
@@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx)
        ctx->first_block_processed = 0;
 }
 
-static u64 l3hash(u64 p1, u64 p2,
-                       u64 k1, u64 k2, u64 len)
+static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
 {
        u64 rh, rl, t, z = 0;
 
@@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes,
        }
        p = be64_to_cpup(out_p + i);
        h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
-       return p + h;
+       return le64_to_cpu(p + h);
 }
 
 static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
@@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent,
 
 static int vmac_init(struct shash_desc *pdesc)
 {
-       struct crypto_shash *parent = pdesc->tfm;
-       struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-
-       memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
        return 0;
 }
 
index 870f12c..1204909 100644
@@ -178,86 +178,6 @@ struct agp_bridge_data {
 #define PGE_EMPTY(b, p)        (!(p) || (p) == (unsigned long) (b)->scratch_page)
 
 
-/* Intel registers */
-#define INTEL_APSIZE   0xb4
-#define INTEL_ATTBASE  0xb8
-#define INTEL_AGPCTRL  0xb0
-#define INTEL_NBXCFG   0x50
-#define INTEL_ERRSTS   0x91
-
-/* Intel i830 registers */
-#define I830_GMCH_CTRL                 0x52
-#define I830_GMCH_ENABLED              0x4
-#define I830_GMCH_MEM_MASK             0x1
-#define I830_GMCH_MEM_64M              0x1
-#define I830_GMCH_MEM_128M             0
-#define I830_GMCH_GMS_MASK             0x70
-#define I830_GMCH_GMS_DISABLED         0x00
-#define I830_GMCH_GMS_LOCAL            0x10
-#define I830_GMCH_GMS_STOLEN_512       0x20
-#define I830_GMCH_GMS_STOLEN_1024      0x30
-#define I830_GMCH_GMS_STOLEN_8192      0x40
-#define I830_RDRAM_CHANNEL_TYPE                0x03010
-#define I830_RDRAM_ND(x)               (((x) & 0x20) >> 5)
-#define I830_RDRAM_DDT(x)              (((x) & 0x18) >> 3)
-
-/* This one is for I830MP w. an external graphic card */
-#define INTEL_I830_ERRSTS      0x92
-
-/* Intel 855GM/852GM registers */
-#define I855_GMCH_GMS_MASK             0xF0
-#define I855_GMCH_GMS_STOLEN_0M                0x0
-#define I855_GMCH_GMS_STOLEN_1M                (0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M                (0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M                (0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M       (0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M       (0x5 << 4)
-#define I85X_CAPID                     0x44
-#define I85X_VARIANT_MASK              0x7
-#define I85X_VARIANT_SHIFT             5
-#define I855_GME                       0x0
-#define I855_GM                                0x4
-#define I852_GME                       0x2
-#define I852_GM                                0x5
-
-/* Intel i845 registers */
-#define INTEL_I845_AGPM                0x51
-#define INTEL_I845_ERRSTS      0xc8
-
-/* Intel i860 registers */
-#define INTEL_I860_MCHCFG      0x50
-#define INTEL_I860_ERRSTS      0xc8
-
-/* Intel i810 registers */
-#define I810_GMADDR            0x10
-#define I810_MMADDR            0x14
-#define I810_PTE_BASE          0x10000
-#define I810_PTE_MAIN_UNCACHED 0x00000000
-#define I810_PTE_LOCAL         0x00000002
-#define I810_PTE_VALID         0x00000001
-#define I830_PTE_SYSTEM_CACHED  0x00000006
-#define I810_SMRAM_MISCC       0x70
-#define I810_GFX_MEM_WIN_SIZE  0x00010000
-#define I810_GFX_MEM_WIN_32M   0x00010000
-#define I810_GMS               0x000000c0
-#define I810_GMS_DISABLE       0x00000000
-#define I810_PGETBL_CTL                0x2020
-#define I810_PGETBL_ENABLED    0x00000001
-#define I965_PGETBL_SIZE_MASK  0x0000000e
-#define I965_PGETBL_SIZE_512KB (0 << 1)
-#define I965_PGETBL_SIZE_256KB (1 << 1)
-#define I965_PGETBL_SIZE_128KB (2 << 1)
-#define I965_PGETBL_SIZE_1MB   (3 << 1)
-#define I965_PGETBL_SIZE_2MB   (4 << 1)
-#define I965_PGETBL_SIZE_1_5MB (5 << 1)
-#define G33_PGETBL_SIZE_MASK    (3 << 8)
-#define G33_PGETBL_SIZE_1M      (1 << 8)
-#define G33_PGETBL_SIZE_2M      (2 << 8)
-
-#define I810_DRAM_CTL          0x3000
-#define I810_DRAM_ROW_0                0x00000001
-#define I810_DRAM_ROW_0_SDRAM  0x00000001
-
 struct agp_device_ids {
        unsigned short device_id; /* first, to make table easier to read */
        enum chipset_type chipset;
index d2ce68f..fd79351 100644
@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = {
        .aperture_sizes         = ali_generic_sizes,
        .size_type              = U32_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = ali_configure,
        .fetch_size             = ali_fetch_size,
        .cleanup                = ali_cleanup,
index a7637d7..b6b1568 100644
@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
 {
        struct aper_size_info_lvl2 *value;
        struct amd_page_map page_dir;
+       unsigned long __iomem *cur_gatt;
        unsigned long addr;
        int retval;
        u32 temp;
@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
                readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));        /* PCI Posting. */
        }
 
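+       /* Initialise every GATT entry to point at the scratch page. */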
+       for (i = 0; i < value->num_entries; i++) {
+               addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+               cur_gatt = GET_GATT(addr);
+               writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+               readl(cur_gatt+GET_GATT_OFF(addr));     /* PCI Posting. */
+       }
+
        return 0;
 }
 
@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = {
        .aperture_sizes         = amd_irongate_sizes,
        .size_type              = LVL2_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = amd_irongate_configure,
        .fetch_size             = amd_irongate_fetch_size,
        .cleanup                = amd_irongate_cleanup,
index fd50ead..67ea3a6 100644
@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = {
        .aperture_sizes         = amd_8151_sizes,
        .size_type              = U32_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = amd_8151_configure,
        .fetch_size             = amd64_fetch_size,
        .cleanup                = amd64_cleanup,
@@ -499,6 +500,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
        u8 cap_ptr;
        int err;
 
+       /* The Highlander principle */
+       if (agp_bridges_found)
+               return -ENODEV;
+
        cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
        if (!cap_ptr)
                return -ENODEV;
@@ -562,6 +567,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
                           amd64_aperture_sizes[bridge->aperture_size_idx].size);
        agp_remove_bridge(bridge);
        agp_put_bridge(bridge);
+
+       agp_bridges_found--;
 }
 
 #ifdef CONFIG_PM
@@ -709,6 +716,11 @@ static struct pci_device_id agp_amd64_pci_table[] = {
 
 MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
 
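+/* Class mask 0 matches every PCI device; used to probe unsupported bridges. */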
+static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
+       { PCI_DEVICE_CLASS(0, 0) },
+       { }
+};
+
 static struct pci_driver agp_amd64_pci_driver = {
        .name           = "agpgart-amd64",
        .id_table       = agp_amd64_pci_table,
@@ -734,7 +746,6 @@ int __init agp_amd64_init(void)
                return err;
 
        if (agp_bridges_found == 0) {
-               struct pci_dev *dev;
                if (!agp_try_unsupported && !agp_try_unsupported_boot) {
                        printk(KERN_INFO PFX "No supported AGP bridge found.\n");
 #ifdef MODULE
@@ -750,17 +761,10 @@ int __init agp_amd64_init(void)
                        return -ENODEV;
 
                /* Look for any AGP bridge */
-               dev = NULL;
-               err = -ENODEV;
-               for_each_pci_dev(dev) {
-                       if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
-                               continue;
-                       /* Only one bridge supported right now */
-                       if (agp_amd64_probe(dev, NULL) == 0) {
-                               err = 0;
-                               break;
-                       }
-               }
+               agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+               err = driver_attach(&agp_amd64_pci_driver.driver);
+               if (err == 0 && agp_bridges_found == 0)
+                       err = -ENODEV;
        }
        return err;
 }
index 3b2ecbe..dc30e22 100644
@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
 {
        struct aper_size_info_lvl2 *value;
        struct ati_page_map page_dir;
+       unsigned long __iomem *cur_gatt;
        unsigned long addr;
        int retval;
        u32 temp;
@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
                readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));        /* PCI Posting. */
        }
 
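+       /* Initialise every GATT entry to point at the scratch page. */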
+       for (i = 0; i < value->num_entries; i++) {
+               addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+               cur_gatt = GET_GATT(addr);
+               writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+       }
+
        return 0;
 }
 
@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = {
        .aperture_sizes         = ati_generic_sizes,
        .size_type              = LVL2_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = ati_configure,
        .fetch_size             = ati_fetch_size,
        .cleanup                = ati_cleanup,
index 793f39e..aa109cb 100644
@@ -28,6 +28,7 @@
 #include <linux/page-flags.h>
 #include <linux/mm.h>
 #include "agp.h"
+#include "intel-agp.h"
 
 /*
  * The real differences to the generic AGP code is
index aa4248e..d836a71 100644
 #include <linux/agp_backend.h>
 #include <asm/smp.h>
 #include "agp.h"
+#include "intel-agp.h"
 
-int intel_agp_enabled;
-EXPORT_SYMBOL(intel_agp_enabled);
-
-/*
- * If we have Intel graphics, we're not going to have anything other than
- * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
- * on the Intel IOMMU support (CONFIG_DMAR).
- * Only newer chipsets need to bother with this, of course.
- */
-#ifdef CONFIG_DMAR
-#define USE_PCI_DMA_API 1
-#endif
-
-#define PCI_DEVICE_ID_INTEL_E7221_HB   0x2588
-#define PCI_DEVICE_ID_INTEL_E7221_IG   0x258a
-#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
-#define PCI_DEVICE_ID_INTEL_82946GZ_IG      0x2972
-#define PCI_DEVICE_ID_INTEL_82G35_HB     0x2980
-#define PCI_DEVICE_ID_INTEL_82G35_IG     0x2982
-#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990
-#define PCI_DEVICE_ID_INTEL_82965Q_IG       0x2992
-#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0
-#define PCI_DEVICE_ID_INTEL_82965G_IG       0x29A2
-#define PCI_DEVICE_ID_INTEL_82965GM_HB      0x2A00
-#define PCI_DEVICE_ID_INTEL_82965GM_IG      0x2A02
-#define PCI_DEVICE_ID_INTEL_82965GME_HB     0x2A10
-#define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
-#define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
-#define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB        0xA010
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG        0xA011
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB         0xA000
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG         0xA001
-#define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
-#define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
-#define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
-#define PCI_DEVICE_ID_INTEL_Q35_IG          0x29B2
-#define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
-#define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
-#define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
-#define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
-#define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
-#define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB        0x2E00
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG        0x2E02
-#define PCI_DEVICE_ID_INTEL_Q45_HB          0x2E10
-#define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
-#define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
-#define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
-#define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
-#define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB          0x0040
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG          0x0042
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB          0x0044
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB         0x0062
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG          0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB  0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG  0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB  0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG  0x0106
-
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
-               IS_SNB)
-
-extern int agp_memory_reserved;
-
-/* Intel 815 register */
-#define INTEL_815_APCONT       0x51
-#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
-
-/* Intel i820 registers */
-#define INTEL_I820_RDCR                0x51
-#define INTEL_I820_ERRSTS      0xc8
-
-/* Intel i840 registers */
-#define INTEL_I840_MCHCFG      0x50
-#define INTEL_I840_ERRSTS      0xc8
-
-/* Intel i850 registers */
-#define INTEL_I850_MCHCFG      0x50
-#define INTEL_I850_ERRSTS      0xc8
-
-/* Intel 915G registers */
-#define I915_GMADDR    0x18
-#define I915_MMADDR    0x10
-#define I915_PTEADDR   0x1C
-#define I915_GMCH_GMS_STOLEN_48M       (0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M       (0x7 << 4)
-#define G33_GMCH_GMS_STOLEN_128M       (0x8 << 4)
-#define G33_GMCH_GMS_STOLEN_256M       (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M      (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M     (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M     (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M     (0xd << 4)
-
-#define I915_IFPADDR    0x60
-
-/* Intel 965G registers */
-#define I965_MSAC 0x62
-#define I965_IFPADDR    0x70
-
-/* Intel 7505 registers */
-#define INTEL_I7505_APSIZE     0x74
-#define INTEL_I7505_NCAPID     0x60
-#define INTEL_I7505_NISTAT     0x6c
-#define INTEL_I7505_ATTBASE    0x78
-#define INTEL_I7505_ERRSTS     0x42
-#define INTEL_I7505_AGPCTRL    0x70
-#define INTEL_I7505_MCHCFG     0x50
-
-#define SNB_GMCH_CTRL  0x50
-#define SNB_GMCH_GMS_STOLEN_MASK       0xF8
-#define SNB_GMCH_GMS_STOLEN_32M                (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M                (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M                (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M       (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M       (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M       (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M       (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M       (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M       (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M       (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M       (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M       (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M       (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M       (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M       (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M       (0x10 << 3)
-#define SNB_GTT_SIZE_0M                        (0 << 8)
-#define SNB_GTT_SIZE_1M                        (1 << 8)
-#define SNB_GTT_SIZE_2M                        (2 << 8)
-#define SNB_GTT_SIZE_MASK              (3 << 8)
-
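The SNB_GMCH_GMS_STOLEN_* values above encode the stolen-memory size as a five-bit count of 32MB units at bits 3..7 of the GMCH control word. A minimal standalone sketch of that decoding, assuming exactly the encoding given by the #defines (the helper name and the test harness are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: decode the stolen-memory size, in MB, from the
 * SandyBridge GMCH control word; bits 3..7 count 32MB units, and 0xF8
 * is SNB_GMCH_GMS_STOLEN_MASK. */
static unsigned int snb_stolen_mb(uint16_t snb_gmch_ctl)
{
	return ((snb_gmch_ctl & 0xF8) >> 3) * 32;
}

int main(void)
{
	printf("%u\n", snb_stolen_mb(0x08));	/* SNB_GMCH_GMS_STOLEN_32M  -> 32  */
	printf("%u\n", snb_stolen_mb(0x80));	/* SNB_GMCH_GMS_STOLEN_512M -> 512 */
	return 0;
}
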
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
-       {64, 16384, 4},
-       /* The 32M mode still requires a 64k gatt */
-       {32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY      1
-#define AGP_PHYS_MEMORY                2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
-       {.mask = I810_PTE_VALID, .type = 0},
-       {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
-       {.mask = I810_PTE_VALID, .type = 0},
-       {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
-        .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-static struct _intel_private {
-       struct pci_dev *pcidev; /* device one */
-       u8 __iomem *registers;
-       u32 __iomem *gtt;               /* I915G */
-       int num_dcache_entries;
-       /* gtt_entries is the number of gtt entries that are already mapped
-        * to stolen memory.  Stolen memory is larger than the memory mapped
-        * through gtt_entries, as it includes some reserved space for the BIOS
-        * popup and for the GTT.
-        */
-       int gtt_entries;                        /* i830+ */
-       int gtt_total_size;
-       union {
-               void __iomem *i9xx_flush_page;
-               void *i8xx_flush_page;
-       };
-       struct page *i8xx_page;
-       struct resource ifp_resource;
-       int resource_valid;
-} intel_private;
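To make the gtt_entries bookkeeping above concrete: for a hypothetical part with 64MB of stolen memory, a 512KB GTT and a 4KB BIOS popup, the init code later in this file subtracts the reserved space and converts the remainder to a count of 4KB pages. A standalone sketch of that arithmetic (KB/MB are redefined here to mirror the driver's macros; the figures are illustrative):

#include <stdio.h>

#define KB(x)	((x) * 1024)
#define MB(x)	(KB(KB(x)))

int main(void)
{
	int size = 512 + 4;			/* GTT + BIOS popup, in KB */
	int gtt_entries = MB(64) - KB(size);	/* stolen bytes mapped via the GTT */

	printf("%d entries\n", gtt_entries / KB(4));	/* one PTE per 4KB page: 16255 */
	return 0;
}
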
-
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
-       *ret = pci_map_page(intel_private.pcidev, page, 0,
-                           PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(intel_private.pcidev, *ret))
-               return -EINVAL;
-       return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
-       pci_unmap_page(intel_private.pcidev, dma,
-                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
-
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
-       struct sg_table st;
-
-       st.sgl = mem->sg_list;
-       st.orig_nents = st.nents = mem->page_count;
-
-       sg_free_table(&st);
-
-       mem->sg_list = NULL;
-       mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
-{
-       struct sg_table st;
-       struct scatterlist *sg;
-       int i;
-
-       DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
-
-       if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
-               return -ENOMEM;
-
-       mem->sg_list = sg = st.sgl;
-
-       for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
-               sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
-
-       mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
-                                mem->page_count, PCI_DMA_BIDIRECTIONAL);
-       if (unlikely(!mem->num_sg)) {
-               intel_agp_free_sglist(mem);
-               return -ENOMEM;
-       }
-       return 0;
-}
-
-static void intel_agp_unmap_memory(struct agp_memory *mem)
-{
-       DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
-
-       pci_unmap_sg(intel_private.pcidev, mem->sg_list,
-                    mem->page_count, PCI_DMA_BIDIRECTIONAL);
-       intel_agp_free_sglist(mem);
-}
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-                                       off_t pg_start, int mask_type)
-{
-       struct scatterlist *sg;
-       int i, j;
-
-       j = pg_start;
-
-       WARN_ON(!mem->num_sg);
-
-       if (mem->num_sg == mem->page_count) {
-               for_each_sg(mem->sg_list, sg, mem->page_count, i) {
-                       writel(agp_bridge->driver->mask_memory(agp_bridge,
-                                       sg_dma_address(sg), mask_type),
-                                       intel_private.gtt+j);
-                       j++;
-               }
-       } else {
-               /* the scatterlist may merge pages, but the GTT needs a
-                * separate address for each page */
-               unsigned int len, m;
-
-               for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
-                       len = sg_dma_len(sg) / PAGE_SIZE;
-                       for (m = 0; m < len; m++) {
-                               writel(agp_bridge->driver->mask_memory(agp_bridge,
-                                                                      sg_dma_address(sg) + m * PAGE_SIZE,
-                                                                      mask_type),
-                                      intel_private.gtt+j);
-                               j++;
-                       }
-               }
-       }
-       readl(intel_private.gtt+j-1);
-}
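Since pci_map_sg() may coalesce pages into larger DMA segments, the else branch above re-expands each segment into per-page addresses before writing PTEs. A standalone sketch of that expansion, with plain integers standing in for sg_dma_address()/sg_dma_len() (the helper name is illustrative):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096

/* Expand one merged DMA segment into one GTT entry per 4KB page,
 * starting at slot j; returns the next free slot. */
static size_t emit_gtt_entries(uint64_t dma_addr, size_t dma_len,
			       uint64_t *gtt, size_t j)
{
	size_t m, pages = dma_len / PAGE_SIZE;

	for (m = 0; m < pages; m++)
		gtt[j++] = dma_addr + m * PAGE_SIZE;
	return j;
}

int main(void)
{
	uint64_t gtt[8] = { 0 };
	size_t next = emit_gtt_entries(0x10000000ULL, 3 * PAGE_SIZE, gtt, 0);

	return (int)next;	/* 3 slots written: one per page */
}
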
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-                                       off_t pg_start, int mask_type)
-{
-       int i, j;
-       u32 cache_bits = 0;
-
-       if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
-           agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
-               cache_bits = I830_PTE_SYSTEM_CACHED;
-       }
-
-       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-               writel(agp_bridge->driver->mask_memory(agp_bridge,
-                               page_to_phys(mem->pages[i]), mask_type),
-                      intel_private.gtt+j);
-       }
-
-       readl(intel_private.gtt+j-1);
-}
-
-#endif
-
-static int intel_i810_fetch_size(void)
-{
-       u32 smram_miscc;
-       struct aper_size_info_fixed *values;
-
-       pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
-       values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
-       if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-               dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
-               return 0;
-       }
-       if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
-               agp_bridge->previous_size =
-                       agp_bridge->current_size = (void *) (values + 1);
-               agp_bridge->aperture_size_idx = 1;
-               return values[1].size;
-       } else {
-               agp_bridge->previous_size =
-                       agp_bridge->current_size = (void *) (values);
-               agp_bridge->aperture_size_idx = 0;
-               return values[0].size;
-       }
-}
-
-static int intel_i810_configure(void)
-{
-       struct aper_size_info_fixed *current_size;
-       u32 temp;
-       int i;
-
-       current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-       if (!intel_private.registers) {
-               pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
-               temp &= 0xfff80000;
-
-               intel_private.registers = ioremap(temp, 128 * 4096);
-               if (!intel_private.registers) {
-                       dev_err(&intel_private.pcidev->dev,
-                               "can't remap memory\n");
-                       return -ENOMEM;
-               }
-       }
-
-       if ((readl(intel_private.registers+I810_DRAM_CTL)
-               & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
-               /* This will need to be dynamically assigned */
-               dev_info(&intel_private.pcidev->dev,
-                        "detected 4MB dedicated video ram\n");
-               intel_private.num_dcache_entries = 1024;
-       }
-       pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
-       agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-       writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-       readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
-       if (agp_bridge->driver->needs_scratch_page) {
-               for (i = 0; i < current_size->num_entries; i++) {
-                       writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-               }
-               readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
-       }
-       global_cache_flush();
-       return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
-       writel(0, intel_private.registers+I810_PGETBL_CTL);
-       readl(intel_private.registers); /* PCI Posting. */
-       iounmap(intel_private.registers);
-}
-
-static void intel_i810_tlbflush(struct agp_memory *mem)
-{
-       return;
-}
-
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
-{
-       return;
-}
-
-/* Exists to support ARGB cursors */
-static struct page *i8xx_alloc_pages(void)
-{
-       struct page *page;
-
-       page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
-       if (page == NULL)
-               return NULL;
-
-       if (set_pages_uc(page, 4) < 0) {
-               set_pages_wb(page, 4);
-               __free_pages(page, 2);
-               return NULL;
-       }
-       get_page(page);
-       atomic_inc(&agp_bridge->current_memory_agp);
-       return page;
-}
-
-static void i8xx_destroy_pages(struct page *page)
-{
-       if (page == NULL)
-               return;
-
-       set_pages_wb(page, 4);
-       put_page(page);
-       __free_pages(page, 2);
-       atomic_dec(&agp_bridge->current_memory_agp);
-}
-
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
-                                       int type)
-{
-       if (type < AGP_USER_TYPES)
-               return type;
-       else if (type == AGP_USER_CACHED_MEMORY)
-               return INTEL_AGP_CACHED_MEMORY;
-       else
-               return 0;
-}
-
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
-                               int type)
-{
-       int i, j, num_entries;
-       void *temp;
-       int ret = -EINVAL;
-       int mask_type;
-
-       if (mem->page_count == 0)
-               goto out;
-
-       temp = agp_bridge->current_size;
-       num_entries = A_SIZE_FIX(temp)->num_entries;
-
-       if ((pg_start + mem->page_count) > num_entries)
-               goto out_err;
-
-       for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-               if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
-                       ret = -EBUSY;
-                       goto out_err;
-               }
-       }
-
-       if (type != mem->type)
-               goto out_err;
-
-       mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-       switch (mask_type) {
-       case AGP_DCACHE_MEMORY:
-               if (!mem->is_flushed)
-                       global_cache_flush();
-               for (i = pg_start; i < (pg_start + mem->page_count); i++) {
-                       writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
-                              intel_private.registers+I810_PTE_BASE+(i*4));
-               }
-               readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-               break;
-       case AGP_PHYS_MEMORY:
-       case AGP_NORMAL_MEMORY:
-               if (!mem->is_flushed)
-                       global_cache_flush();
-               for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-                       writel(agp_bridge->driver->mask_memory(agp_bridge,
-                                       page_to_phys(mem->pages[i]), mask_type),
-                              intel_private.registers+I810_PTE_BASE+(j*4));
-               }
-               readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
-               break;
-       default:
-               goto out_err;
-       }
-
-       agp_bridge->driver->tlb_flush(mem);
-out:
-       ret = 0;
-out_err:
-       mem->is_flushed = true;
-       return ret;
-}
-
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
-                               int type)
-{
-       int i;
-
-       if (mem->page_count == 0)
-               return 0;
-
-       for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-               writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-       }
-       readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
-       agp_bridge->driver->tlb_flush(mem);
-       return 0;
-}
-
-/*
- * The i810/i830 requires a physical address to program its mouse
- * pointer into hardware.
- * However, the X server still writes to it through the agp aperture.
- */
-static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
-{
-       struct agp_memory *new;
-       struct page *page;
-
-       switch (pg_count) {
-       case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
-               break;
-       case 4:
-               /* kludge to get 4 physical pages for ARGB cursor */
-               page = i8xx_alloc_pages();
-               break;
-       default:
-               return NULL;
-       }
-
-       if (page == NULL)
-               return NULL;
-
-       new = agp_create_memory(pg_count);
-       if (new == NULL)
-               return NULL;
-
-       new->pages[0] = page;
-       if (pg_count == 4) {
-               /* kludge to get 4 physical pages for ARGB cursor */
-               new->pages[1] = new->pages[0] + 1;
-               new->pages[2] = new->pages[1] + 1;
-               new->pages[3] = new->pages[2] + 1;
-       }
-       new->page_count = pg_count;
-       new->num_scratch_pages = pg_count;
-       new->type = AGP_PHYS_MEMORY;
-       new->physical = page_to_phys(new->pages[0]);
-       return new;
-}
-
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
-       struct agp_memory *new;
-
-       if (type == AGP_DCACHE_MEMORY) {
-               if (pg_count != intel_private.num_dcache_entries)
-                       return NULL;
-
-               new = agp_create_memory(1);
-               if (new == NULL)
-                       return NULL;
-
-               new->type = AGP_DCACHE_MEMORY;
-               new->page_count = pg_count;
-               new->num_scratch_pages = 0;
-               agp_free_page_array(new);
-               return new;
-       }
-       if (type == AGP_PHYS_MEMORY)
-               return alloc_agpphysmem_i8xx(pg_count, type);
-       return NULL;
-}
-
-static void intel_i810_free_by_type(struct agp_memory *curr)
-{
-       agp_free_key(curr->key);
-       if (curr->type == AGP_PHYS_MEMORY) {
-               if (curr->page_count == 4)
-                       i8xx_destroy_pages(curr->pages[0]);
-               else {
-                       agp_bridge->driver->agp_destroy_page(curr->pages[0],
-                                                            AGP_PAGE_DESTROY_UNMAP);
-                       agp_bridge->driver->agp_destroy_page(curr->pages[0],
-                                                            AGP_PAGE_DESTROY_FREE);
-               }
-               agp_free_page_array(curr);
-       }
-       kfree(curr);
-}
-
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
-                                           dma_addr_t addr, int type)
-{
-       /* Type checking must be done elsewhere */
-       return addr | bridge->driver->masks[type].mask;
-}
-
-static struct aper_size_info_fixed intel_i830_sizes[] =
-{
-       {128, 32768, 5},
-       /* The 64M mode still requires a 128k gatt */
-       {64, 16384, 5},
-       {256, 65536, 6},
-       {512, 131072, 7},
-};
-
-static void intel_i830_init_gtt_entries(void)
-{
-       u16 gmch_ctrl;
-       int gtt_entries = 0;
-       u8 rdct;
-       int local = 0;
-       static const int ddt[4] = { 0, 16, 32, 64 };
-       int size; /* reserved space (in kb) at the top of stolen memory */
-
-       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
-       if (IS_I965) {
-               u32 pgetbl_ctl;
-               pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
-
-               /* The 965 has a field telling us the size of the GTT,
-                * which may be larger than what is necessary to map the
-                * aperture.
-                */
-               switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
-               case I965_PGETBL_SIZE_128KB:
-                       size = 128;
-                       break;
-               case I965_PGETBL_SIZE_256KB:
-                       size = 256;
-                       break;
-               case I965_PGETBL_SIZE_512KB:
-                       size = 512;
-                       break;
-               case I965_PGETBL_SIZE_1MB:
-                       size = 1024;
-                       break;
-               case I965_PGETBL_SIZE_2MB:
-                       size = 2048;
-                       break;
-               case I965_PGETBL_SIZE_1_5MB:
-                       size = 1024 + 512;
-                       break;
-               default:
-                       dev_info(&intel_private.pcidev->dev,
-                                "unknown page table size, assuming 512KB\n");
-                       size = 512;
-               }
-               size += 4; /* add in BIOS popup space */
-       } else if (IS_G33 && !IS_PINEVIEW) {
-               /* The G33's GTT size is defined in gmch_ctrl */
-               switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
-               case G33_PGETBL_SIZE_1M:
-                       size = 1024;
-                       break;
-               case G33_PGETBL_SIZE_2M:
-                       size = 2048;
-                       break;
-               default:
-                       dev_info(&agp_bridge->dev->dev,
-                                "unknown page table size 0x%x, assuming 512KB\n",
-                               (gmch_ctrl & G33_PGETBL_SIZE_MASK));
-                       size = 512;
-               }
-               size += 4;
-       } else if (IS_G4X || IS_PINEVIEW) {
-               /* On 4 series hardware, the GTT stolen area is separate from
-                * graphics stolen memory, so ignore it when counting stolen
-                * gtt entries.  However, 4KB of the stolen memory doesn't get
-                * mapped to the GTT.
-                */
-               size = 4;
-       } else {
-               /* On previous hardware, the GTT size was just what was
-                * required to map the aperture.
-                */
-               size = agp_bridge->driver->fetch_size() + 4;
-       }
-
-       if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
-           agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
-               switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
-               case I830_GMCH_GMS_STOLEN_512:
-                       gtt_entries = KB(512) - KB(size);
-                       break;
-               case I830_GMCH_GMS_STOLEN_1024:
-                       gtt_entries = MB(1) - KB(size);
-                       break;
-               case I830_GMCH_GMS_STOLEN_8192:
-                       gtt_entries = MB(8) - KB(size);
-                       break;
-               case I830_GMCH_GMS_LOCAL:
-                       rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
-                       gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
-                                       MB(ddt[I830_RDRAM_DDT(rdct)]);
-                       local = 1;
-                       break;
-               default:
-                       gtt_entries = 0;
-                       break;
-               }
-       } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
-                  agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
-               /*
-                * SandyBridge has a new memory control register at 0x50
-                * (word-sized access).
-                */
-               u16 snb_gmch_ctl;
-               pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-               switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
-               case SNB_GMCH_GMS_STOLEN_32M:
-                       gtt_entries = MB(32) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_64M:
-                       gtt_entries = MB(64) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_96M:
-                       gtt_entries = MB(96) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_128M:
-                       gtt_entries = MB(128) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_160M:
-                       gtt_entries = MB(160) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_192M:
-                       gtt_entries = MB(192) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_224M:
-                       gtt_entries = MB(224) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_256M:
-                       gtt_entries = MB(256) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_288M:
-                       gtt_entries = MB(288) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_320M:
-                       gtt_entries = MB(320) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_352M:
-                       gtt_entries = MB(352) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_384M:
-                       gtt_entries = MB(384) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_416M:
-                       gtt_entries = MB(416) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_448M:
-                       gtt_entries = MB(448) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_480M:
-                       gtt_entries = MB(480) - KB(size);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_512M:
-                       gtt_entries = MB(512) - KB(size);
-                       break;
-               }
-       } else {
-               switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
-               case I855_GMCH_GMS_STOLEN_1M:
-                       gtt_entries = MB(1) - KB(size);
-                       break;
-               case I855_GMCH_GMS_STOLEN_4M:
-                       gtt_entries = MB(4) - KB(size);
-                       break;
-               case I855_GMCH_GMS_STOLEN_8M:
-                       gtt_entries = MB(8) - KB(size);
-                       break;
-               case I855_GMCH_GMS_STOLEN_16M:
-                       gtt_entries = MB(16) - KB(size);
-                       break;
-               case I855_GMCH_GMS_STOLEN_32M:
-                       gtt_entries = MB(32) - KB(size);
-                       break;
-               case I915_GMCH_GMS_STOLEN_48M:
-                       /* Check it's really I915G */
-                       if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-                               gtt_entries = MB(48) - KB(size);
-                       else
-                               gtt_entries = 0;
-                       break;
-               case I915_GMCH_GMS_STOLEN_64M:
-                       /* Check it's really I915G */
-                       if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-                               gtt_entries = MB(64) - KB(size);
-                       else
-                               gtt_entries = 0;
-                       break;
-               case G33_GMCH_GMS_STOLEN_128M:
-                       if (IS_G33 || IS_I965 || IS_G4X)
-                               gtt_entries = MB(128) - KB(size);
-                       else
-                               gtt_entries = 0;
-                       break;
-               case G33_GMCH_GMS_STOLEN_256M:
-                       if (IS_G33 || IS_I965 || IS_G4X)
-                               gtt_entries = MB(256) - KB(size);
-                       else
-                               gtt_entries = 0;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_96M:
-                       if (IS_I965 || IS_G4X)
-                               gtt_entries = MB(96) - KB(size);
-                       else
-                               gtt_entries = 0;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_160M:
-                       if (IS_I965 || IS_G4X)
-                               gtt_entries = MB(160) - KB(size);
-                       else
-                               gtt_entries = 0;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_224M:
-                       if (IS_I965 || IS_G4X)
-                               gtt_entries = MB(224) - KB(size);
-                       else
-                               gtt_entries = 0;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_352M:
-                       if (IS_I965 || IS_G4X)
-                               gtt_entries = MB(352) - KB(size);
-                       else
-                               gtt_entries = 0;
-                       break;
-               default:
-                       gtt_entries = 0;
-                       break;
-               }
-       }
-       if (gtt_entries > 0) {
-               dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
-                      gtt_entries / KB(1), local ? "local" : "stolen");
-               gtt_entries /= KB(4);
-       } else {
-               dev_info(&agp_bridge->dev->dev,
-                      "no pre-allocated video memory detected\n");
-               gtt_entries = 0;
-       }
-
-       intel_private.gtt_entries = gtt_entries;
-}
-
-static void intel_i830_fini_flush(void)
-{
-       kunmap(intel_private.i8xx_page);
-       intel_private.i8xx_flush_page = NULL;
-       unmap_page_from_agp(intel_private.i8xx_page);
-
-       __free_page(intel_private.i8xx_page);
-       intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-       /* return if we've already set the flush mechanism up */
-       if (intel_private.i8xx_page)
-               return;
-
-       intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
-       if (!intel_private.i8xx_page)
-               return;
-
-       intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-       if (!intel_private.i8xx_flush_page)
-               intel_i830_fini_flush();
-}
-
-/* The chipset_flush interface needs to get data that has already been
- * flushed out of the CPU all the way out to main memory, because the GPU
- * doesn't snoop those buffers.
- *
- * The 8xx series doesn't have the same lovely interface for flushing the
- * chipset write buffers that the later chips do. According to the 865
- * specs, that buffer is 64 octwords, or 1KB.  So, to push its previous
- * contents out, we just fill 1KB and clflush it, on the assumption
- * that this forces out whatever was in there.  It appears to work.
- */
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
-{
-       unsigned int *pg = intel_private.i8xx_flush_page;
-
-       memset(pg, 0, 1024);
-
-       if (cpu_has_clflush)
-               clflush_cache_range(pg, 1024);
-       else if (wbinvd_on_all_cpus() != 0)
-               printk(KERN_ERR "Timed out waiting for cache flush.\n");
-}
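clflush works on one cache line at a time, so pushing the 1KB scratch buffer out means stepping through it at line granularity. A simplified standalone sketch of what the clflush_cache_range() call above amounts to, assuming a 64-byte line (the kernel helper also adds memory barriers and uses the line size detected at boot):

#define CACHE_LINE 64

/* Flush [addr, addr + size) one cache line at a time (x86 clflush;
 * a simplified, fence-free stand-in for clflush_cache_range()). */
static void flush_range(void *addr, unsigned int size)
{
	char *p, *end = (char *)addr + size;

	for (p = addr; p < end; p += CACHE_LINE)
		__asm__ volatile("clflush (%0)" :: "r"(p) : "memory");
}

int main(void)
{
	static char scratch[1024];	/* stands in for the 1KB flush page */

	flush_range(scratch, sizeof(scratch));
	return 0;
}
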
-
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
-{
-       int page_order;
-       struct aper_size_info_fixed *size;
-       int num_entries;
-       u32 temp;
-
-       size = agp_bridge->current_size;
-       page_order = size->page_order;
-       num_entries = size->num_entries;
-       agp_bridge->gatt_table_real = NULL;
-
-       pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
-       temp &= 0xfff80000;
-
-       intel_private.registers = ioremap(temp, 128 * 4096);
-       if (!intel_private.registers)
-               return -ENOMEM;
-
-       temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-       global_cache_flush();   /* FIXME: ?? */
-
-       /* we have to call this as early as possible after the MMIO base address is known */
-       intel_i830_init_gtt_entries();
-
-       agp_bridge->gatt_table = NULL;
-
-       agp_bridge->gatt_bus_addr = temp;
-
-       return 0;
-}
-
-/* Return the gatt table to a sane state. There is nothing to free,
- * since the GTT lives at the top of stolen memory.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
-{
-       return 0;
-}
-
-static int intel_i830_fetch_size(void)
-{
-       u16 gmch_ctrl;
-       struct aper_size_info_fixed *values;
-
-       values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
-       if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
-           agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
-               /* 855GM/852GM/865G have a 128MB aperture size */
-               agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
-               agp_bridge->aperture_size_idx = 0;
-               return values[0].size;
-       }
-
-       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
-       if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
-               agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
-               agp_bridge->aperture_size_idx = 0;
-               return values[0].size;
-       } else {
-               agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
-               agp_bridge->aperture_size_idx = 1;
-               return values[1].size;
-       }
-}
-
-static int intel_i830_configure(void)
-{
-       struct aper_size_info_fixed *current_size;
-       u32 temp;
-       u16 gmch_ctrl;
-       int i;
-
-       current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-       pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
-       agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-       gmch_ctrl |= I830_GMCH_ENABLED;
-       pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
-       writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-       readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
-       if (agp_bridge->driver->needs_scratch_page) {
-               for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
-                       writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-               }
-               readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
-       }
-
-       global_cache_flush();
-
-       intel_i830_setup_flush();
-       return 0;
-}
-
-static void intel_i830_cleanup(void)
-{
-       iounmap(intel_private.registers);
-}
-
-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
-                                    int type)
-{
-       int i, j, num_entries;
-       void *temp;
-       int ret = -EINVAL;
-       int mask_type;
-
-       if (mem->page_count == 0)
-               goto out;
-
-       temp = agp_bridge->current_size;
-       num_entries = A_SIZE_FIX(temp)->num_entries;
-
-       if (pg_start < intel_private.gtt_entries) {
-               dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
-                          "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
-                          pg_start, intel_private.gtt_entries);
-
-               dev_info(&intel_private.pcidev->dev,
-                        "trying to insert into local/stolen memory\n");
-               goto out_err;
-       }
-
-       if ((pg_start + mem->page_count) > num_entries)
-               goto out_err;
-
-       /* The i830 can't check the GTT for entries since it's read only;
-        * depend on the caller to make the correct offset decisions.
-        */
-
-       if (type != mem->type)
-               goto out_err;
-
-       mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-       if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
-           mask_type != INTEL_AGP_CACHED_MEMORY)
-               goto out_err;
-
-       if (!mem->is_flushed)
-               global_cache_flush();
-
-       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-               writel(agp_bridge->driver->mask_memory(agp_bridge,
-                               page_to_phys(mem->pages[i]), mask_type),
-                      intel_private.registers+I810_PTE_BASE+(j*4));
-       }
-       readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
-       agp_bridge->driver->tlb_flush(mem);
-
-out:
-       ret = 0;
-out_err:
-       mem->is_flushed = true;
-       return ret;
-}
-
-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
-                                    int type)
-{
-       int i;
-
-       if (mem->page_count == 0)
-               return 0;
-
-       if (pg_start < intel_private.gtt_entries) {
-               dev_info(&intel_private.pcidev->dev,
-                        "trying to disable local/stolen memory\n");
-               return -EINVAL;
-       }
-
-       for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-               writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-       }
-       readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
-       agp_bridge->driver->tlb_flush(mem);
-       return 0;
-}
-
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
-{
-       if (type == AGP_PHYS_MEMORY)
-               return alloc_agpphysmem_i8xx(pg_count, type);
-       /* always return NULL for other allocation types for now */
-       return NULL;
-}
-
-static int intel_alloc_chipset_flush_resource(void)
-{
-       int ret;
-       ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
-                                    PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
-                                    pcibios_align_resource, agp_bridge->dev);
-
-       return ret;
-}
-
-static void intel_i915_setup_chipset_flush(void)
-{
-       int ret;
-       u32 temp;
-
-       pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
-       if (!(temp & 0x1)) {
-               intel_alloc_chipset_flush_resource();
-               intel_private.resource_valid = 1;
-               pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
-       } else {
-               temp &= ~1;
-
-               intel_private.resource_valid = 1;
-               intel_private.ifp_resource.start = temp;
-               intel_private.ifp_resource.end = temp + PAGE_SIZE;
-               ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
-               /* some BIOSes reserve this area in a PnP resource, some don't */
-               if (ret)
-                       intel_private.resource_valid = 0;
-       }
-}
-
-static void intel_i965_g33_setup_chipset_flush(void)
-{
-       u32 temp_hi, temp_lo;
-       int ret;
-
-       pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
-       pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
-
-       if (!(temp_lo & 0x1)) {
-               intel_alloc_chipset_flush_resource();
-
-               intel_private.resource_valid = 1;
-               pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
-                       upper_32_bits(intel_private.ifp_resource.start));
-               pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
-       } else {
-               u64 l64;
-
-               temp_lo &= ~0x1;
-               l64 = ((u64)temp_hi << 32) | temp_lo;
-
-               intel_private.resource_valid = 1;
-               intel_private.ifp_resource.start = l64;
-               intel_private.ifp_resource.end = l64 + PAGE_SIZE;
-               ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
-               /* some BIOSes reserve this area in a PnP resource, some don't */
-               if (ret)
-                       intel_private.resource_valid = 0;
-       }
-}
-
-static void intel_i9xx_setup_flush(void)
-{
-       /* return if already configured */
-       if (intel_private.ifp_resource.start)
-               return;
-
-       if (IS_SNB)
-               return;
-
-       /* setup a resource for this object */
-       intel_private.ifp_resource.name = "Intel Flush Page";
-       intel_private.ifp_resource.flags = IORESOURCE_MEM;
-
-       /* Set up the chipset flush page: 965/G33/G4X use a 64-bit
-        * IFP address, the 915 a 32-bit one */
-       if (IS_I965 || IS_G33 || IS_G4X) {
-               intel_i965_g33_setup_chipset_flush();
-       } else {
-               intel_i915_setup_chipset_flush();
-       }
-
-       if (intel_private.ifp_resource.start) {
-               intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
-               if (!intel_private.i9xx_flush_page)
-                       dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
-       }
-}
-
-static int intel_i915_configure(void)
-{
-       struct aper_size_info_fixed *current_size;
-       u32 temp;
-       u16 gmch_ctrl;
-       int i;
-
-       current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-       pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
-
-       agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-       gmch_ctrl |= I830_GMCH_ENABLED;
-       pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
-       writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-       readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
-       if (agp_bridge->driver->needs_scratch_page) {
-               for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
-                       writel(agp_bridge->scratch_page, intel_private.gtt+i);
-               }
-               readl(intel_private.gtt+i-1);   /* PCI Posting. */
-       }
-
-       global_cache_flush();
-
-       intel_i9xx_setup_flush();
-
-       return 0;
-}
-
-static void intel_i915_cleanup(void)
-{
-       if (intel_private.i9xx_flush_page)
-               iounmap(intel_private.i9xx_flush_page);
-       if (intel_private.resource_valid)
-               release_resource(&intel_private.ifp_resource);
-       intel_private.ifp_resource.start = 0;
-       intel_private.resource_valid = 0;
-       iounmap(intel_private.gtt);
-       iounmap(intel_private.registers);
-}
-
-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
-{
-       if (intel_private.i9xx_flush_page)
-               writel(1, intel_private.i9xx_flush_page);
-}
-
-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
-                                    int type)
-{
-       int num_entries;
-       void *temp;
-       int ret = -EINVAL;
-       int mask_type;
-
-       if (mem->page_count == 0)
-               goto out;
-
-       temp = agp_bridge->current_size;
-       num_entries = A_SIZE_FIX(temp)->num_entries;
-
-       if (pg_start < intel_private.gtt_entries) {
-               dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
-                          "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
-                          pg_start, intel_private.gtt_entries);
-
-               dev_info(&intel_private.pcidev->dev,
-                        "trying to insert into local/stolen memory\n");
-               goto out_err;
-       }
-
-       if ((pg_start + mem->page_count) > num_entries)
-               goto out_err;
-
-       /* The i915 can't check the GTT for entries since it's read only;
-        * depend on the caller to make the correct offset decisions.
-        */
-
-       if (type != mem->type)
-               goto out_err;
-
-       mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-       if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
-           mask_type != INTEL_AGP_CACHED_MEMORY)
-               goto out_err;
-
-       if (!mem->is_flushed)
-               global_cache_flush();
-
-       intel_agp_insert_sg_entries(mem, pg_start, mask_type);
-       agp_bridge->driver->tlb_flush(mem);
-
- out:
-       ret = 0;
- out_err:
-       mem->is_flushed = true;
-       return ret;
-}
-
-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
-                                    int type)
-{
-       int i;
-
-       if (mem->page_count == 0)
-               return 0;
-
-       if (pg_start < intel_private.gtt_entries) {
-               dev_info(&intel_private.pcidev->dev,
-                        "trying to disable local/stolen memory\n");
-               return -EINVAL;
-       }
-
-       for (i = pg_start; i < (mem->page_count + pg_start); i++)
-               writel(agp_bridge->scratch_page, intel_private.gtt+i);
-
-       readl(intel_private.gtt+i-1);
-
-       agp_bridge->driver->tlb_flush(mem);
-       return 0;
-}
-
-/* Return the aperture size by just checking the resource length.  The
- * effect the spec describes for the MSAC registers is simply a change in
- * the resource size.
- */
-static int intel_i9xx_fetch_size(void)
-{
-       int num_sizes = ARRAY_SIZE(intel_i830_sizes);
-       int aper_size; /* size in megabytes */
-       int i;
-
-       aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
-
-       for (i = 0; i < num_sizes; i++) {
-               if (aper_size == intel_i830_sizes[i].size) {
-                       agp_bridge->current_size = intel_i830_sizes + i;
-                       agp_bridge->previous_size = agp_bridge->current_size;
-                       return aper_size;
-               }
-       }
-
-       return 0;
-}
-
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for the GTT.
- */
-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
-{
-       int page_order;
-       struct aper_size_info_fixed *size;
-       int num_entries;
-       u32 temp, temp2;
-       int gtt_map_size = 256 * 1024;
-
-       size = agp_bridge->current_size;
-       page_order = size->page_order;
-       num_entries = size->num_entries;
-       agp_bridge->gatt_table_real = NULL;
-
-       pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-       pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
-
-       if (IS_G33)
-               gtt_map_size = 1024 * 1024;     /* 1M on G33 */
-       intel_private.gtt = ioremap(temp2, gtt_map_size);
-       if (!intel_private.gtt)
-               return -ENOMEM;
-
-       intel_private.gtt_total_size = gtt_map_size / 4;
-
-       temp &= 0xfff80000;
-
-       intel_private.registers = ioremap(temp, 128 * 4096);
-       if (!intel_private.registers) {
-               iounmap(intel_private.gtt);
-               return -ENOMEM;
-       }
-
-       temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-       global_cache_flush();   /* FIXME: ? */
-
-       /* we have to call this as early as possible after the MMIO base address is known */
-       intel_i830_init_gtt_entries();
-
-       agp_bridge->gatt_table = NULL;
-
-       agp_bridge->gatt_bus_addr = temp;
-
-       return 0;
-}
-
-/*
- * The i965 supports 36-bit physical addresses, but to keep
- * the format of the GTT the same, the bits that don't fit
- * in a 32-bit word are shifted down to bits 4..7.
- *
- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
- * is always zero on 32-bit architectures, so no need to make
- * this conditional.
- */
-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
-                                           dma_addr_t addr, int type)
-{
-       /* Shift high bits down */
-       addr |= (addr >> 28) & 0xf0;
-
-       /* Type checking must be done elsewhere */
-       return addr | bridge->driver->masks[type].mask;
-}
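A worked example of the bit-folding above: for the 36-bit physical address 0x123456000, bits 32..35 (here 0x1) are shifted down into PTE bits 4..7, so the entry fits in 32 bits (a standalone sketch; the address is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x123456000ULL;			/* 36-bit physical address */
	uint64_t pte  = addr | ((addr >> 28) & 0xf0);	/* fold bits 32..35 into 4..7 */

	printf("0x%08x\n", (uint32_t)pte);		/* prints 0x23456010 */
	return 0;
}
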
-
-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
-{
-       u16 snb_gmch_ctl;
-
-       switch (agp_bridge->dev->device) {
-       case PCI_DEVICE_ID_INTEL_GM45_HB:
-       case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
-       case PCI_DEVICE_ID_INTEL_Q45_HB:
-       case PCI_DEVICE_ID_INTEL_G45_HB:
-       case PCI_DEVICE_ID_INTEL_G41_HB:
-       case PCI_DEVICE_ID_INTEL_B43_HB:
-       case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
-       case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
-       case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
-       case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
-               *gtt_offset = *gtt_size = MB(2);
-               break;
-       case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
-       case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
-               *gtt_offset = MB(2);
-
-               pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-               switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-               default:
-               case SNB_GTT_SIZE_0M:
-                       printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-                       *gtt_size = MB(0);
-                       break;
-               case SNB_GTT_SIZE_1M:
-                       *gtt_size = MB(1);
-                       break;
-               case SNB_GTT_SIZE_2M:
-                       *gtt_size = MB(2);
-                       break;
-               }
-               break;
-       default:
-               *gtt_offset = *gtt_size = KB(512);
-       }
-}
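The SandyBridge arm above reads the GTT size from a separate two-bit field at bits 8..9 of the same control word (SNB_GTT_SIZE_MASK). A minimal sketch of that decode, mirroring the switch above (the helper name is illustrative):

#include <stdint.h>

/* Decode the SandyBridge GTT size, in MB, from bits 8..9 of the GMCH
 * control word; 0 covers SNB_GTT_SIZE_0M and any reserved value. */
int snb_gtt_size_mb(uint16_t snb_gmch_ctl)
{
	switch ((snb_gmch_ctl >> 8) & 3) {
	case 1:
		return 1;	/* SNB_GTT_SIZE_1M */
	case 2:
		return 2;	/* SNB_GTT_SIZE_2M */
	default:
		return 0;	/* SNB_GTT_SIZE_0M / reserved */
	}
}

int main(void)
{
	return snb_gtt_size_mb(0x0200);	/* SNB_GTT_SIZE_2M -> 2 */
}
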
-
-/* The intel i965 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for the GTT.
- */
-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
-{
-       int page_order;
-       struct aper_size_info_fixed *size;
-       int num_entries;
-       u32 temp;
-       int gtt_offset, gtt_size;
-
-       size = agp_bridge->current_size;
-       page_order = size->page_order;
-       num_entries = size->num_entries;
-       agp_bridge->gatt_table_real = NULL;
-
-       pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-
-       temp &= 0xfff00000;
-
-       intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
-
-       intel_private.gtt = ioremap(temp + gtt_offset, gtt_size);
-
-       if (!intel_private.gtt)
-               return -ENOMEM;
-
-       intel_private.gtt_total_size = gtt_size / 4;
-
-       intel_private.registers = ioremap(temp, 128 * 4096);
-       if (!intel_private.registers) {
-               iounmap(intel_private.gtt);
-               return -ENOMEM;
-       }
-
-       temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-       global_cache_flush();   /* FIXME: ? */
-
-       /* we have to call this as early as possible after the MMIO base address is known */
-       intel_i830_init_gtt_entries();
-
-       agp_bridge->gatt_table = NULL;
-
-       agp_bridge->gatt_bus_addr = temp;
-
-       return 0;
-}
+#include "intel-gtt.c"
 
+int intel_agp_enabled;
+EXPORT_SYMBOL(intel_agp_enabled);
 
 static int intel_fetch_size(void)
 {
@@ -1982,6 +464,7 @@ static const struct agp_bridge_driver intel_generic_driver = {
        .aperture_sizes         = intel_generic_sizes,
        .size_type              = U16_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = intel_configure,
        .fetch_size             = intel_fetch_size,
        .cleanup                = intel_cleanup,
@@ -2003,38 +486,12 @@ static const struct agp_bridge_driver intel_generic_driver = {
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_810_driver = {
-       .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i810_sizes,
-       .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 2,
-       .needs_scratch_page     = true,
-       .configure              = intel_i810_configure,
-       .fetch_size             = intel_i810_fetch_size,
-       .cleanup                = intel_i810_cleanup,
-       .tlb_flush              = intel_i810_tlbflush,
-       .mask_memory            = intel_i810_mask_memory,
-       .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
-       .cache_flush            = global_cache_flush,
-       .create_gatt_table      = agp_generic_create_gatt_table,
-       .free_gatt_table        = agp_generic_free_gatt_table,
-       .insert_memory          = intel_i810_insert_entries,
-       .remove_memory          = intel_i810_remove_entries,
-       .alloc_by_type          = intel_i810_alloc_by_type,
-       .free_by_type           = intel_i810_free_by_type,
-       .agp_alloc_page         = agp_generic_alloc_page,
-       .agp_alloc_pages        = agp_generic_alloc_pages,
-       .agp_destroy_page       = agp_generic_destroy_page,
-       .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
-};
-
 static const struct agp_bridge_driver intel_815_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_815_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 2,
+       .needs_scratch_page     = true,
        .configure              = intel_815_configure,
        .fetch_size             = intel_815_fetch_size,
        .cleanup                = intel_8xx_cleanup,
@@ -2056,39 +513,12 @@ static const struct agp_bridge_driver intel_815_driver = {
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_830_driver = {
-       .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i830_sizes,
-       .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 4,
-       .needs_scratch_page     = true,
-       .configure              = intel_i830_configure,
-       .fetch_size             = intel_i830_fetch_size,
-       .cleanup                = intel_i830_cleanup,
-       .tlb_flush              = intel_i810_tlbflush,
-       .mask_memory            = intel_i810_mask_memory,
-       .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
-       .cache_flush            = global_cache_flush,
-       .create_gatt_table      = intel_i830_create_gatt_table,
-       .free_gatt_table        = intel_i830_free_gatt_table,
-       .insert_memory          = intel_i830_insert_entries,
-       .remove_memory          = intel_i830_remove_entries,
-       .alloc_by_type          = intel_i830_alloc_by_type,
-       .free_by_type           = intel_i810_free_by_type,
-       .agp_alloc_page         = agp_generic_alloc_page,
-       .agp_alloc_pages        = agp_generic_alloc_pages,
-       .agp_destroy_page       = agp_generic_destroy_page,
-       .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-       .chipset_flush          = intel_i830_chipset_flush,
-};
-
 static const struct agp_bridge_driver intel_820_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_8xx_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = intel_820_configure,
        .fetch_size             = intel_8xx_fetch_size,
        .cleanup                = intel_820_cleanup,
@@ -2115,6 +545,7 @@ static const struct agp_bridge_driver intel_830mp_driver = {
        .aperture_sizes         = intel_830mp_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 4,
+       .needs_scratch_page     = true,
        .configure              = intel_830mp_configure,
        .fetch_size             = intel_8xx_fetch_size,
        .cleanup                = intel_8xx_cleanup,
@@ -2141,6 +572,7 @@ static const struct agp_bridge_driver intel_840_driver = {
        .aperture_sizes         = intel_8xx_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = intel_840_configure,
        .fetch_size             = intel_8xx_fetch_size,
        .cleanup                = intel_8xx_cleanup,
@@ -2167,6 +599,7 @@ static const struct agp_bridge_driver intel_845_driver = {
        .aperture_sizes         = intel_8xx_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = intel_845_configure,
        .fetch_size             = intel_8xx_fetch_size,
        .cleanup                = intel_8xx_cleanup,
@@ -2193,6 +626,7 @@ static const struct agp_bridge_driver intel_850_driver = {
        .aperture_sizes         = intel_8xx_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = intel_850_configure,
        .fetch_size             = intel_8xx_fetch_size,
        .cleanup                = intel_8xx_cleanup,
@@ -2219,6 +653,7 @@ static const struct agp_bridge_driver intel_860_driver = {
        .aperture_sizes         = intel_8xx_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = intel_860_configure,
        .fetch_size             = intel_8xx_fetch_size,
        .cleanup                = intel_8xx_cleanup,
@@ -2240,79 +675,12 @@ static const struct agp_bridge_driver intel_860_driver = {
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_915_driver = {
-       .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i830_sizes,
-       .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 4,
-       .needs_scratch_page     = true,
-       .configure              = intel_i915_configure,
-       .fetch_size             = intel_i9xx_fetch_size,
-       .cleanup                = intel_i915_cleanup,
-       .tlb_flush              = intel_i810_tlbflush,
-       .mask_memory            = intel_i810_mask_memory,
-       .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
-       .cache_flush            = global_cache_flush,
-       .create_gatt_table      = intel_i915_create_gatt_table,
-       .free_gatt_table        = intel_i830_free_gatt_table,
-       .insert_memory          = intel_i915_insert_entries,
-       .remove_memory          = intel_i915_remove_entries,
-       .alloc_by_type          = intel_i830_alloc_by_type,
-       .free_by_type           = intel_i810_free_by_type,
-       .agp_alloc_page         = agp_generic_alloc_page,
-       .agp_alloc_pages        = agp_generic_alloc_pages,
-       .agp_destroy_page       = agp_generic_destroy_page,
-       .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-       .chipset_flush          = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-       .agp_map_page           = intel_agp_map_page,
-       .agp_unmap_page         = intel_agp_unmap_page,
-       .agp_map_memory         = intel_agp_map_memory,
-       .agp_unmap_memory       = intel_agp_unmap_memory,
-#endif
-};
-
-static const struct agp_bridge_driver intel_i965_driver = {
-       .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i830_sizes,
-       .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 4,
-       .needs_scratch_page     = true,
-       .configure              = intel_i915_configure,
-       .fetch_size             = intel_i9xx_fetch_size,
-       .cleanup                = intel_i915_cleanup,
-       .tlb_flush              = intel_i810_tlbflush,
-       .mask_memory            = intel_i965_mask_memory,
-       .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
-       .cache_flush            = global_cache_flush,
-       .create_gatt_table      = intel_i965_create_gatt_table,
-       .free_gatt_table        = intel_i830_free_gatt_table,
-       .insert_memory          = intel_i915_insert_entries,
-       .remove_memory          = intel_i915_remove_entries,
-       .alloc_by_type          = intel_i830_alloc_by_type,
-       .free_by_type           = intel_i810_free_by_type,
-       .agp_alloc_page         = agp_generic_alloc_page,
-       .agp_alloc_pages        = agp_generic_alloc_pages,
-       .agp_destroy_page       = agp_generic_destroy_page,
-       .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-       .chipset_flush          = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-       .agp_map_page           = intel_agp_map_page,
-       .agp_unmap_page         = intel_agp_unmap_page,
-       .agp_map_memory         = intel_agp_map_memory,
-       .agp_unmap_memory       = intel_agp_unmap_memory,
-#endif
-};
-
 static const struct agp_bridge_driver intel_7505_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_8xx_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = intel_7505_configure,
        .fetch_size             = intel_8xx_fetch_size,
        .cleanup                = intel_8xx_cleanup,
@@ -2334,40 +702,6 @@ static const struct agp_bridge_driver intel_7505_driver = {
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_g33_driver = {
-       .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i830_sizes,
-       .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 4,
-       .needs_scratch_page     = true,
-       .configure              = intel_i915_configure,
-       .fetch_size             = intel_i9xx_fetch_size,
-       .cleanup                = intel_i915_cleanup,
-       .tlb_flush              = intel_i810_tlbflush,
-       .mask_memory            = intel_i965_mask_memory,
-       .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
-       .cache_flush            = global_cache_flush,
-       .create_gatt_table      = intel_i915_create_gatt_table,
-       .free_gatt_table        = intel_i830_free_gatt_table,
-       .insert_memory          = intel_i915_insert_entries,
-       .remove_memory          = intel_i915_remove_entries,
-       .alloc_by_type          = intel_i830_alloc_by_type,
-       .free_by_type           = intel_i810_free_by_type,
-       .agp_alloc_page         = agp_generic_alloc_page,
-       .agp_alloc_pages        = agp_generic_alloc_pages,
-       .agp_destroy_page       = agp_generic_destroy_page,
-       .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-       .chipset_flush          = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-       .agp_map_page           = intel_agp_map_page,
-       .agp_unmap_page         = intel_agp_unmap_page,
-       .agp_map_memory         = intel_agp_map_memory,
-       .agp_unmap_memory       = intel_agp_unmap_memory,
-#endif
-};
-
 static int find_gmch(u16 device)
 {
        struct pci_dev *gmch_device;
@@ -2392,103 +726,137 @@ static int find_gmch(u16 device)
 static const struct intel_driver_description {
        unsigned int chip_id;
        unsigned int gmch_chip_id;
-       unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */
        char *name;
        const struct agp_bridge_driver *driver;
        const struct agp_bridge_driver *gmch_driver;
 } intel_agp_chipsets[] = {
-       { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810",
+       { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
                NULL, &intel_810_driver },
-       { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810",
+       { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
                NULL, &intel_810_driver },
-       { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810",
+       { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
                NULL, &intel_810_driver },
-       { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815",
+       { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
                &intel_815_driver, &intel_810_driver },
-       { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M",
+       { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
                &intel_830mp_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M",
+       { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
                &intel_845_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854",
+       { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
                &intel_845_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM",
+       { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
                &intel_845_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865",
+       { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
                &intel_845_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)",
+       { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
                NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
+       { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
                NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
+       { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
                NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
+       { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
                NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM",
+       { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
                NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
+       { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
                NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
+       { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
                NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35",
+       { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
                NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
+       { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
                NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
+       { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
                NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM",
+       { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
                NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
+       { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
                NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33",
+       { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
+       { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
                NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35",
+       { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
                NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
+       { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
                NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
+       { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
                NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
+       { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
                NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
+       { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
            "GM45", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+       { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
            "Eaglelake", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
+       { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
            "Q45/Q43", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
+       { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
            "G45/G43", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
+       { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
            "B43", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
+       { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
            "G41", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
+       { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
            "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+       { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+       { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+       { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
            "Sandybridge", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
            "Sandybridge", NULL, &intel_i965_driver },
-       { 0, 0, 0, NULL, NULL, NULL }
+       { 0, 0, NULL, NULL, NULL }
 };
 
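+/*
+ * Probe for an integrated graphics device (GMCH). Walk the chipset table
+ * looking for a known IGD; on a match, install the gmch driver, point
+ * dev_private_data at intel_private and, on i965-class chips, widen the
+ * gfx device's DMA mask to 36 bits. Returns 1 if a GMCH was found.
+ */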
+static int __devinit intel_gmch_probe(struct pci_dev *pdev,
+                                     struct agp_bridge_data *bridge)
+{
+       int i;
+       bridge->driver = NULL;
+
+       for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+               if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
+                       find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
+                       bridge->driver =
+                               intel_agp_chipsets[i].gmch_driver;
+                       break;
+               }
+       }
+
+       if (!bridge->driver)
+               return 0;
+
+       bridge->dev_private_data = &intel_private;
+       bridge->dev = pdev;
+
+       dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+
+       if (bridge->driver->mask_memory == intel_i965_mask_memory) {
+               if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+                       dev_err(&intel_private.pcidev->dev,
+                               "set gfx device dma mask 36bit failed!\n");
+               else
+                       pci_set_consistent_dma_mask(intel_private.pcidev,
+                                                   DMA_BIT_MASK(36));
+       }
+
+       return 1;
+}
+
 static int __devinit agp_intel_probe(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
 {
@@ -2503,22 +871,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
        if (!bridge)
                return -ENOMEM;
 
+       bridge->capndx = cap_ptr;
+
+       if (intel_gmch_probe(pdev, bridge))
+               goto found_gmch;
+
        for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
                /* Multiple models of gfx chip may sit on the same host
                   bridge type; make sure we detect the right IGD. */
                if (pdev->device == intel_agp_chipsets[i].chip_id) {
-                       if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
-                               find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
-                               bridge->driver =
-                                       intel_agp_chipsets[i].gmch_driver;
-                               break;
-                       } else if (intel_agp_chipsets[i].multi_gmch_chip) {
-                               continue;
-                       } else {
-                               bridge->driver = intel_agp_chipsets[i].driver;
-                               break;
-                       }
+                       bridge->driver = intel_agp_chipsets[i].driver;
+                       break;
                }
        }
 
@@ -2530,18 +894,16 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
                return -ENODEV;
        }
 
-       if (bridge->driver == NULL) {
-               /* bridge has no AGP and no IGD detected */
+       if (!bridge->driver) {
                if (cap_ptr)
                        dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
-                                intel_agp_chipsets[i].gmch_chip_id);
+                                intel_agp_chipsets[i].gmch_chip_id);
                agp_put_bridge(bridge);
                return -ENODEV;
        }
 
        bridge->dev = pdev;
-       bridge->capndx = cap_ptr;
-       bridge->dev_private_data = &intel_private;
+       bridge->dev_private_data = NULL;
 
        dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
 
@@ -2577,15 +939,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
                                &bridge->mode);
        }
 
-       if (bridge->driver->mask_memory == intel_i965_mask_memory) {
-               if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
-                       dev_err(&intel_private.pcidev->dev,
-                               "set gfx device dma mask 36bit failed!\n");
-               else
-                       pci_set_consistent_dma_mask(intel_private.pcidev,
-                                                   DMA_BIT_MASK(36));
-       }
-
+found_gmch:
        pci_set_drvdata(pdev, bridge);
        err = agp_add_bridge(bridge);
        if (!err)
@@ -2611,22 +965,7 @@ static int agp_intel_resume(struct pci_dev *pdev)
        struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
        int ret_val;
 
-       if (bridge->driver == &intel_generic_driver)
-               intel_configure();
-       else if (bridge->driver == &intel_850_driver)
-               intel_850_configure();
-       else if (bridge->driver == &intel_845_driver)
-               intel_845_configure();
-       else if (bridge->driver == &intel_830mp_driver)
-               intel_830mp_configure();
-       else if (bridge->driver == &intel_915_driver)
-               intel_i915_configure();
-       else if (bridge->driver == &intel_830_driver)
-               intel_i830_configure();
-       else if (bridge->driver == &intel_810_driver)
-               intel_i810_configure();
-       else if (bridge->driver == &intel_i965_driver)
-               intel_i915_configure();
+       bridge->driver->configure();
 
        ret_val = agp_rebind_memory();
        if (ret_val != 0)
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644 (file)
index 0000000..2547465
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+
+/* Intel registers */
+#define INTEL_APSIZE   0xb4
+#define INTEL_ATTBASE  0xb8
+#define INTEL_AGPCTRL  0xb0
+#define INTEL_NBXCFG   0x50
+#define INTEL_ERRSTS   0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL                 0x52
+#define I830_GMCH_ENABLED              0x4
+#define I830_GMCH_MEM_MASK             0x1
+#define I830_GMCH_MEM_64M              0x1
+#define I830_GMCH_MEM_128M             0
+#define I830_GMCH_GMS_MASK             0x70
+#define I830_GMCH_GMS_DISABLED         0x00
+#define I830_GMCH_GMS_LOCAL            0x10
+#define I830_GMCH_GMS_STOLEN_512       0x20
+#define I830_GMCH_GMS_STOLEN_1024      0x30
+#define I830_GMCH_GMS_STOLEN_8192      0x40
+#define I830_RDRAM_CHANNEL_TYPE                0x03010
+#define I830_RDRAM_ND(x)               (((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x)              (((x) & 0x18) >> 3)
+
+/* This one is for I830MP with an external graphics card */
+#define INTEL_I830_ERRSTS      0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_MASK             0xF0
+#define I855_GMCH_GMS_STOLEN_0M                0x0
+#define I855_GMCH_GMS_STOLEN_1M                (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M                (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M                (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M       (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M       (0x5 << 4)
+#define I85X_CAPID                     0x44
+#define I85X_VARIANT_MASK              0x7
+#define I85X_VARIANT_SHIFT             5
+#define I855_GME                       0x0
+#define I855_GM                                0x4
+#define I852_GME                       0x2
+#define I852_GM                                0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM                0x51
+#define INTEL_I845_ERRSTS      0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG      0x50
+#define INTEL_I860_ERRSTS      0xc8
+
+/* Intel i810 registers */
+#define I810_GMADDR            0x10
+#define I810_MMADDR            0x14
+#define I810_PTE_BASE          0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL         0x00000002
+#define I810_PTE_VALID         0x00000001
+#define I830_PTE_SYSTEM_CACHED  0x00000006
+#define I810_SMRAM_MISCC       0x70
+#define I810_GFX_MEM_WIN_SIZE  0x00010000
+#define I810_GFX_MEM_WIN_32M   0x00010000
+#define I810_GMS               0x000000c0
+#define I810_GMS_DISABLE       0x00000000
+#define I810_PGETBL_CTL                0x2020
+#define I810_PGETBL_ENABLED    0x00000001
+#define I965_PGETBL_SIZE_MASK  0x0000000e
+#define I965_PGETBL_SIZE_512KB (0 << 1)
+#define I965_PGETBL_SIZE_256KB (1 << 1)
+#define I965_PGETBL_SIZE_128KB (2 << 1)
+#define I965_PGETBL_SIZE_1MB   (3 << 1)
+#define I965_PGETBL_SIZE_2MB   (4 << 1)
+#define I965_PGETBL_SIZE_1_5MB (5 << 1)
+#define G33_PGETBL_SIZE_MASK    (3 << 8)
+#define G33_PGETBL_SIZE_1M      (1 << 8)
+#define G33_PGETBL_SIZE_2M      (2 << 8)
+
+#define I810_DRAM_CTL          0x3000
+#define I810_DRAM_ROW_0                0x00000001
+#define I810_DRAM_ROW_0_SDRAM  0x00000001
+
+/* Intel 815 register */
+#define INTEL_815_APCONT       0x51
+#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR                0x51
+#define INTEL_I820_ERRSTS      0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG      0x50
+#define INTEL_I840_ERRSTS      0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG      0x50
+#define INTEL_I850_ERRSTS      0xc8
+
+/* intel 915G registers */
+#define I915_GMADDR    0x18
+#define I915_MMADDR    0x10
+#define I915_PTEADDR   0x1C
+#define I915_GMCH_GMS_STOLEN_48M       (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M       (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M       (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M       (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M      (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M     (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M     (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M     (0xd << 4)
+
+#define I915_IFPADDR    0x60
+
+/* Intel 965G registers */
+#define I965_MSAC 0x62
+#define I965_IFPADDR    0x70
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE     0x74
+#define INTEL_I7505_NCAPID     0x60
+#define INTEL_I7505_NISTAT     0x6c
+#define INTEL_I7505_ATTBASE    0x78
+#define INTEL_I7505_ERRSTS     0x42
+#define INTEL_I7505_AGPCTRL    0x70
+#define INTEL_I7505_MCHCFG     0x50
+
+#define SNB_GMCH_CTRL  0x50
+#define SNB_GMCH_GMS_STOLEN_MASK       0xF8
+#define SNB_GMCH_GMS_STOLEN_32M                (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M                (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M                (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M       (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M       (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M       (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M       (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M       (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M       (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M       (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M       (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M       (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M       (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M       (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M       (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M       (0x10 << 3)
+#define SNB_GTT_SIZE_0M                        (0 << 8)
+#define SNB_GTT_SIZE_1M                        (1 << 8)
+#define SNB_GTT_SIZE_2M                        (2 << 8)
+#define SNB_GTT_SIZE_MASK              (3 << 8)
+
+/* pci device ids */
+#define PCI_DEVICE_ID_INTEL_E7221_HB   0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG   0x258a
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG      0x2972
+#define PCI_DEVICE_ID_INTEL_82G35_HB     0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG     0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG       0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG       0x29A2
+#define PCI_DEVICE_ID_INTEL_82965GM_HB      0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GM_IG      0x2A02
+#define PCI_DEVICE_ID_INTEL_82965GME_HB     0x2A10
+#define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
+#define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
+#define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB        0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG        0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB         0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG         0xA001
+#define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
+#define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
+#define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
+#define PCI_DEVICE_ID_INTEL_Q35_IG          0x29B2
+#define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
+#define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
+#define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB        0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG        0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB          0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
+#define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
+#define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB          0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG          0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB          0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB         0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG          0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB  0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG  0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB  0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG  0x0106
+
+/* cover 915 and 945 variants */
+#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
+
+#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
+
+#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+               IS_SNB)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644 (file)
index 0000000..e8ea682
--- /dev/null
@@ -0,0 +1,1516 @@
+/*
+ * Intel GTT (Graphics Translation Table) routines
+ *
+ * Caveat: This driver implements the linux agp interface, but this is far from
+ * an agp driver! GTT support ended up here for purely historical reasons: The
+ * old userspace intel graphics drivers needed an interface to map memory into
+ * the GTT. And the drm provides a default interface for graphics devices
+ * sitting on an agp port. So it made sense to fake the GTT support as an agp
+ * port to avoid having to create a new api.
+ *
+ * With gem this does not make much sense anymore; it just needlessly
+ * complicates the code. But as long as the old graphics stack is still
+ * supported, it's stuck here.
+ *
+ * /fairy-tale-mode off
+ */
+
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_DMAR).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_DMAR
+#define USE_PCI_DMA_API 1
+#endif
+
+static const struct aper_size_info_fixed intel_i810_sizes[] =
+{
+       {64, 16384, 4},
+       /* The 32M mode still requires a 64k gatt */
+       {32, 8192, 4}
+};
+
+#define AGP_DCACHE_MEMORY      1
+#define AGP_PHYS_MEMORY                2
+#define INTEL_AGP_CACHED_MEMORY 3
+
+static struct gatt_mask intel_i810_masks[] =
+{
+       {.mask = I810_PTE_VALID, .type = 0},
+       {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
+       {.mask = I810_PTE_VALID, .type = 0},
+       {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
+        .type = INTEL_AGP_CACHED_MEMORY}
+};
+
+static struct _intel_private {
+       struct pci_dev *pcidev; /* device one */
+       u8 __iomem *registers;
+       u32 __iomem *gtt;               /* I915G */
+       int num_dcache_entries;
+       /* gtt_entries is the number of gtt entries that are already mapped
+        * to stolen memory.  Stolen memory is larger than the memory mapped
+        * through gtt_entries, as it includes some reserved space for the BIOS
+        * popup and for the GTT.
+        */
+       int gtt_entries;                        /* i830+ */
+       int gtt_total_size;
+       union {
+               void __iomem *i9xx_flush_page;
+               void *i8xx_flush_page;
+       };
+       struct page *i8xx_page;
+       struct resource ifp_resource;
+       int resource_valid;
+} intel_private;
+
+#ifdef USE_PCI_DMA_API
+static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+{
+       *ret = pci_map_page(intel_private.pcidev, page, 0,
+                           PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+               return -EINVAL;
+       return 0;
+}
+
+static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+{
+       pci_unmap_page(intel_private.pcidev, dma,
+                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
+static void intel_agp_free_sglist(struct agp_memory *mem)
+{
+       struct sg_table st;
+
+       st.sgl = mem->sg_list;
+       st.orig_nents = st.nents = mem->page_count;
+
+       sg_free_table(&st);
+
+       mem->sg_list = NULL;
+       mem->num_sg = 0;
+}
+
+static int intel_agp_map_memory(struct agp_memory *mem)
+{
+       struct sg_table st;
+       struct scatterlist *sg;
+       int i;
+
+       DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+
+       if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+               return -ENOMEM;
+
+       mem->sg_list = sg = st.sgl;
+
+       for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
+               sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+
+       mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
+                                mem->page_count, PCI_DMA_BIDIRECTIONAL);
+       if (unlikely(!mem->num_sg)) {
+               intel_agp_free_sglist(mem);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void intel_agp_unmap_memory(struct agp_memory *mem)
+{
+       DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+       pci_unmap_sg(intel_private.pcidev, mem->sg_list,
+                    mem->page_count, PCI_DMA_BIDIRECTIONAL);
+       intel_agp_free_sglist(mem);
+}
+
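+/*
+ * Write GTT PTEs for a dma-mapped scatterlist. When the IOMMU has merged
+ * sg entries, each merged segment is split back into per-page addresses,
+ * since every GTT entry maps exactly one page.
+ */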
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+                                       off_t pg_start, int mask_type)
+{
+       struct scatterlist *sg;
+       int i, j;
+
+       j = pg_start;
+
+       WARN_ON(!mem->num_sg);
+
+       if (mem->num_sg == mem->page_count) {
+               for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+                       writel(agp_bridge->driver->mask_memory(agp_bridge,
+                                       sg_dma_address(sg), mask_type),
+                                       intel_private.gtt+j);
+                       j++;
+               }
+       } else {
+               /* sg may merge pages, but we have to separate
+                * per-page addresses for the GTT */
+               unsigned int len, m;
+
+               for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+                       len = sg_dma_len(sg) / PAGE_SIZE;
+                       for (m = 0; m < len; m++) {
+                               writel(agp_bridge->driver->mask_memory(agp_bridge,
+                                                                      sg_dma_address(sg) + m * PAGE_SIZE,
+                                                                      mask_type),
+                                      intel_private.gtt+j);
+                               j++;
+                       }
+               }
+       }
+       readl(intel_private.gtt+j-1);
+}
+
+#else
+
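+/*
+ * Fallback without the PCI DMA API: write one GTT PTE per page straight
+ * from the page's physical address.
+ */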
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+                                       off_t pg_start, int mask_type)
+{
+       int i, j;
+       u32 cache_bits = 0;
+
+       if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+           agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+       {
+               cache_bits = I830_PTE_SYSTEM_CACHED;
+       }
+
+       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+               writel(agp_bridge->driver->mask_memory(agp_bridge,
+                               page_to_phys(mem->pages[i]), mask_type),
+                      intel_private.gtt+j);
+       }
+
+       readl(intel_private.gtt+j-1);
+}
+
+#endif
+
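+/*
+ * Read I810_SMRAM_MISCC to pick the 32M or 64M aperture mode and return
+ * the aperture size in megabytes (0 if the i810 is disabled).
+ */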
+static int intel_i810_fetch_size(void)
+{
+       u32 smram_miscc;
+       struct aper_size_info_fixed *values;
+
+       pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+       values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+       if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+               dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+               return 0;
+       }
+       if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+               agp_bridge->current_size = (void *) (values + 1);
+               agp_bridge->aperture_size_idx = 1;
+               return values[1].size;
+       } else {
+               agp_bridge->current_size = (void *) (values);
+               agp_bridge->aperture_size_idx = 0;
+               return values[0].size;
+       }
+}
+
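+/*
+ * Map the chipset registers (if not already mapped), enable the GATT
+ * and, when the driver needs a scratch page, point every PTE at it.
+ */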
+static int intel_i810_configure(void)
+{
+       struct aper_size_info_fixed *current_size;
+       u32 temp;
+       int i;
+
+       current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+       if (!intel_private.registers) {
+               pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+               temp &= 0xfff80000;
+
+               intel_private.registers = ioremap(temp, 128 * 4096);
+               if (!intel_private.registers) {
+                       dev_err(&intel_private.pcidev->dev,
+                               "can't remap memory\n");
+                       return -ENOMEM;
+               }
+       }
+
+       if ((readl(intel_private.registers+I810_DRAM_CTL)
+               & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+               /* This will need to be dynamically assigned */
+               dev_info(&intel_private.pcidev->dev,
+                        "detected 4MB dedicated video ram\n");
+               intel_private.num_dcache_entries = 1024;
+       }
+       pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+       agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+       writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+       readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+       if (agp_bridge->driver->needs_scratch_page) {
+               for (i = 0; i < current_size->num_entries; i++) {
+                       writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+               }
+               readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
+       }
+       global_cache_flush();
+       return 0;
+}
+
+static void intel_i810_cleanup(void)
+{
+       writel(0, intel_private.registers+I810_PGETBL_CTL);
+       readl(intel_private.registers); /* PCI Posting. */
+       iounmap(intel_private.registers);
+}
+
+static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+       return;
+}
+
+/* Exists to support ARGB cursors */
+static struct page *i8xx_alloc_pages(void)
+{
+       struct page *page;
+
+       page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
+       if (page == NULL)
+               return NULL;
+
+       if (set_pages_uc(page, 4) < 0) {
+               set_pages_wb(page, 4);
+               __free_pages(page, 2);
+               return NULL;
+       }
+       get_page(page);
+       atomic_inc(&agp_bridge->current_memory_agp);
+       return page;
+}
+
+static void i8xx_destroy_pages(struct page *page)
+{
+       if (page == NULL)
+               return;
+
+       set_pages_wb(page, 4);
+       put_page(page);
+       __free_pages(page, 2);
+       atomic_dec(&agp_bridge->current_memory_agp);
+}
+
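+/*
+ * Translate an AGP memory type into an index into intel_i810_masks:
+ * user types map through unchanged, AGP_USER_CACHED_MEMORY selects the
+ * cacheable PTE mask, everything else falls back to the default mask.
+ */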
+static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
+                                       int type)
+{
+       if (type < AGP_USER_TYPES)
+               return type;
+       else if (type == AGP_USER_CACHED_MEMORY)
+               return INTEL_AGP_CACHED_MEMORY;
+       else
+               return 0;
+}
+
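+/*
+ * Bind pages into the i810 GTT. Dcache memory programs the on-chip
+ * video ram PTEs directly; physical and normal memory write one PTE
+ * per page, flushing the CPU caches first if the pages are not yet
+ * flushed.
+ */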
+static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
+                               int type)
+{
+       int i, j, num_entries;
+       void *temp;
+       int ret = -EINVAL;
+       int mask_type;
+
+       if (mem->page_count == 0)
+               goto out;
+
+       temp = agp_bridge->current_size;
+       num_entries = A_SIZE_FIX(temp)->num_entries;
+
+       if ((pg_start + mem->page_count) > num_entries)
+               goto out_err;
+
+
+       for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+               if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
+                       ret = -EBUSY;
+                       goto out_err;
+               }
+       }
+
+       if (type != mem->type)
+               goto out_err;
+
+       mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+       switch (mask_type) {
+       case AGP_DCACHE_MEMORY:
+               if (!mem->is_flushed)
+                       global_cache_flush();
+               for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+                       writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
+                              intel_private.registers+I810_PTE_BASE+(i*4));
+               }
+               readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+               break;
+       case AGP_PHYS_MEMORY:
+       case AGP_NORMAL_MEMORY:
+               if (!mem->is_flushed)
+                       global_cache_flush();
+               for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+                       writel(agp_bridge->driver->mask_memory(agp_bridge,
+                                       page_to_phys(mem->pages[i]), mask_type),
+                              intel_private.registers+I810_PTE_BASE+(j*4));
+               }
+               readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+               break;
+       default:
+               goto out_err;
+       }
+
+out:
+       ret = 0;
+out_err:
+       mem->is_flushed = true;
+       return ret;
+}
+
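+/* Unbind pages by pointing their GTT entries back at the scratch page. */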
+static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
+                               int type)
+{
+       int i;
+
+       if (mem->page_count == 0)
+               return 0;
+
+       for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+               writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+       }
+       readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+       return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However the Xserver still writes to it through the agp aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+       struct agp_memory *new;
+       struct page *page;
+
+       switch (pg_count) {
+       case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
+               break;
+       case 4:
+               /* kludge to get 4 physical pages for ARGB cursor */
+               page = i8xx_alloc_pages();
+               break;
+       default:
+               return NULL;
+       }
+
+       if (page == NULL)
+               return NULL;
+
+       new = agp_create_memory(pg_count);
+       if (new == NULL)
+               return NULL;
+
+       new->pages[0] = page;
+       if (pg_count == 4) {
+               /* kludge to get 4 physical pages for ARGB cursor */
+               new->pages[1] = new->pages[0] + 1;
+               new->pages[2] = new->pages[1] + 1;
+               new->pages[3] = new->pages[2] + 1;
+       }
+       new->page_count = pg_count;
+       new->num_scratch_pages = pg_count;
+       new->type = AGP_PHYS_MEMORY;
+       new->physical = page_to_phys(new->pages[0]);
+       return new;
+}
+
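+/*
+ * Allocate by type: dcache requests must cover all dcache entries and
+ * carry no backing pages; physical requests go through the i8xx
+ * physical-page helper above.
+ */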
+static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+       struct agp_memory *new;
+
+       if (type == AGP_DCACHE_MEMORY) {
+               if (pg_count != intel_private.num_dcache_entries)
+                       return NULL;
+
+               new = agp_create_memory(1);
+               if (new == NULL)
+                       return NULL;
+
+               new->type = AGP_DCACHE_MEMORY;
+               new->page_count = pg_count;
+               new->num_scratch_pages = 0;
+               agp_free_page_array(new);
+               return new;
+       }
+       if (type == AGP_PHYS_MEMORY)
+               return alloc_agpphysmem_i8xx(pg_count, type);
+       return NULL;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+       agp_free_key(curr->key);
+       if (curr->type == AGP_PHYS_MEMORY) {
+               if (curr->page_count == 4)
+                       i8xx_destroy_pages(curr->pages[0]);
+               else {
+                       agp_bridge->driver->agp_destroy_page(curr->pages[0],
+                                                            AGP_PAGE_DESTROY_UNMAP);
+                       agp_bridge->driver->agp_destroy_page(curr->pages[0],
+                                                            AGP_PAGE_DESTROY_FREE);
+               }
+               agp_free_page_array(curr);
+       }
+       kfree(curr);
+}
+
+static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
+                                           dma_addr_t addr, int type)
+{
+       /* Type checking must be done elsewhere */
+       return addr | bridge->driver->masks[type].mask;
+}
+
+static struct aper_size_info_fixed intel_i830_sizes[] =
+{
+       {128, 32768, 5},
+       /* The 64M mode still requires a 128k gatt */
+       {64, 16384, 5},
+       {256, 65536, 6},
+       {512, 131072, 7},
+};
+
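+/*
+ * Work out how many GTT entries are already mapped to stolen memory.
+ * The reserved space at the top of stolen memory (BIOS popup plus, on
+ * older parts, the GTT itself) is subtracted from the stolen size
+ * reported by the GMCH control register.
+ */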
+static void intel_i830_init_gtt_entries(void)
+{
+       u16 gmch_ctrl;
+       int gtt_entries = 0;
+       u8 rdct;
+       int local = 0;
+       static const int ddt[4] = { 0, 16, 32, 64 };
+       int size; /* reserved space (in kb) at the top of stolen memory */
+
+       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+       if (IS_I965) {
+               u32 pgetbl_ctl;
+               pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+               /* The 965 has a field telling us the size of the GTT,
+                * which may be larger than what is necessary to map the
+                * aperture.
+                */
+               switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+               case I965_PGETBL_SIZE_128KB:
+                       size = 128;
+                       break;
+               case I965_PGETBL_SIZE_256KB:
+                       size = 256;
+                       break;
+               case I965_PGETBL_SIZE_512KB:
+                       size = 512;
+                       break;
+               case I965_PGETBL_SIZE_1MB:
+                       size = 1024;
+                       break;
+               case I965_PGETBL_SIZE_2MB:
+                       size = 2048;
+                       break;
+               case I965_PGETBL_SIZE_1_5MB:
+                       size = 1024 + 512;
+                       break;
+               default:
+                       dev_info(&intel_private.pcidev->dev,
+                                "unknown page table size, assuming 512KB\n");
+                       size = 512;
+               }
+               size += 4; /* add in BIOS popup space */
+       } else if (IS_G33 && !IS_PINEVIEW) {
+               /* G33's GTT size is defined in gmch_ctrl */
+               switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+               case G33_PGETBL_SIZE_1M:
+                       size = 1024;
+                       break;
+               case G33_PGETBL_SIZE_2M:
+                       size = 2048;
+                       break;
+               default:
+                       dev_info(&agp_bridge->dev->dev,
+                                "unknown page table size 0x%x, assuming 512KB\n",
+                               (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+                       size = 512;
+               }
+               size += 4;
+       } else if (IS_G4X || IS_PINEVIEW) {
+               /* On 4 series hardware, GTT stolen memory is separate from
+                * graphics stolen memory, so ignore it when counting stolen
+                * gtt entries.  However, 4KB of the stolen memory doesn't
+                * get mapped to the GTT.
+                */
+               size = 4;
+       } else {
+               /* On previous hardware, the GTT size was just what was
+                * required to map the aperture.
+                */
+               size = agp_bridge->driver->fetch_size() + 4;
+       }
+
+       if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+           agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+               switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+               case I830_GMCH_GMS_STOLEN_512:
+                       gtt_entries = KB(512) - KB(size);
+                       break;
+               case I830_GMCH_GMS_STOLEN_1024:
+                       gtt_entries = MB(1) - KB(size);
+                       break;
+               case I830_GMCH_GMS_STOLEN_8192:
+                       gtt_entries = MB(8) - KB(size);
+                       break;
+               case I830_GMCH_GMS_LOCAL:
+                       rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+                       gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+                                       MB(ddt[I830_RDRAM_DDT(rdct)]);
+                       local = 1;
+                       break;
+               default:
+                       gtt_entries = 0;
+                       break;
+               }
+       } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+                  agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+               /*
+                * SandyBridge has a new memory control reg at 0x50.w
+                */
+               u16 snb_gmch_ctl;
+               pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+               switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+               case SNB_GMCH_GMS_STOLEN_32M:
+                       gtt_entries = MB(32) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_64M:
+                       gtt_entries = MB(64) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_96M:
+                       gtt_entries = MB(96) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_128M:
+                       gtt_entries = MB(128) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_160M:
+                       gtt_entries = MB(160) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_192M:
+                       gtt_entries = MB(192) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_224M:
+                       gtt_entries = MB(224) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_256M:
+                       gtt_entries = MB(256) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_288M:
+                       gtt_entries = MB(288) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_320M:
+                       gtt_entries = MB(320) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_352M:
+                       gtt_entries = MB(352) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_384M:
+                       gtt_entries = MB(384) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_416M:
+                       gtt_entries = MB(416) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_448M:
+                       gtt_entries = MB(448) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_480M:
+                       gtt_entries = MB(480) - KB(size);
+                       break;
+               case SNB_GMCH_GMS_STOLEN_512M:
+                       gtt_entries = MB(512) - KB(size);
+                       break;
+               }
+       } else {
+               switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+               case I855_GMCH_GMS_STOLEN_1M:
+                       gtt_entries = MB(1) - KB(size);
+                       break;
+               case I855_GMCH_GMS_STOLEN_4M:
+                       gtt_entries = MB(4) - KB(size);
+                       break;
+               case I855_GMCH_GMS_STOLEN_8M:
+                       gtt_entries = MB(8) - KB(size);
+                       break;
+               case I855_GMCH_GMS_STOLEN_16M:
+                       gtt_entries = MB(16) - KB(size);
+                       break;
+               case I855_GMCH_GMS_STOLEN_32M:
+                       gtt_entries = MB(32) - KB(size);
+                       break;
+               case I915_GMCH_GMS_STOLEN_48M:
+                       /* Check it's really I915G */
+                       if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+                               gtt_entries = MB(48) - KB(size);
+                       else
+                               gtt_entries = 0;
+                       break;
+               case I915_GMCH_GMS_STOLEN_64M:
+                       /* Check it's really I915G */
+                       if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+                               gtt_entries = MB(64) - KB(size);
+                       else
+                               gtt_entries = 0;
+                       break;
+               case G33_GMCH_GMS_STOLEN_128M:
+                       if (IS_G33 || IS_I965 || IS_G4X)
+                               gtt_entries = MB(128) - KB(size);
+                       else
+                               gtt_entries = 0;
+                       break;
+               case G33_GMCH_GMS_STOLEN_256M:
+                       if (IS_G33 || IS_I965 || IS_G4X)
+                               gtt_entries = MB(256) - KB(size);
+                       else
+                               gtt_entries = 0;
+                       break;
+               case INTEL_GMCH_GMS_STOLEN_96M:
+                       if (IS_I965 || IS_G4X)
+                               gtt_entries = MB(96) - KB(size);
+                       else
+                               gtt_entries = 0;
+                       break;
+               case INTEL_GMCH_GMS_STOLEN_160M:
+                       if (IS_I965 || IS_G4X)
+                               gtt_entries = MB(160) - KB(size);
+                       else
+                               gtt_entries = 0;
+                       break;
+               case INTEL_GMCH_GMS_STOLEN_224M:
+                       if (IS_I965 || IS_G4X)
+                               gtt_entries = MB(224) - KB(size);
+                       else
+                               gtt_entries = 0;
+                       break;
+               case INTEL_GMCH_GMS_STOLEN_352M:
+                       if (IS_I965 || IS_G4X)
+                               gtt_entries = MB(352) - KB(size);
+                       else
+                               gtt_entries = 0;
+                       break;
+               default:
+                       gtt_entries = 0;
+                       break;
+               }
+       }
+       if (gtt_entries > 0) {
+               dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
+                      gtt_entries / KB(1), local ? "local" : "stolen");
+               gtt_entries /= KB(4);
+       } else {
+               dev_info(&agp_bridge->dev->dev,
+                      "no pre-allocated video memory detected\n");
+               gtt_entries = 0;
+       }
+
+       intel_private.gtt_entries = gtt_entries;
+}
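+
+/* Worked example (illustrative figures only): with 64MB stolen and a
+ * 132KB GTT, gtt_entries = MB(64) - KB(132) = 67108864 - 135168 =
+ * 66973696 bytes; dividing by KB(4) gives 16351 stolen 4KB pages that
+ * insert/remove must never remap.
+ */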
+
+static void intel_i830_fini_flush(void)
+{
+       kunmap(intel_private.i8xx_page);
+       intel_private.i8xx_flush_page = NULL;
+       unmap_page_from_agp(intel_private.i8xx_page);
+
+       __free_page(intel_private.i8xx_page);
+       intel_private.i8xx_page = NULL;
+}
+
+static void intel_i830_setup_flush(void)
+{
+       /* return if we've already set the flush mechanism up */
+       if (intel_private.i8xx_page)
+               return;
+
+       intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+       if (!intel_private.i8xx_page)
+               return;
+
+       intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+       if (!intel_private.i8xx_flush_page)
+               intel_i830_fini_flush();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, the buffer is 64 octwords (64 * 16 bytes = 1KB).  So, to get any
+ * previous contents of that buffer out, we just fill 1KB and clflush it, on
+ * the assumption that doing so pushes whatever was in there out.  It appears
+ * to work.
+ */
+static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+{
+       unsigned int *pg = intel_private.i8xx_flush_page;
+
+       memset(pg, 0, 1024);
+
+       if (cpu_has_clflush)
+               clflush_cache_range(pg, 1024);
+       else if (wbinvd_on_all_cpus() != 0)
+               printk(KERN_ERR "Timed out waiting for cache flush.\n");
+}
+
+/* The intel i830 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+{
+       int page_order;
+       struct aper_size_info_fixed *size;
+       int num_entries;
+       u32 temp;
+
+       size = agp_bridge->current_size;
+       page_order = size->page_order;
+       num_entries = size->num_entries;
+       agp_bridge->gatt_table_real = NULL;
+
+       pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+       temp &= 0xfff80000;
+
+       intel_private.registers = ioremap(temp, 128 * 4096);
+       if (!intel_private.registers)
+               return -ENOMEM;
+
+       temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+       global_cache_flush();   /* FIXME: ?? */
+
+       /* we have to call this as early as possible after the MMIO base address is known */
+       intel_i830_init_gtt_entries();
+
+       agp_bridge->gatt_table = NULL;
+
+       agp_bridge->gatt_bus_addr = temp;
+
+       return 0;
+}
+
+/* Nothing to free here: the GATT lives at the top of stolen memory and
+ * was set up by the BIOS, not allocated by the driver.
+ */
+static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+{
+       return 0;
+}
+
+static int intel_i830_fetch_size(void)
+{
+       u16 gmch_ctrl;
+       struct aper_size_info_fixed *values;
+
+       values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+       if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
+           agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
+               /* 855GM/852GM/865G have a 128MB aperture size */
+               agp_bridge->current_size = (void *) values;
+               agp_bridge->aperture_size_idx = 0;
+               return values[0].size;
+       }
+
+       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+       if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+               agp_bridge->current_size = (void *) values;
+               agp_bridge->aperture_size_idx = 0;
+               return values[0].size;
+       } else {
+               agp_bridge->current_size = (void *) (values + 1);
+               agp_bridge->aperture_size_idx = 1;
+               return values[1].size;
+       }
+}
+
+static int intel_i830_configure(void)
+{
+       struct aper_size_info_fixed *current_size;
+       u32 temp;
+       u16 gmch_ctrl;
+       int i;
+
+       current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+       pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+       agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+       gmch_ctrl |= I830_GMCH_ENABLED;
+       pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+       writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+       readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+       if (agp_bridge->driver->needs_scratch_page) {
+               for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+                       writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+               }
+               readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
+       }
+
+       global_cache_flush();
+
+       intel_i830_setup_flush();
+       return 0;
+}
+
+static void intel_i830_cleanup(void)
+{
+       iounmap(intel_private.registers);
+}
+
+static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
+                                    int type)
+{
+       int i, j, num_entries;
+       void *temp;
+       int ret = -EINVAL;
+       int mask_type;
+
+       if (mem->page_count == 0)
+               goto out;
+
+       temp = agp_bridge->current_size;
+       num_entries = A_SIZE_FIX(temp)->num_entries;
+
+       if (pg_start < intel_private.gtt_entries) {
+               dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+                          "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+                          pg_start, intel_private.gtt_entries);
+
+               dev_info(&intel_private.pcidev->dev,
+                        "trying to insert into local/stolen memory\n");
+               goto out_err;
+       }
+
+       if ((pg_start + mem->page_count) > num_entries)
+               goto out_err;
+
+       /* The i830 can't check the GTT for entries since it's read only;
+        * depend on the caller to make the correct offset decisions.
+        */
+
+       if (type != mem->type)
+               goto out_err;
+
+       mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+       if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+           mask_type != INTEL_AGP_CACHED_MEMORY)
+               goto out_err;
+
+       if (!mem->is_flushed)
+               global_cache_flush();
+
+       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+               writel(agp_bridge->driver->mask_memory(agp_bridge,
+                               page_to_phys(mem->pages[i]), mask_type),
+                      intel_private.registers+I810_PTE_BASE+(j*4));
+       }
+       readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+
+out:
+       ret = 0;
+out_err:
+       mem->is_flushed = true;
+       return ret;
+}
+
+static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
+                                    int type)
+{
+       int i;
+
+       if (mem->page_count == 0)
+               return 0;
+
+       if (pg_start < intel_private.gtt_entries) {
+               dev_info(&intel_private.pcidev->dev,
+                        "trying to disable local/stolen memory\n");
+               return -EINVAL;
+       }
+
+       for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+               writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+       }
+       readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+       return 0;
+}
+
+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+{
+       if (type == AGP_PHYS_MEMORY)
+               return alloc_agpphysmem_i8xx(pg_count, type);
+       /* always return NULL for other allocation types for now */
+       return NULL;
+}
+
+static int intel_alloc_chipset_flush_resource(void)
+{
+       int ret;
+       ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+                                    PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+                                    pcibios_align_resource, agp_bridge->dev);
+
+       return ret;
+}
+
+static void intel_i915_setup_chipset_flush(void)
+{
+       int ret;
+       u32 temp;
+
+       pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
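+       /* Bit 0 of IFPADDR acts as the valid/enable flag: if the BIOS has
+        * not programmed a flush page, allocate one ourselves; otherwise
+        * reuse the address the BIOS configured.
+        */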
+       if (!(temp & 0x1)) {
+               intel_alloc_chipset_flush_resource();
+               intel_private.resource_valid = 1;
+               pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+       } else {
+               temp &= ~1;
+
+               intel_private.resource_valid = 1;
+               intel_private.ifp_resource.start = temp;
+               intel_private.ifp_resource.end = temp + PAGE_SIZE;
+               ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+               /* some BIOSes reserve this area in a PnP resource, some don't */
+               if (ret)
+                       intel_private.resource_valid = 0;
+       }
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+       u32 temp_hi, temp_lo;
+       int ret;
+
+       pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
+       pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+
+       if (!(temp_lo & 0x1)) {
+
+               intel_alloc_chipset_flush_resource();
+
+               intel_private.resource_valid = 1;
+               pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+                       upper_32_bits(intel_private.ifp_resource.start));
+               pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+       } else {
+               u64 l64;
+
+               temp_lo &= ~0x1;
+               l64 = ((u64)temp_hi << 32) | temp_lo;
+
+               intel_private.resource_valid = 1;
+               intel_private.ifp_resource.start = l64;
+               intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+               ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+               /* some BIOSes reserve this area in a PnP resource, some don't */
+               if (ret)
+                       intel_private.resource_valid = 0;
+       }
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+       /* return if already configured */
+       if (intel_private.ifp_resource.start)
+               return;
+
+       if (IS_SNB)
+               return;
+
+       /* setup a resource for this object */
+       intel_private.ifp_resource.name = "Intel Flush Page";
+       intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+       /* Set up the chipset flush page: 965/G33/G4X program a 64-bit
+        * IFPADDR, while the 915 family programs a 32-bit one.
+        */
+       if (IS_I965 || IS_G33 || IS_G4X) {
+               intel_i965_g33_setup_chipset_flush();
+       } else {
+               intel_i915_setup_chipset_flush();
+       }
+
+       if (intel_private.ifp_resource.start) {
+               intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+               if (!intel_private.i9xx_flush_page)
+                       dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
+       }
+}
+
+static int intel_i915_configure(void)
+{
+       struct aper_size_info_fixed *current_size;
+       u32 temp;
+       u16 gmch_ctrl;
+       int i;
+
+       current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+       pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
+
+       agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+       gmch_ctrl |= I830_GMCH_ENABLED;
+       pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+       writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+       readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+       if (agp_bridge->driver->needs_scratch_page) {
+               for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
+                       writel(agp_bridge->scratch_page, intel_private.gtt+i);
+               }
+               readl(intel_private.gtt+i-1);   /* PCI Posting. */
+       }
+
+       global_cache_flush();
+
+       intel_i9xx_setup_flush();
+
+       return 0;
+}
+
+static void intel_i915_cleanup(void)
+{
+       if (intel_private.i9xx_flush_page)
+               iounmap(intel_private.i9xx_flush_page);
+       if (intel_private.resource_valid)
+               release_resource(&intel_private.ifp_resource);
+       intel_private.ifp_resource.start = 0;
+       intel_private.resource_valid = 0;
+       iounmap(intel_private.gtt);
+       iounmap(intel_private.registers);
+}
+
+static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
+{
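+       /* Writing to the uncached flush page forces the chipset to flush
+        * its write buffers; the value written (1 here) does not appear to
+        * matter.
+        */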
+       if (intel_private.i9xx_flush_page)
+               writel(1, intel_private.i9xx_flush_page);
+}
+
+static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
+                                    int type)
+{
+       int num_entries;
+       void *temp;
+       int ret = -EINVAL;
+       int mask_type;
+
+       if (mem->page_count == 0)
+               goto out;
+
+       temp = agp_bridge->current_size;
+       num_entries = A_SIZE_FIX(temp)->num_entries;
+
+       if (pg_start < intel_private.gtt_entries) {
+               dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+                          "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+                          pg_start, intel_private.gtt_entries);
+
+               dev_info(&intel_private.pcidev->dev,
+                        "trying to insert into local/stolen memory\n");
+               goto out_err;
+       }
+
+       if ((pg_start + mem->page_count) > num_entries)
+               goto out_err;
+
+       /* The i915 can't check the GTT for entries since it's read only;
+        * depend on the caller to make the correct offset decisions.
+        */
+
+       if (type != mem->type)
+               goto out_err;
+
+       mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+       if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+           mask_type != INTEL_AGP_CACHED_MEMORY)
+               goto out_err;
+
+       if (!mem->is_flushed)
+               global_cache_flush();
+
+       intel_agp_insert_sg_entries(mem, pg_start, mask_type);
+
+ out:
+       ret = 0;
+ out_err:
+       mem->is_flushed = true;
+       return ret;
+}
+
+static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
+                                    int type)
+{
+       int i;
+
+       if (mem->page_count == 0)
+               return 0;
+
+       if (pg_start < intel_private.gtt_entries) {
+               dev_info(&intel_private.pcidev->dev,
+                        "trying to disable local/stolen memory\n");
+               return -EINVAL;
+       }
+
+       for (i = pg_start; i < (mem->page_count + pg_start); i++)
+               writel(agp_bridge->scratch_page, intel_private.gtt+i);
+
+       readl(intel_private.gtt+i-1);
+
+       return 0;
+}
+
+/* Return the aperture size by just checking the resource length.  The only
+ * effect of the MSAC registers, as described in the spec, is to change the
+ * resource size.
+ */
+static int intel_i9xx_fetch_size(void)
+{
+       int num_sizes = ARRAY_SIZE(intel_i830_sizes);
+       int aper_size; /* size in megabytes */
+       int i;
+
+       aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
+
+       for (i = 0; i < num_sizes; i++) {
+               if (aper_size == intel_i830_sizes[i].size) {
+                       agp_bridge->current_size = intel_i830_sizes + i;
+                       return aper_size;
+               }
+       }
+
+       return 0;
+}
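+
+/* Illustrative usage: on a part whose BAR 2 is 256MB long,
+ * pci_resource_len() / MB(1) yields 256, which is matched against the
+ * fixed intel_i830_sizes table to pick the current aperture entry.
+ */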
+
+/* The intel i915 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+{
+       int page_order;
+       struct aper_size_info_fixed *size;
+       int num_entries;
+       u32 temp, temp2;
+       int gtt_map_size = 256 * 1024;
+
+       size = agp_bridge->current_size;
+       page_order = size->page_order;
+       num_entries = size->num_entries;
+       agp_bridge->gatt_table_real = NULL;
+
+       pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+       pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
+
+       if (IS_G33)
+               gtt_map_size = 1024 * 1024;     /* 1M on G33 */
+       intel_private.gtt = ioremap(temp2, gtt_map_size);
+       if (!intel_private.gtt)
+               return -ENOMEM;
+
+       intel_private.gtt_total_size = gtt_map_size / 4;
+
+       temp &= 0xfff80000;
+
+       intel_private.registers = ioremap(temp, 128 * 4096);
+       if (!intel_private.registers) {
+               iounmap(intel_private.gtt);
+               return -ENOMEM;
+       }
+
+       temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+       global_cache_flush();   /* FIXME: ? */
+
+       /* we have to call this as early as possible after the MMIO base address is known */
+       intel_i830_init_gtt_entries();
+
+       agp_bridge->gatt_table = NULL;
+
+       agp_bridge->gatt_bus_addr = temp;
+
+       return 0;
+}
+
+/*
+ * The i965 supports 36-bit physical addresses, but to keep
+ * the format of the GTT the same, the bits that don't fit
+ * in a 32-bit word are shifted down to bits 4..7.
+ *
+ * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
+ * is always zero on 32-bit architectures, so no need to make
+ * this conditional.
+ */
+static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
+                                           dma_addr_t addr, int type)
+{
+       /* Shift high bits down */
+       addr |= (addr >> 28) & 0xf0;
+
+       /* Type checking must be done elsewhere */
+       return addr | bridge->driver->masks[type].mask;
+}
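+
+/* Illustrative example (hypothetical address): for addr = 0x340000000,
+ * addr >> 28 = 0x34 and (addr >> 28) & 0xf0 = 0x30, so address bits
+ * 35:32 (value 0x3) land in PTE bits 7:4 while the low 32 bits keep
+ * their usual meaning.
+ */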
+
+static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
+{
+       u16 snb_gmch_ctl;
+
+       switch (agp_bridge->dev->device) {
+       case PCI_DEVICE_ID_INTEL_GM45_HB:
+       case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
+       case PCI_DEVICE_ID_INTEL_Q45_HB:
+       case PCI_DEVICE_ID_INTEL_G45_HB:
+       case PCI_DEVICE_ID_INTEL_G41_HB:
+       case PCI_DEVICE_ID_INTEL_B43_HB:
+       case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+       case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+       case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+       case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+               *gtt_offset = *gtt_size = MB(2);
+               break;
+       case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+       case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+               *gtt_offset = MB(2);
+
+               pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+               switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+               default:
+               case SNB_GTT_SIZE_0M:
+                       printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+                       *gtt_size = MB(0);
+                       break;
+               case SNB_GTT_SIZE_1M:
+                       *gtt_size = MB(1);
+                       break;
+               case SNB_GTT_SIZE_2M:
+                       *gtt_size = MB(2);
+                       break;
+               }
+               break;
+       default:
+               *gtt_offset = *gtt_size = KB(512);
+       }
+}
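+
+/* Arithmetic note (illustrative): a 2MB GTT holds 2MB / 4 bytes per
+ * entry = 524288 entries, each mapping a 4KB page, i.e. a 2GB aperture;
+ * the gtt_total_size = gtt_size / 4 computation below depends on that
+ * 4-byte entry width.
+ */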
+
+/* The intel i965 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
+{
+       int page_order;
+       struct aper_size_info_fixed *size;
+       int num_entries;
+       u32 temp;
+       int gtt_offset, gtt_size;
+
+       size = agp_bridge->current_size;
+       page_order = size->page_order;
+       num_entries = size->num_entries;
+       agp_bridge->gatt_table_real = NULL;
+
+       pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+
+       temp &= 0xfff00000;
+
+       intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
+
+       intel_private.gtt = ioremap(temp + gtt_offset, gtt_size);
+
+       if (!intel_private.gtt)
+               return -ENOMEM;
+
+       intel_private.gtt_total_size = gtt_size / 4;
+
+       intel_private.registers = ioremap(temp, 128 * 4096);
+       if (!intel_private.registers) {
+               iounmap(intel_private.gtt);
+               return -ENOMEM;
+       }
+
+       temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+       global_cache_flush();   /* FIXME: ? */
+
+       /* we have to call this as early as possible after the MMIO base address is known */
+       intel_i830_init_gtt_entries();
+
+       agp_bridge->gatt_table = NULL;
+
+       agp_bridge->gatt_bus_addr = temp;
+
+       return 0;
+}
+
+static const struct agp_bridge_driver intel_810_driver = {
+       .owner                  = THIS_MODULE,
+       .aperture_sizes         = intel_i810_sizes,
+       .size_type              = FIXED_APER_SIZE,
+       .num_aperture_sizes     = 2,
+       .needs_scratch_page     = true,
+       .configure              = intel_i810_configure,
+       .fetch_size             = intel_i810_fetch_size,
+       .cleanup                = intel_i810_cleanup,
+       .mask_memory            = intel_i810_mask_memory,
+       .masks                  = intel_i810_masks,
+       .agp_enable             = intel_i810_agp_enable,
+       .cache_flush            = global_cache_flush,
+       .create_gatt_table      = agp_generic_create_gatt_table,
+       .free_gatt_table        = agp_generic_free_gatt_table,
+       .insert_memory          = intel_i810_insert_entries,
+       .remove_memory          = intel_i810_remove_entries,
+       .alloc_by_type          = intel_i810_alloc_by_type,
+       .free_by_type           = intel_i810_free_by_type,
+       .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
+       .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
+       .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_830_driver = {
+       .owner                  = THIS_MODULE,
+       .aperture_sizes         = intel_i830_sizes,
+       .size_type              = FIXED_APER_SIZE,
+       .num_aperture_sizes     = 4,
+       .needs_scratch_page     = true,
+       .configure              = intel_i830_configure,
+       .fetch_size             = intel_i830_fetch_size,
+       .cleanup                = intel_i830_cleanup,
+       .mask_memory            = intel_i810_mask_memory,
+       .masks                  = intel_i810_masks,
+       .agp_enable             = intel_i810_agp_enable,
+       .cache_flush            = global_cache_flush,
+       .create_gatt_table      = intel_i830_create_gatt_table,
+       .free_gatt_table        = intel_i830_free_gatt_table,
+       .insert_memory          = intel_i830_insert_entries,
+       .remove_memory          = intel_i830_remove_entries,
+       .alloc_by_type          = intel_i830_alloc_by_type,
+       .free_by_type           = intel_i810_free_by_type,
+       .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
+       .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
+       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+       .chipset_flush          = intel_i830_chipset_flush,
+};
+
+static const struct agp_bridge_driver intel_915_driver = {
+       .owner                  = THIS_MODULE,
+       .aperture_sizes         = intel_i830_sizes,
+       .size_type              = FIXED_APER_SIZE,
+       .num_aperture_sizes     = 4,
+       .needs_scratch_page     = true,
+       .configure              = intel_i915_configure,
+       .fetch_size             = intel_i9xx_fetch_size,
+       .cleanup                = intel_i915_cleanup,
+       .mask_memory            = intel_i810_mask_memory,
+       .masks                  = intel_i810_masks,
+       .agp_enable             = intel_i810_agp_enable,
+       .cache_flush            = global_cache_flush,
+       .create_gatt_table      = intel_i915_create_gatt_table,
+       .free_gatt_table        = intel_i830_free_gatt_table,
+       .insert_memory          = intel_i915_insert_entries,
+       .remove_memory          = intel_i915_remove_entries,
+       .alloc_by_type          = intel_i830_alloc_by_type,
+       .free_by_type           = intel_i810_free_by_type,
+       .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
+       .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
+       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+       .chipset_flush          = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+       .agp_map_page           = intel_agp_map_page,
+       .agp_unmap_page         = intel_agp_unmap_page,
+       .agp_map_memory         = intel_agp_map_memory,
+       .agp_unmap_memory       = intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_i965_driver = {
+       .owner                  = THIS_MODULE,
+       .aperture_sizes         = intel_i830_sizes,
+       .size_type              = FIXED_APER_SIZE,
+       .num_aperture_sizes     = 4,
+       .needs_scratch_page     = true,
+       .configure              = intel_i915_configure,
+       .fetch_size             = intel_i9xx_fetch_size,
+       .cleanup                = intel_i915_cleanup,
+       .mask_memory            = intel_i965_mask_memory,
+       .masks                  = intel_i810_masks,
+       .agp_enable             = intel_i810_agp_enable,
+       .cache_flush            = global_cache_flush,
+       .create_gatt_table      = intel_i965_create_gatt_table,
+       .free_gatt_table        = intel_i830_free_gatt_table,
+       .insert_memory          = intel_i915_insert_entries,
+       .remove_memory          = intel_i915_remove_entries,
+       .alloc_by_type          = intel_i830_alloc_by_type,
+       .free_by_type           = intel_i810_free_by_type,
+       .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
+       .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
+       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+       .chipset_flush          = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+       .agp_map_page           = intel_agp_map_page,
+       .agp_unmap_page         = intel_agp_unmap_page,
+       .agp_map_memory         = intel_agp_map_memory,
+       .agp_unmap_memory       = intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_g33_driver = {
+       .owner                  = THIS_MODULE,
+       .aperture_sizes         = intel_i830_sizes,
+       .size_type              = FIXED_APER_SIZE,
+       .num_aperture_sizes     = 4,
+       .needs_scratch_page     = true,
+       .configure              = intel_i915_configure,
+       .fetch_size             = intel_i9xx_fetch_size,
+       .cleanup                = intel_i915_cleanup,
+       .mask_memory            = intel_i965_mask_memory,
+       .masks                  = intel_i810_masks,
+       .agp_enable             = intel_i810_agp_enable,
+       .cache_flush            = global_cache_flush,
+       .create_gatt_table      = intel_i915_create_gatt_table,
+       .free_gatt_table        = intel_i830_free_gatt_table,
+       .insert_memory          = intel_i915_insert_entries,
+       .remove_memory          = intel_i915_remove_entries,
+       .alloc_by_type          = intel_i830_alloc_by_type,
+       .free_by_type           = intel_i810_free_by_type,
+       .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
+       .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
+       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+       .chipset_flush          = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+       .agp_map_page           = intel_agp_map_page,
+       .agp_unmap_page         = intel_agp_unmap_page,
+       .agp_map_memory         = intel_agp_map_memory,
+       .agp_unmap_memory       = intel_agp_unmap_memory,
+#endif
+};
index 10f24e3..b9734a9 100644 (file)
@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = {
        .aperture_sizes         = nvidia_generic_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 5,
+       .needs_scratch_page     = true,
        .configure              = nvidia_configure,
        .fetch_size             = nvidia_fetch_size,
        .cleanup                = nvidia_cleanup,
index 6c3837a..29aacd8 100644 (file)
@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = {
        .aperture_sizes         = sis_generic_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 7,
+       .needs_scratch_page     = true,
        .configure              = sis_configure,
        .fetch_size             = sis_fetch_size,
        .cleanup                = sis_cleanup,
@@ -415,14 +416,6 @@ static struct pci_device_id agp_sis_pci_table[] = {
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
        },
-       {
-               .class          = (PCI_CLASS_BRIDGE_HOST << 8),
-               .class_mask     = ~0,
-               .vendor         = PCI_VENDOR_ID_SI,
-               .device         = PCI_DEVICE_ID_SI_760,
-               .subvendor      = PCI_ANY_ID,
-               .subdevice      = PCI_ANY_ID,
-       },
        { }
 };
 
index 6f48931..95db713 100644 (file)
@@ -28,6 +28,7 @@
  */
 static int uninorth_rev;
 static int is_u3;
+static u32 scratch_value;
 
 #define DEFAULT_APERTURE_SIZE 256
 #define DEFAULT_APERTURE_STRING "256"
@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
 
        gp = (u32 *) &agp_bridge->gatt_table[pg_start];
        for (i = 0; i < mem->page_count; ++i) {
-               if (gp[i]) {
+               if (gp[i] != scratch_value) {
                        dev_info(&agp_bridge->dev->dev,
                                 "uninorth_insert_memory: entry 0x%x occupied (%x)\n",
                                 i, gp[i]);
@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
                return 0;
 
        gp = (u32 *) &agp_bridge->gatt_table[pg_start];
-       for (i = 0; i < mem->page_count; ++i)
-               gp[i] = 0;
+       for (i = 0; i < mem->page_count; ++i) {
+               gp[i] = scratch_value;
+       }
        mb();
        uninorth_tlbflush(mem);
 
@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
 
        bridge->gatt_bus_addr = virt_to_phys(table);
 
+       if (is_u3)
+               scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
+       else
+               scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
+                               0x1UL);
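+       /* Note on the two entry formats: U3 stores a page-frame number
+        * with the valid flag in bit 31, while older UniNorth stores a
+        * little-endian physical address with the valid flag in bit 0;
+        * the scratch value must match whichever format the GART expects.
+        */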
        for (i = 0; i < num_entries; i++)
-               bridge->gatt_table[i] = 0;
+               bridge->gatt_table[i] = scratch_value;
 
        return 0;
 
@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
        .cant_use_aperture      = true,
+       .needs_scratch_page     = true,
 };
 
 const struct agp_bridge_driver u3_agp_driver = {
index d3bd243..df67e80 100644 (file)
@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = {
        .aperture_sizes         = agp3_generic_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 10,
+       .needs_scratch_page     = true,
        .configure              = via_configure_agp3,
        .fetch_size             = via_fetch_size_agp3,
        .cleanup                = via_cleanup_agp3,
@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = {
        .aperture_sizes         = via_generic_sizes,
        .size_type              = U8_APER_SIZE,
        .num_aperture_sizes     = 9,
+       .needs_scratch_page     = true,
        .configure              = via_configure,
        .fetch_size             = via_fetch_size,
        .cleanup                = via_cleanup,
index 2fd3d39..8d85587 100644 (file)
 #define INPUT_POOL_WORDS 128
 #define OUTPUT_POOL_WORDS 32
 #define SEC_XFER_SIZE 512
+#define EXTRACT_SIZE 10
 
 /*
  * The minimum number of bits of entropy before we wake up a read on
@@ -414,7 +415,7 @@ struct entropy_store {
        unsigned add_ptr;
        int entropy_count;
        int input_rotate;
-       __u8 *last_data;
+       __u8 last_data[EXTRACT_SIZE];
 };
 
 static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -714,8 +715,6 @@ void add_disk_randomness(struct gendisk *disk)
 }
 #endif
 
-#define EXTRACT_SIZE 10
-
 /*********************************************************************
  *
  * Entropy extraction routines
@@ -862,7 +861,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
        while (nbytes) {
                extract_buf(r, tmp);
 
-               if (r->last_data) {
+               if (fips_enabled) {
                        spin_lock_irqsave(&r->lock, flags);
                        if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
                                panic("Hardware RNG duplicated output!\n");
@@ -951,9 +950,6 @@ static void init_std_data(struct entropy_store *r)
        now = ktime_get_real();
        mix_pool_bytes(r, &now, sizeof(now));
        mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
-       /* Enable continuous test in fips mode */
-       if (fips_enabled)
-               r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
 }
 
 static int rand_initialize(void)
index b08403d..fbf94cf 100644 (file)
@@ -170,6 +170,18 @@ config CRYPTO_DEV_MV_CESA
 
          Currently the driver supports AES in ECB and CBC mode without DMA.
 
+config CRYPTO_DEV_NIAGARA2
+       tristate "Niagara2 Stream Processing Unit driver"
+       select CRYPTO_ALGAPI
+       depends on SPARC64
+       help
+         Each core of a Niagara2 processor contains a Stream
+         Processing Unit, which itself contains several cryptographic
+         sub-units.  One set provides the Modular Arithmetic Unit,
+         used for SSL offload.  The other set provides the Cipher
+         Group, which can perform encryption, decryption, hashing,
+         checksumming, and raw copies.
+
 config CRYPTO_DEV_HIFN_795X
        tristate "Driver HIFN 795x crypto accelerator chips"
        select CRYPTO_DES
@@ -222,4 +234,13 @@ config CRYPTO_DEV_PPC4XX
        help
          This option allows you to have support for AMCC crypto acceleration.
 
+config CRYPTO_DEV_OMAP_SHAM
+       tristate "Support for OMAP SHA1/MD5 hw accelerator"
+       depends on ARCH_OMAP2 || ARCH_OMAP3
+       select CRYPTO_SHA1
+       select CRYPTO_MD5
+       help
+         OMAP processors have a SHA1/MD5 hardware accelerator. Select this
+         if you want to use the OMAP module for SHA1/MD5 algorithms.
+
 endif # CRYPTO_HW
index 6ffcb3f..6dbbe00 100644 (file)
@@ -1,8 +1,12 @@
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-objs := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+
index c7a5a43..09389dd 100644 (file)
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
 
-#include <asm/io.h>
-#include <asm/delay.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 
 #include "geode-aes.h"
 
 /* Static structures */
 
-static void __iomem * _iobase;
+static void __iomem *_iobase;
 static spinlock_t lock;
 
 /* Write a 128 bit field (either a writable key or IV) */
@@ -30,7 +30,7 @@ static inline void
 _writefield(u32 offset, void *value)
 {
        int i;
-       for(i = 0; i < 4; i++)
+       for (i = 0; i < 4; i++)
                iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
 }
 
@@ -39,7 +39,7 @@ static inline void
 _readfield(u32 offset, void *value)
 {
        int i;
-       for(i = 0; i < 4; i++)
+       for (i = 0; i < 4; i++)
                ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
 }
 
@@ -59,7 +59,7 @@ do_crypt(void *src, void *dst, int len, u32 flags)
        do {
                status = ioread32(_iobase + AES_INTR_REG);
                cpu_relax();
-       } while(!(status & AES_INTRA_PENDING) && --counter);
+       } while (!(status & AES_INTRA_PENDING) && --counter);
 
        /* Clear the event */
        iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
@@ -317,7 +317,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
        err = blkcipher_walk_virt(desc, &walk);
        op->iv = walk.iv;
 
-       while((nbytes = walk.nbytes)) {
+       while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr,
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_CBC;
@@ -349,7 +349,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
        err = blkcipher_walk_virt(desc, &walk);
        op->iv = walk.iv;
 
-       while((nbytes = walk.nbytes)) {
+       while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr,
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_CBC;
@@ -429,7 +429,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
 
-       while((nbytes = walk.nbytes)) {
+       while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr,
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_ECB;
@@ -459,7 +459,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
 
-       while((nbytes = walk.nbytes)) {
+       while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr,
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_ECB;
@@ -518,11 +518,12 @@ static int __devinit
 geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        int ret;
-
-       if ((ret = pci_enable_device(dev)))
+       ret = pci_enable_device(dev);
+       if (ret)
                return ret;
 
-       if ((ret = pci_request_regions(dev, "geode-aes")))
+       ret = pci_request_regions(dev, "geode-aes");
+       if (ret)
                goto eenable;
 
        _iobase = pci_iomap(dev, 0, 0);
@@ -537,13 +538,16 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
        /* Clear any pending activity */
        iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
 
-       if ((ret = crypto_register_alg(&geode_alg)))
+       ret = crypto_register_alg(&geode_alg);
+       if (ret)
                goto eiomap;
 
-       if ((ret = crypto_register_alg(&geode_ecb_alg)))
+       ret = crypto_register_alg(&geode_ecb_alg);
+       if (ret)
                goto ealg;
 
-       if ((ret = crypto_register_alg(&geode_cbc_alg)))
+       ret = crypto_register_alg(&geode_cbc_alg);
+       if (ret)
                goto eecb;
 
        printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
index 73e8b17..16fce3a 100644 (file)
@@ -638,7 +638,7 @@ struct hifn_crypto_alg
 
 #define ASYNC_FLAGS_MISALIGNED (1<<0)
 
-struct ablkcipher_walk
+struct hifn_cipher_walk
 {
        struct scatterlist      cache[ASYNC_SCATTERLIST_CACHE];
        u32                     flags;
@@ -657,7 +657,7 @@ struct hifn_request_context
        u8                      *iv;
        unsigned int            ivsize;
        u8                      op, type, mode, unused;
-       struct ablkcipher_walk  walk;
+       struct hifn_cipher_walk walk;
 };
 
 #define crypto_alg_to_hifn(a)  container_of(a, struct hifn_crypto_alg, alg)
@@ -1417,7 +1417,7 @@ static int hifn_setup_dma(struct hifn_device *dev,
        return 0;
 }
 
-static int ablkcipher_walk_init(struct ablkcipher_walk *w,
+static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
                int num, gfp_t gfp_flags)
 {
        int i;
@@ -1442,7 +1442,7 @@ static int ablkcipher_walk_init(struct ablkcipher_walk *w,
        return i;
 }
 
-static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
+static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
 {
        int i;
 
@@ -1486,8 +1486,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
        return idx;
 }
 
-static int ablkcipher_walk(struct ablkcipher_request *req,
-               struct ablkcipher_walk *w)
+static int hifn_cipher_walk(struct ablkcipher_request *req,
+               struct hifn_cipher_walk *w)
 {
        struct scatterlist *dst, *t;
        unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1600,12 +1600,12 @@ static int hifn_setup_session(struct ablkcipher_request *req)
        }
 
        if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
-               err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
+               err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
                if (err < 0)
                        return err;
        }
 
-       sg_num = ablkcipher_walk(req, &rctx->walk);
+       sg_num = hifn_cipher_walk(req, &rctx->walk);
        if (sg_num < 0) {
                err = sg_num;
                goto err_out_exit;
@@ -1806,7 +1806,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
                        kunmap_atomic(saddr, KM_SOFTIRQ0);
                }
 
-               ablkcipher_walk_exit(&rctx->walk);
+               hifn_cipher_walk_exit(&rctx->walk);
        }
 
        req->base.complete(&req->base, error);
index 6f29012..e095422 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
 
 #include "mv_cesa.h"
+
+#define MV_CESA        "MV-CESA:"
+#define MAX_HW_HASH_SIZE       0xFFFF
+
 /*
  * STM:
  *   /---------------------------------------\
@@ -39,10 +45,12 @@ enum engine_status {
  * @dst_sg_it:         sg iterator for dst
  * @sg_src_left:       bytes left in src to process (scatter list)
  * @src_start:         offset to add to src start position (scatter list)
- * @crypt_len:         length of current crypt process
+ * @crypt_len:         length of current hw crypt/hash process
+ * @hw_nbytes:         total bytes to process in hw for this request
+ * @copy_back:         whether to copy data back (crypt) or not (hash)
  * @sg_dst_left:       bytes left dst to process in this scatter list
  * @dst_start:         offset to add to dst start position (scatter list)
- * @total_req_bytes:   total number of bytes processed (request).
+ * @hw_processed_bytes:        number of bytes processed by hw (request).
  *
  * sg helpers are used to iterate over the scatterlist. Since the size of the
  * SRAM may be less than the scatter size, this struct is used to keep
@@ -51,15 +59,19 @@ enum engine_status {
 struct req_progress {
        struct sg_mapping_iter src_sg_it;
        struct sg_mapping_iter dst_sg_it;
+       void (*complete) (void);
+       void (*process) (int is_first);
 
        /* src mostly */
        int sg_src_left;
        int src_start;
        int crypt_len;
+       int hw_nbytes;
        /* dst mostly */
+       int copy_back;
        int sg_dst_left;
        int dst_start;
-       int total_req_bytes;
+       int hw_processed_bytes;
 };
 
 struct crypto_priv {
@@ -72,10 +84,12 @@ struct crypto_priv {
        spinlock_t lock;
        struct crypto_queue queue;
        enum engine_status eng_st;
-       struct ablkcipher_request *cur_req;
+       struct crypto_async_request *cur_req;
        struct req_progress p;
        int max_req_size;
        int sram_size;
+       int has_sha1;
+       int has_hmac_sha1;
 };
 
 static struct crypto_priv *cpg;
@@ -97,6 +111,31 @@ struct mv_req_ctx {
        int decrypt;
 };
 
+enum hash_op {
+       COP_SHA1,
+       COP_HMAC_SHA1
+};
+
+struct mv_tfm_hash_ctx {
+       struct crypto_shash *fallback;
+       struct crypto_shash *base_hash;
+       u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
+       int count_add;
+       enum hash_op op;
+};
+
+struct mv_req_hash_ctx {
+       u64 count;
+       u32 state[SHA1_DIGEST_SIZE / 4];
+       u8 buffer[SHA1_BLOCK_SIZE];
+       int first_hash;         /* marks that we don't have previous state */
+       int last_chunk;         /* marks that this is the 'final' request */
+       int extra_bytes;        /* unprocessed bytes in buffer */
+       enum hash_op op;
+       int count_add;
+       struct scatterlist dummysg;
+};
+
 static void compute_aes_dec_key(struct mv_ctx *ctx)
 {
        struct crypto_aes_ctx gen_aes_key;
@@ -144,32 +183,51 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
        return 0;
 }
 
-static void setup_data_in(struct ablkcipher_request *req)
+static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
        int ret;
-       void *buf;
+       void *sbuf;
+       int copied = 0;
 
-       if (!cpg->p.sg_src_left) {
-               ret = sg_miter_next(&cpg->p.src_sg_it);
-               BUG_ON(!ret);
-               cpg->p.sg_src_left = cpg->p.src_sg_it.length;
-               cpg->p.src_start = 0;
-       }
-
-       cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
-
-       buf = cpg->p.src_sg_it.addr;
-       buf += cpg->p.src_start;
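+       /* Drain the source scatterlist into dbuf, advancing the sg mapping
+        * iterator whenever the current entry is exhausted, until exactly
+        * len bytes have been copied.
+        */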
+       while (1) {
+               if (!p->sg_src_left) {
+                       ret = sg_miter_next(&p->src_sg_it);
+                       BUG_ON(!ret);
+                       p->sg_src_left = p->src_sg_it.length;
+                       p->src_start = 0;
+               }
 
-       memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+               sbuf = p->src_sg_it.addr + p->src_start;
+
+               if (p->sg_src_left <= len - copied) {
+                       memcpy(dbuf + copied, sbuf, p->sg_src_left);
+                       copied += p->sg_src_left;
+                       p->sg_src_left = 0;
+                       if (copied >= len)
+                               break;
+               } else {
+                       int copy_len = len - copied;
+                       memcpy(dbuf + copied, sbuf, copy_len);
+                       p->src_start += copy_len;
+                       p->sg_src_left -= copy_len;
+                       break;
+               }
+       }
+}
 
-       cpg->p.sg_src_left -= cpg->p.crypt_len;
-       cpg->p.src_start += cpg->p.crypt_len;
+static void setup_data_in(void)
+{
+       struct req_progress *p = &cpg->p;
+       int data_in_sram =
+           min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+       copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
+                       data_in_sram - p->crypt_len);
+       p->crypt_len = data_in_sram;
 }
 
 static void mv_process_current_q(int first_block)
 {
-       struct ablkcipher_request *req = cpg->cur_req;
+       struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct sec_accel_config op;
@@ -179,6 +237,7 @@ static void mv_process_current_q(int first_block)
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
                break;
        case COP_AES_CBC:
+       default:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
                op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
                        ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
@@ -211,7 +270,7 @@ static void mv_process_current_q(int first_block)
                ENC_P_DST(SRAM_DATA_OUT_START);
        op.enc_key_p = SRAM_DATA_KEY_P;
 
-       setup_data_in(req);
+       setup_data_in();
        op.enc_len = cpg->p.crypt_len;
        memcpy(cpg->sram + SRAM_CONFIG, &op,
                        sizeof(struct sec_accel_config));
@@ -228,91 +287,294 @@ static void mv_process_current_q(int first_block)
 
 static void mv_crypto_algo_completion(void)
 {
-       struct ablkcipher_request *req = cpg->cur_req;
+       struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 
+       sg_miter_stop(&cpg->p.src_sg_it);
+       sg_miter_stop(&cpg->p.dst_sg_it);
+
        if (req_ctx->op != COP_AES_CBC)
                return ;
 
        memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
 }
 
+static void mv_process_hash_current(int first_block)
+{
+       struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+       struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+       struct req_progress *p = &cpg->p;
+       struct sec_accel_config op = { 0 };
+       int is_last;
+
+       switch (req_ctx->op) {
+       case COP_SHA1:
+       default:
+               op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
+               break;
+       case COP_HMAC_SHA1:
+               op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+               break;
+       }
+
+       op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
+               MAC_SRC_TOTAL_LEN((u32)req_ctx->count);
+
+       setup_data_in();
+
+       op.mac_digest =
+               MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
+       op.mac_iv =
+               MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
+               MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
+
+       is_last = req_ctx->last_chunk
+               && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
+               && (req_ctx->count <= MAX_HW_HASH_SIZE);
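+       /* Select the hardware fragment mode: NOT_FRAG for a one-shot hash,
+        * FIRST_FRAG for the opening chunk, MID_FRAG for intermediate
+        * chunks and LAST_FRAG for the final chunk of a multi-pass hash.
+        */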
+       if (req_ctx->first_hash) {
+               if (is_last)
+                       op.config |= CFG_NOT_FRAG;
+               else
+                       op.config |= CFG_FIRST_FRAG;
+
+               req_ctx->first_hash = 0;
+       } else {
+               if (is_last)
+                       op.config |= CFG_LAST_FRAG;
+               else
+                       op.config |= CFG_MID_FRAG;
+       }
+
+       memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
+
+       writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+       /* GO */
+       writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+       /*
+        * XXX: add timer if the interrupt does not occur for some mystery
+        * reason
+        */
+}
+
+static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
+                                         struct shash_desc *desc)
+{
+       int i;
+       struct sha1_state shash_state;
+
+       shash_state.count = ctx->count + ctx->count_add;
+       for (i = 0; i < 5; i++)
+               shash_state.state[i] = ctx->state[i];
+       memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
+       return crypto_shash_import(desc, &shash_state);
+}
+
+static int mv_hash_final_fallback(struct ahash_request *req)
+{
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
+       } desc;
+       int rc;
+
+       desc.shash.tfm = tfm_ctx->fallback;
+       desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       if (unlikely(req_ctx->first_hash)) {
+               crypto_shash_init(&desc.shash);
+               crypto_shash_update(&desc.shash, req_ctx->buffer,
+                                   req_ctx->extra_bytes);
+       } else {
+               /* only SHA1 for now... */
+               rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+               if (rc)
+                       goto out;
+       }
+       rc = crypto_shash_final(&desc.shash, req->result);
+out:
+       return rc;
+}
+
+static void mv_hash_algo_completion(void)
+{
+       struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+       struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+       if (ctx->extra_bytes)
+               copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
+       sg_miter_stop(&cpg->p.src_sg_it);
+
+       ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+       ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+       ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+       ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+       ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+
+       if (likely(ctx->last_chunk)) {
+               if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
+                       memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
+                              crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+               } else
+                       mv_hash_final_fallback(req);
+       }
+}
+
 static void dequeue_complete_req(void)
 {
-       struct ablkcipher_request *req = cpg->cur_req;
+       struct crypto_async_request *req = cpg->cur_req;
        void *buf;
        int ret;
+
+       cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+       if (cpg->p.copy_back) {
+               int need_copy_len = cpg->p.crypt_len;
+               int sram_offset = 0;
+               do {
+                       int dst_copy;
+
+                       if (!cpg->p.sg_dst_left) {
+                               ret = sg_miter_next(&cpg->p.dst_sg_it);
+                               BUG_ON(!ret);
+                               cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+                               cpg->p.dst_start = 0;
+                       }
 
-       cpg->p.total_req_bytes += cpg->p.crypt_len;
-       do {
-               int dst_copy;
-
-               if (!cpg->p.sg_dst_left) {
-                       ret = sg_miter_next(&cpg->p.dst_sg_it);
-                       BUG_ON(!ret);
-                       cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
-                       cpg->p.dst_start = 0;
-               }
-
-               buf = cpg->p.dst_sg_it.addr;
-               buf += cpg->p.dst_start;
+                       buf = cpg->p.dst_sg_it.addr;
+                       buf += cpg->p.dst_start;
 
-               dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+                       dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
 
-               memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+                       memcpy(buf,
+                              cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+                              dst_copy);
+                       sram_offset += dst_copy;
+                       cpg->p.sg_dst_left -= dst_copy;
+                       need_copy_len -= dst_copy;
+                       cpg->p.dst_start += dst_copy;
+               } while (need_copy_len > 0);
+       }
 
-               cpg->p.sg_dst_left -= dst_copy;
-               cpg->p.crypt_len -= dst_copy;
-               cpg->p.dst_start += dst_copy;
-       } while (cpg->p.crypt_len > 0);
+       cpg->p.crypt_len = 0;
 
        BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
-       if (cpg->p.total_req_bytes < req->nbytes) {
+       if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
                /* process next scatter list entry */
                cpg->eng_st = ENGINE_BUSY;
-               mv_process_current_q(0);
+               cpg->p.process(0);
        } else {
-               sg_miter_stop(&cpg->p.src_sg_it);
-               sg_miter_stop(&cpg->p.dst_sg_it);
-               mv_crypto_algo_completion();
+               cpg->p.complete();
                cpg->eng_st = ENGINE_IDLE;
-               req->base.complete(&req->base, 0);
+               local_bh_disable();
+               req->complete(req, 0);
+               local_bh_enable();
        }
 }
 
 static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 {
        int i = 0;
-
-       do {
-               total_bytes -= sl[i].length;
-               i++;
-
-       } while (total_bytes > 0);
+       size_t cur_len;
+
+       while (1) {
+               cur_len = sl[i].length;
+               ++i;
+               if (total_bytes > cur_len)
+                       total_bytes -= cur_len;
+               else
+                       break;
+       }
 
        return i;
 }
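
For example, with scatterlist entries of length {16, 64} and total_bytes = 32, the rewritten loop returns 2: the second entry already covers the remaining 16 bytes, so the break fires before the unsigned subtraction can underflow, which the old do/while could not guarantee when the final entry was longer than the bytes left.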
 
-static void mv_enqueue_new_req(struct ablkcipher_request *req)
+static void mv_start_new_crypt_req(struct ablkcipher_request *req)
 {
+       struct req_progress *p = &cpg->p;
        int num_sgs;
 
-       cpg->cur_req = req;
-       memset(&cpg->p, 0, sizeof(struct req_progress));
+       cpg->cur_req = &req->base;
+       memset(p, 0, sizeof(struct req_progress));
+       p->hw_nbytes = req->nbytes;
+       p->complete = mv_crypto_algo_completion;
+       p->process = mv_process_current_q;
+       p->copy_back = 1;
 
        num_sgs = count_sgs(req->src, req->nbytes);
-       sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+       sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
 
        num_sgs = count_sgs(req->dst, req->nbytes);
-       sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+       sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+
        mv_process_current_q(1);
 }
 
+static void mv_start_new_hash_req(struct ahash_request *req)
+{
+       struct req_progress *p = &cpg->p;
+       struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+       int num_sgs, hw_bytes, old_extra_bytes, rc;
+
+       cpg->cur_req = &req->base;
+       memset(p, 0, sizeof(struct req_progress));
+       hw_bytes = req->nbytes + ctx->extra_bytes;
+       old_extra_bytes = ctx->extra_bytes;
+
+       if (unlikely(ctx->extra_bytes)) {
+               memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+                      ctx->extra_bytes);
+               p->crypt_len = ctx->extra_bytes;
+       }
+
+       memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
+
+       if (unlikely(!ctx->first_hash)) {
+               writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+               writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+               writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+               writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+               writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+       }
+
+       ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
+       if (ctx->extra_bytes != 0
+           && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
+               hw_bytes -= ctx->extra_bytes;
+       else
+               ctx->extra_bytes = 0;
+
+       num_sgs = count_sgs(req->src, req->nbytes);
+       sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+       if (hw_bytes) {
+               p->hw_nbytes = hw_bytes;
+               p->complete = mv_hash_algo_completion;
+               p->process = mv_process_hash_current;
+
+               mv_process_hash_current(1);
+       } else {
+               copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
+                               ctx->extra_bytes - old_extra_bytes);
+               sg_miter_stop(&p->src_sg_it);
+               if (ctx->last_chunk)
+                       rc = mv_hash_final_fallback(req);
+               else
+                       rc = 0;
+               cpg->eng_st = ENGINE_IDLE;
+               local_bh_disable();
+               req->base.complete(&req->base, rc);
+               local_bh_enable();
+       }
+}
+
 static int queue_manag(void *data)
 {
        cpg->eng_st = ENGINE_IDLE;
        do {
-               struct ablkcipher_request *req;
                struct crypto_async_request *async_req = NULL;
                struct crypto_async_request *backlog;
 
@@ -338,9 +600,18 @@ static int queue_manag(void *data)
                }
 
                if (async_req) {
-                       req = container_of(async_req,
-                                       struct ablkcipher_request, base);
-                       mv_enqueue_new_req(req);
+                       if (async_req->tfm->__crt_alg->cra_type !=
+                           &crypto_ahash_type) {
+                               struct ablkcipher_request *req =
+                                   container_of(async_req,
+                                                struct ablkcipher_request,
+                                                base);
+                               mv_start_new_crypt_req(req);
+                       } else {
+                               struct ahash_request *req =
+                                   ahash_request_cast(async_req);
+                               mv_start_new_hash_req(req);
+                       }
                        async_req = NULL;
                }
 
@@ -350,13 +621,13 @@ static int queue_manag(void *data)
        return 0;
 }
 
-static int mv_handle_req(struct ablkcipher_request *req)
+static int mv_handle_req(struct crypto_async_request *req)
 {
        unsigned long flags;
        int ret;
 
        spin_lock_irqsave(&cpg->lock, flags);
-       ret = ablkcipher_enqueue_request(&cpg->queue, req);
+       ret = crypto_enqueue_request(&cpg->queue, req);
        spin_unlock_irqrestore(&cpg->lock, flags);
        wake_up_process(cpg->queue_th);
        return ret;
@@ -369,7 +640,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 0;
 
-       return mv_handle_req(req);
+       return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -381,7 +652,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
        req_ctx->decrypt = 1;
 
        compute_aes_dec_key(ctx);
-       return mv_handle_req(req);
+       return mv_handle_req(&req->base);
 }
 
 static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -391,7 +662,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 0;
 
-       return mv_handle_req(req);
+       return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -403,7 +674,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
        req_ctx->decrypt = 1;
 
        compute_aes_dec_key(ctx);
-       return mv_handle_req(req);
+       return mv_handle_req(&req->base);
 }
 
 static int mv_cra_init(struct crypto_tfm *tfm)
@@ -412,6 +683,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
        return 0;
 }
 
+static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
+                                int is_last, unsigned int req_len,
+                                int count_add)
+{
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->op = op;
+       ctx->count = req_len;
+       ctx->first_hash = 1;
+       ctx->last_chunk = is_last;
+       ctx->count_add = count_add;
+}
+
+static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
+                                  unsigned req_len)
+{
+       ctx->last_chunk = is_last;
+       ctx->count += req_len;
+}
+
+static int mv_hash_init(struct ahash_request *req)
+{
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+
+       mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
+                            tfm_ctx->count_add);
+       return 0;
+}
+
+static int mv_hash_update(struct ahash_request *req)
+{
+       if (!req->nbytes)
+               return 0;
+
+       mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
+       return mv_handle_req(&req->base);
+}
+
+static int mv_hash_final(struct ahash_request *req)
+{
+       struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+       /* Zero-length final: point the request at a 4-byte dummy
+        * scatterlist so the generic walk code stays happy.
+        */
+       sg_init_one(&ctx->dummysg, ctx->buffer, 4);
+       ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+       mv_update_hash_req_ctx(ctx, 1, 0);
+       return mv_handle_req(&req->base);
+}
+
+static int mv_hash_finup(struct ahash_request *req)
+{
+       if (!req->nbytes)
+               return mv_hash_final(req);
+
+       mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
+       return mv_handle_req(&req->base);
+}
+
+static int mv_hash_digest(struct ahash_request *req)
+{
+       const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+
+       mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
+                            req->nbytes, tfm_ctx->count_add);
+       return mv_handle_req(&req->base);
+}
+
+static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
+                            const void *ostate)
+{
+       const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
+       int i;
+
+       for (i = 0; i < 5; i++) {
+               ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
+               ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
+       }
+}
+
+static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
+                         unsigned int keylen)
+{
+       int rc;
+       struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+       int bs, ds, ss;
+
+       if (!ctx->base_hash)
+               return 0;
+
+       rc = crypto_shash_setkey(ctx->fallback, key, keylen);
+       if (rc)
+               return rc;
+
+       /* There is no way to extract the ipad/opad from the fallback tfm,
+        * so duplicate the key-padding logic of the hmac module here.
+        */
+       bs = crypto_shash_blocksize(ctx->base_hash);
+       ds = crypto_shash_digestsize(ctx->base_hash);
+       ss = crypto_shash_statesize(ctx->base_hash);
+
+       {
+               struct {
+                       struct shash_desc shash;
+                       char ctx[crypto_shash_descsize(ctx->base_hash)];
+               } desc;
+               unsigned int i;
+               char ipad[ss];
+               char opad[ss];
+
+               desc.shash.tfm = ctx->base_hash;
+               desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+                   CRYPTO_TFM_REQ_MAY_SLEEP;
+
+               if (keylen > bs) {
+                       int err;
+
+                       err =
+                           crypto_shash_digest(&desc.shash, key, keylen, ipad);
+                       if (err)
+                               return err;
+
+                       keylen = ds;
+               } else
+                       memcpy(ipad, key, keylen);
+
+               memset(ipad + keylen, 0, bs - keylen);
+               memcpy(opad, ipad, bs);
+
+               for (i = 0; i < bs; i++) {
+                       ipad[i] ^= 0x36;
+                       opad[i] ^= 0x5c;
+               }
+
+               rc = crypto_shash_init(&desc.shash) ? :
+                   crypto_shash_update(&desc.shash, ipad, bs) ? :
+                   crypto_shash_export(&desc.shash, ipad) ? :
+                   crypto_shash_init(&desc.shash) ? :
+                   crypto_shash_update(&desc.shash, opad, bs) ? :
+                   crypto_shash_export(&desc.shash, opad);
+
+               if (rc == 0)
+                       mv_hash_init_ivs(ctx, ipad, opad);
+
+               return rc;
+       }
+}
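
The padding and XOR block above is the standard HMAC key schedule from RFC 2104: keys longer than the block size are first digested, the result is zero-padded to the block size, and the 0x36/0x5c-XORed blocks are run through the hash and exported as the inner and outer IVs. A minimal self-contained sketch of the padding step, assuming a 64-byte SHA1 block (illustrative, not the driver's API):

#include <string.h>

#define HMAC_BLOCK_SIZE 64	/* SHA1 block size */

/* Sketch only: derive the ipad/opad blocks that mv_hash_setkey()
 * feeds through crypto_shash_update() before exporting the states.
 * Keys longer than the block size are assumed to be pre-hashed.
 */
static void hmac_pads(const unsigned char *key, unsigned int keylen,
		      unsigned char ipad[HMAC_BLOCK_SIZE],
		      unsigned char opad[HMAC_BLOCK_SIZE])
{
	unsigned int i;

	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, HMAC_BLOCK_SIZE - keylen);
	memcpy(opad, ipad, HMAC_BLOCK_SIZE);
	for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}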
+
+static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
+                           enum hash_op op, int count_add)
+{
+       const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+       struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_shash *fallback_tfm = NULL;
+       struct crypto_shash *base_hash = NULL;
+       int err = -ENOMEM;
+
+       ctx->op = op;
+       ctx->count_add = count_add;
+
+       /* Allocate a fallback and abort if it failed. */
+       fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+                                         CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(fallback_tfm)) {
+               printk(KERN_WARNING MV_CESA
+                      "Fallback driver '%s' could not be loaded!\n",
+                      fallback_driver_name);
+               err = PTR_ERR(fallback_tfm);
+               goto out;
+       }
+       ctx->fallback = fallback_tfm;
+
+       if (base_hash_name) {
+               /* Allocate a hash to compute the ipad/opad of hmac. */
+               base_hash = crypto_alloc_shash(base_hash_name, 0,
+                                              CRYPTO_ALG_NEED_FALLBACK);
+               if (IS_ERR(base_hash)) {
+                       printk(KERN_WARNING MV_CESA
+                              "Base driver '%s' could not be loaded!\n",
+                              base_hash_name);
+                       err = PTR_ERR(base_hash);
+                       goto err_bad_base;
+               }
+       }
+       ctx->base_hash = base_hash;
+
+       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+                                sizeof(struct mv_req_hash_ctx) +
+                                crypto_shash_descsize(ctx->fallback));
+       return 0;
+err_bad_base:
+       crypto_free_shash(fallback_tfm);
+out:
+       return err;
+}
+
+static void mv_cra_hash_exit(struct crypto_tfm *tfm)
+{
+       struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_shash(ctx->fallback);
+       if (ctx->base_hash)
+               crypto_free_shash(ctx->base_hash);
+}
+
+static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
+{
+       return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
+}
+
+static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
+{
+       return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
+}
+
 irqreturn_t crypto_int(int irq, void *priv)
 {
        u32 val;
@@ -474,6 +954,53 @@ struct crypto_alg mv_aes_alg_cbc = {
        },
 };
 
+struct ahash_alg mv_sha1_alg = {
+       .init = mv_hash_init,
+       .update = mv_hash_update,
+       .final = mv_hash_final,
+       .finup = mv_hash_finup,
+       .digest = mv_hash_digest,
+       .halg = {
+                .digestsize = SHA1_DIGEST_SIZE,
+                .base = {
+                         .cra_name = "sha1",
+                         .cra_driver_name = "mv-sha1",
+                         .cra_priority = 300,
+                         .cra_flags =
+                         CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+                         .cra_blocksize = SHA1_BLOCK_SIZE,
+                         .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+                         .cra_init = mv_cra_hash_sha1_init,
+                         .cra_exit = mv_cra_hash_exit,
+                         .cra_module = THIS_MODULE,
+                         }
+                }
+};
+
+struct ahash_alg mv_hmac_sha1_alg = {
+       .init = mv_hash_init,
+       .update = mv_hash_update,
+       .final = mv_hash_final,
+       .finup = mv_hash_finup,
+       .digest = mv_hash_digest,
+       .setkey = mv_hash_setkey,
+       .halg = {
+                .digestsize = SHA1_DIGEST_SIZE,
+                .base = {
+                         .cra_name = "hmac(sha1)",
+                         .cra_driver_name = "mv-hmac-sha1",
+                         .cra_priority = 300,
+                         .cra_flags =
+                         CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+                         .cra_blocksize = SHA1_BLOCK_SIZE,
+                         .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+                         .cra_init = mv_cra_hash_hmac_sha1_init,
+                         .cra_exit = mv_cra_hash_exit,
+                         .cra_module = THIS_MODULE,
+                         }
+                }
+};
+
 static int mv_probe(struct platform_device *pdev)
 {
        struct crypto_priv *cp;
@@ -482,7 +1009,7 @@ static int mv_probe(struct platform_device *pdev)
        int ret;
 
        if (cpg) {
-               printk(KERN_ERR "Second crypto dev?\n");
+               printk(KERN_ERR MV_CESA "Second crypto dev?\n");
                return -EEXIST;
        }
 
@@ -496,7 +1023,7 @@ static int mv_probe(struct platform_device *pdev)
 
        spin_lock_init(&cp->lock);
        crypto_init_queue(&cp->queue, 50);
-       cp->reg = ioremap(res->start, res->end - res->start + 1);
+       cp->reg = ioremap(res->start, resource_size(res));
        if (!cp->reg) {
                ret = -ENOMEM;
                goto err;
@@ -507,7 +1034,7 @@ static int mv_probe(struct platform_device *pdev)
                ret = -ENXIO;
                goto err_unmap_reg;
        }
-       cp->sram_size = res->end - res->start + 1;
+       cp->sram_size = resource_size(res);
        cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
        cp->sram = ioremap(res->start, cp->sram_size);
        if (!cp->sram) {
@@ -546,6 +1073,21 @@ static int mv_probe(struct platform_device *pdev)
        ret = crypto_register_alg(&mv_aes_alg_cbc);
        if (ret)
                goto err_unreg_ecb;
+
+       ret = crypto_register_ahash(&mv_sha1_alg);
+       if (ret == 0)
+               cpg->has_sha1 = 1;
+       else
+               printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
+
+       ret = crypto_register_ahash(&mv_hmac_sha1_alg);
+       if (ret == 0) {
+               cpg->has_hmac_sha1 = 1;
+       } else {
+               printk(KERN_WARNING MV_CESA
+                      "Could not register hmac-sha1 driver\n");
+       }
+
        return 0;
 err_unreg_ecb:
        crypto_unregister_alg(&mv_aes_alg_ecb);
@@ -570,6 +1112,10 @@ static int mv_remove(struct platform_device *pdev)
 
        crypto_unregister_alg(&mv_aes_alg_ecb);
        crypto_unregister_alg(&mv_aes_alg_cbc);
+       if (cp->has_sha1)
+               crypto_unregister_ahash(&mv_sha1_alg);
+       if (cp->has_hmac_sha1)
+               crypto_unregister_ahash(&mv_hmac_sha1_alg);
        kthread_stop(cp->queue_th);
        free_irq(cp->irq, cp);
        memset(cp->sram, 0, cp->sram_size);
index c3e25d3..08fcb11 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef __MV_CRYPTO_H__
 
 #define DIGEST_INITIAL_VAL_A   0xdd00
+#define DIGEST_INITIAL_VAL_B   0xdd04
+#define DIGEST_INITIAL_VAL_C   0xdd08
+#define DIGEST_INITIAL_VAL_D   0xdd0c
+#define DIGEST_INITIAL_VAL_E   0xdd10
 #define DES_CMD_REG            0xdd58
 
 #define SEC_ACCEL_CMD          0xde00
@@ -70,6 +74,10 @@ struct sec_accel_config {
 #define CFG_AES_LEN_128                (0 << 24)
 #define CFG_AES_LEN_192                (1 << 24)
 #define CFG_AES_LEN_256                (2 << 24)
+#define CFG_NOT_FRAG           (0 << 30)
+#define CFG_FIRST_FRAG         (1 << 30)
+#define CFG_LAST_FRAG          (2 << 30)
+#define CFG_MID_FRAG           (3 << 30)
 
        u32 enc_p;
 #define ENC_P_SRC(x)           (x)
@@ -90,7 +98,11 @@ struct sec_accel_config {
 #define MAC_SRC_TOTAL_LEN(x)   ((x) << 16)
 
        u32 mac_digest;
+#define MAC_DIGEST_P(x)        (x)
+#define MAC_FRAG_LEN(x)        ((x) << 16)
        u32 mac_iv;
+#define MAC_INNER_IV_P(x)      (x)
+#define MAC_OUTER_IV_P(x)      ((x) << 16)
 }__attribute__ ((packed));
        /*
         * /-----------\ 0
@@ -101,19 +113,37 @@ struct sec_accel_config {
         * |  IV   IN  |        4 * 4
         * |-----------| 0x40 (inplace)
         * |  IV BUF   |        4 * 4
-        * |-----------| 0x50
+        * |-----------| 0x80
         * |  DATA IN  |        16 * x (max ->max_req_size)
-        * |-----------| 0x50 (inplace operation)
+        * |-----------| 0x80 (inplace operation)
         * |  DATA OUT |        16 * x (max ->max_req_size)
         * \-----------/ SRAM size
         */
+
+       /* Hashing memory map:
+        * /-----------\ 0
+        * | ACCEL CFG |        4 * 8
+        * |-----------| 0x20
+        * | Inner IV  |        5 * 4
+        * |-----------| 0x34
+        * | Outer IV  |        5 * 4
+        * |-----------| 0x48
+        * | Output BUF|        5 * 4
+        * |-----------| 0x80
+        * |  DATA IN  |        64 * x (max ->max_req_size)
+        * \-----------/ SRAM size
+        */
 #define SRAM_CONFIG            0x00
 #define SRAM_DATA_KEY_P                0x20
 #define SRAM_DATA_IV           0x40
 #define SRAM_DATA_IV_BUF       0x40
-#define SRAM_DATA_IN_START     0x50
-#define SRAM_DATA_OUT_START    0x50
+#define SRAM_DATA_IN_START     0x80
+#define SRAM_DATA_OUT_START    0x80
+
+#define SRAM_HMAC_IV_IN                0x20
+#define SRAM_HMAC_IV_OUT       0x34
+#define SRAM_DIGEST_BUF                0x48
 
-#define SRAM_CFG_SPACE         0x50
+#define SRAM_CFG_SPACE         0x80
 
 #endif
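
With DATA IN/OUT moved from 0x50 to 0x80, both maps now share SRAM_CFG_SPACE = 0x80; the digest buffer (0x48 plus a 20-byte SHA1 digest) and the cipher IV buffer both still fit below the data region. A speculative compile-time sketch of those invariants (BUILD_BUG_ON is the kernel's static assert; SHA1_DIGEST_SIZE comes from <crypto/sha.h>):

/* Sketch only: layout invariants implied by the memory maps above. */
static inline void mv_sram_layout_checks(void)
{
	BUILD_BUG_ON(SRAM_DATA_IV_BUF + 16 > SRAM_CFG_SPACE);
	BUILD_BUG_ON(SRAM_DIGEST_BUF + SHA1_DIGEST_SIZE > SRAM_CFG_SPACE);
	BUILD_BUG_ON(SRAM_HMAC_IV_OUT != SRAM_HMAC_IV_IN + 5 * 4);
}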
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
new file mode 100644 (file)
index 0000000..f7c7937
--- /dev/null
@@ -0,0 +1,95 @@
+/* n2_asm.S: Hypervisor calls for NCS support.
+ *
+ * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/linkage.h>
+#include <asm/hypervisor.h>
+#include "n2_core.h"
+
+       /* %o0: queue type
+        * %o1: RA of queue
+        * %o2: num entries in queue
+        * %o3: address of queue handle return
+        */
+ENTRY(sun4v_ncs_qconf)
+       mov     HV_FAST_NCS_QCONF, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o3]
+       retl
+        nop
+ENDPROC(sun4v_ncs_qconf)
+
+       /* %o0: queue handle
+        * %o1: address of queue type return
+        * %o2: address of queue base address return
+        * %o3: address of queue num entries return
+        */
+ENTRY(sun4v_ncs_qinfo)
+       mov     %o1, %g1
+       mov     %o2, %g2
+       mov     %o3, %g3
+       mov     HV_FAST_NCS_QINFO, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       stx     %o2, [%g2]
+       stx     %o3, [%g3]
+       retl
+        nop
+ENDPROC(sun4v_ncs_qinfo)
+
+       /* %o0: queue handle
+        * %o1: address of head offset return
+        */
+ENTRY(sun4v_ncs_gethead)
+       mov     %o1, %o2
+       mov     HV_FAST_NCS_GETHEAD, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o2]
+       retl
+        nop
+ENDPROC(sun4v_ncs_gethead)
+
+       /* %o0: queue handle
+        * %o1: address of tail offset return
+        */
+ENTRY(sun4v_ncs_gettail)
+       mov     %o1, %o2
+       mov     HV_FAST_NCS_GETTAIL, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o2]
+       retl
+        nop
+ENDPROC(sun4v_ncs_gettail)
+
+       /* %o0: queue handle
+        * %o1: new tail offset
+        */
+ENTRY(sun4v_ncs_settail)
+       mov     HV_FAST_NCS_SETTAIL, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+ENDPROC(sun4v_ncs_settail)
+
+       /* %o0: queue handle
+        * %o1: address of devino return
+        */
+ENTRY(sun4v_ncs_qhandle_to_devino)
+       mov     %o1, %o2
+       mov     HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o2]
+       retl
+        nop
+ENDPROC(sun4v_ncs_qhandle_to_devino)
+
+       /* %o0: queue handle
+        * %o1: new head offset
+        */
+ENTRY(sun4v_ncs_sethead_marker)
+       mov     HV_FAST_NCS_SETHEAD_MARKER, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
new file mode 100644 (file)
index 0000000..8566be8
--- /dev/null
@@ -0,0 +1,2083 @@
+/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
+ *
+ * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#include "n2_core.h"
+
+#define DRV_MODULE_NAME                "n2_crypto"
+#define DRV_MODULE_VERSION     "0.1"
+#define DRV_MODULE_RELDATE     "April 29, 2010"
+
+static char version[] __devinitdata =
+       DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Niagara2 Crypto driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define N2_CRA_PRIORITY                300
+
+static DEFINE_MUTEX(spu_lock);
+
+struct spu_queue {
+       cpumask_t               sharing;
+       unsigned long           qhandle;
+
+       spinlock_t              lock;
+       u8                      q_type;
+       void                    *q;
+       unsigned long           head;
+       unsigned long           tail;
+       struct list_head        jobs;
+
+       unsigned long           devino;
+
+       char                    irq_name[32];
+       unsigned int            irq;
+
+       struct list_head        list;
+};
+
+static struct spu_queue **cpu_to_cwq;
+static struct spu_queue **cpu_to_mau;
+
+static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
+{
+       if (q->q_type == HV_NCS_QTYPE_MAU) {
+               off += MAU_ENTRY_SIZE;
+               if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
+                       off = 0;
+       } else {
+               off += CWQ_ENTRY_SIZE;
+               if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
+                       off = 0;
+       }
+       return off;
+}
+
+struct n2_request_common {
+       struct list_head        entry;
+       unsigned int            offset;
+};
+#define OFFSET_NOT_RUNNING     (~(unsigned int)0)
+
+/* An async job request records the final tail value it used in
+ * n2_request_common->offset; test whether that offset lies in
+ * the half-open range (old_head, new_head].
+ */
+static inline bool job_finished(struct spu_queue *q, unsigned int offset,
+                               unsigned long old_head, unsigned long new_head)
+{
+       if (old_head <= new_head) {
+               if (offset > old_head && offset <= new_head)
+                       return true;
+       } else {
+               if (offset > old_head || offset <= new_head)
+                       return true;
+       }
+       return false;
+}
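
For example, with old_head = 0x700 and new_head = 0x100 after a wrap, offsets 0x7c0 and 0x040 both test as finished while 0x400 does not; without a wrap (old_head = 0x100, new_head = 0x700), exactly the offsets in (0x100, 0x700] are finished.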
+
+/* When the HEAD marker is unequal to the actual HEAD, we get
+ * a virtual device INO interrupt.  We should process the
+ * completed CWQ entries and adjust the HEAD marker to clear
+ * the IRQ.
+ */
+static irqreturn_t cwq_intr(int irq, void *dev_id)
+{
+       unsigned long off, new_head, hv_ret;
+       struct spu_queue *q = dev_id;
+
+       pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
+              smp_processor_id(), q->qhandle);
+
+       spin_lock(&q->lock);
+
+       hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
+
+       pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
+              smp_processor_id(), new_head, hv_ret);
+
+       for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
+               /* XXX ... XXX */
+       }
+
+       hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
+       if (hv_ret == HV_EOK)
+               q->head = new_head;
+
+       spin_unlock(&q->lock);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t mau_intr(int irq, void *dev_id)
+{
+       struct spu_queue *q = dev_id;
+       unsigned long head, hv_ret;
+
+       spin_lock(&q->lock);
+
+       pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
+              smp_processor_id(), q->qhandle);
+
+       hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
+
+       pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
+              smp_processor_id(), head, hv_ret);
+
+       sun4v_ncs_sethead_marker(q->qhandle, head);
+
+       spin_unlock(&q->lock);
+
+       return IRQ_HANDLED;
+}
+
+static void *spu_queue_next(struct spu_queue *q, void *cur)
+{
+       return q->q + spu_next_offset(q, cur - q->q);
+}
+
+static int spu_queue_num_free(struct spu_queue *q)
+{
+       unsigned long head = q->head;
+       unsigned long tail = q->tail;
+       unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
+       unsigned long diff;
+
+       if (head > tail)
+               diff = head - tail;
+       else
+               diff = (end - tail) + head;
+
+       return (diff / CWQ_ENTRY_SIZE) - 1;
+}
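
As a worked example (assuming CWQ_ENTRY_SIZE = 64 and CWQ_NUM_ENTRIES = 64, i.e. a 4 KiB ring): head = 0x0c0 and tail = 0x040 give diff = 0x80, so two entry slots lie between tail and head and one is reported free; the "- 1" reserves a sentinel slot so that head == tail can unambiguously mean empty rather than full.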
+
+static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
+{
+       int avail = spu_queue_num_free(q);
+
+       if (avail >= num_entries)
+               return q->q + q->tail;
+
+       return NULL;
+}
+
+static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
+{
+       unsigned long hv_ret, new_tail;
+
+       new_tail = spu_next_offset(q, last - q->q);
+
+       hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
+       if (hv_ret == HV_EOK)
+               q->tail = new_tail;
+       return hv_ret;
+}
+
+static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
+                            int enc_type, int auth_type,
+                            unsigned int hash_len,
+                            bool sfas, bool sob, bool eob, bool encrypt,
+                            int opcode)
+{
+       u64 word = (len - 1) & CONTROL_LEN;
+
+       word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
+       word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
+       word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
+       if (sfas)
+               word |= CONTROL_STORE_FINAL_AUTH_STATE;
+       if (sob)
+               word |= CONTROL_START_OF_BLOCK;
+       if (eob)
+               word |= CONTROL_END_OF_BLOCK;
+       if (encrypt)
+               word |= CONTROL_ENCRYPT;
+       if (hmac_key_len)
+               word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
+       if (hash_len)
+               word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
+
+       return word;
+}
+
+#if 0
+static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
+{
+       if (this_len >= 64 ||
+           qp->head != qp->tail)
+               return true;
+       return false;
+}
+#endif
+
+struct n2_base_ctx {
+       struct list_head                list;
+};
+
+static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+{
+       INIT_LIST_HEAD(&ctx->list);
+}
+
+struct n2_hash_ctx {
+       struct n2_base_ctx              base;
+
+       struct crypto_ahash             *fallback;
+
+       /* These next three members must match the layout created by
+        * crypto_init_shash_ops_async.  This allows us to properly
+        * plumb requests we can't do in hardware down to the fallback
+        * operation, providing all of the data structures and layouts
+        * expected by those paths.
+        */
+       struct ahash_request            fallback_req;
+       struct shash_desc               fallback_desc;
+       union {
+               struct md5_state        md5;
+               struct sha1_state       sha1;
+               struct sha256_state     sha256;
+       } u;
+
+       unsigned char                   hash_key[64];
+       unsigned char                   keyed_zero_hash[32];
+};
+
+static int n2_hash_async_init(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+       ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+       ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       return crypto_ahash_init(&ctx->fallback_req);
+}
+
+static int n2_hash_async_update(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+       ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+       ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+       ctx->fallback_req.nbytes = req->nbytes;
+       ctx->fallback_req.src = req->src;
+
+       return crypto_ahash_update(&ctx->fallback_req);
+}
+
+static int n2_hash_async_final(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+       ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+       ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+       ctx->fallback_req.result = req->result;
+
+       return crypto_ahash_final(&ctx->fallback_req);
+}
+
+static int n2_hash_async_finup(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+       ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+       ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+       ctx->fallback_req.nbytes = req->nbytes;
+       ctx->fallback_req.src = req->src;
+       ctx->fallback_req.result = req->result;
+
+       return crypto_ahash_finup(&ctx->fallback_req);
+}
+
+static int n2_hash_cra_init(struct crypto_tfm *tfm)
+{
+       const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+       struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+       struct crypto_ahash *fallback_tfm;
+       int err;
+
+       fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+                                         CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(fallback_tfm)) {
+               pr_warning("Fallback driver '%s' could not be loaded!\n",
+                          fallback_driver_name);
+               err = PTR_ERR(fallback_tfm);
+               goto out;
+       }
+
+       ctx->fallback = fallback_tfm;
+       return 0;
+
+out:
+       return err;
+}
+
+static void n2_hash_cra_exit(struct crypto_tfm *tfm)
+{
+       struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+       crypto_free_ahash(ctx->fallback);
+}
+
+static unsigned long wait_for_tail(struct spu_queue *qp)
+{
+       unsigned long head, hv_ret;
+
+       do {
+               hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
+               if (hv_ret != HV_EOK) {
+                       pr_err("Hypervisor error on gethead\n");
+                       break;
+               }
+               if (head == qp->tail) {
+                       qp->head = head;
+                       break;
+               }
+       } while (1);
+       return hv_ret;
+}
+
+static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
+                                             struct cwq_initial_entry *ent)
+{
+       unsigned long hv_ret = spu_queue_submit(qp, ent);
+
+       if (hv_ret == HV_EOK)
+               hv_ret = wait_for_tail(qp);
+
+       return hv_ret;
+}
+
+static int n2_hash_async_digest(struct ahash_request *req,
+                               unsigned int auth_type, unsigned int digest_size,
+                               unsigned int result_size, void *hash_loc)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct cwq_initial_entry *ent;
+       struct crypto_hash_walk walk;
+       struct spu_queue *qp;
+       unsigned long flags;
+       int err = -ENODEV;
+       int nbytes, cpu;
+
+       /* The total effective length of the operation may not
+        * exceed 2^16.
+        */
+       if (unlikely(req->nbytes > (1 << 16))) {
+               ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+               ctx->fallback_req.base.flags =
+                       req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+               ctx->fallback_req.nbytes = req->nbytes;
+               ctx->fallback_req.src = req->src;
+               ctx->fallback_req.result = req->result;
+
+               return crypto_ahash_digest(&ctx->fallback_req);
+       }
+
+       n2_base_ctx_init(&ctx->base);
+
+       nbytes = crypto_hash_walk_first(req, &walk);
+
+       cpu = get_cpu();
+       qp = cpu_to_cwq[cpu];
+       if (!qp)
+               goto out;
+
+       spin_lock_irqsave(&qp->lock, flags);
+
+       /* XXX Improve this later with a by-hand scatterlist walk
+        * XXX instead of crypto_hash_walk.
+        */
+       ent = qp->q + qp->tail;
+
+       ent->control = control_word_base(nbytes, 0, 0,
+                                        auth_type, digest_size,
+                                        false, true, false, false,
+                                        OPCODE_INPLACE_BIT |
+                                        OPCODE_AUTH_MAC);
+       ent->src_addr = __pa(walk.data);
+       ent->auth_key_addr = 0UL;
+       ent->auth_iv_addr = __pa(hash_loc);
+       ent->final_auth_state_addr = 0UL;
+       ent->enc_key_addr = 0UL;
+       ent->enc_iv_addr = 0UL;
+       ent->dest_addr = __pa(hash_loc);
+
+       nbytes = crypto_hash_walk_done(&walk, 0);
+       while (nbytes > 0) {
+               ent = spu_queue_next(qp, ent);
+
+               ent->control = (nbytes - 1);
+               ent->src_addr = __pa(walk.data);
+               ent->auth_key_addr = 0UL;
+               ent->auth_iv_addr = 0UL;
+               ent->final_auth_state_addr = 0UL;
+               ent->enc_key_addr = 0UL;
+               ent->enc_iv_addr = 0UL;
+               ent->dest_addr = 0UL;
+
+               nbytes = crypto_hash_walk_done(&walk, 0);
+       }
+       ent->control |= CONTROL_END_OF_BLOCK;
+
+       if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
+               err = -EINVAL;
+       else
+               err = 0;
+
+       spin_unlock_irqrestore(&qp->lock, flags);
+
+       if (!err)
+               memcpy(req->result, hash_loc, result_size);
+out:
+       put_cpu();
+
+       return err;
+}
+
+static int n2_md5_async_digest(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct md5_state *m = &ctx->u.md5;
+
+       if (unlikely(req->nbytes == 0)) {
+               static const char md5_zero[MD5_DIGEST_SIZE] = {
+                       0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+                       0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+               };
+
+               memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
+               return 0;
+       }
+       m->hash[0] = cpu_to_le32(0x67452301);
+       m->hash[1] = cpu_to_le32(0xefcdab89);
+       m->hash[2] = cpu_to_le32(0x98badcfe);
+       m->hash[3] = cpu_to_le32(0x10325476);
+
+       return n2_hash_async_digest(req, AUTH_TYPE_MD5,
+                                   MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
+                                   m->hash);
+}
+
+static int n2_sha1_async_digest(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct sha1_state *s = &ctx->u.sha1;
+
+       if (unlikely(req->nbytes == 0)) {
+               static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+                       0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+                       0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+                       0x07, 0x09
+               };
+
+               memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
+               return 0;
+       }
+       s->state[0] = SHA1_H0;
+       s->state[1] = SHA1_H1;
+       s->state[2] = SHA1_H2;
+       s->state[3] = SHA1_H3;
+       s->state[4] = SHA1_H4;
+
+       return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
+                                   SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
+                                   s->state);
+}
+
+static int n2_sha256_async_digest(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct sha256_state *s = &ctx->u.sha256;
+
+       if (req->nbytes == 0) {
+               static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+                       0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+                       0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+                       0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+                       0x1b, 0x78, 0x52, 0xb8, 0x55
+               };
+
+               memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
+               return 0;
+       }
+       s->state[0] = SHA256_H0;
+       s->state[1] = SHA256_H1;
+       s->state[2] = SHA256_H2;
+       s->state[3] = SHA256_H3;
+       s->state[4] = SHA256_H4;
+       s->state[5] = SHA256_H5;
+       s->state[6] = SHA256_H6;
+       s->state[7] = SHA256_H7;
+
+       return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+                                   SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
+                                   s->state);
+}
+
+static int n2_sha224_async_digest(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct sha256_state *s = &ctx->u.sha256;
+
+       if (req->nbytes == 0) {
+               static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+                       0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+                       0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+                       0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+                       0x2f
+               };
+
+               memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
+               return 0;
+       }
+       s->state[0] = SHA224_H0;
+       s->state[1] = SHA224_H1;
+       s->state[2] = SHA224_H2;
+       s->state[3] = SHA224_H3;
+       s->state[4] = SHA224_H4;
+       s->state[5] = SHA224_H5;
+       s->state[6] = SHA224_H6;
+       s->state[7] = SHA224_H7;
+
+       return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+                                   SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
+                                   s->state);
+}
+
+struct n2_cipher_context {
+       int                     key_len;
+       int                     enc_type;
+       union {
+               u8              aes[AES_MAX_KEY_SIZE];
+               u8              des[DES_KEY_SIZE];
+               u8              des3[3 * DES_KEY_SIZE];
+               u8              arc4[258]; /* S-box, X, Y */
+       } key;
+};
+
+#define N2_CHUNK_ARR_LEN       16
+
+struct n2_crypto_chunk {
+       struct list_head        entry;
+       unsigned long           iv_paddr : 44;
+       unsigned long           arr_len : 20;
+       unsigned long           dest_paddr;
+       unsigned long           dest_final;
+       struct {
+               unsigned long   src_paddr : 44;
+               unsigned long   src_len : 20;
+       } arr[N2_CHUNK_ARR_LEN];
+};
+
+struct n2_request_context {
+       struct ablkcipher_walk  walk;
+       struct list_head        chunk_list;
+       struct n2_crypto_chunk  chunk;
+       u8                      temp_iv[16];
+};
+
+/* The SPU allows some level of flexibility for partial cipher blocks
+ * being specified in a descriptor.
+ *
+ * It merely requires that every descriptor's length field is at least
+ * as large as the cipher block size.  This means that a cipher block
+ * can span at most 2 descriptors.  However, this does not allow a
+ * partial block to span into the final descriptor as that would
+ * violate the rule (since every descriptor's length must be at least
+ * the block size).  So, for example, assuming an 8 byte block size:
+ *
+ *     0xe --> 0xa --> 0x8
+ *
+ * is a valid length sequence, whereas:
+ *
+ *     0xe --> 0xb --> 0x7
+ *
+ * is not a valid sequence.
+ */
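
A speculative checker for that rule (sketch only, assuming the request as a whole is block-aligned, as it is for ECB/CBC): every descriptor must be at least one block long and the stream must end on a block boundary, which accepts 0xe -> 0xa -> 0x8 and rejects 0xe -> 0xb -> 0x7 for an 8-byte block.

#include <stdbool.h>

/* Sketch only: validate a descriptor length sequence against the
 * partial-block rule described in the comment above.
 */
static bool desc_lengths_valid(const unsigned int *len, int n,
			       unsigned int block_size)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (len[i] < block_size)	/* each entry >= block size */
			return false;
		total += len[i];
	}
	return (total % block_size) == 0;	/* stream ends on a block */
}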
+
+struct n2_cipher_alg {
+       struct list_head        entry;
+       u8                      enc_type;
+       struct crypto_alg       alg;
+};
+
+static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
+{
+       struct crypto_alg *alg = tfm->__crt_alg;
+
+       return container_of(alg, struct n2_cipher_alg, alg);
+}
+
+struct n2_cipher_request_context {
+       struct ablkcipher_walk  walk;
+};
+
+static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+                        unsigned int keylen)
+{
+       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+       struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+       struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+       ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
+
+       switch (keylen) {
+       case AES_KEYSIZE_128:
+               ctx->enc_type |= ENC_TYPE_ALG_AES128;
+               break;
+       case AES_KEYSIZE_192:
+               ctx->enc_type |= ENC_TYPE_ALG_AES192;
+               break;
+       case AES_KEYSIZE_256:
+               ctx->enc_type |= ENC_TYPE_ALG_AES256;
+               break;
+       default:
+               crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+
+       ctx->key_len = keylen;
+       memcpy(ctx->key.aes, key, keylen);
+       return 0;
+}
+
+static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+                        unsigned int keylen)
+{
+       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+       struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+       struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+       u32 tmp[DES_EXPKEY_WORDS];
+       int err;
+
+       ctx->enc_type = n2alg->enc_type;
+
+       if (keylen != DES_KEY_SIZE) {
+               crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+
+       err = des_ekey(tmp, key);
+       if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+               return -EINVAL;
+       }
+
+       ctx->key_len = keylen;
+       memcpy(ctx->key.des, key, keylen);
+       return 0;
+}
+
+static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+                         unsigned int keylen)
+{
+       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+       struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+       struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+       ctx->enc_type = n2alg->enc_type;
+
+       if (keylen != (3 * DES_KEY_SIZE)) {
+               crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+       ctx->key_len = keylen;
+       memcpy(ctx->key.des3, key, keylen);
+       return 0;
+}
+
+static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+                         unsigned int keylen)
+{
+       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+       struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+       struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+       u8 *s = ctx->key.arc4;
+       u8 *x = s + 256;
+       u8 *y = x + 1;
+       int i, j, k;
+
+       ctx->enc_type = n2alg->enc_type;
+
+       j = k = 0;
+       *x = 0;
+       *y = 0;
+       for (i = 0; i < 256; i++)
+               s[i] = i;
+       for (i = 0; i < 256; i++) {
+               u8 a = s[i];
+               j = (j + key[k] + a) & 0xff;
+               s[i] = s[j];
+               s[j] = a;
+               if (++k >= keylen)
+                       k = 0;
+       }
+
+       return 0;
+}
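
The loop above is the standard RC4 key-scheduling algorithm, with the 256-byte S-box and the x/y stream indices packed into ctx->key.arc4 so the hardware can resume the keystream state. For reference, a sketch of the matching per-byte PRGA step (illustrative only, not the SPU interface):

/* Sketch only: one RC4 keystream (PRGA) step over the state laid
 * out by n2_arc4_setkey() (s = S-box, x/y = stream indices).
 */
static unsigned char rc4_next_byte(unsigned char *s,
				   unsigned char *x, unsigned char *y)
{
	unsigned char a, b;

	*x += 1;
	a = s[*x];
	*y += a;
	b = s[*y];
	s[*x] = b;
	s[*y] = a;
	return s[(unsigned char)(a + b)];
}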
+
+static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
+{
+       int this_len = nbytes;
+
+       this_len -= (nbytes & (block_size - 1));
+       return this_len > (1 << 16) ? (1 << 16) : this_len;
+}
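
For example, with an 8-byte block and nbytes = 0x1000e, the partial 6 bytes are masked off to give 0x10008, which is then clamped to the SPU's per-descriptor limit of 0x10000.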
+
+static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
+                           struct spu_queue *qp, bool encrypt)
+{
+       struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+       struct cwq_initial_entry *ent;
+       bool in_place;
+       int i;
+
+       ent = spu_queue_alloc(qp, cp->arr_len);
+       if (!ent) {
+               pr_info("queue_alloc() of %d entries failed\n",
+                       cp->arr_len);
+               return -EBUSY;
+       }
+
+       in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
+
+       ent->control = control_word_base(cp->arr[0].src_len,
+                                        0, ctx->enc_type, 0, 0,
+                                        false, true, false, encrypt,
+                                        OPCODE_ENCRYPT |
+                                        (in_place ? OPCODE_INPLACE_BIT : 0));
+       ent->src_addr = cp->arr[0].src_paddr;
+       ent->auth_key_addr = 0UL;
+       ent->auth_iv_addr = 0UL;
+       ent->final_auth_state_addr = 0UL;
+       ent->enc_key_addr = __pa(&ctx->key);
+       ent->enc_iv_addr = cp->iv_paddr;
+       ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
+
+       for (i = 1; i < cp->arr_len; i++) {
+               ent = spu_queue_next(qp, ent);
+
+               ent->control = cp->arr[i].src_len - 1;
+               ent->src_addr = cp->arr[i].src_paddr;
+               ent->auth_key_addr = 0UL;
+               ent->auth_iv_addr = 0UL;
+               ent->final_auth_state_addr = 0UL;
+               ent->enc_key_addr = 0UL;
+               ent->enc_iv_addr = 0UL;
+               ent->dest_addr = 0UL;
+       }
+       ent->control |= CONTROL_END_OF_BLOCK;
+
+       return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
+}
+
+static int n2_compute_chunks(struct ablkcipher_request *req)
+{
+       struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+       struct ablkcipher_walk *walk = &rctx->walk;
+       struct n2_crypto_chunk *chunk;
+       unsigned long dest_prev;
+       unsigned int tot_len;
+       bool prev_in_place;
+       int err, nbytes;
+
+       ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
+       err = ablkcipher_walk_phys(req, walk);
+       if (err)
+               return err;
+
+       INIT_LIST_HEAD(&rctx->chunk_list);
+
+       chunk = &rctx->chunk;
+       INIT_LIST_HEAD(&chunk->entry);
+
+       chunk->iv_paddr = 0UL;
+       chunk->arr_len = 0;
+       chunk->dest_paddr = 0UL;
+
+       prev_in_place = false;
+       dest_prev = ~0UL;
+       tot_len = 0;
+
+       while ((nbytes = walk->nbytes) != 0) {
+               unsigned long dest_paddr, src_paddr;
+               bool in_place;
+               int this_len;
+
+               src_paddr = (page_to_phys(walk->src.page) +
+                            walk->src.offset);
+               dest_paddr = (page_to_phys(walk->dst.page) +
+                             walk->dst.offset);
+               in_place = (src_paddr == dest_paddr);
+               this_len = cipher_descriptor_len(nbytes, walk->blocksize);
+
+               if (chunk->arr_len != 0) {
+                       if (in_place != prev_in_place ||
+                           (!prev_in_place &&
+                            dest_paddr != dest_prev) ||
+                           chunk->arr_len == N2_CHUNK_ARR_LEN ||
+                           tot_len + this_len > (1 << 16)) {
+                               chunk->dest_final = dest_prev;
+                               list_add_tail(&chunk->entry,
+                                             &rctx->chunk_list);
+                               chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
+                               if (!chunk) {
+                                       err = -ENOMEM;
+                                       break;
+                               }
+                               INIT_LIST_HEAD(&chunk->entry);
+                       }
+               }
+               if (chunk->arr_len == 0) {
+                       chunk->dest_paddr = dest_paddr;
+                       tot_len = 0;
+               }
+               chunk->arr[chunk->arr_len].src_paddr = src_paddr;
+               chunk->arr[chunk->arr_len].src_len = this_len;
+               chunk->arr_len++;
+
+               dest_prev = dest_paddr + this_len;
+               prev_in_place = in_place;
+               tot_len += this_len;
+
+               err = ablkcipher_walk_done(req, walk, nbytes - this_len);
+               if (err)
+                       break;
+       }
+       if (!err && chunk->arr_len != 0) {
+               chunk->dest_final = dest_prev;
+               list_add_tail(&chunk->entry, &rctx->chunk_list);
+       }
+
+       return err;
+}
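
In short, a chunk is flushed and a new one started whenever the in-place property flips, a gathered destination becomes discontiguous, the per-chunk array fills its N2_CHUNK_ARR_LEN slots, or the accumulated length would exceed the 2^16-byte descriptor budget; the first chunk lives inline in the request context and only overflow chunks are kzalloc'd.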
+
+static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
+{
+       struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+       struct n2_crypto_chunk *c, *tmp;
+
+       if (final_iv)
+               memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
+
+       ablkcipher_walk_complete(&rctx->walk);
+       list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+               list_del(&c->entry);
+               if (unlikely(c != &rctx->chunk))
+                       kfree(c);
+       }
+}
+
+static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
+{
+       struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+       struct crypto_tfm *tfm = req->base.tfm;
+       int err = n2_compute_chunks(req);
+       struct n2_crypto_chunk *c, *tmp;
+       unsigned long flags, hv_ret;
+       struct spu_queue *qp;
+
+       if (err)
+               return err;
+
+       qp = cpu_to_cwq[get_cpu()];
+       err = -ENODEV;
+       if (!qp)
+               goto out;
+
+       spin_lock_irqsave(&qp->lock, flags);
+
+       list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+               err = __n2_crypt_chunk(tfm, c, qp, encrypt);
+               if (err)
+                       break;
+               list_del(&c->entry);
+               if (unlikely(c != &rctx->chunk))
+                       kfree(c);
+       }
+       if (!err) {
+               hv_ret = wait_for_tail(qp);
+               if (hv_ret != HV_EOK)
+                       err = -EINVAL;
+       }
+
+       spin_unlock_irqrestore(&qp->lock, flags);
+
+       put_cpu();
+
+out:
+       n2_chunk_complete(req, NULL);
+       return err;
+}
+
+static int n2_encrypt_ecb(struct ablkcipher_request *req)
+{
+       return n2_do_ecb(req, true);
+}
+
+static int n2_decrypt_ecb(struct ablkcipher_request *req)
+{
+       return n2_do_ecb(req, false);
+}
+
+static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
+{
+       struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+       struct crypto_tfm *tfm = req->base.tfm;
+       unsigned long flags, hv_ret, iv_paddr;
+       int err = n2_compute_chunks(req);
+       struct n2_crypto_chunk *c, *tmp;
+       struct spu_queue *qp;
+       void *final_iv_addr;
+
+       final_iv_addr = NULL;
+
+       if (err)
+               return err;
+
+       qp = cpu_to_cwq[get_cpu()];
+       err = -ENODEV;
+       if (!qp)
+               goto out;
+
+       spin_lock_irqsave(&qp->lock, flags);
+
+       if (encrypt) {
+               iv_paddr = __pa(rctx->walk.iv);
+               list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
+                                        entry) {
+                       c->iv_paddr = iv_paddr;
+                       err = __n2_crypt_chunk(tfm, c, qp, true);
+                       if (err)
+                               break;
+                       iv_paddr = c->dest_final - rctx->walk.blocksize;
+                       list_del(&c->entry);
+                       if (unlikely(c != &rctx->chunk))
+                               kfree(c);
+               }
+               final_iv_addr = __va(iv_paddr);
+       } else {
+               list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
+                                                entry) {
+                       if (c == &rctx->chunk) {
+                               iv_paddr = __pa(rctx->walk.iv);
+                       } else {
+                               iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
+                                           tmp->arr[tmp->arr_len-1].src_len -
+                                           rctx->walk.blocksize);
+                       }
+                       if (!final_iv_addr) {
+                               unsigned long pa;
+
+                               pa = (c->arr[c->arr_len-1].src_paddr +
+                                     c->arr[c->arr_len-1].src_len -
+                                     rctx->walk.blocksize);
+                               final_iv_addr = rctx->temp_iv;
+                               memcpy(rctx->temp_iv, __va(pa),
+                                      rctx->walk.blocksize);
+                       }
+                       c->iv_paddr = iv_paddr;
+                       err = __n2_crypt_chunk(tfm, c, qp, false);
+                       if (err)
+                               break;
+                       list_del(&c->entry);
+                       if (unlikely(c != &rctx->chunk))
+                               kfree(c);
+               }
+       }
+       if (!err) {
+               hv_ret = wait_for_tail(qp);
+               if (hv_ret != HV_EOK)
+                       err = -EINVAL;
+       }
+
+       spin_unlock_irqrestore(&qp->lock, flags);
+
+       put_cpu();
+
+out:
+       n2_chunk_complete(req, err ? NULL : final_iv_addr);
+       return err;
+}
+
+static int n2_encrypt_chaining(struct ablkcipher_request *req)
+{
+       return n2_do_chaining(req, true);
+}
+
+static int n2_decrypt_chaining(struct ablkcipher_request *req)
+{
+       return n2_do_chaining(req, false);
+}
+
+struct n2_cipher_tmpl {
+       const char              *name;
+       const char              *drv_name;
+       u8                      block_size;
+       u8                      enc_type;
+       struct ablkcipher_alg   ablkcipher;
+};
+
+static const struct n2_cipher_tmpl cipher_tmpls[] = {
+       /* ARC4: only ECB is supported (chaining bits ignored) */
+       {       .name           = "ecb(arc4)",
+               .drv_name       = "ecb-arc4",
+               .block_size     = 1,
+               .enc_type       = (ENC_TYPE_ALG_RC4_STREAM |
+                                  ENC_TYPE_CHAINING_ECB),
+               .ablkcipher     = {
+                       .min_keysize    = 1,
+                       .max_keysize    = 256,
+                       .setkey         = n2_arc4_setkey,
+                       .encrypt        = n2_encrypt_ecb,
+                       .decrypt        = n2_decrypt_ecb,
+               },
+       },
+
+       /* DES: ECB, CBC and CFB are supported */
+       {       .name           = "ecb(des)",
+               .drv_name       = "ecb-des",
+               .block_size     = DES_BLOCK_SIZE,
+               .enc_type       = (ENC_TYPE_ALG_DES |
+                                  ENC_TYPE_CHAINING_ECB),
+               .ablkcipher     = {
+                       .min_keysize    = DES_KEY_SIZE,
+                       .max_keysize    = DES_KEY_SIZE,
+                       .setkey         = n2_des_setkey,
+                       .encrypt        = n2_encrypt_ecb,
+                       .decrypt        = n2_decrypt_ecb,
+               },
+       },
+       {       .name           = "cbc(des)",
+               .drv_name       = "cbc-des",
+               .block_size     = DES_BLOCK_SIZE,
+               .enc_type       = (ENC_TYPE_ALG_DES |
+                                  ENC_TYPE_CHAINING_CBC),
+               .ablkcipher     = {
+                       .ivsize         = DES_BLOCK_SIZE,
+                       .min_keysize    = DES_KEY_SIZE,
+                       .max_keysize    = DES_KEY_SIZE,
+                       .setkey         = n2_des_setkey,
+                       .encrypt        = n2_encrypt_chaining,
+                       .decrypt        = n2_decrypt_chaining,
+               },
+       },
+       {       .name           = "cfb(des)",
+               .drv_name       = "cfb-des",
+               .block_size     = DES_BLOCK_SIZE,
+               .enc_type       = (ENC_TYPE_ALG_DES |
+                                  ENC_TYPE_CHAINING_CFB),
+               .ablkcipher     = {
+                       .min_keysize    = DES_KEY_SIZE,
+                       .max_keysize    = DES_KEY_SIZE,
+                       .setkey         = n2_des_setkey,
+                       .encrypt        = n2_encrypt_chaining,
+                       .decrypt        = n2_decrypt_chaining,
+               },
+       },
+
+       /* 3DES: ECB, CBC and CFB are supported */
+       {       .name           = "ecb(des3_ede)",
+               .drv_name       = "ecb-3des",
+               .block_size     = DES_BLOCK_SIZE,
+               .enc_type       = (ENC_TYPE_ALG_3DES |
+                                  ENC_TYPE_CHAINING_ECB),
+               .ablkcipher     = {
+                       .min_keysize    = 3 * DES_KEY_SIZE,
+                       .max_keysize    = 3 * DES_KEY_SIZE,
+                       .setkey         = n2_3des_setkey,
+                       .encrypt        = n2_encrypt_ecb,
+                       .decrypt        = n2_decrypt_ecb,
+               },
+       },
+       {       .name           = "cbc(des3_ede)",
+               .drv_name       = "cbc-3des",
+               .block_size     = DES_BLOCK_SIZE,
+               .enc_type       = (ENC_TYPE_ALG_3DES |
+                                  ENC_TYPE_CHAINING_CBC),
+               .ablkcipher     = {
+                       .ivsize         = DES_BLOCK_SIZE,
+                       .min_keysize    = 3 * DES_KEY_SIZE,
+                       .max_keysize    = 3 * DES_KEY_SIZE,
+                       .setkey         = n2_3des_setkey,
+                       .encrypt        = n2_encrypt_chaining,
+                       .decrypt        = n2_decrypt_chaining,
+               },
+       },
+       {       .name           = "cfb(des3_ede)",
+               .drv_name       = "cfb-3des",
+               .block_size     = DES_BLOCK_SIZE,
+               .enc_type       = (ENC_TYPE_ALG_3DES |
+                                  ENC_TYPE_CHAINING_CFB),
+               .ablkcipher     = {
+                       .min_keysize    = 3 * DES_KEY_SIZE,
+                       .max_keysize    = 3 * DES_KEY_SIZE,
+                       .setkey         = n2_3des_setkey,
+                       .encrypt        = n2_encrypt_chaining,
+                       .decrypt        = n2_decrypt_chaining,
+               },
+       },
+       /* AES: ECB, CBC and CTR are supported */
+       {       .name           = "ecb(aes)",
+               .drv_name       = "ecb-aes",
+               .block_size     = AES_BLOCK_SIZE,
+               .enc_type       = (ENC_TYPE_ALG_AES128 |
+                                  ENC_TYPE_CHAINING_ECB),
+               .ablkcipher     = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = n2_aes_setkey,
+                       .encrypt        = n2_encrypt_ecb,
+                       .decrypt        = n2_decrypt_ecb,
+               },
+       },
+       {       .name           = "cbc(aes)",
+               .drv_name       = "cbc-aes",
+               .block_size     = AES_BLOCK_SIZE,
+               .enc_type       = (ENC_TYPE_ALG_AES128 |
+                                  ENC_TYPE_CHAINING_CBC),
+               .ablkcipher     = {
+                       .ivsize         = AES_BLOCK_SIZE,
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = n2_aes_setkey,
+                       .encrypt        = n2_encrypt_chaining,
+                       .decrypt        = n2_decrypt_chaining,
+               },
+       },
+       {       .name           = "ctr(aes)",
+               .drv_name       = "ctr-aes",
+               .block_size     = AES_BLOCK_SIZE,
+               .enc_type       = (ENC_TYPE_ALG_AES128 |
+                                  ENC_TYPE_CHAINING_COUNTER),
+               .ablkcipher     = {
+                       .ivsize         = AES_BLOCK_SIZE,
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = n2_aes_setkey,
+                       .encrypt        = n2_encrypt_chaining,
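+                       /* CTR mode is its own inverse, so decryption
+                        * reuses the encrypt path.
+                        */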
+                       .decrypt        = n2_encrypt_chaining,
+               },
+       },
+
+};
+#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
+
+static LIST_HEAD(cipher_algs);
+
+struct n2_hash_tmpl {
+       const char      *name;
+       int             (*digest)(struct ahash_request *req);
+       u8              digest_size;
+       u8              block_size;
+};
+static const struct n2_hash_tmpl hash_tmpls[] = {
+       { .name         = "md5",
+         .digest       = n2_md5_async_digest,
+         .digest_size  = MD5_DIGEST_SIZE,
+         .block_size   = MD5_HMAC_BLOCK_SIZE },
+       { .name         = "sha1",
+         .digest       = n2_sha1_async_digest,
+         .digest_size  = SHA1_DIGEST_SIZE,
+         .block_size   = SHA1_BLOCK_SIZE },
+       { .name         = "sha256",
+         .digest       = n2_sha256_async_digest,
+         .digest_size  = SHA256_DIGEST_SIZE,
+         .block_size   = SHA256_BLOCK_SIZE },
+       { .name         = "sha224",
+         .digest       = n2_sha224_async_digest,
+         .digest_size  = SHA224_DIGEST_SIZE,
+         .block_size   = SHA224_BLOCK_SIZE },
+};
+#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
+
+struct n2_ahash_alg {
+       struct list_head        entry;
+       struct ahash_alg        alg;
+};
+static LIST_HEAD(ahash_algs);
+
+static int algs_registered;
+
+static void __n2_unregister_algs(void)
+{
+       struct n2_cipher_alg *cipher, *cipher_tmp;
+       struct n2_ahash_alg *alg, *alg_tmp;
+
+       list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
+               crypto_unregister_alg(&cipher->alg);
+               list_del(&cipher->entry);
+               kfree(cipher);
+       }
+       list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
+               crypto_unregister_ahash(&alg->alg);
+               list_del(&alg->entry);
+               kfree(alg);
+       }
+}
+
+static int n2_cipher_cra_init(struct crypto_tfm *tfm)
+{
+       tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
+       return 0;
+}
+
+static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+{
+       struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+       struct crypto_alg *alg;
+       int err;
+
+       if (!p)
+               return -ENOMEM;
+
+       alg = &p->alg;
+
+       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
+       alg->cra_priority = N2_CRA_PRIORITY;
+       alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+       alg->cra_blocksize = tmpl->block_size;
+       p->enc_type = tmpl->enc_type;
+       alg->cra_ctxsize = sizeof(struct n2_cipher_context);
+       alg->cra_type = &crypto_ablkcipher_type;
+       alg->cra_u.ablkcipher = tmpl->ablkcipher;
+       alg->cra_init = n2_cipher_cra_init;
+       alg->cra_module = THIS_MODULE;
+
+       list_add(&p->entry, &cipher_algs);
+       err = crypto_register_alg(alg);
+       if (err) {
+               list_del(&p->entry);
+               kfree(p);
+       }
+       return err;
+}
+
+static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+{
+       struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+       struct hash_alg_common *halg;
+       struct crypto_alg *base;
+       struct ahash_alg *ahash;
+       int err;
+
+       if (!p)
+               return -ENOMEM;
+
+       ahash = &p->alg;
+       ahash->init = n2_hash_async_init;
+       ahash->update = n2_hash_async_update;
+       ahash->final = n2_hash_async_final;
+       ahash->finup = n2_hash_async_finup;
+       ahash->digest = tmpl->digest;
+
+       halg = &ahash->halg;
+       halg->digestsize = tmpl->digest_size;
+
+       base = &halg->base;
+       snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+       snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
+       base->cra_priority = N2_CRA_PRIORITY;
+       base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
+       base->cra_blocksize = tmpl->block_size;
+       base->cra_ctxsize = sizeof(struct n2_hash_ctx);
+       base->cra_module = THIS_MODULE;
+       base->cra_init = n2_hash_cra_init;
+       base->cra_exit = n2_hash_cra_exit;
+
+       list_add(&p->entry, &ahash_algs);
+       err = crypto_register_ahash(ahash);
+       if (err) {
+               list_del(&p->entry);
+               kfree(p);
+       }
+       return err;
+}
+
+static int __devinit n2_register_algs(void)
+{
+       int i, err = 0;
+
+       mutex_lock(&spu_lock);
+       if (algs_registered++)
+               goto out;
+
+       for (i = 0; i < NUM_HASH_TMPLS; i++) {
+               err = __n2_register_one_ahash(&hash_tmpls[i]);
+               if (err) {
+                       __n2_unregister_algs();
+                       goto out;
+               }
+       }
+       for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
+               err = __n2_register_one_cipher(&cipher_tmpls[i]);
+               if (err) {
+                       __n2_unregister_algs();
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&spu_lock);
+       return err;
+}
+
+static void __exit n2_unregister_algs(void)
+{
+       mutex_lock(&spu_lock);
+       if (!--algs_registered)
+               __n2_unregister_algs();
+       mutex_unlock(&spu_lock);
+}
+
+/* To map CWQ queues to interrupt sources, the hypervisor API provides
+ * a devino.  This isn't very useful to us because all of the
+ * interrupts listed in the of_device node have been translated to
+ * Linux virtual IRQ cookie numbers.
+ *
+ * So we have to back-translate, going through the 'intr' and 'ino'
+ * property tables of the n2cp MDESC node, matching it with the OF
+ * 'interrupts' property entries, in order to figure out which
+ * devino goes to which already-translated IRQ.
+ */
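+/* For example (hypothetical values): if the MDESC 'ino' table were { 4, 5 }
+ * and the 'intr' table { 1, 2 }, and the OF 'interrupts' property <1 2>,
+ * then devino 5 back-translates to intr 2, which sits at index 1 of the
+ * 'interrupts' property.
+ */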
+static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
+                            unsigned long dev_ino)
+{
+       const unsigned int *dev_intrs;
+       unsigned int intr;
+       int i;
+
+       for (i = 0; i < ip->num_intrs; i++) {
+               if (ip->ino_table[i].ino == dev_ino)
+                       break;
+       }
+       if (i == ip->num_intrs)
+               return -ENODEV;
+
+       intr = ip->ino_table[i].intr;
+
+       dev_intrs = of_get_property(dev->node, "interrupts", NULL);
+       if (!dev_intrs)
+               return -ENODEV;
+
+       for (i = 0; i < dev->num_irqs; i++) {
+               if (dev_intrs[i] == intr)
+                       return i;
+       }
+
+       return -ENODEV;
+}
+
+static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
+                      const char *irq_name, struct spu_queue *p,
+                      irq_handler_t handler)
+{
+       unsigned long herr;
+       int index;
+
+       herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
+       if (herr)
+               return -EINVAL;
+
+       index = find_devino_index(dev, ip, p->devino);
+       if (index < 0)
+               return index;
+
+       p->irq = dev->irqs[index];
+
+       sprintf(p->irq_name, "%s-%d", irq_name, index);
+
+       return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
+                          p->irq_name, p);
+}
+
+static struct kmem_cache *queue_cache[2];
+
+static void *new_queue(unsigned long q_type)
+{
+       return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
+}
+
+static void free_queue(void *p, unsigned long q_type)
+{
+       return kmem_cache_free(queue_cache[q_type - 1], p);
+}
+
+static int queue_cache_init(void)
+{
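+       /* Each queue is a single slab object; the entry-size alignment
+        * (64 bytes) satisfies the base-address alignment that the
+        * ncs_qconf() hypercall requires of the queue's real address.
+        */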
+       if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+               queue_cache[HV_NCS_QTYPE_MAU - 1] =
+                       kmem_cache_create("mau_queue",
+                                         (MAU_NUM_ENTRIES *
+                                          MAU_ENTRY_SIZE),
+                                         MAU_ENTRY_SIZE, 0, NULL);
+       if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+               return -ENOMEM;
+
+       if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
+               queue_cache[HV_NCS_QTYPE_CWQ - 1] =
+                       kmem_cache_create("cwq_queue",
+                                         (CWQ_NUM_ENTRIES *
+                                          CWQ_ENTRY_SIZE),
+                                         CWQ_ENTRY_SIZE, 0, NULL);
+       if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+               kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void queue_cache_destroy(void)
+{
+       kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+       kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+}
+
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+{
+       cpumask_var_t old_allowed;
+       unsigned long hv_ret;
+
+       if (cpumask_empty(&p->sharing))
+               return -EINVAL;
+
+       if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_copy(old_allowed, &current->cpus_allowed);
+
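+       /* The qconf hypercall operates on the local stream processing
+        * unit, so temporarily bind this task to a CPU that shares the
+        * queue before configuring it.
+        */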
+       set_cpus_allowed_ptr(current, &p->sharing);
+
+       hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
+                                CWQ_NUM_ENTRIES, &p->qhandle);
+       if (!hv_ret)
+               sun4v_ncs_sethead_marker(p->qhandle, 0);
+
+       set_cpus_allowed_ptr(current, old_allowed);
+
+       free_cpumask_var(old_allowed);
+
+       return (hv_ret ? -EINVAL : 0);
+}
+
+static int spu_queue_setup(struct spu_queue *p)
+{
+       int err;
+
+       p->q = new_queue(p->q_type);
+       if (!p->q)
+               return -ENOMEM;
+
+       err = spu_queue_register(p, p->q_type);
+       if (err) {
+               free_queue(p->q, p->q_type);
+               p->q = NULL;
+       }
+
+       return err;
+}
+
+static void spu_queue_destroy(struct spu_queue *p)
+{
+       unsigned long hv_ret;
+
+       if (!p->q)
+               return;
+
+       hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
+
+       if (!hv_ret)
+               free_queue(p->q, p->q_type);
+}
+
+static void spu_list_destroy(struct list_head *list)
+{
+       struct spu_queue *p, *n;
+
+       list_for_each_entry_safe(p, n, list, list) {
+               int i;
+
+               for (i = 0; i < NR_CPUS; i++) {
+                       if (cpu_to_cwq[i] == p)
+                               cpu_to_cwq[i] = NULL;
+               }
+
+               if (p->irq) {
+                       free_irq(p->irq, p);
+                       p->irq = 0;
+               }
+               spu_queue_destroy(p);
+               list_del(&p->list);
+               kfree(p);
+       }
+}
+
+/* Walk the backward arcs of a CWQ 'exec-unit' node,
+ * gathering cpu membership information.
+ */
+static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
+                              struct of_device *dev,
+                              u64 node, struct spu_queue *p,
+                              struct spu_queue **table)
+{
+       u64 arc;
+
+       mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
+               u64 tgt = mdesc_arc_target(mdesc, arc);
+               const char *name = mdesc_node_name(mdesc, tgt);
+               const u64 *id;
+
+               if (strcmp(name, "cpu"))
+                       continue;
+               id = mdesc_get_property(mdesc, tgt, "id", NULL);
+               if (!id)
+                       continue;
+               if (table[*id] != NULL) {
+                       dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
+                               dev->node->full_name);
+                       return -EINVAL;
+               }
+               cpu_set(*id, p->sharing);
+               table[*id] = p;
+       }
+       return 0;
+}
+
+/* Process an 'exec-unit' MDESC node of type 'cwq'.  */
+static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
+                           struct of_device *dev, struct mdesc_handle *mdesc,
+                           u64 node, const char *iname, unsigned long q_type,
+                           irq_handler_t handler, struct spu_queue **table)
+{
+       struct spu_queue *p;
+       int err;
+
+       p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
+       if (!p) {
+               dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
+                       dev->node->full_name);
+               return -ENOMEM;
+       }
+
+       cpus_clear(p->sharing);
+       spin_lock_init(&p->lock);
+       p->q_type = q_type;
+       INIT_LIST_HEAD(&p->jobs);
+       list_add(&p->list, list);
+
+       err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
+       if (err)
+               return err;
+
+       err = spu_queue_setup(p);
+       if (err)
+               return err;
+
+       return spu_map_ino(dev, ip, iname, p, handler);
+}
+
+static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
+                         struct spu_mdesc_info *ip, struct list_head *list,
+                         const char *exec_name, unsigned long q_type,
+                         irq_handler_t handler, struct spu_queue **table)
+{
+       int err = 0;
+       u64 node;
+
+       mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
+               const char *type;
+
+               type = mdesc_get_property(mdesc, node, "type", NULL);
+               if (!type || strcmp(type, exec_name))
+                       continue;
+
+               err = handle_exec_unit(ip, list, dev, mdesc, node,
+                                      exec_name, q_type, handler, table);
+               if (err) {
+                       spu_list_destroy(list);
+                       break;
+               }
+       }
+
+       return err;
+}
+
+static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
+                                  struct spu_mdesc_info *ip)
+{
+       const u64 *intr, *ino;
+       int intr_len, ino_len;
+       int i;
+
+       intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
+       if (!intr)
+               return -ENODEV;
+
+       ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
+       if (!ino)
+               return -ENODEV;
+
+       if (intr_len != ino_len)
+               return -EINVAL;
+
+       ip->num_intrs = intr_len / sizeof(u64);
+       ip->ino_table = kzalloc((sizeof(struct ino_blob) *
+                                ip->num_intrs),
+                               GFP_KERNEL);
+       if (!ip->ino_table)
+               return -ENOMEM;
+
+       for (i = 0; i < ip->num_intrs; i++) {
+               struct ino_blob *b = &ip->ino_table[i];
+               b->intr = intr[i];
+               b->ino = ino[i];
+       }
+
+       return 0;
+}
+
+static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
+                                         struct of_device *dev,
+                                         struct spu_mdesc_info *ip,
+                                         const char *node_name)
+{
+       const unsigned int *reg;
+       u64 node;
+
+       reg = of_get_property(dev->node, "reg", NULL);
+       if (!reg)
+               return -ENODEV;
+
+       mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
+               const char *name;
+               const u64 *chdl;
+
+               name = mdesc_get_property(mdesc, node, "name", NULL);
+               if (!name || strcmp(name, node_name))
+                       continue;
+               chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
+               if (!chdl || (*chdl != *reg))
+                       continue;
+               ip->cfg_handle = *chdl;
+               return get_irq_props(mdesc, node, ip);
+       }
+
+       return -ENODEV;
+}
+
+static unsigned long n2_spu_hvapi_major;
+static unsigned long n2_spu_hvapi_minor;
+
+static int __devinit n2_spu_hvapi_register(void)
+{
+       int err;
+
+       n2_spu_hvapi_major = 2;
+       n2_spu_hvapi_minor = 0;
+
+       err = sun4v_hvapi_register(HV_GRP_NCS,
+                                  n2_spu_hvapi_major,
+                                  &n2_spu_hvapi_minor);
+
+       if (!err)
+               pr_info("Registered NCS HVAPI version %lu.%lu\n",
+                       n2_spu_hvapi_major,
+                       n2_spu_hvapi_minor);
+
+       return err;
+}
+
+static void n2_spu_hvapi_unregister(void)
+{
+       sun4v_hvapi_unregister(HV_GRP_NCS);
+}
+
+static int global_ref;
+
+static int __devinit grab_global_resources(void)
+{
+       int err = 0;
+
+       mutex_lock(&spu_lock);
+
+       if (global_ref++)
+               goto out;
+
+       err = n2_spu_hvapi_register();
+       if (err)
+               goto out;
+
+       err = queue_cache_init();
+       if (err)
+               goto out_hvapi_release;
+
+       err = -ENOMEM;
+       cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+                            GFP_KERNEL);
+       if (!cpu_to_cwq)
+               goto out_queue_cache_destroy;
+
+       cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+                            GFP_KERNEL);
+       if (!cpu_to_mau)
+               goto out_free_cwq_table;
+
+       err = 0;
+
+out:
+       if (err)
+               global_ref--;
+       mutex_unlock(&spu_lock);
+       return err;
+
+out_free_cwq_table:
+       kfree(cpu_to_cwq);
+       cpu_to_cwq = NULL;
+
+out_queue_cache_destroy:
+       queue_cache_destroy();
+
+out_hvapi_release:
+       n2_spu_hvapi_unregister();
+       goto out;
+}
+
+static void release_global_resources(void)
+{
+       mutex_lock(&spu_lock);
+       if (!--global_ref) {
+               kfree(cpu_to_cwq);
+               cpu_to_cwq = NULL;
+
+               kfree(cpu_to_mau);
+               cpu_to_mau = NULL;
+
+               queue_cache_destroy();
+               n2_spu_hvapi_unregister();
+       }
+       mutex_unlock(&spu_lock);
+}
+
+static struct n2_crypto * __devinit alloc_n2cp(void)
+{
+       struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
+
+       if (np)
+               INIT_LIST_HEAD(&np->cwq_list);
+
+       return np;
+}
+
+static void free_n2cp(struct n2_crypto *np)
+{
+       if (np->cwq_info.ino_table) {
+               kfree(np->cwq_info.ino_table);
+               np->cwq_info.ino_table = NULL;
+       }
+
+       kfree(np);
+}
+
+static void __devinit n2_spu_driver_version(void)
+{
+       static int n2_spu_version_printed;
+
+       if (n2_spu_version_printed++ == 0)
+               pr_info("%s", version);
+}
+
+static int __devinit n2_crypto_probe(struct of_device *dev,
+                                    const struct of_device_id *match)
+{
+       struct mdesc_handle *mdesc;
+       const char *full_name;
+       struct n2_crypto *np;
+       int err;
+
+       n2_spu_driver_version();
+
+       full_name = dev->node->full_name;
+       pr_info("Found N2CP at %s\n", full_name);
+
+       np = alloc_n2cp();
+       if (!np) {
+               dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
+                       full_name);
+               return -ENOMEM;
+       }
+
+       err = grab_global_resources();
+       if (err) {
+               dev_err(&dev->dev, "%s: Unable to grab "
+                       "global resources.\n", full_name);
+               goto out_free_n2cp;
+       }
+
+       mdesc = mdesc_grab();
+
+       if (!mdesc) {
+               dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+                       full_name);
+               err = -ENODEV;
+               goto out_free_global;
+       }
+       err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
+       if (err) {
+               dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+                       full_name);
+               mdesc_release(mdesc);
+               goto out_free_global;
+       }
+
+       err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
+                            "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
+                            cpu_to_cwq);
+       mdesc_release(mdesc);
+
+       if (err) {
+               dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
+                       full_name);
+               goto out_free_global;
+       }
+
+       err = n2_register_algs();
+       if (err) {
+               dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
+                       full_name);
+               goto out_free_spu_list;
+       }
+
+       dev_set_drvdata(&dev->dev, np);
+
+       return 0;
+
+out_free_spu_list:
+       spu_list_destroy(&np->cwq_list);
+
+out_free_global:
+       release_global_resources();
+
+out_free_n2cp:
+       free_n2cp(np);
+
+       return err;
+}
+
+static int __devexit n2_crypto_remove(struct of_device *dev)
+{
+       struct n2_crypto *np = dev_get_drvdata(&dev->dev);
+}
+
+       spu_list_destroy(&np->cwq_list);
+
+       release_global_resources();
+
+       free_n2cp(np);
+
+       return 0;
+}
+
+static struct n2_mau * __devinit alloc_ncp(void)
+{
+       struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
+
+       if (mp)
+               INIT_LIST_HEAD(&mp->mau_list);
+
+       return mp;
+}
+
+static void free_ncp(struct n2_mau *mp)
+{
+       if (mp->mau_info.ino_table) {
+               kfree(mp->mau_info.ino_table);
+               mp->mau_info.ino_table = NULL;
+       }
+
+       kfree(mp);
+}
+
+static int __devinit n2_mau_probe(struct of_device *dev,
+                                    const struct of_device_id *match)
+{
+       struct mdesc_handle *mdesc;
+       const char *full_name;
+       struct n2_mau *mp;
+       int err;
+
+       n2_spu_driver_version();
+
+       full_name = dev->node->full_name;
+       pr_info("Found NCP at %s\n", full_name);
+
+       mp = alloc_ncp();
+       if (!mp) {
+               dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
+                       full_name);
+               return -ENOMEM;
+       }
+
+       err = grab_global_resources();
+       if (err) {
+               dev_err(&dev->dev, "%s: Unable to grab "
+                       "global resources.\n", full_name);
+               goto out_free_ncp;
+       }
+
+       mdesc = mdesc_grab();
+
+       if (!mdesc) {
+               dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+                       full_name);
+               err = -ENODEV;
+               goto out_free_global;
+       }
+
+       err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
+       if (err) {
+               dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+                       full_name);
+               mdesc_release(mdesc);
+               goto out_free_global;
+       }
+
+       err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
+                            "mau", HV_NCS_QTYPE_MAU, mau_intr,
+                            cpu_to_mau);
+       mdesc_release(mdesc);
+
+       if (err) {
+               dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
+                       full_name);
+               goto out_free_global;
+       }
+
+       dev_set_drvdata(&dev->dev, mp);
+
+       return 0;
+
+out_free_global:
+       release_global_resources();
+
+out_free_ncp:
+       free_ncp(mp);
+
+       return err;
+}
+
+static int __devexit n2_mau_remove(struct of_device *dev)
+{
+       struct n2_mau *mp = dev_get_drvdata(&dev->dev);
+
+       spu_list_destroy(&mp->mau_list);
+
+       release_global_resources();
+
+       free_ncp(mp);
+
+       return 0;
+}
+
+static struct of_device_id n2_crypto_match[] = {
+       {
+               .name = "n2cp",
+               .compatible = "SUNW,n2-cwq",
+       },
+       {
+               .name = "n2cp",
+               .compatible = "SUNW,vf-cwq",
+       },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, n2_crypto_match);
+
+static struct of_platform_driver n2_crypto_driver = {
+       .name           =       "n2cp",
+       .match_table    =       n2_crypto_match,
+       .probe          =       n2_crypto_probe,
+       .remove         =       __devexit_p(n2_crypto_remove),
+};
+
+static struct of_device_id n2_mau_match[] = {
+       {
+               .name = "ncp",
+               .compatible = "SUNW,n2-mau",
+       },
+       {
+               .name = "ncp",
+               .compatible = "SUNW,vf-mau",
+       },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, n2_mau_match);
+
+static struct of_platform_driver n2_mau_driver = {
+       .name           =       "ncp",
+       .match_table    =       n2_mau_match,
+       .probe          =       n2_mau_probe,
+       .remove         =       __devexit_p(n2_mau_remove),
+};
+
+static int __init n2_init(void)
+{
+       int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
+
+       if (!err) {
+               err = of_register_driver(&n2_mau_driver, &of_bus_type);
+               if (err)
+                       of_unregister_driver(&n2_crypto_driver);
+       }
+       return err;
+}
+
+static void __exit n2_exit(void)
+{
+       of_unregister_driver(&n2_mau_driver);
+       of_unregister_driver(&n2_crypto_driver);
+}
+
+module_init(n2_init);
+module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
new file mode 100644 (file)
index 0000000..4bcbbea
--- /dev/null
@@ -0,0 +1,231 @@
+#ifndef _N2_CORE_H
+#define _N2_CORE_H
+
+#ifndef __ASSEMBLY__
+
+struct ino_blob {
+       u64                     intr;
+       u64                     ino;
+};
+
+struct spu_mdesc_info {
+       u64                     cfg_handle;
+       struct ino_blob         *ino_table;
+       int                     num_intrs;
+};
+
+struct n2_crypto {
+       struct spu_mdesc_info   cwq_info;
+       struct list_head        cwq_list;
+};
+
+struct n2_mau {
+       struct spu_mdesc_info   mau_info;
+       struct list_head        mau_list;
+};
+
+#define CWQ_ENTRY_SIZE         64
+#define CWQ_NUM_ENTRIES                64
+
+#define MAU_ENTRY_SIZE         64
+#define MAU_NUM_ENTRIES                64
+
+struct cwq_initial_entry {
+       u64                     control;
+       u64                     src_addr;
+       u64                     auth_key_addr;
+       u64                     auth_iv_addr;
+       u64                     final_auth_state_addr;
+       u64                     enc_key_addr;
+       u64                     enc_iv_addr;
+       u64                     dest_addr;
+};
+
+struct cwq_ext_entry {
+       u64                     len;
+       u64                     src_addr;
+       u64                     resv1;
+       u64                     resv2;
+       u64                     resv3;
+       u64                     resv4;
+       u64                     resv5;
+       u64                     resv6;
+};
+
+struct cwq_final_entry {
+       u64                     control;
+       u64                     src_addr;
+       u64                     resv1;
+       u64                     resv2;
+       u64                     resv3;
+       u64                     resv4;
+       u64                     resv5;
+       u64                     resv6;
+};
+
+#define CONTROL_LEN                    0x000000000000ffffULL
+#define CONTROL_LEN_SHIFT              0
+#define CONTROL_HMAC_KEY_LEN           0x0000000000ff0000ULL
+#define CONTROL_HMAC_KEY_LEN_SHIFT     16
+#define CONTROL_ENC_TYPE               0x00000000ff000000ULL
+#define CONTROL_ENC_TYPE_SHIFT         24
+#define  ENC_TYPE_ALG_RC4_STREAM       0x00ULL
+#define  ENC_TYPE_ALG_RC4_NOSTREAM     0x04ULL
+#define  ENC_TYPE_ALG_DES              0x08ULL
+#define  ENC_TYPE_ALG_3DES             0x0cULL
+#define  ENC_TYPE_ALG_AES128           0x10ULL
+#define  ENC_TYPE_ALG_AES192           0x14ULL
+#define  ENC_TYPE_ALG_AES256           0x18ULL
+#define  ENC_TYPE_ALG_RESERVED         0x1cULL
+#define  ENC_TYPE_ALG_MASK             0x1cULL
+#define  ENC_TYPE_CHAINING_ECB         0x00ULL
+#define  ENC_TYPE_CHAINING_CBC         0x01ULL
+#define  ENC_TYPE_CHAINING_CFB         0x02ULL
+#define  ENC_TYPE_CHAINING_COUNTER     0x03ULL
+#define  ENC_TYPE_CHAINING_MASK                0x03ULL
+#define CONTROL_AUTH_TYPE              0x0000001f00000000ULL
+#define CONTROL_AUTH_TYPE_SHIFT                32
+#define  AUTH_TYPE_RESERVED            0x00ULL
+#define  AUTH_TYPE_MD5                 0x01ULL
+#define  AUTH_TYPE_SHA1                        0x02ULL
+#define  AUTH_TYPE_SHA256              0x03ULL
+#define  AUTH_TYPE_CRC32               0x04ULL
+#define  AUTH_TYPE_HMAC_MD5            0x05ULL
+#define  AUTH_TYPE_HMAC_SHA1           0x06ULL
+#define  AUTH_TYPE_HMAC_SHA256         0x07ULL
+#define  AUTH_TYPE_TCP_CHECKSUM                0x08ULL
+#define  AUTH_TYPE_SSL_HMAC_MD5                0x09ULL
+#define  AUTH_TYPE_SSL_HMAC_SHA1       0x0aULL
+#define  AUTH_TYPE_SSL_HMAC_SHA256     0x0bULL
+#define CONTROL_STRAND                 0x000000e000000000ULL
+#define CONTROL_STRAND_SHIFT           37
+#define CONTROL_HASH_LEN               0x0000ff0000000000ULL
+#define CONTROL_HASH_LEN_SHIFT         40
+#define CONTROL_INTERRUPT              0x0001000000000000ULL
+#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
+#define CONTROL_RESERVED               0x001c000000000000ULL
+#define CONTROL_HV_DONE                        0x0004000000000000ULL
+#define CONTROL_HV_PROTOCOL_ERROR      0x0008000000000000ULL
+#define CONTROL_HV_HARDWARE_ERROR      0x0010000000000000ULL
+#define CONTROL_END_OF_BLOCK           0x0020000000000000ULL
+#define CONTROL_START_OF_BLOCK         0x0040000000000000ULL
+#define CONTROL_ENCRYPT                        0x0080000000000000ULL
+#define CONTROL_OPCODE                 0xff00000000000000ULL
+#define CONTROL_OPCODE_SHIFT           56
+#define  OPCODE_INPLACE_BIT            0x80ULL
+#define  OPCODE_SSL_KEYBLOCK           0x10ULL
+#define  OPCODE_COPY                   0x20ULL
+#define  OPCODE_ENCRYPT                        0x40ULL
+#define  OPCODE_AUTH_MAC               0x41ULL
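+
+/* Illustrative sketch only (not taken from the driver): a control word for
+ * a single-entry AES-128-CBC encrypt of len bytes could be assembled as
+ *
+ *     u64 ctl = (((u64)len - 1) << CONTROL_LEN_SHIFT) |
+ *               ((ENC_TYPE_ALG_AES128 | ENC_TYPE_CHAINING_CBC) <<
+ *                CONTROL_ENC_TYPE_SHIFT) |
+ *               ((u64)OPCODE_ENCRYPT << CONTROL_OPCODE_SHIFT) |
+ *               CONTROL_ENCRYPT | CONTROL_START_OF_BLOCK |
+ *               CONTROL_END_OF_BLOCK;
+ *
+ * The 16-bit CONTROL_LEN field, together with the driver's 64K chunk cap,
+ * suggests lengths are encoded biased by one.
+ */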
+
+#endif /* !(__ASSEMBLY__) */
+
+/* NCS v2.0 hypervisor interfaces */
+#define HV_NCS_QTYPE_MAU               0x01
+#define HV_NCS_QTYPE_CWQ               0x02
+
+/* ncs_qconf()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_NCS_QCONF
+ * ARG0:       Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * ARG1:       Real address of queue, or handle for unconfigure
+ * ARG2:       Number of entries in queue, zero for unconfigure
+ * RET0:       status
+ * RET1:       queue handle
+ *
+ * Configure a queue in the stream processing unit.
+ *
+ * The real address given as the base must be 64-byte
+ * aligned.
+ *
+ * The queue size can range from a minimum of 2 to a maximum
+ * of 64.  The queue size must be a power of two.
+ *
+ * To unconfigure a queue, specify a length of zero and place
+ * the queue handle into ARG1.
+ *
+ * On configure success the hypervisor will set the FIRST, HEAD,
+ * and TAIL registers to the address of the first entry in the
+ * queue.  The LAST register will be set to point to the last
+ * entry in the queue.
+ */
+#define HV_FAST_NCS_QCONF              0x111
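+
+/* A minimal usage sketch, mirroring spu_queue_register() and
+ * spu_queue_destroy() above: configure a 64-entry CWQ, then unconfigure
+ * it by passing the returned handle back with a zero length.
+ *
+ *     unsigned long qhandle;
+ *     unsigned long hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(q),
+ *                                            CWQ_NUM_ENTRIES, &qhandle);
+ *     ...
+ *     hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle);
+ */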
+
+/* ncs_qinfo()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_NCS_QINFO
+ * ARG0:       Queue handle
+ * RET0:       status
+ * RET1:       Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * RET2:       Queue base address
+ * RET3:       Number of entries
+ */
+#define HV_FAST_NCS_QINFO              0x112
+
+/* ncs_gethead()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_NCS_GETHEAD
+ * ARG0:       Queue handle
+ * RET0:       status
+ * RET1:       queue head offset
+ */
+#define HV_FAST_NCS_GETHEAD            0x113
+
+/* ncs_gettail()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_NCS_GETTAIL
+ * ARG0:       Queue handle
+ * RET0:       status
+ * RET1:       queue tail offset
+ */
+#define HV_FAST_NCS_GETTAIL            0x114
+
+/* ncs_settail()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_NCS_SETTAIL
+ * ARG0:       Queue handle
+ * ARG1:       New tail offset
+ * RET0:       status
+ */
+#define HV_FAST_NCS_SETTAIL            0x115
+
+/* ncs_qhandle_to_devino()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_NCS_QHANDLE_TO_DEVINO
+ * ARG0:       Queue handle
+ * RET0:       status
+ * RET1:       devino
+ */
+#define HV_FAST_NCS_QHANDLE_TO_DEVINO  0x116
+
+/* ncs_sethead_marker()
+ * TRAP:       HV_FAST_TRAP
+ * FUNCTION:   HV_FAST_NCS_SETHEAD_MARKER
+ * ARG0:       Queue handle
+ * ARG1:       New head offset
+ * RET0:       status
+ */
+#define HV_FAST_NCS_SETHEAD_MARKER     0x117
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
+                                    unsigned long queue_ra,
+                                    unsigned long num_entries,
+                                    unsigned long *qhandle);
+extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
+                                    unsigned long *queue_type,
+                                    unsigned long *queue_ra,
+                                    unsigned long *num_entries);
+extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
+                                      unsigned long *head);
+extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
+                                      unsigned long *tail);
+extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
+                                      unsigned long tail);
+extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
+                                                unsigned long *devino);
+extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
+                                             unsigned long head);
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
new file mode 100644 (file)
index 0000000..8b03433
--- /dev/null
@@ -0,0 +1,1259 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP SHA1/MD5 HW acceleration.
+ *
+ * Copyright (c) 2010 Nokia Corporation
+ * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from the old omap-sha1-md5.c driver.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+
+#include <plat/cpu.h>
+#include <plat/dma.h>
+#include <mach/irqs.h>
+
+#define SHA_REG_DIGEST(x)              (0x00 + ((x) * 0x04))
+#define SHA_REG_DIN(x)                 (0x1C + ((x) * 0x04))
+
+#define SHA1_MD5_BLOCK_SIZE            SHA1_BLOCK_SIZE
+#define MD5_DIGEST_SIZE                        16
+
+#define SHA_REG_DIGCNT                 0x14
+
+#define SHA_REG_CTRL                   0x18
+#define SHA_REG_CTRL_LENGTH            (0xFFFFFFFF << 5)
+#define SHA_REG_CTRL_CLOSE_HASH                (1 << 4)
+#define SHA_REG_CTRL_ALGO_CONST                (1 << 3)
+#define SHA_REG_CTRL_ALGO              (1 << 2)
+#define SHA_REG_CTRL_INPUT_READY       (1 << 1)
+#define SHA_REG_CTRL_OUTPUT_READY      (1 << 0)
+
+#define SHA_REG_REV                    0x5C
+#define SHA_REG_REV_MAJOR              0xF0
+#define SHA_REG_REV_MINOR              0x0F
+
+#define SHA_REG_MASK                   0x60
+#define SHA_REG_MASK_DMA_EN            (1 << 3)
+#define SHA_REG_MASK_IT_EN             (1 << 2)
+#define SHA_REG_MASK_SOFTRESET         (1 << 1)
+#define SHA_REG_AUTOIDLE               (1 << 0)
+
+#define SHA_REG_SYSSTATUS              0x64
+#define SHA_REG_SYSSTATUS_RESETDONE    (1 << 0)
+
+#define DEFAULT_TIMEOUT_INTERVAL       HZ
+
+#define FLAGS_FIRST            0x0001
+#define FLAGS_FINUP            0x0002
+#define FLAGS_FINAL            0x0004
+#define FLAGS_FAST             0x0008
+#define FLAGS_SHA1             0x0010
+#define FLAGS_DMA_ACTIVE       0x0020
+#define FLAGS_OUTPUT_READY     0x0040
+#define FLAGS_CLEAN            0x0080
+#define FLAGS_INIT             0x0100
+#define FLAGS_CPU              0x0200
+#define FLAGS_HMAC             0x0400
+
+/* 3rd byte: FLAGS_BUSY is a bit number, not a mask like the flags above */
+#define FLAGS_BUSY             16
+
+#define OP_UPDATE      1
+#define OP_FINAL       2
+
+struct omap_sham_dev;
+
+struct omap_sham_reqctx {
+       struct omap_sham_dev    *dd;
+       unsigned long           flags;
+       unsigned long           op;
+
+       size_t                  digcnt;
+       u8                      *buffer;
+       size_t                  bufcnt;
+       size_t                  buflen;
+       dma_addr_t              dma_addr;
+
+       /* walk state */
+       struct scatterlist      *sg;
+       unsigned int            offset; /* offset in current sg */
+       unsigned int            total;  /* total request */
+};
+
+struct omap_sham_hmac_ctx {
+       struct crypto_shash     *shash;
+       u8                      ipad[SHA1_MD5_BLOCK_SIZE];
+       u8                      opad[SHA1_MD5_BLOCK_SIZE];
+};
+
+struct omap_sham_ctx {
+       struct omap_sham_dev    *dd;
+
+       unsigned long           flags;
+
+       /* fallback stuff */
+       struct crypto_shash     *fallback;
+
+       struct omap_sham_hmac_ctx base[0];
+};
+
+#define OMAP_SHAM_QUEUE_LENGTH 1
+
+struct omap_sham_dev {
+       struct list_head        list;
+       unsigned long           phys_base;
+       struct device           *dev;
+       void __iomem            *io_base;
+       int                     irq;
+       struct clk              *iclk;
+       spinlock_t              lock;
+       int                     dma;
+       int                     dma_lch;
+       struct tasklet_struct   done_task;
+       struct tasklet_struct   queue_task;
+
+       unsigned long           flags;
+       struct crypto_queue     queue;
+       struct ahash_request    *req;
+};
+
+struct omap_sham_drv {
+       struct list_head        dev_list;
+       spinlock_t              lock;
+       unsigned long           flags;
+};
+
+static struct omap_sham_drv sham = {
+       .dev_list = LIST_HEAD_INIT(sham.dev_list),
+       .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
+};
+
+static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
+{
+       return __raw_readl(dd->io_base + offset);
+}
+
+static inline void omap_sham_write(struct omap_sham_dev *dd,
+                                       u32 offset, u32 value)
+{
+       __raw_writel(value, dd->io_base + offset);
+}
+
+static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
+                                       u32 value, u32 mask)
+{
+       u32 val;
+
+       val = omap_sham_read(dd, address);
+       val &= ~mask;
+       val |= value;
+       omap_sham_write(dd, address, val);
+}
+
+static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
+{
+       unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
+
+       while (!(omap_sham_read(dd, offset) & bit)) {
+               if (time_is_before_jiffies(timeout))
+                       return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static void omap_sham_copy_hash(struct ahash_request *req, int out)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       u32 *hash = (u32 *)req->result;
+       int i;
+
+       if (likely(ctx->flags & FLAGS_SHA1)) {
+               /* SHA1 results are in big endian */
+               for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+                       if (out)
+                               hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
+                                                       SHA_REG_DIGEST(i)));
+                       else
+                               omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+                                                       cpu_to_be32(hash[i]));
+       } else {
+               /* MD5 results are in little endian */
+               for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
+                       if (out)
+                               hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
+                                                       SHA_REG_DIGEST(i)));
+                       else
+                               omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+                                                       cpu_to_le32(hash[i]));
+       }
+}
+
+static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+                                int final, int dma)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+       u32 val = length << 5, mask;
+
+       if (unlikely(!ctx->digcnt)) {
+
+               clk_enable(dd->iclk);
+
+               if (!(dd->flags & FLAGS_INIT)) {
+                       omap_sham_write_mask(dd, SHA_REG_MASK,
+                               SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+
+                       if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+                                               SHA_REG_SYSSTATUS_RESETDONE))
+                               return -ETIMEDOUT;
+
+                       dd->flags |= FLAGS_INIT;
+               }
+       } else {
+               omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
+       }
+
+       omap_sham_write_mask(dd, SHA_REG_MASK,
+               SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
+               SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+       /*
+        * Setting ALGO_CONST only for the first iteration
+        * and CLOSE_HASH only for the last one.
+        */
+       if (ctx->flags & FLAGS_SHA1)
+               val |= SHA_REG_CTRL_ALGO;
+       if (!ctx->digcnt)
+               val |= SHA_REG_CTRL_ALGO_CONST;
+       if (final)
+               val |= SHA_REG_CTRL_CLOSE_HASH;
+
+       mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
+                       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
+
+       omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
+
+       return 0;
+}
+
+static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
+                             size_t length, int final)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+       int err, count, len32;
+       const u32 *buffer = (const u32 *)buf;
+
+       dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+                                               ctx->digcnt, length, final);
+
+       err = omap_sham_write_ctrl(dd, length, final, 0);
+       if (err)
+               return err;
+
+       if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
+               return -ETIMEDOUT;
+
+       ctx->digcnt += length;
+
+       if (final)
+               ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+       len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+       for (count = 0; count < len32; count++)
+               omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
+
+       return -EINPROGRESS;
+}
+
+static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
+                             size_t length, int final)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+       int err, len32;
+
+       dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
+                                               ctx->digcnt, length, final);
+
+       /* flush cache entries related to our page */
+       if (dma_addr == ctx->dma_addr)
+               dma_sync_single_for_device(dd->dev, dma_addr, length,
+                                          DMA_TO_DEVICE);
+
+       len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+       omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
+                       1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
+
+       omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
+                               dma_addr, 0, 0);
+
+       err = omap_sham_write_ctrl(dd, length, final, 1);
+       if (err)
+               return err;
+
+       ctx->digcnt += length;
+
+       if (final)
+               ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+       dd->flags |= FLAGS_DMA_ACTIVE;
+
+       omap_start_dma(dd->dma_lch);
+
+       return -EINPROGRESS;
+}
+
+static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
+                               const u8 *data, size_t length)
+{
+       size_t count = min(length, ctx->buflen - ctx->bufcnt);
+
+       count = min(count, ctx->total);
+       if (!count)
+               return 0;
+       memcpy(ctx->buffer + ctx->bufcnt, data, count);
+       ctx->bufcnt += count;
+
+       return count;
+}
+
+static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
+{
+       size_t count;
+
+       while (ctx->sg) {
+               count = omap_sham_append_buffer(ctx,
+                               sg_virt(ctx->sg) + ctx->offset,
+                               ctx->sg->length - ctx->offset);
+               if (!count)
+                       break;
+               ctx->offset += count;
+               ctx->total -= count;
+               if (ctx->offset == ctx->sg->length) {
+                       ctx->sg = sg_next(ctx->sg);
+                       if (ctx->sg)
+                               ctx->offset = 0;
+                       else
+                               ctx->total = 0;
+               }
+       }
+
+       return 0;
+}
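
omap_sham_append_sg() drains the request's scatterlist into the staging buffer, advancing the (sg, offset, total) cursor and stopping when the buffer fills; whatever is left stays in the list for the next pass. The same bookkeeping in a self-contained sketch, with struct seg as a toy stand-in for a scatterlist entry:

#include <stdio.h>
#include <string.h>

struct seg { const unsigned char *data; size_t length; };

struct ctx {
	unsigned char buffer[8];	/* staging buffer; PAGE_SIZE in the driver */
	size_t bufcnt;			/* bytes currently buffered */
	size_t total;			/* bytes left in the request */
	size_t offset;			/* offset into the current segment */
	const struct seg *sg;		/* segment array ("scatterlist") */
	size_t nsegs, cur;
};

/* copy as much of one segment as buffer space and total allow */
static size_t append(struct ctx *c, const unsigned char *d, size_t len)
{
	size_t room = sizeof(c->buffer) - c->bufcnt;
	size_t count = len < room ? len : room;

	if (count > c->total)
		count = c->total;
	memcpy(c->buffer + c->bufcnt, d, count);
	c->bufcnt += count;
	return count;
}

static void append_sg(struct ctx *c)
{
	while (c->cur < c->nsegs) {
		const struct seg *s = &c->sg[c->cur];
		size_t n = append(c, s->data + c->offset, s->length - c->offset);

		if (!n)
			break;			/* staging buffer is full */
		c->offset += n;
		c->total -= n;
		if (c->offset == s->length) {	/* segment consumed: sg_next() */
			c->cur++;
			c->offset = 0;
		}
	}
}

int main(void)
{
	const struct seg segs[] = {
		{ (const unsigned char *)"hello", 5 },
		{ (const unsigned char *)"world!", 6 },
	};
	struct ctx c = { .total = 11, .sg = segs, .nsegs = 2 };

	append_sg(&c);	/* fills all 8 buffer bytes, 3 bytes remain queued */
	printf("buffered %zu bytes: %.8s\n", c.bufcnt, (const char *)c.buffer);
	return 0;
}
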
+
+static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+       unsigned int final;
+       size_t count;
+
+       if (!ctx->total)
+               return 0;
+
+       omap_sham_append_sg(ctx);
+
+       final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+       dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
+                                        ctx->bufcnt, ctx->digcnt, final);
+
+       if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+               count = ctx->bufcnt;
+               ctx->bufcnt = 0;
+               return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+       }
+
+       return 0;
+}
+
+static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+       unsigned int length;
+
+       ctx->flags |= FLAGS_FAST;
+
+       length = min(ctx->total, sg_dma_len(ctx->sg));
+       ctx->total = length;
+
+       if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+               dev_err(dd->dev, "dma_map_sg error\n");
+               return -EINVAL;
+       }
+
+       ctx->total -= length;
+
+       return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+}
+
+static int omap_sham_update_cpu(struct omap_sham_dev *dd)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+       int bufcnt;
+
+       omap_sham_append_sg(ctx);
+       bufcnt = ctx->bufcnt;
+       ctx->bufcnt = 0;
+
+       return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+}
+
+static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+       omap_stop_dma(dd->dma_lch);
+       if (ctx->flags & FLAGS_FAST)
+               dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+
+       return 0;
+}
+
+static void omap_sham_cleanup(struct ahash_request *req)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       struct omap_sham_dev *dd = ctx->dd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dd->lock, flags);
+       if (ctx->flags & FLAGS_CLEAN) {
+               spin_unlock_irqrestore(&dd->lock, flags);
+               return;
+       }
+       ctx->flags |= FLAGS_CLEAN;
+       spin_unlock_irqrestore(&dd->lock, flags);
+
+       if (ctx->digcnt)
+               clk_disable(dd->iclk);
+
+       if (ctx->dma_addr)
+               dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+                                DMA_TO_DEVICE);
+
+       if (ctx->buffer)
+               free_page((unsigned long)ctx->buffer);
+
+       dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+}
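
omap_sham_cleanup() can be reached from more than one path, so it uses FLAGS_CLEAN under dd->lock as a one-shot guard: only the first caller unmaps the DMA buffer and frees the page. The idiom, reduced to a pthreads sketch:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool clean;	/* plays the role of FLAGS_CLEAN */

/* Exactly one caller sees "true" and does the actual freeing. */
static bool claim_cleanup(void)
{
	bool first;

	pthread_mutex_lock(&lock);
	first = !clean;
	clean = true;
	pthread_mutex_unlock(&lock);
	return first;
}
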
+
+static int omap_sham_init(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       struct omap_sham_dev *dd = NULL, *tmp;
+
+       spin_lock_bh(&sham.lock);
+       if (!tctx->dd) {
+               list_for_each_entry(tmp, &sham.dev_list, list) {
+                       dd = tmp;
+                       break;
+               }
+               tctx->dd = dd;
+       } else {
+               dd = tctx->dd;
+       }
+       spin_unlock_bh(&sham.lock);
+
+       ctx->dd = dd;
+
+       ctx->flags = FLAGS_FIRST;
+
+       dev_dbg(dd->dev, "init: digest size: %d\n",
+               crypto_ahash_digestsize(tfm));
+
+       if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+               ctx->flags |= FLAGS_SHA1;
+
+       ctx->bufcnt = 0;
+       ctx->digcnt = 0;
+
+       ctx->buflen = PAGE_SIZE;
+       ctx->buffer = (void *)__get_free_page(
+                               (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+                               GFP_KERNEL : GFP_ATOMIC);
+       if (!ctx->buffer)
+               return -ENOMEM;
+
+       ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+                                       DMA_TO_DEVICE);
+       if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+               dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+               free_page((unsigned long)ctx->buffer);
+               return -EINVAL;
+       }
+
+       if (tctx->flags & FLAGS_HMAC) {
+               struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+               memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
+               ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
+               ctx->flags |= FLAGS_HMAC;
+       }
+
+       return 0;
+}
+
+static int omap_sham_update_req(struct omap_sham_dev *dd)
+{
+       struct ahash_request *req = dd->req;
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       int err;
+
+       dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
+                ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+
+       if (ctx->flags & FLAGS_CPU)
+               err = omap_sham_update_cpu(dd);
+       else if (ctx->flags & FLAGS_FAST)
+               err = omap_sham_update_dma_fast(dd);
+       else
+               err = omap_sham_update_dma_slow(dd);
+
+       /* wait for dma completion before we can take more data */
+       dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
+
+       return err;
+}
+
+static int omap_sham_final_req(struct omap_sham_dev *dd)
+{
+       struct ahash_request *req = dd->req;
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       int err = 0, use_dma = 1;
+
+       if (ctx->bufcnt <= 64)
+               /* faster to handle last block with cpu */
+               use_dma = 0;
+
+       if (use_dma)
+               err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+       else
+               err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
+
+       ctx->bufcnt = 0;
+
+       if (err != -EINPROGRESS)
+               omap_sham_cleanup(req);
+
+       dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+       return err;
+}
+
+static int omap_sham_finish_req_hmac(struct ahash_request *req)
+{
+       struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+       struct omap_sham_hmac_ctx *bctx = tctx->base;
+       int bs = crypto_shash_blocksize(bctx->shash);
+       int ds = crypto_shash_digestsize(bctx->shash);
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(bctx->shash)];
+       } desc;
+
+       desc.shash.tfm = bctx->shash;
+       desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
+
+       return crypto_shash_init(&desc.shash) ?:
+              crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
+              crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+}
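
This is the outer half of HMAC = H(K^opad || H(K^ipad || msg)): the hardware has already left the inner digest in req->result, and the synchronous shash runs init/update(opad)/finup over it in place. A user-space sketch of that data flow; toy_hash() is a deliberately fake digest standing in for MD5/SHA-1 so the sketch stays self-contained:

#include <stdint.h>
#include <string.h>

#define BS 64	/* block size */
#define DS 20	/* digest size (SHA-1 sized, for illustration) */

/* NOT a real hash; only the data flow around it matters here. */
static void toy_hash(const uint8_t *data, size_t len, uint8_t out[DS])
{
	memset(out, 0, DS);
	for (size_t i = 0; i < len; i++)
		out[i % DS] ^= data[i];
}

/* On entry result holds the inner digest H(K^ipad || msg);
 * on return it holds the final MAC H(K^opad || inner). */
static void hmac_outer_pass(const uint8_t opad[BS], uint8_t result[DS])
{
	uint8_t block[BS + DS];

	memcpy(block, opad, BS);		/* update(opad, bs) */
	memcpy(block + BS, result, DS);		/* finup(result, ds) */
	toy_hash(block, BS + DS, result);
}
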
+
+static void omap_sham_finish_req(struct ahash_request *req, int err)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+       if (!err) {
+               omap_sham_copy_hash(ctx->dd->req, 1);
+               if (ctx->flags & FLAGS_HMAC)
+                       err = omap_sham_finish_req_hmac(req);
+       }
+
+       if (ctx->flags & FLAGS_FINAL)
+               omap_sham_cleanup(req);
+
+       clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+
+       if (req->base.complete)
+               req->base.complete(&req->base, err);
+}
+
+static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+{
+       struct crypto_async_request *async_req, *backlog;
+       struct omap_sham_reqctx *ctx;
+       struct ahash_request *req, *prev_req;
+       unsigned long flags;
+       int err = 0;
+
+       if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
+               return 0;
+
+       spin_lock_irqsave(&dd->lock, flags);
+       backlog = crypto_get_backlog(&dd->queue);
+       async_req = crypto_dequeue_request(&dd->queue);
+       if (!async_req)
+               clear_bit(FLAGS_BUSY, &dd->flags);
+       spin_unlock_irqrestore(&dd->lock, flags);
+
+       if (!async_req)
+               return 0;
+
+       if (backlog)
+               backlog->complete(backlog, -EINPROGRESS);
+
+       req = ahash_request_cast(async_req);
+
+       prev_req = dd->req;
+       dd->req = req;
+
+       ctx = ahash_request_ctx(req);
+
+       dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+                                               ctx->op, req->nbytes);
+
+       if (req != prev_req && ctx->digcnt)
+               /* request has changed - restore hash */
+               omap_sham_copy_hash(req, 0);
+
+       if (ctx->op == OP_UPDATE) {
+               err = omap_sham_update_req(dd);
+               if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+                       /* no final() after finup() */
+                       err = omap_sham_final_req(dd);
+       } else if (ctx->op == OP_FINAL) {
+               err = omap_sham_final_req(dd);
+       }
+
+       if (err != -EINPROGRESS) {
+               /* done_task will not finish it, so do it here */
+               omap_sham_finish_req(req, err);
+               tasklet_schedule(&dd->queue_task);
+       }
+
+       dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+       return err;
+}
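
omap_sham_handle_queue() is the usual crypto-queue driver loop: claim FLAGS_BUSY, dequeue one request under the lock (releasing the busy bit again if the queue turned out to be empty), and complete any backlogged request with -EINPROGRESS so its submitter may continue. The busy-bit/dequeue step, sketched with a mutex in place of the spinlock and atomic bit ops (backlog notification omitted):

#include <pthread.h>
#include <stddef.h>

struct req { struct req *next; };

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static struct req *head;	/* singly linked FIFO of pending requests */
static int busy;		/* plays the role of FLAGS_BUSY */

/* Returns the request to run, or NULL if the engine is already
 * busy or nothing is queued. */
static struct req *grab_next(void)
{
	struct req *r = NULL;

	pthread_mutex_lock(&qlock);
	if (!busy) {
		busy = 1;		/* test_and_set_bit(FLAGS_BUSY, ...) */
		r = head;
		if (r)
			head = r->next;
		else
			busy = 0;	/* queue empty: release the engine */
	}
	pthread_mutex_unlock(&qlock);
	return r;
}
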
+
+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+       struct omap_sham_dev *dd = tctx->dd;
+       unsigned long flags;
+       int err;
+
+       ctx->op = op;
+
+       spin_lock_irqsave(&dd->lock, flags);
+       err = ahash_enqueue_request(&dd->queue, req);
+       spin_unlock_irqrestore(&dd->lock, flags);
+
+       omap_sham_handle_queue(dd);
+
+       return err;
+}
+
+static int omap_sham_update(struct ahash_request *req)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+       if (!req->nbytes)
+               return 0;
+
+       ctx->total = req->nbytes;
+       ctx->sg = req->src;
+       ctx->offset = 0;
+
+       if (ctx->flags & FLAGS_FINUP) {
+               if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
+                       /*
+                        * OMAP HW accel works only with buffers >= 9;
+                        * smaller requests are bypassed to the software
+                        * fallback in final(), which sees the same
+                        * request and data.
+                        */
+                       omap_sham_append_sg(ctx);
+                       return 0;
+               } else if (ctx->bufcnt + ctx->total <= 64) {
+                       ctx->flags |= FLAGS_CPU;
+               } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
+                       /* maybe we can use faster functions */
+                       int aligned = IS_ALIGNED((u32)ctx->sg->offset,
+                                                               sizeof(u32));
+
+                       if (aligned && (ctx->flags & FLAGS_FIRST))
+                               /* digest: first and final */
+                               ctx->flags |= FLAGS_FAST;
+
+                       ctx->flags &= ~FLAGS_FIRST;
+               }
+       } else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
+               /* if not finup -> not fast */
+               omap_sham_append_sg(ctx);
+               return 0;
+       }
+
+       return omap_sham_enqueue(req, OP_UPDATE);
+}
+
+static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
+                                 const u8 *data, unsigned int len, u8 *out)
+{
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(shash)];
+       } desc;
+
+       desc.shash.tfm = shash;
+       desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       return crypto_shash_digest(&desc.shash, data, len, out);
+}
+
+static int omap_sham_final_shash(struct ahash_request *req)
+{
+       struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+       return omap_sham_shash_digest(tctx->fallback, req->base.flags,
+                                     ctx->buffer, ctx->bufcnt, req->result);
+}
+
+static int omap_sham_final(struct ahash_request *req)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       int err = 0;
+
+       ctx->flags |= FLAGS_FINUP;
+
+       /* OMAP HW accel works only with buffers >= 9 */
+       /* HMAC is always >= 9 because of ipad */
+       if ((ctx->digcnt + ctx->bufcnt) < 9)
+               err = omap_sham_final_shash(req);
+       else if (ctx->bufcnt)
+               return omap_sham_enqueue(req, OP_FINAL);
+
+       omap_sham_cleanup(req);
+
+       return err;
+}
+
+static int omap_sham_finup(struct ahash_request *req)
+{
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       int err1, err2;
+
+       ctx->flags |= FLAGS_FINUP;
+
+       err1 = omap_sham_update(req);
+       if (err1 == -EINPROGRESS)
+               return err1;
+       /*
+        * final() always has to be called to clean up resources,
+        * even if update() failed, except for -EINPROGRESS
+        */
+       err2 = omap_sham_final(req);
+
+       return err1 ?: err2;
+}
+
+static int omap_sham_digest(struct ahash_request *req)
+{
+       return omap_sham_init(req) ?: omap_sham_finup(req);
+}
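
Both finup() and digest() use the GNU C "a ?: b" extension, which yields a if a is nonzero and b otherwise, evaluating a only once; here it propagates the first nonzero error code. Spelled out portably:

/* what "err1 ?: err2" means without the GNU extension */
static int first_error(int err1, int err2)
{
	return err1 ? err1 : err2;
}
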
+
+static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+                     unsigned int keylen)
+{
+       struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+       struct omap_sham_hmac_ctx *bctx = tctx->base;
+       int bs = crypto_shash_blocksize(bctx->shash);
+       int ds = crypto_shash_digestsize(bctx->shash);
+       int err, i;
+
+       err = crypto_shash_setkey(tctx->fallback, key, keylen);
+       if (err)
+               return err;
+
+       if (keylen > bs) {
+               err = omap_sham_shash_digest(bctx->shash,
+                               crypto_shash_get_flags(bctx->shash),
+                               key, keylen, bctx->ipad);
+               if (err)
+                       return err;
+               keylen = ds;
+       } else {
+               memcpy(bctx->ipad, key, keylen);
+       }
+
+       memset(bctx->ipad + keylen, 0, bs - keylen);
+       memcpy(bctx->opad, bctx->ipad, bs);
+
+       for (i = 0; i < bs; i++) {
+               bctx->ipad[i] ^= 0x36;
+               bctx->opad[i] ^= 0x5c;
+       }
+
+       return err;
+}
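
setkey() precomputes the two HMAC pads once per key, per RFC 2104: a key longer than a block is first hashed down to the digest size, then zero-padded to the block size and XORed with the 0x36/0x5c constants. A sketch of the padding step, assuming the key has already been reduced to at most one block:

#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 64	/* MD5 and SHA-1 block size */

static void hmac_make_pads(const uint8_t *key, size_t keylen,
			   uint8_t ipad[BLOCK_SIZE], uint8_t opad[BLOCK_SIZE])
{
	/* zero-pad the (already <= BLOCK_SIZE) key */
	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, BLOCK_SIZE - keylen);
	memcpy(opad, ipad, BLOCK_SIZE);

	for (int i = 0; i < BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;	/* inner pad constant */
		opad[i] ^= 0x5c;	/* outer pad constant */
	}
}
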
+
+static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+       struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+       const char *alg_name = crypto_tfm_alg_name(tfm);
+
+       /* Allocate a fallback and abort if it failed. */
+       tctx->fallback = crypto_alloc_shash(alg_name, 0,
+                                           CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(tctx->fallback)) {
+               pr_err("omap-sham: fallback driver '%s' "
+                               "could not be loaded.\n", alg_name);
+               return PTR_ERR(tctx->fallback);
+       }
+
+       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+                                sizeof(struct omap_sham_reqctx));
+
+       if (alg_base) {
+               struct omap_sham_hmac_ctx *bctx = tctx->base;
+               tctx->flags |= FLAGS_HMAC;
+               bctx->shash = crypto_alloc_shash(alg_base, 0,
+                                               CRYPTO_ALG_NEED_FALLBACK);
+               if (IS_ERR(bctx->shash)) {
+                       pr_err("omap-sham: base driver '%s' "
+                                       "could not be loaded.\n", alg_base);
+                       crypto_free_shash(tctx->fallback);
+                       return PTR_ERR(bctx->shash);
+               }
+       }
+
+       return 0;
+}
+
+static int omap_sham_cra_init(struct crypto_tfm *tfm)
+{
+       return omap_sham_cra_init_alg(tfm, NULL);
+}
+
+static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
+{
+       return omap_sham_cra_init_alg(tfm, "sha1");
+}
+
+static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
+{
+       return omap_sham_cra_init_alg(tfm, "md5");
+}
+
+static void omap_sham_cra_exit(struct crypto_tfm *tfm)
+{
+       struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_shash(tctx->fallback);
+       tctx->fallback = NULL;
+
+       if (tctx->flags & FLAGS_HMAC) {
+               struct omap_sham_hmac_ctx *bctx = tctx->base;
+               crypto_free_shash(bctx->shash);
+       }
+}
+
+static struct ahash_alg algs[] = {
+{
+       .init           = omap_sham_init,
+       .update         = omap_sham_update,
+       .final          = omap_sham_final,
+       .finup          = omap_sham_finup,
+       .digest         = omap_sham_digest,
+       .halg.digestsize        = SHA1_DIGEST_SIZE,
+       .halg.base      = {
+               .cra_name               = "sha1",
+               .cra_driver_name        = "omap-sha1",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = SHA1_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct omap_sham_ctx),
+               .cra_alignmask          = 0,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = omap_sham_cra_init,
+               .cra_exit               = omap_sham_cra_exit,
+       }
+},
+{
+       .init           = omap_sham_init,
+       .update         = omap_sham_update,
+       .final          = omap_sham_final,
+       .finup          = omap_sham_finup,
+       .digest         = omap_sham_digest,
+       .halg.digestsize        = MD5_DIGEST_SIZE,
+       .halg.base      = {
+               .cra_name               = "md5",
+               .cra_driver_name        = "omap-md5",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = SHA1_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct omap_sham_ctx),
+               .cra_alignmask          = 0,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = omap_sham_cra_init,
+               .cra_exit               = omap_sham_cra_exit,
+       }
+},
+{
+       .init           = omap_sham_init,
+       .update         = omap_sham_update,
+       .final          = omap_sham_final,
+       .finup          = omap_sham_finup,
+       .digest         = omap_sham_digest,
+       .setkey         = omap_sham_setkey,
+       .halg.digestsize        = SHA1_DIGEST_SIZE,
+       .halg.base      = {
+               .cra_name               = "hmac(sha1)",
+               .cra_driver_name        = "omap-hmac-sha1",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = SHA1_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
+                                       sizeof(struct omap_sham_hmac_ctx),
+               .cra_alignmask          = 0,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = omap_sham_cra_sha1_init,
+               .cra_exit               = omap_sham_cra_exit,
+       }
+},
+{
+       .init           = omap_sham_init,
+       .update         = omap_sham_update,
+       .final          = omap_sham_final,
+       .finup          = omap_sham_finup,
+       .digest         = omap_sham_digest,
+       .setkey         = omap_sham_setkey,
+       .halg.digestsize        = MD5_DIGEST_SIZE,
+       .halg.base      = {
+               .cra_name               = "hmac(md5)",
+               .cra_driver_name        = "omap-hmac-md5",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = SHA1_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
+                                       sizeof(struct omap_sham_hmac_ctx),
+               .cra_alignmask          = 0,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = omap_sham_cra_md5_init,
+               .cra_exit               = omap_sham_cra_exit,
+       }
+}
+};
+
+static void omap_sham_done_task(unsigned long data)
+{
+       struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+       struct ahash_request *req = dd->req;
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       int ready = 0;
+
+       if (ctx->flags & FLAGS_OUTPUT_READY) {
+               ctx->flags &= ~FLAGS_OUTPUT_READY;
+               ready = 1;
+       }
+
+       if (dd->flags & FLAGS_DMA_ACTIVE) {
+               dd->flags &= ~FLAGS_DMA_ACTIVE;
+               omap_sham_update_dma_stop(dd);
+               omap_sham_update_dma_slow(dd);
+       }
+
+       if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
+               dev_dbg(dd->dev, "update done\n");
+               /* finish current request */
+               omap_sham_finish_req(req, 0);
+               /* start new request */
+               omap_sham_handle_queue(dd);
+       }
+}
+
+static void omap_sham_queue_task(unsigned long data)
+{
+       struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+
+       omap_sham_handle_queue(dd);
+}
+
+static irqreturn_t omap_sham_irq(int irq, void *dev_id)
+{
+       struct omap_sham_dev *dd = dev_id;
+       struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+       if (!ctx) {
+               dev_err(dd->dev, "unknown interrupt.\n");
+               return IRQ_HANDLED;
+       }
+
+       if (unlikely(ctx->flags & FLAGS_FINAL))
+               /* final -> allow device to go to power-saving mode */
+               omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
+
+       omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
+                                SHA_REG_CTRL_OUTPUT_READY);
+       omap_sham_read(dd, SHA_REG_CTRL);
+
+       ctx->flags |= FLAGS_OUTPUT_READY;
+       tasklet_schedule(&dd->done_task);
+
+       return IRQ_HANDLED;
+}
+
+static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
+{
+       struct omap_sham_dev *dd = data;
+
+       if (likely(lch == dd->dma_lch))
+               tasklet_schedule(&dd->done_task);
+}
+
+static int omap_sham_dma_init(struct omap_sham_dev *dd)
+{
+       int err;
+
+       dd->dma_lch = -1;
+
+       err = omap_request_dma(dd->dma, dev_name(dd->dev),
+                       omap_sham_dma_callback, dd, &dd->dma_lch);
+       if (err) {
+               dev_err(dd->dev, "Unable to request DMA channel\n");
+               return err;
+       }
+       omap_set_dma_dest_params(dd->dma_lch, 0,
+                       OMAP_DMA_AMODE_CONSTANT,
+                       dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+       omap_set_dma_dest_burst_mode(dd->dma_lch,
+                       OMAP_DMA_DATA_BURST_16);
+
+       return 0;
+}
+
+static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
+{
+       if (dd->dma_lch >= 0) {
+               omap_free_dma(dd->dma_lch);
+               dd->dma_lch = -1;
+       }
+}
+
+static int __devinit omap_sham_probe(struct platform_device *pdev)
+{
+       struct omap_sham_dev *dd;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int err, i, j;
+
+       dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
+       if (dd == NULL) {
+               dev_err(dev, "unable to alloc data struct.\n");
+               err = -ENOMEM;
+               goto data_err;
+       }
+       dd->dev = dev;
+       platform_set_drvdata(pdev, dd);
+
+       INIT_LIST_HEAD(&dd->list);
+       spin_lock_init(&dd->lock);
+       tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+       tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
+       crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
+
+       dd->irq = -1;
+
+       /* Get the base address */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "no MEM resource info\n");
+               err = -ENODEV;
+               goto res_err;
+       }
+       dd->phys_base = res->start;
+
+       /* Get the DMA */
+       res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+       if (!res) {
+               dev_err(dev, "no DMA resource info\n");
+               err = -ENODEV;
+               goto res_err;
+       }
+       dd->dma = res->start;
+
+       /* Get the IRQ */
+       dd->irq = platform_get_irq(pdev, 0);
+       if (dd->irq < 0) {
+               dev_err(dev, "no IRQ resource info\n");
+               err = dd->irq;
+               goto res_err;
+       }
+
+       err = request_irq(dd->irq, omap_sham_irq,
+                       IRQF_TRIGGER_LOW, dev_name(dev), dd);
+       if (err) {
+               dev_err(dev, "unable to request irq.\n");
+               goto res_err;
+       }
+
+       err = omap_sham_dma_init(dd);
+       if (err)
+               goto dma_err;
+
+       /* Initializing the clock */
+       dd->iclk = clk_get(dev, "ick");
+       if (IS_ERR(dd->iclk)) {
+               dev_err(dev, "clock initialization failed.\n");
+               err = PTR_ERR(dd->iclk);
+               goto clk_err;
+       }
+
+       dd->io_base = ioremap(dd->phys_base, SZ_4K);
+       if (!dd->io_base) {
+               dev_err(dev, "can't ioremap\n");
+               err = -ENOMEM;
+               goto io_err;
+       }
+
+       clk_enable(dd->iclk);
+       dev_info(dev, "hw accel on OMAP rev %u.%u\n",
+               (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
+               omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
+       clk_disable(dd->iclk);
+
+       spin_lock(&sham.lock);
+       list_add_tail(&dd->list, &sham.dev_list);
+       spin_unlock(&sham.lock);
+
+       for (i = 0; i < ARRAY_SIZE(algs); i++) {
+               err = crypto_register_ahash(&algs[i]);
+               if (err)
+                       goto err_algs;
+       }
+
+       return 0;
+
+err_algs:
+       for (j = 0; j < i; j++)
+               crypto_unregister_ahash(&algs[j]);
+       iounmap(dd->io_base);
+io_err:
+       clk_put(dd->iclk);
+clk_err:
+       omap_sham_dma_cleanup(dd);
+dma_err:
+       if (dd->irq >= 0)
+               free_irq(dd->irq, dd);
+res_err:
+       kfree(dd);
+       dd = NULL;
+data_err:
+       dev_err(dev, "initialization failed.\n");
+
+       return err;
+}
+
+static int __devexit omap_sham_remove(struct platform_device *pdev)
+{
+       struct omap_sham_dev *dd;
+       int i;
+
+       dd = platform_get_drvdata(pdev);
+       if (!dd)
+               return -ENODEV;
+       spin_lock(&sham.lock);
+       list_del(&dd->list);
+       spin_unlock(&sham.lock);
+       for (i = 0; i < ARRAY_SIZE(algs); i++)
+               crypto_unregister_ahash(&algs[i]);
+       tasklet_kill(&dd->done_task);
+       tasklet_kill(&dd->queue_task);
+       iounmap(dd->io_base);
+       clk_put(dd->iclk);
+       omap_sham_dma_cleanup(dd);
+       if (dd->irq >= 0)
+               free_irq(dd->irq, dd);
+       kfree(dd);
+       dd = NULL;
+
+       return 0;
+}
+
+static struct platform_driver omap_sham_driver = {
+       .probe  = omap_sham_probe,
+       .remove = omap_sham_remove,
+       .driver = {
+               .name   = "omap-sham",
+               .owner  = THIS_MODULE,
+       },
+};
+
+static int __init omap_sham_mod_init(void)
+{
+       pr_info("loading %s driver\n", "omap-sham");
+
+       if (!cpu_class_is_omap2() ||
+               omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+               pr_err("Unsupported cpu\n");
+               return -ENODEV;
+       }
+
+       return platform_driver_register(&omap_sham_driver);
+}
+
+static void __exit omap_sham_mod_exit(void)
+{
+       platform_driver_unregister(&omap_sham_driver);
+}
+
+module_init(omap_sham_mod_init);
+module_exit(omap_sham_mod_exit);
+
+MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dmitry Kasatkin");
index dc558a0..6a0f59d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * talitos - Freescale Integrated Security Engine (SEC) device driver
  *
- * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
  *
  * Scatterlist Crypto API glue code copied from files with the following:
  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 #include <crypto/aes.h>
 #include <crypto/des.h>
 #include <crypto/sha.h>
+#include <crypto/md5.h>
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
 #include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
 
 #include "talitos.h"
@@ -65,6 +68,13 @@ struct talitos_ptr {
        __be32 ptr;     /* address */
 };
 
+static const struct talitos_ptr zero_entry = {
+       .len = 0,
+       .j_extent = 0,
+       .eptr = 0,
+       .ptr = 0
+};
+
 /* descriptor */
 struct talitos_desc {
        __be32 hdr;                     /* header high bits */
@@ -146,6 +156,7 @@ struct talitos_private {
 /* .features flag */
 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+#define TALITOS_FTR_SHA224_HWINIT 0x00000004
 
 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
 {
@@ -692,7 +703,7 @@ static void talitos_unregister_rng(struct device *dev)
 #define TALITOS_MAX_KEY_SIZE           64
 #define TALITOS_MAX_IV_LENGTH          16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 
-#define MD5_DIGEST_SIZE   16
+#define MD5_BLOCK_SIZE    64
 
 struct talitos_ctx {
        struct device *dev;
@@ -705,6 +716,23 @@ struct talitos_ctx {
        unsigned int authsize;
 };
 
+#define HASH_MAX_BLOCK_SIZE            SHA512_BLOCK_SIZE
+#define TALITOS_MDEU_MAX_CONTEXT_SIZE  TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
+
+struct talitos_ahash_req_ctx {
+       u64 count;
+       u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+       unsigned int hw_context_size;
+       u8 buf[HASH_MAX_BLOCK_SIZE];
+       u8 bufnext[HASH_MAX_BLOCK_SIZE];
+       unsigned int swinit;
+       unsigned int first;
+       unsigned int last;
+       unsigned int to_hash_later;
+       struct scatterlist bufsl[2];
+       struct scatterlist *psrc;
+};
+
 static int aead_setauthsize(struct crypto_aead *authenc,
                            unsigned int authsize)
 {
@@ -821,10 +849,14 @@ static void talitos_sg_unmap(struct device *dev,
                else
                        dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 
-               if (edesc->dst_is_chained)
-                       talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
-               else
-                       dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+               if (dst) {
+                       if (edesc->dst_is_chained)
+                               talitos_unmap_sg_chain(dev, dst,
+                                                      DMA_FROM_DEVICE);
+                       else
+                               dma_unmap_sg(dev, dst, dst_nents,
+                                            DMA_FROM_DEVICE);
+               }
        } else
                if (edesc->src_is_chained)
                        talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
@@ -1114,12 +1146,67 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
        return sg_nents;
 }
 
+/**
+ * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
+ * @sgl:                The SG list
+ * @nents:              Number of SG entries
+ * @buf:                Where to copy to
+ * @buflen:             The number of bytes to copy
+ * @skip:               The number of bytes to skip before copying.
+ *                       Note: skip + buflen should equal SG total size.
+ *
+ * Returns the number of copied bytes.
+ */
+static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
+                                   void *buf, size_t buflen, unsigned int skip)
+{
+       unsigned int offset = 0;
+       unsigned int boffset = 0;
+       struct sg_mapping_iter miter;
+       unsigned long flags;
+       unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
+       size_t total_buffer = buflen + skip;
+
+       sg_miter_start(&miter, sgl, nents, sg_flags);
+
+       local_irq_save(flags);
+
+       while (sg_miter_next(&miter) && offset < total_buffer) {
+               unsigned int len;
+               unsigned int ignore;
+
+               if ((offset + miter.length) > skip) {
+                       if (offset < skip) {
+                               /* Copy part of this segment */
+                               ignore = skip - offset;
+                               len = miter.length - ignore;
+                               memcpy(buf + boffset, miter.addr + ignore, len);
+                       } else {
+                               /* Copy all of this segment */
+                               len = miter.length;
+                               memcpy(buf + boffset, miter.addr, len);
+                       }
+                       boffset += len;
+               }
+               offset += miter.length;
+       }
+
+       sg_miter_stop(&miter);
+
+       local_irq_restore(flags);
+       return boffset;
+}
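
A flat-buffer analogue makes the skip arithmetic easier to see: for a list of total size skip + buflen, exactly the final buflen bytes land in buf. The helper is used below by ahash_process_req() to stash a trailing partial block for the next update():

#include <assert.h>
#include <string.h>

/* Flat-memory analogue of sg_copy_end_to_buffer() above. */
static size_t copy_end(const unsigned char *src, size_t srclen,
		       unsigned char *buf, size_t buflen, size_t skip)
{
	assert(skip + buflen == srclen);	/* the kernel-doc precondition */
	memcpy(buf, src + skip, buflen);
	return buflen;
}
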
+
 /*
  * allocate and map the extended descriptor
  */
 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
                                                 struct scatterlist *src,
                                                 struct scatterlist *dst,
+                                                int hash_result,
                                                 unsigned int cryptlen,
                                                 unsigned int authsize,
                                                 int icv_stashing,
@@ -1139,11 +1226,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
        src_nents = sg_count(src, cryptlen + authsize, &src_chained);
        src_nents = (src_nents == 1) ? 0 : src_nents;
 
-       if (dst == src) {
-               dst_nents = src_nents;
+       if (hash_result) {
+               dst_nents = 0;
        } else {
-               dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
-               dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+               if (dst == src) {
+                       dst_nents = src_nents;
+               } else {
+                       dst_nents = sg_count(dst, cryptlen + authsize,
+                                            &dst_chained);
+                       dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+               }
        }
 
        /*
@@ -1172,8 +1264,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
        edesc->src_is_chained = src_chained;
        edesc->dst_is_chained = dst_chained;
        edesc->dma_len = dma_len;
-       edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
-                                            edesc->dma_len, DMA_BIDIRECTIONAL);
+       if (dma_len)
+               edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+                                                    edesc->dma_len,
+                                                    DMA_BIDIRECTIONAL);
 
        return edesc;
 }
@@ -1184,7 +1278,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 
-       return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+       return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
                                   areq->cryptlen, ctx->authsize, icv_stashing,
                                   areq->base.flags);
 }
@@ -1441,8 +1535,8 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 
-       return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
-                                  0, 0, areq->base.flags);
+       return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
+                                  areq->nbytes, 0, 0, areq->base.flags);
 }
 
 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,15 +1572,329 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
        return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
 }
 
+static void common_nonsnoop_hash_unmap(struct device *dev,
+                                      struct talitos_edesc *edesc,
+                                      struct ahash_request *areq)
+{
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+       unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+
+       /* When using hashctx-in, must unmap it. */
+       if (edesc->desc.ptr[1].len)
+               unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+                                        DMA_TO_DEVICE);
+
+       if (edesc->desc.ptr[2].len)
+               unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
+                                        DMA_TO_DEVICE);
+
+       talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
+
+       if (edesc->dma_len)
+               dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+                                DMA_BIDIRECTIONAL);
+}
+
+static void ahash_done(struct device *dev,
+                      struct talitos_desc *desc, void *context,
+                      int err)
+{
+       struct ahash_request *areq = context;
+       struct talitos_edesc *edesc =
+                container_of(desc, struct talitos_edesc, desc);
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+       if (!req_ctx->last && req_ctx->to_hash_later) {
+               /* Position any partial block for next update/final/finup */
+               memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+       }
+       common_nonsnoop_hash_unmap(dev, edesc, areq);
+
+       kfree(edesc);
+
+       areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+                               struct ahash_request *areq, unsigned int length,
+                               void (*callback) (struct device *dev,
+                                                 struct talitos_desc *desc,
+                                                 void *context, int error))
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+       struct device *dev = ctx->dev;
+       struct talitos_desc *desc = &edesc->desc;
+       int sg_count, ret;
+
+       /* first DWORD empty */
+       desc->ptr[0] = zero_entry;
+
+       /* hash context in */
+       if (!req_ctx->first || req_ctx->swinit) {
+               map_single_talitos_ptr(dev, &desc->ptr[1],
+                                      req_ctx->hw_context_size,
+                                      (char *)req_ctx->hw_context, 0,
+                                      DMA_TO_DEVICE);
+               req_ctx->swinit = 0;
+       } else {
+               desc->ptr[1] = zero_entry;
+               /* Indicate next op is not the first. */
+               req_ctx->first = 0;
+       }
+
+       /* HMAC key */
+       if (ctx->keylen)
+               map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+                                      (char *)&ctx->key, 0, DMA_TO_DEVICE);
+       else
+               desc->ptr[2] = zero_entry;
+
+       /*
+        * data in
+        */
+       desc->ptr[3].len = cpu_to_be16(length);
+       desc->ptr[3].j_extent = 0;
+
+       sg_count = talitos_map_sg(dev, req_ctx->psrc,
+                                 edesc->src_nents ? : 1,
+                                 DMA_TO_DEVICE,
+                                 edesc->src_is_chained);
+
+       if (sg_count == 1) {
+               to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
+       } else {
+               sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
+                                         &edesc->link_tbl[0]);
+               if (sg_count > 1) {
+                       desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+                       to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
+                       dma_sync_single_for_device(ctx->dev,
+                                                  edesc->dma_link_tbl,
+                                                  edesc->dma_len,
+                                                  DMA_BIDIRECTIONAL);
+               } else {
+                       /* Only one segment now, so no link tbl needed */
+                       to_talitos_ptr(&desc->ptr[3],
+                                      sg_dma_address(req_ctx->psrc));
+               }
+       }
+
+       /* fifth DWORD empty */
+       desc->ptr[4] = zero_entry;
+
+       /* hash/HMAC out -or- hash context out */
+       if (req_ctx->last)
+               map_single_talitos_ptr(dev, &desc->ptr[5],
+                                      crypto_ahash_digestsize(tfm),
+                                      areq->result, 0, DMA_FROM_DEVICE);
+       else
+               map_single_talitos_ptr(dev, &desc->ptr[5],
+                                      req_ctx->hw_context_size,
+                                      req_ctx->hw_context, 0, DMA_FROM_DEVICE);
+
+       /* last DWORD empty */
+       desc->ptr[6] = zero_entry;
+
+       ret = talitos_submit(dev, desc, callback, areq);
+       if (ret != -EINPROGRESS) {
+               common_nonsnoop_hash_unmap(dev, edesc, areq);
+               kfree(edesc);
+       }
+       return ret;
+}
+
+static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
+                                              unsigned int nbytes)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+       return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
+                                  nbytes, 0, 0, areq->base.flags);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+       /* Initialize the context */
+       req_ctx->count = 0;
+       req_ctx->first = 1; /* first indicates h/w must init its context */
+       req_ctx->swinit = 0; /* assume h/w init of context */
+       req_ctx->hw_context_size =
+               (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+                       ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+                       : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+       return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+       ahash_init(areq);
+       /* prevent h/w from initializing the context with sha256 values */
+       req_ctx->swinit = 1;
+
+       req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
+       req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
+       req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
+       req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
+       req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
+       req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
+       req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
+       req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
+
+       /* init 64-bit count */
+       req_ctx->hw_context[8] = 0;
+       req_ctx->hw_context[9] = 0;
+
+       return 0;
+}
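
This works because SHA-224 is exactly SHA-256 with a different initial hash value and the output truncated to 28 bytes, so preloading the context and suppressing the hardware init (swinit) lets the SHA-256 engine compute SHA-224. The constants are the FIPS 180-2 SHA-224 IV; a plain-C sketch of the preload (the driver stores each word big-endian via cpu_to_be32(), as the MDEU expects):

#include <stdint.h>

/* FIPS 180-2 SHA-224 initial hash values */
static const uint32_t sha224_iv[8] = {
	0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
	0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
};

struct hw_ctx {
	uint32_t h[8];		/* chaining state */
	uint32_t count[2];	/* 64-bit message counter */
};

static void sha224_swinit_sketch(struct hw_ctx *ctx)
{
	for (int i = 0; i < 8; i++)
		ctx->h[i] = sha224_iv[i];
	ctx->count[0] = ctx->count[1] = 0;
}
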
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+       struct talitos_edesc *edesc;
+       unsigned int blocksize =
+                       crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+       unsigned int nbytes_to_hash;
+       unsigned int to_hash_later;
+       unsigned int index;
+       int chained;
+
+       index = req_ctx->count & (blocksize - 1);
+       req_ctx->count += nbytes;
+
+       if (!req_ctx->last && (index + nbytes) < blocksize) {
+               /* Buffer the partial block */
+               sg_copy_to_buffer(areq->src,
+                                 sg_count(areq->src, nbytes, &chained),
+                                 req_ctx->buf + index, nbytes);
+               return 0;
+       }
+
+       if (index) {
+               /* partial block from previous update; chain it in. */
+               sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
+               sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
+               if (nbytes)
+                       scatterwalk_sg_chain(req_ctx->bufsl, 2,
+                                            areq->src);
+               req_ctx->psrc = req_ctx->bufsl;
+       } else {
+               req_ctx->psrc = areq->src;
+       }
+       nbytes_to_hash = index + nbytes;
+       if (!req_ctx->last) {
+               to_hash_later = (nbytes_to_hash & (blocksize - 1));
+               if (to_hash_later) {
+                       int nents;
+                       /* Must copy to_hash_later bytes from the end
+                        * to bufnext (a partial block) for later.
+                        */
+                       nents = sg_count(areq->src, nbytes, &chained);
+                       sg_copy_end_to_buffer(areq->src, nents,
+                                             req_ctx->bufnext,
+                                             to_hash_later,
+                                             nbytes - to_hash_later);
+
+                       /* Adjust count for what will be hashed now */
+                       nbytes_to_hash -= to_hash_later;
+               }
+               req_ctx->to_hash_later = to_hash_later;
+       }
+
+       /* allocate extended descriptor */
+       edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+
+       edesc->desc.hdr = ctx->desc_hdr_template;
+
+       /* On last one, request SEC to pad; otherwise continue */
+       if (req_ctx->last)
+               edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
+       else
+               edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
+
+       /* request SEC to INIT hash. */
+       if (req_ctx->first && !req_ctx->swinit)
+               edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+
+       /* When the tfm context has a keylen, it's an HMAC.
+        * A first or last (i.e. not middle) descriptor must request HMAC.
+        */
+       if (ctx->keylen && (req_ctx->first || req_ctx->last))
+               edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+       return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+                                   ahash_done);
+}
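
The core of ahash_process_req() is the split arithmetic: index is the carry-over from the previous update (count & (blocksize - 1), valid because block sizes are powers of two), and whatever does not fill a whole block is deferred to bufnext, unless this is the last piece, in which case the hardware pads and everything is hashed. The same arithmetic in isolation:

#include <stdio.h>

/* Split (previous carry + new bytes) into a whole-block amount to
 * hash now and a remainder to stash for later; blocksize must be a
 * power of two. When "last" is set the hardware pads, so everything
 * is hashed. */
static void split_update(size_t count, size_t nbytes, size_t blocksize,
			 int last, size_t *hash_now, size_t *hash_later)
{
	size_t index = count & (blocksize - 1);
	size_t total = index + nbytes;

	*hash_later = last ? 0 : (total & (blocksize - 1));
	*hash_now = total - *hash_later;
}

int main(void)
{
	size_t now, later;

	split_update(100, 200, 64, 0, &now, &later);
	printf("hash now %zu, stash %zu\n", now, later);	/* 192, 44 */
	return 0;
}
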
+
+static int ahash_update(struct ahash_request *areq)
+{
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+       req_ctx->last = 0;
+
+       return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_final(struct ahash_request *areq)
+{
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+       req_ctx->last = 1;
+
+       return ahash_process_req(areq, 0);
+}
+
+static int ahash_finup(struct ahash_request *areq)
+{
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+       req_ctx->last = 1;
+
+       return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_digest(struct ahash_request *areq)
+{
+       struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+       ahash->init(areq);
+       req_ctx->last = 1;
+
+       return ahash_process_req(areq, areq->nbytes);
+}
+
 struct talitos_alg_template {
-       struct crypto_alg alg;
+       u32 type;
+       union {
+               struct crypto_alg crypto;
+               struct ahash_alg hash;
+       } alg;
        __be32 desc_hdr_template;
 };
 
 static struct talitos_alg_template driver_algs[] = {
        /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
-       {
-               .alg = {
+       {       .type = CRYPTO_ALG_TYPE_AEAD,
+               .alg.crypto = {
                        .cra_name = "authenc(hmac(sha1),cbc(aes))",
                        .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
                        .cra_blocksize = AES_BLOCK_SIZE,
@@ -1511,8 +1919,8 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_PAD |
                                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
        },
-       {
-               .alg = {
+       {       .type = CRYPTO_ALG_TYPE_AEAD,
+               .alg.crypto = {
                        .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
                        .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1538,8 +1946,8 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_PAD |
                                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
        },
-       {
-               .alg = {
+       {       .type = CRYPTO_ALG_TYPE_AEAD,
+               .alg.crypto = {
                        .cra_name = "authenc(hmac(sha256),cbc(aes))",
                        .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
                        .cra_blocksize = AES_BLOCK_SIZE,
@@ -1564,8 +1972,8 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_PAD |
                                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
        },
-       {
-               .alg = {
+       {       .type = CRYPTO_ALG_TYPE_AEAD,
+               .alg.crypto = {
                        .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
                        .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1591,8 +1999,8 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_PAD |
                                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
        },
-       {
-               .alg = {
+       {       .type = CRYPTO_ALG_TYPE_AEAD,
+               .alg.crypto = {
                        .cra_name = "authenc(hmac(md5),cbc(aes))",
                        .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
                        .cra_blocksize = AES_BLOCK_SIZE,
@@ -1617,8 +2025,8 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_PAD |
                                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
        },
-       {
-               .alg = {
+       {       .type = CRYPTO_ALG_TYPE_AEAD,
+               .alg.crypto = {
                        .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
                        .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1645,8 +2053,8 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
        },
        /* ABLKCIPHER algorithms. */
-       {
-               .alg = {
+       {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+               .alg.crypto = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "cbc-aes-talitos",
                        .cra_blocksize = AES_BLOCK_SIZE,
@@ -1667,8 +2075,8 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_SEL0_AESU |
                                     DESC_HDR_MODE0_AESU_CBC,
        },
-       {
-               .alg = {
+       {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+               .alg.crypto = {
                        .cra_name = "cbc(des3_ede)",
                        .cra_driver_name = "cbc-3des-talitos",
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1689,14 +2097,140 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_SEL0_DEU |
                                     DESC_HDR_MODE0_DEU_CBC |
                                     DESC_HDR_MODE0_DEU_3DES,
-       }
+       },
+       /* AHASH algorithms. */
+       {       .type = CRYPTO_ALG_TYPE_AHASH,
+               .alg.hash = {
+                       .init = ahash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .finup = ahash_finup,
+                       .digest = ahash_digest,
+                       .halg.digestsize = MD5_DIGEST_SIZE,
+                       .halg.base = {
+                               .cra_name = "md5",
+                               .cra_driver_name = "md5-talitos",
+                               .cra_blocksize = MD5_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+                                            CRYPTO_ALG_ASYNC,
+                               .cra_type = &crypto_ahash_type
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+                                    DESC_HDR_SEL0_MDEUA |
+                                    DESC_HDR_MODE0_MDEU_MD5,
+       },
+       {       .type = CRYPTO_ALG_TYPE_AHASH,
+               .alg.hash = {
+                       .init = ahash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .finup = ahash_finup,
+                       .digest = ahash_digest,
+                       .halg.digestsize = SHA1_DIGEST_SIZE,
+                       .halg.base = {
+                               .cra_name = "sha1",
+                               .cra_driver_name = "sha1-talitos",
+                               .cra_blocksize = SHA1_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+                                            CRYPTO_ALG_ASYNC,
+                               .cra_type = &crypto_ahash_type
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+                                    DESC_HDR_SEL0_MDEUA |
+                                    DESC_HDR_MODE0_MDEU_SHA1,
+       },
+       {       .type = CRYPTO_ALG_TYPE_AHASH,
+               .alg.hash = {
+                       .init = ahash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .finup = ahash_finup,
+                       .digest = ahash_digest,
+                       .halg.digestsize = SHA224_DIGEST_SIZE,
+                       .halg.base = {
+                               .cra_name = "sha224",
+                               .cra_driver_name = "sha224-talitos",
+                               .cra_blocksize = SHA224_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+                                            CRYPTO_ALG_ASYNC,
+                               .cra_type = &crypto_ahash_type
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+                                    DESC_HDR_SEL0_MDEUA |
+                                    DESC_HDR_MODE0_MDEU_SHA224,
+       },
+       {       .type = CRYPTO_ALG_TYPE_AHASH,
+               .alg.hash = {
+                       .init = ahash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .finup = ahash_finup,
+                       .digest = ahash_digest,
+                       .halg.digestsize = SHA256_DIGEST_SIZE,
+                       .halg.base = {
+                               .cra_name = "sha256",
+                               .cra_driver_name = "sha256-talitos",
+                               .cra_blocksize = SHA256_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+                                            CRYPTO_ALG_ASYNC,
+                               .cra_type = &crypto_ahash_type
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+                                    DESC_HDR_SEL0_MDEUA |
+                                    DESC_HDR_MODE0_MDEU_SHA256,
+       },
+       {       .type = CRYPTO_ALG_TYPE_AHASH,
+               .alg.hash = {
+                       .init = ahash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .finup = ahash_finup,
+                       .digest = ahash_digest,
+                       .halg.digestsize = SHA384_DIGEST_SIZE,
+                       .halg.base = {
+                               .cra_name = "sha384",
+                               .cra_driver_name = "sha384-talitos",
+                               .cra_blocksize = SHA384_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+                                            CRYPTO_ALG_ASYNC,
+                               .cra_type = &crypto_ahash_type
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+                                    DESC_HDR_SEL0_MDEUB |
+                                    DESC_HDR_MODE0_MDEUB_SHA384,
+       },
+       {       .type = CRYPTO_ALG_TYPE_AHASH,
+               .alg.hash = {
+                       .init = ahash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .finup = ahash_finup,
+                       .digest = ahash_digest,
+                       .halg.digestsize = SHA512_DIGEST_SIZE,
+                       .halg.base = {
+                               .cra_name = "sha512",
+                               .cra_driver_name = "sha512-talitos",
+                               .cra_blocksize = SHA512_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+                                            CRYPTO_ALG_ASYNC,
+                               .cra_type = &crypto_ahash_type
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+                                    DESC_HDR_SEL0_MDEUB |
+                                    DESC_HDR_MODE0_MDEUB_SHA512,
+       },
 };
 
 struct talitos_crypto_alg {
        struct list_head entry;
        struct device *dev;
-       __be32 desc_hdr_template;
-       struct crypto_alg crypto_alg;
+       struct talitos_alg_template algt;
 };
 
 static int talitos_cra_init(struct crypto_tfm *tfm)
@@ -1705,13 +2239,28 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
        struct talitos_crypto_alg *talitos_alg;
        struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       talitos_alg =  container_of(alg, struct talitos_crypto_alg, crypto_alg);
+       if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+               talitos_alg = container_of(__crypto_ahash_alg(alg),
+                                          struct talitos_crypto_alg,
+                                          algt.alg.hash);
+       else
+               talitos_alg = container_of(alg, struct talitos_crypto_alg,
+                                          algt.alg.crypto);
 
        /* update context with ptr to dev */
        ctx->dev = talitos_alg->dev;
 
        /* copy descriptor header template value */
-       ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
+       ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+
+       return 0;
+}
+
+static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+{
+       struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       talitos_cra_init(tfm);
 
        /* random first IV */
        get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
@@ -1719,6 +2268,19 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
        return 0;
 }
 
+static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+{
+       struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       talitos_cra_init(tfm);
+
+       ctx->keylen = 0;
+       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+                                sizeof(struct talitos_ahash_req_ctx));
+
+       return 0;
+}
+
 /*
  * given the alg's descriptor header template, determine whether descriptor
  * type and primary/secondary execution units required match the hw
@@ -1747,7 +2309,15 @@ static int talitos_remove(struct of_device *ofdev)
        int i;
 
        list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
-               crypto_unregister_alg(&t_alg->crypto_alg);
+               switch (t_alg->algt.type) {
+               case CRYPTO_ALG_TYPE_ABLKCIPHER:
+               case CRYPTO_ALG_TYPE_AEAD:
+                       crypto_unregister_alg(&t_alg->algt.alg.crypto);
+                       break;
+               case CRYPTO_ALG_TYPE_AHASH:
+                       crypto_unregister_ahash(&t_alg->algt.alg.hash);
+                       break;
+               }
                list_del(&t_alg->entry);
                kfree(t_alg);
        }
@@ -1781,6 +2351,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
                                                    struct talitos_alg_template
                                                           *template)
 {
+       struct talitos_private *priv = dev_get_drvdata(dev);
        struct talitos_crypto_alg *t_alg;
        struct crypto_alg *alg;
 
@@ -1788,16 +2359,36 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
        if (!t_alg)
                return ERR_PTR(-ENOMEM);
 
-       alg = &t_alg->crypto_alg;
-       *alg = template->alg;
+       t_alg->algt = *template;
+
+       switch (t_alg->algt.type) {
+       case CRYPTO_ALG_TYPE_ABLKCIPHER:
+               alg = &t_alg->algt.alg.crypto;
+               alg->cra_init = talitos_cra_init;
+               break;
+       case CRYPTO_ALG_TYPE_AEAD:
+               alg = &t_alg->algt.alg.crypto;
+               alg->cra_init = talitos_cra_init_aead;
+               break;
+       case CRYPTO_ALG_TYPE_AHASH:
+               alg = &t_alg->algt.alg.hash.halg.base;
+               alg->cra_init = talitos_cra_init_ahash;
+               if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+                   !strcmp(alg->cra_name, "sha224")) {
+                       t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
+                       t_alg->algt.desc_hdr_template =
+                                       DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+                                       DESC_HDR_SEL0_MDEUA |
+                                       DESC_HDR_MODE0_MDEU_SHA256;
+               }
+               break;
+       }
 
        alg->cra_module = THIS_MODULE;
-       alg->cra_init = talitos_cra_init;
        alg->cra_priority = TALITOS_CRA_PRIORITY;
        alg->cra_alignmask = 0;
        alg->cra_ctxsize = sizeof(struct talitos_ctx);
 
-       t_alg->desc_hdr_template = template->desc_hdr_template;
        t_alg->dev = dev;
 
        return t_alg;
@@ -1877,7 +2468,8 @@ static int talitos_probe(struct of_device *ofdev,
                priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
 
        if (of_device_is_compatible(np, "fsl,sec2.1"))
-               priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
+               priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
+                                 TALITOS_FTR_SHA224_HWINIT;
 
        priv->chan = kzalloc(sizeof(struct talitos_channel) *
                             priv->num_channels, GFP_KERNEL);
@@ -1931,6 +2523,7 @@ static int talitos_probe(struct of_device *ofdev,
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
                        struct talitos_crypto_alg *t_alg;
+                       char *name = NULL;
 
                        t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
                        if (IS_ERR(t_alg)) {
@@ -1938,15 +2531,27 @@ static int talitos_probe(struct of_device *ofdev,
                                goto err_out;
                        }
 
-                       err = crypto_register_alg(&t_alg->crypto_alg);
+                       switch (t_alg->algt.type) {
+                       case CRYPTO_ALG_TYPE_ABLKCIPHER:
+                       case CRYPTO_ALG_TYPE_AEAD:
+                               err = crypto_register_alg(
+                                               &t_alg->algt.alg.crypto);
+                               name = t_alg->algt.alg.crypto.cra_driver_name;
+                               break;
+                       case CRYPTO_ALG_TYPE_AHASH:
+                               err = crypto_register_ahash(
+                                               &t_alg->algt.alg.hash);
+                               name =
+                                t_alg->algt.alg.hash.halg.base.cra_driver_name;
+                               break;
+                       }
                        if (err) {
                                dev_err(dev, "%s alg registration failed\n",
-                                       t_alg->crypto_alg.cra_driver_name);
+                                       name);
                                kfree(t_alg);
                        } else {
                                list_add_tail(&t_alg->entry, &priv->alg_list);
-                               dev_info(dev, "%s\n",
-                                        t_alg->crypto_alg.cra_driver_name);
+                               dev_info(dev, "%s\n", name);
                        }
                }
        }
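
The conversion above folds the ablkcipher/aead and ahash entries into a single driver_algs[] template array distinguished by a .type tag, and talitos_remove(), talitos_alg_alloc() and talitos_probe() all dispatch on that tag. A minimal user-space sketch of the same tagged-union registration pattern (all names below are illustrative stand-ins, not kernel API):

#include <stdio.h>

/* stand-ins for the two kernel algorithm structures */
struct fake_cipher_alg { const char *driver_name; };
struct fake_hash_alg   { const char *driver_name; };

enum alg_type { ALG_CIPHER, ALG_HASH };

/* one template type, one union - mirrors struct talitos_alg_template */
struct alg_template {
	enum alg_type type;
	union {
		struct fake_cipher_alg crypto;
		struct fake_hash_alg hash;
	} alg;
};

static void register_template(const struct alg_template *t)
{
	/* dispatch on the tag, as talitos_probe() does */
	switch (t->type) {
	case ALG_CIPHER:
		printf("cipher: %s\n", t->alg.crypto.driver_name);
		break;
	case ALG_HASH:
		printf("hash:   %s\n", t->alg.hash.driver_name);
		break;
	}
}

int main(void)
{
	struct alg_template algs[] = {
		{ ALG_CIPHER, { .crypto = { "cbc-3des-talitos" } } },
		{ ALG_HASH,   { .hash   = { "sha256-talitos" } } },
	};
	for (unsigned i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
		register_template(&algs[i]);
	return 0;
}
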
index ff5a145..0b746ac 100644
@@ -1,7 +1,7 @@
 /*
  * Freescale SEC (talitos) device register and descriptor header defines
  *
- * Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 #define TALITOS_CRCUISR                        0xf030 /* cyclic redundancy check unit*/
 #define TALITOS_CRCUISR_LO             0xf034
 
+#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256      0x28
+#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512                0x48
+
 /*
  * talitos descriptor header (hdr) bits
  */
 #define        DESC_HDR_MODE0_AESU_CBC         cpu_to_be32(0x00200000)
 #define        DESC_HDR_MODE0_DEU_CBC          cpu_to_be32(0x00400000)
 #define        DESC_HDR_MODE0_DEU_3DES         cpu_to_be32(0x00200000)
+#define        DESC_HDR_MODE0_MDEU_CONT        cpu_to_be32(0x08000000)
 #define        DESC_HDR_MODE0_MDEU_INIT        cpu_to_be32(0x01000000)
 #define        DESC_HDR_MODE0_MDEU_HMAC        cpu_to_be32(0x00800000)
 #define        DESC_HDR_MODE0_MDEU_PAD         cpu_to_be32(0x00400000)
+#define        DESC_HDR_MODE0_MDEU_SHA224      cpu_to_be32(0x00300000)
 #define        DESC_HDR_MODE0_MDEU_MD5         cpu_to_be32(0x00200000)
 #define        DESC_HDR_MODE0_MDEU_SHA256      cpu_to_be32(0x00100000)
 #define        DESC_HDR_MODE0_MDEU_SHA1        cpu_to_be32(0x00000000)
+#define        DESC_HDR_MODE0_MDEUB_SHA384     cpu_to_be32(0x00000000)
+#define        DESC_HDR_MODE0_MDEUB_SHA512     cpu_to_be32(0x00200000)
 #define        DESC_HDR_MODE0_MDEU_MD5_HMAC    (DESC_HDR_MODE0_MDEU_MD5 | \
                                         DESC_HDR_MODE0_MDEU_HMAC)
 #define        DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
 #define        DESC_HDR_MODE1_MDEU_INIT        cpu_to_be32(0x00001000)
 #define        DESC_HDR_MODE1_MDEU_HMAC        cpu_to_be32(0x00000800)
 #define        DESC_HDR_MODE1_MDEU_PAD         cpu_to_be32(0x00000400)
+#define        DESC_HDR_MODE1_MDEU_SHA224      cpu_to_be32(0x00000300)
 #define        DESC_HDR_MODE1_MDEU_MD5         cpu_to_be32(0x00000200)
 #define        DESC_HDR_MODE1_MDEU_SHA256      cpu_to_be32(0x00000100)
 #define        DESC_HDR_MODE1_MDEU_SHA1        cpu_to_be32(0x00000000)
+#define        DESC_HDR_MODE1_MDEUB_SHA384     cpu_to_be32(0x00000000)
+#define        DESC_HDR_MODE1_MDEUB_SHA512     cpu_to_be32(0x00000200)
 #define        DESC_HDR_MODE1_MDEU_MD5_HMAC    (DESC_HDR_MODE1_MDEU_MD5 | \
                                         DESC_HDR_MODE1_MDEU_HMAC)
 #define        DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
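
Each new AHASH template ORs a descriptor type, an execution-unit select and a mode field into desc_hdr_template, stored big-endian via cpu_to_be32(). A hedged user-space sketch of assembling such a header word from the MODE0 constants shown in this hunk (htonl() stands in for cpu_to_be32(); the type/select values below are placeholder assumptions, since DESC_HDR_TYPE_* and DESC_HDR_SEL0_* are defined outside this hunk):

#include <arpa/inet.h>	/* htonl(), standing in for cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>

/* mode bits copied from the hunk above (MODE0, MDEU-A path) */
#define MDEU_INIT   0x01000000u
#define MDEU_PAD    0x00400000u
#define MDEU_SHA256 0x00100000u

/* placeholder type/select bits - the real DESC_HDR_TYPE_* and
 * DESC_HDR_SEL0_MDEUA values are not part of this hunk */
#define HDR_TYPE_PLACEHOLDER 0x0u
#define HDR_SEL0_PLACEHOLDER 0x0u

int main(void)
{
	uint32_t hdr = HDR_TYPE_PLACEHOLDER | HDR_SEL0_PLACEHOLDER |
		       MDEU_INIT | MDEU_PAD | MDEU_SHA256;

	/* the device consumes the header in big-endian byte order */
	printf("desc_hdr_template (cpu order): 0x%08x\n", hdr);
	printf("desc_hdr_template (be32):      0x%08x\n", htonl(hdr));
	return 0;
}
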
index 305c590..88910e5 100644
@@ -9,6 +9,7 @@ menuconfig DRM
        depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
        select I2C
        select I2C_ALGOBIT
+       select SLOW_WORK
        help
          Kernel-level support for the Direct Rendering Infrastructure (DRI)
          introduced in XFree86 4.0. If you say Y here, you need to select
@@ -59,6 +60,7 @@ config DRM_RADEON
        select FW_LOADER
         select DRM_KMS_HELPER
         select DRM_TTM
+       select POWER_SUPPLY
        help
          Choose this option if you have an ATI Radeon graphics card.  There
          are both PCI and AGP versions.  You don't need to choose this to
index 932b5aa..3f46772 100644
@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
        struct drm_device *dev = master->minor->dev;
        DRM_DEBUG("%d\n", magic);
 
-       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
-       memset(entry, 0, sizeof(*entry));
        entry->priv = priv;
        entry->hash_item.key = (unsigned long)magic;
        mutex_lock(&dev->struct_mutex);
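
The drm_add_magic() hunk above replaces kmalloc()+memset() with kzalloc(), which allocates and zeroes in one call. The user-space analogue is malloc()+memset() versus calloc(); a trivial sketch:

#include <stdlib.h>
#include <string.h>

struct entry { void *priv; unsigned long key; };

int main(void)
{
	/* old pattern: allocate, then clear by hand */
	struct entry *a = malloc(sizeof(*a));
	if (a)
		memset(a, 0, sizeof(*a));

	/* new pattern: one call, memory already zeroed */
	struct entry *b = calloc(1, sizeof(*b));

	free(a);
	free(b);
	return 0;
}
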
index 61b9bcf..994d23b 100644
@@ -34,6 +34,7 @@
 #include "drm.h"
 #include "drmP.h"
 #include "drm_crtc.h"
+#include "drm_edid.h"
 
 struct drm_prop_enum_list {
        int type;
@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
        list_for_each_entry_safe(mode, t, &connector->user_modes, head)
                drm_mode_remove(connector, mode);
 
-       kfree(connector->fb_helper_private);
        mutex_lock(&dev->mode_config.mutex);
        drm_mode_object_put(dev, &connector->base);
        list_del(&connector->head);
@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev)
        mutex_init(&dev->mode_config.mutex);
        mutex_init(&dev->mode_config.idr_mutex);
        INIT_LIST_HEAD(&dev->mode_config.fb_list);
-       INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
        INIT_LIST_HEAD(&dev->mode_config.crtc_list);
        INIT_LIST_HEAD(&dev->mode_config.connector_list);
        INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -2350,7 +2349,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                                            struct edid *edid)
 {
        struct drm_device *dev = connector->dev;
-       int ret = 0;
+       int ret = 0, size;
 
        if (connector->edid_blob_ptr)
                drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2362,7 +2361,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                return ret;
        }
 
-       connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+       size = EDID_LENGTH * (1 + edid->extensions);
+       connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+                                                           size, edid);
 
        ret = drm_connector_property_set_value(connector,
                                               dev->mode_config.edid_property,
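
The hunk above sizes the EDID property blob from the record's extension count rather than hard-coding 128 bytes: an EDID record is the 128-byte base block plus 128 bytes per extension, with the extension count stored in byte 0x7e of the base block. A standalone sketch of the computation:

#include <stdio.h>
#include <stdint.h>

#define EDID_LENGTH 128

/* total record size: base block plus one block per extension */
static size_t edid_total_size(const uint8_t *base_block)
{
	return (size_t)EDID_LENGTH * (1 + base_block[0x7e]);
}

int main(void)
{
	uint8_t edid[EDID_LENGTH] = { 0 };

	edid[0x7e] = 1;	/* e.g. one CEA extension block */
	printf("blob size: %zu bytes\n", edid_total_size(edid));
	return 0;
}
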
index 51103aa..7644019 100644
@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 }
 
 /**
- * drm_helper_probe_connector_modes - get complete set of display modes
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
  * @dev: DRM device
  * @maxX: max width for modes
  * @maxY: max height for modes
@@ -154,21 +154,6 @@ prune:
 }
 EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
 
-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
-                                     uint32_t maxY)
-{
-       struct drm_connector *connector;
-       int count = 0;
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               count += drm_helper_probe_single_connector_modes(connector,
-                                                                maxX, maxY);
-       }
-
-       return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
 /**
  * drm_helper_encoder_in_use - check if a given encoder is in use
  * @encoder: encoder to check
@@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
-       struct drm_display_mode *mode;
-
-       list_for_each_entry(mode, &connector->modes, head) {
-               if (drm_mode_width(mode) > width ||
-                   drm_mode_height(mode) > height)
-                       continue;
-               if (mode->type & DRM_MODE_TYPE_PREFERRED)
-                       return mode;
-       }
-       return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
-       struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-       struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
-       if (!fb_help_conn)
-               return false;
-
-       cmdline_mode = &fb_help_conn->cmdline_mode;
-       return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
-       struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-       struct drm_fb_helper_cmdline_mode *cmdline_mode;
-       struct drm_display_mode *mode = NULL;
-
-       if (!fb_help_conn)
-               return mode;
-
-       cmdline_mode = &fb_help_conn->cmdline_mode;
-       if (cmdline_mode->specified == false)
-               return mode;
-
-       /* attempt to find a matching mode in the list of modes
-        *  we have gotten so far, if not add a CVT mode that conforms
-        */
-       if (cmdline_mode->rb || cmdline_mode->margins)
-               goto create_mode;
-
-       list_for_each_entry(mode, &connector->modes, head) {
-               /* check width/height */
-               if (mode->hdisplay != cmdline_mode->xres ||
-                   mode->vdisplay != cmdline_mode->yres)
-                       continue;
-
-               if (cmdline_mode->refresh_specified) {
-                       if (mode->vrefresh != cmdline_mode->refresh)
-                               continue;
-               }
-
-               if (cmdline_mode->interlace) {
-                       if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
-                               continue;
-               }
-               return mode;
-       }
-
-create_mode:
-       mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
-                           cmdline_mode->yres,
-                           cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-                           cmdline_mode->rb, cmdline_mode->interlace,
-                           cmdline_mode->margins);
-       drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-       list_add(&mode->head, &connector->modes);
-       return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
-       bool enable;
-
-       if (strict) {
-               enable = connector->status == connector_status_connected;
-       } else {
-               enable = connector->status != connector_status_disconnected;
-       }
-       return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
-       bool any_enabled = false;
-       struct drm_connector *connector;
-       int i = 0;
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               enabled[i] = drm_connector_enabled(connector, true);
-               DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
-                         enabled[i] ? "yes" : "no");
-               any_enabled |= enabled[i];
-               i++;
-       }
-
-       if (any_enabled)
-               return;
-
-       i = 0;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               enabled[i] = drm_connector_enabled(connector, false);
-               i++;
-       }
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
-                                struct drm_display_mode **modes,
-                                bool *enabled, int width, int height)
-{
-       struct drm_connector *connector;
-       int i = 0;
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
-               if (enabled[i] == false) {
-                       i++;
-                       continue;
-               }
-
-               DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
-                             connector->base.id);
-
-               /* got for command line mode first */
-               modes[i] = drm_pick_cmdline_mode(connector, width, height);
-               if (!modes[i]) {
-                       DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
-                                     connector->base.id);
-                       modes[i] = drm_has_preferred_mode(connector, width, height);
-               }
-               /* No preferred modes, pick one off the list */
-               if (!modes[i] && !list_empty(&connector->modes)) {
-                       list_for_each_entry(modes[i], &connector->modes, head)
-                               break;
-               }
-               DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
-                         "none");
-               i++;
-       }
-       return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
-                         struct drm_crtc **best_crtcs,
-                         struct drm_display_mode **modes,
-                         int n, int width, int height)
-{
-       int c, o;
-       struct drm_connector *connector;
-       struct drm_connector_helper_funcs *connector_funcs;
-       struct drm_encoder *encoder;
-       struct drm_crtc *best_crtc;
-       int my_score, best_score, score;
-       struct drm_crtc **crtcs, *crtc;
-
-       if (n == dev->mode_config.num_connector)
-               return 0;
-       c = 0;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (c == n)
-                       break;
-               c++;
-       }
-
-       best_crtcs[n] = NULL;
-       best_crtc = NULL;
-       best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
-       if (modes[n] == NULL)
-               return best_score;
-
-       crtcs = kmalloc(dev->mode_config.num_connector *
-                       sizeof(struct drm_crtc *), GFP_KERNEL);
-       if (!crtcs)
-               return best_score;
-
-       my_score = 1;
-       if (connector->status == connector_status_connected)
-               my_score++;
-       if (drm_has_cmdline_mode(connector))
-               my_score++;
-       if (drm_has_preferred_mode(connector, width, height))
-               my_score++;
-
-       connector_funcs = connector->helper_private;
-       encoder = connector_funcs->best_encoder(connector);
-       if (!encoder)
-               goto out;
-
-       connector->encoder = encoder;
-
-       /* select a crtc for this connector and then attempt to configure
-          remaining connectors */
-       c = 0;
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
-               if ((encoder->possible_crtcs & (1 << c)) == 0) {
-                       c++;
-                       continue;
-               }
-
-               for (o = 0; o < n; o++)
-                       if (best_crtcs[o] == crtc)
-                               break;
-
-               if (o < n) {
-                       /* ignore cloning for now */
-                       c++;
-                       continue;
-               }
-
-               crtcs[n] = crtc;
-               memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
-               score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
-                                                 width, height);
-               if (score > best_score) {
-                       best_crtc = crtc;
-                       best_score = score;
-                       memcpy(best_crtcs, crtcs,
-                              dev->mode_config.num_connector *
-                              sizeof(struct drm_crtc *));
-               }
-               c++;
-       }
-out:
-       kfree(crtcs);
-       return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
-       struct drm_crtc **crtcs;
-       struct drm_display_mode **modes;
-       struct drm_encoder *encoder;
-       struct drm_connector *connector;
-       bool *enabled;
-       int width, height;
-       int i, ret;
-
-       DRM_DEBUG_KMS("\n");
-
-       width = dev->mode_config.max_width;
-       height = dev->mode_config.max_height;
-
-       /* clean out all the encoder/crtc combos */
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               encoder->crtc = NULL;
-       }
-
-       crtcs = kcalloc(dev->mode_config.num_connector,
-                       sizeof(struct drm_crtc *), GFP_KERNEL);
-       modes = kcalloc(dev->mode_config.num_connector,
-                       sizeof(struct drm_display_mode *), GFP_KERNEL);
-       enabled = kcalloc(dev->mode_config.num_connector,
-                         sizeof(bool), GFP_KERNEL);
-
-       drm_enable_connectors(dev, enabled);
-
-       ret = drm_target_preferred(dev, modes, enabled, width, height);
-       if (!ret)
-               DRM_ERROR("Unable to find initial modes\n");
-
-       DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
-       drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
-       i = 0;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               struct drm_display_mode *mode = modes[i];
-               struct drm_crtc *crtc = crtcs[i];
-
-               if (connector->encoder == NULL) {
-                       i++;
-                       continue;
-               }
-
-               if (mode && crtc) {
-                       DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
-                                 mode->name, crtc->base.id);
-                       crtc->desired_mode = mode;
-                       connector->encoder->crtc = crtc;
-               } else {
-                       connector->encoder->crtc = NULL;
-                       connector->encoder = NULL;
-               }
-               i++;
-       }
-
-       kfree(crtcs);
-       kfree(modes);
-       kfree(enabled);
-}
-
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
  * @encoder: encoder to test
@@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                ret = -EINVAL;
                                goto fail;
                        }
-                       /* TODO are these needed? */
-                       set->crtc->desired_x = set->x;
-                       set->crtc->desired_y = set->y;
-                       set->crtc->desired_mode = set->mode;
                }
                drm_helper_disable_unused_functions(dev);
        } else if (fb_changed) {
@@ -984,63 +669,6 @@ fail:
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
 
-bool drm_helper_plugged_event(struct drm_device *dev)
-{
-       DRM_DEBUG_KMS("\n");
-
-       drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
-                                        dev->mode_config.max_height);
-
-       drm_setup_crtcs(dev);
-
-       /* alert the driver fb layer */
-       dev->mode_config.funcs->fb_changed(dev);
-
-       /* FIXME: send hotplug event */
-       return true;
-}
-/**
- * drm_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_helper_initial_config(struct drm_device *dev)
-{
-       int count = 0;
-
-       /* disable all the possible outputs/crtcs before entering KMS mode */
-       drm_helper_disable_unused_functions(dev);
-
-       drm_fb_helper_parse_command_line(dev);
-
-       count = drm_helper_probe_connector_modes(dev,
-                                                dev->mode_config.max_width,
-                                                dev->mode_config.max_height);
-
-       /*
-        * we shouldn't end up with no modes here.
-        */
-       if (count == 0)
-               printk(KERN_INFO "No connectors reported connected with modes\n");
-
-       drm_setup_crtcs(dev);
-
-       /* alert the driver fb layer */
-       dev->mode_config.funcs->fb_changed(dev);
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_helper_initial_config);
-
 static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
 {
        int dpms = DRM_MODE_DPMS_OFF;
@@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 }
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
-/**
- * drm_hotplug_stage_two
- * @dev DRM device
- * @connector hotpluged connector
- *
- * LOCKING.
- * Caller must hold mode config lock, function might grab struct lock.
- *
- * Stage two of a hotplug.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_helper_hotplug_stage_two(struct drm_device *dev)
-{
-       drm_helper_plugged_event(dev);
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
-
 int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
                                   struct drm_mode_fb_cmd *mode_cmd)
 {
@@ -1200,3 +807,98 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
        return 0;
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+static struct slow_work_ops output_poll_ops;
+
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void output_poll_execute(struct slow_work *work)
+{
+       struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
+       struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+       struct drm_connector *connector;
+       enum drm_connector_status old_status, status;
+       bool repoll = false, changed = false;
+       int ret;
+
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+               /* skip connectors that can't be polled at all -
+                  fixed outputs such as TV out, for instance */
+               if (!connector->polled)
+                       continue;
+
+               else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
+                       repoll = true;
+
+               old_status = connector->status;
+               /* if we are connected and don't want to poll for disconnect
+                  skip it */
+               if (old_status == connector_status_connected &&
+                   !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
+                   !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+                       continue;
+
+               status = connector->funcs->detect(connector);
+               if (old_status != status)
+                       changed = true;
+       }
+
+       mutex_unlock(&dev->mode_config.mutex);
+
+       if (changed) {
+               /* send a uevent + call fbdev */
+               drm_sysfs_hotplug_event(dev);
+               if (dev->mode_config.funcs->output_poll_changed)
+                       dev->mode_config.funcs->output_poll_changed(dev);
+       }
+
+       if (repoll) {
+               ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+               if (ret)
+                       DRM_ERROR("delayed enqueue failed %d\n", ret);
+       }
+}
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+       bool poll = false;
+       int ret;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->polled)
+                       poll = true;
+       }
+       slow_work_register_user(THIS_MODULE);
+       delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+                              &output_poll_ops);
+
+       if (poll) {
+               ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
+               if (ret)
+                       DRM_ERROR("delayed enqueue failed %d\n", ret);
+       }
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+       delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+       slow_work_unregister_user(THIS_MODULE);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+       if (!dev->mode_config.poll_enabled)
+               return;
+       delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+       /* schedule a slow work asap */
+       delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+static struct slow_work_ops output_poll_ops = {
+       .execute = output_poll_execute,
+};
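
Taken together, drm_kms_helper_poll_init() arms a delayed slow-work item, output_poll_execute() calls each pollable connector's detect() hook, fires a hotplug uevent on any status change, and re-queues itself every DRM_OUTPUT_POLL_PERIOD (10*HZ) while polling is still needed. A minimal user-space analogue of that detect/compare/repoll loop (poll_detect() and the tick schedule are illustrative; sleep() stands in for the delayed work queue):

#include <stdio.h>
#include <unistd.h>

#define POLL_PERIOD_SEC 1	/* DRM_OUTPUT_POLL_PERIOD is 10*HZ in the kernel */

enum status { DISCONNECTED, CONNECTED };

/* illustrative stand-in for connector->funcs->detect() */
static enum status poll_detect(int tick)
{
	return tick >= 2 ? CONNECTED : DISCONNECTED;
}

int main(void)
{
	enum status old = DISCONNECTED;

	for (int tick = 0; tick < 4; tick++) {
		enum status now = poll_detect(tick);

		if (now != old) {
			/* analogue of drm_sysfs_hotplug_event() +
			   output_poll_changed() */
			printf("hotplug: status change at tick %d\n", tick);
			old = now;
		}
		sleep(POLL_PERIOD_SEC);	/* analogue of the delayed re-enqueue */
	}
	return 0;
}
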
index 13f1537..252cbd7 100644
@@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev)
 {
        int i;
 
-       dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+       dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
        if (!dev->dma)
                return -ENOMEM;
 
-       memset(dev->dma, 0, sizeof(*dev->dma));
-
        for (i = 0; i <= DRM_MAX_ORDER; i++)
                memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
 
index 18f41d7..f569ae8 100644
@@ -2,6 +2,7 @@
  * Copyright (c) 2006 Luc Verhaegen (quirks list)
  * Copyright (c) 2007-2008 Intel Corporation
  *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
  *
  * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
  * FB layer.
 #include "drmP.h"
 #include "drm_edid.h"
 
-/*
- * TODO:
- *   - support EDID 1.4 (incl. CE blocks)
- */
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
 
 /*
  * EDID blocks out in the wild have a variety of bugs, try to collect
@@ -65,7 +65,8 @@
 
 #define LEVEL_DMT      0
 #define LEVEL_GTF      1
-#define LEVEL_CVT      2
+#define LEVEL_GTF2     2
+#define LEVEL_CVT      3
 
 static struct edid_quirk {
        char *vendor;
@@ -109,36 +110,38 @@ static struct edid_quirk {
        { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
 };
 
+/*** DDC fetch and block validation ***/
 
-/* Valid EDID header has these bytes */
 static const u8 edid_header[] = {
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
 };
 
-/**
- * drm_edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum.  Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
+/*
+ * Sanity check the EDID block (base or extension).  Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
  */
-bool drm_edid_is_valid(struct edid *edid)
+static bool
+drm_edid_block_valid(u8 *raw_edid)
 {
-       int i, score = 0;
+       int i;
        u8 csum = 0;
-       u8 *raw_edid = (u8 *)edid;
+       struct edid *edid = (struct edid *)raw_edid;
 
-       for (i = 0; i < sizeof(edid_header); i++)
-               if (raw_edid[i] == edid_header[i])
-                       score++;
+       if (raw_edid[0] == 0x00) {
+               int score = 0;
 
-       if (score == 8) ;
-       else if (score >= 6) {
-               DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
-               memcpy(raw_edid, edid_header, sizeof(edid_header));
-       } else
-               goto bad;
+               for (i = 0; i < sizeof(edid_header); i++)
+                       if (raw_edid[i] == edid_header[i])
+                               score++;
+
+               if (score == 8) ;
+               else if (score >= 6) {
+                       DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+                       memcpy(raw_edid, edid_header, sizeof(edid_header));
+               } else {
+                       goto bad;
+               }
+       }
 
        for (i = 0; i < EDID_LENGTH; i++)
                csum += raw_edid[i];
@@ -147,13 +150,21 @@ bool drm_edid_is_valid(struct edid *edid)
                goto bad;
        }
 
-       if (edid->version != 1) {
-               DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
-               goto bad;
-       }
+       /* per-block-type checks */
+       switch (raw_edid[0]) {
+       case 0: /* base */
+               if (edid->version != 1) {
+                       DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+                       goto bad;
+               }
 
-       if (edid->revision > 4)
-               DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+               if (edid->revision > 4)
+                       DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+               break;
+
+       default:
+               break;
+       }
 
        return 1;
 
@@ -165,8 +176,158 @@ bad:
        }
        return 0;
 }
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+       int i;
+       u8 *raw = (u8 *)edid;
+
+       if (!edid)
+               return false;
+
+       for (i = 0; i <= edid->extensions; i++)
+               if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+                       return false;
+
+       return true;
+}
 EXPORT_SYMBOL(drm_edid_is_valid);
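
drm_edid_block_valid() accepts a 128-byte block only if its bytes sum to zero modulo 256 (the base block must additionally carry the 8-byte header), and drm_edid_is_valid() simply applies that check to the base block and every extension. A self-contained sketch of the checksum rule:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EDID_LENGTH 128

static bool edid_checksum_ok(const uint8_t *block)
{
	uint8_t csum = 0;

	/* the last byte is chosen so the whole block sums to 0 mod 256 */
	for (int i = 0; i < EDID_LENGTH; i++)
		csum += block[i];
	return csum == 0;
}

int main(void)
{
	uint8_t block[EDID_LENGTH] = { 0x00, 0xff, 0xff, 0xff,
				       0xff, 0xff, 0xff, 0x00 };
	uint8_t sum = 0;

	for (int i = 0; i < EDID_LENGTH - 1; i++)
		sum += block[i];
	block[EDID_LENGTH - 1] = (uint8_t)(0 - sum);	/* fix up checksum */

	printf("valid: %s\n", edid_checksum_ok(block) ? "yes" : "no");
	return 0;
}
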
 
+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf     : EDID data buffer to be filled
+ * \param len     : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+                     int block, int len)
+{
+       unsigned char start = block * EDID_LENGTH;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = DDC_ADDR,
+                       .flags  = 0,
+                       .len    = 1,
+                       .buf    = &start,
+               }, {
+                       .addr   = DDC_ADDR,
+                       .flags  = I2C_M_RD,
+                       .len    = len,
+                       .buf    = buf + start,
+               }
+       };
+
+       if (i2c_transfer(adapter, msgs, 2) == 2)
+               return 0;
+
+       return -1;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+       int i, j = 0;
+       u8 *block, *new;
+
+       if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+               return NULL;
+
+       /* base block fetch */
+       for (i = 0; i < 4; i++) {
+               if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+                       goto out;
+               if (drm_edid_block_valid(block))
+                       break;
+       }
+       if (i == 4)
+               goto carp;
+
+       /* if there's no extensions, we're done */
+       if (block[0x7e] == 0)
+               return block;
+
+       new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+       if (!new)
+               goto out;
+       block = new;
+
+       for (j = 1; j <= block[0x7e]; j++) {
+               for (i = 0; i < 4; i++) {
+                       if (drm_do_probe_ddc_edid(adapter, block, j,
+                                                 EDID_LENGTH))
+                               goto out;
+                       if (drm_edid_block_valid(block + j * EDID_LENGTH))
+                               break;
+               }
+               if (i == 4)
+                       goto carp;
+       }
+
+       return block;
+
+carp:
+       dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+                drm_get_connector_name(connector), j);
+
+out:
+       kfree(block);
+       return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adaptor
+ * \return 1 on success
+ */
+static bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+       unsigned char out;
+
+       return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible.  If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+                         struct i2c_adapter *adapter)
+{
+       struct edid *edid = NULL;
+
+       if (drm_probe_ddc(adapter))
+               edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+       connector->display_info.raw_edid = (char *)edid;
+
+       return edid;
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
 /**
  * edid_vendor - match a string against EDID's obfuscated vendor field
  * @edid: EDID to match
@@ -335,7 +496,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 1024x768@85Hz */
        { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
-                  1072, 1376, 0, 768, 769, 772, 808, 0,
+                  1168, 1376, 0, 768, 769, 772, 808, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 1152x864@75Hz */
        { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
@@ -426,7 +587,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
                   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 1600x1200@75Hz */
-       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+       { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
                   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 1600x1200@85Hz */
@@ -497,8 +658,8 @@ static struct drm_display_mode drm_dmt_modes[] = {
 static const int drm_num_dmt_modes =
        sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 
-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
-                       int hsize, int vsize, int fresh)
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+                                          int hsize, int vsize, int fresh)
 {
        int i;
        struct drm_display_mode *ptr, *mode;
@@ -516,6 +677,111 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
        }
        return mode;
 }
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+       int i;
+       struct edid *edid = (struct edid *)raw_edid;
+
+       if (edid == NULL)
+               return;
+
+       for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+               cb(&(edid->detailed_timings[i]), closure);
+
+       /* XXX extension block walk */
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+       u8 *r = (u8 *)t;
+       if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+               if (r[15] & 0x10)
+                       *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly.  For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+       if (edid->revision >= 4) {
+               bool ret = false; /* stays false if no range block sets it */
+               drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+               return ret;
+       }
+
+       return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+       u8 *r = (u8 *)t;
+       if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+               *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+       u8 *r = NULL;
+       drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+       return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level(CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+       if (edid->revision >= 2) {
+               if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+                       return LEVEL_CVT;
+               if (drm_gtf2_hbreak(edid))
+                       return LEVEL_GTF2;
+               return LEVEL_GTF;
+       }
+       return LEVEL_DMT;
+}
 
 /*
  * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
@@ -536,22 +802,20 @@ bad_std_timing(u8 a, u8 b)
  *
  * Take the standard timing params (in this case width, aspect, and refresh)
  * and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
  */
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
-                                     struct std_timing *t,
-                                     int revision,
-                                     int timing_level)
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+            struct std_timing *t, int revision)
 {
-       struct drm_display_mode *mode;
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *m, *mode = NULL;
        int hsize, vsize;
        int vrefresh_rate;
        unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
                >> EDID_TIMING_ASPECT_SHIFT;
        unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
                >> EDID_TIMING_VFREQ_SHIFT;
+       int timing_level = standard_timing_level(edid);
 
        if (bad_std_timing(t->hsize, t->vfreq_aspect))
                return NULL;
@@ -572,18 +836,38 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
                vsize = (hsize * 4) / 5;
        else
                vsize = (hsize * 9) / 16;
-       /* HDTV hack */
-       if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
-               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+
+       /* HDTV hack, part 1 */
+       if (vrefresh_rate == 60 &&
+           ((hsize == 1360 && vsize == 765) ||
+            (hsize == 1368 && vsize == 769))) {
+               hsize = 1366;
+               vsize = 768;
+       }
+
+       /*
+        * If this connector already has a mode for this size and refresh
+        * rate (because it came from detailed or CVT info), use that
+        * instead.  This way we don't have to guess at interlace or
+        * reduced blanking.
+        */
+       list_for_each_entry(m, &connector->probed_modes, head)
+               if (m->hdisplay == hsize && m->vdisplay == vsize &&
+                   drm_mode_vrefresh(m) == vrefresh_rate)
+                       return NULL;
+
+       /* HDTV hack, part 2 */
+       if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+               mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
                                    false);
                mode->hdisplay = 1366;
                mode->vsync_start = mode->vsync_start - 1;
                mode->vsync_end = mode->vsync_end - 1;
                return mode;
        }
-       mode = NULL;
+
        /* check whether it can be found in default mode table */
-       mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
+       mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
        if (mode)
                return mode;
 
@@ -593,6 +877,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
        case LEVEL_GTF:
                mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
                break;
+       case LEVEL_GTF2:
+               /*
+                * This is potentially wrong if there's ever a monitor with
+                * more than one ranges section, each claiming a different
+                * secondary GTF curve.  Please don't do that.
+                */
+               mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+               if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+                       kfree(mode);
+                       mode = drm_gtf_mode_complex(dev, hsize, vsize,
+                                                   vrefresh_rate, 0, 0,
+                                                   drm_gtf2_m(edid),
+                                                   drm_gtf2_2c(edid),
+                                                   drm_gtf2_k(edid),
+                                                   drm_gtf2_2j(edid));
+               }
+               break;
        case LEVEL_CVT:
                mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
                                    false);
@@ -716,10 +1017,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
        if (mode->vsync_end > mode->vtotal)
                mode->vtotal = mode->vsync_end + 1;
 
-       drm_mode_set_name(mode);
-
        drm_mode_do_interlace_quirk(mode, pt);
 
+       drm_mode_set_name(mode);
+
        if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
        }
@@ -802,10 +1103,6 @@ static struct drm_display_mode edid_est_modes[] = {
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
 };
 
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
 /**
  * add_established_modes - get est. modes from EDID and add them
  * @edid: EDID block to scan
@@ -833,19 +1130,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e
 
        return modes;
 }
-/**
- * stanard_timing_level - get std. timing level(CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
-       if (edid->revision >= 2) {
-               if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
-                       return LEVEL_CVT;
-               return LEVEL_GTF;
-       }
-       return LEVEL_DMT;
-}
 
 /**
  * add_standard_modes - get std. modes from EDID and add them
@@ -856,22 +1140,14 @@ static int standard_timing_level(struct edid *edid)
  */
 static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
 {
-       struct drm_device *dev = connector->dev;
        int i, modes = 0;
-       int timing_level;
-
-       timing_level = standard_timing_level(edid);
 
        for (i = 0; i < EDID_STD_TIMINGS; i++) {
-               struct std_timing *t = &edid->standard_timings[i];
                struct drm_display_mode *newmode;
 
-               /* If std timings bytes are 1, 1 it's empty */
-               if (t->hsize == 1 && t->vfreq_aspect == 1)
-                       continue;
-
-               newmode = drm_mode_std(dev, &edid->standard_timings[i],
-                                      edid->revision, timing_level);
+               newmode = drm_mode_std(connector, edid,
+                                      &edid->standard_timings[i],
+                                      edid->revision);
                if (newmode) {
                        drm_mode_probed_add(connector, newmode);
                        modes++;
@@ -881,36 +1157,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
        return modes;
 }
 
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
 static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+mode_is_rb(struct drm_display_mode *mode)
 {
-       struct detailed_data_monitor_range *range;
-       int hsync, vrefresh;
-
-       range = &timing->data.other_data.data.range;
+       return (mode->htotal - mode->hdisplay == 160) &&
+              (mode->hsync_end - mode->hdisplay == 80) &&
+              (mode->hsync_end - mode->hsync_start == 32) &&
+              (mode->vsync_start - mode->vdisplay == 3);
+}
 
+static bool
+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+       int hsync, hmin, hmax;
+
+       hmin = t[7];
+       if (edid->revision >= 4)
+           hmin += ((t[4] & 0x04) ? 255 : 0);
+       hmax = t[8];
+       if (edid->revision >= 4)
+           hmax += ((t[4] & 0x08) ? 255 : 0);
        hsync = drm_mode_hsync(mode);
-       vrefresh = drm_mode_vrefresh(mode);
 
-       if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+       return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+       int vsync, vmin, vmax;
+
+       vmin = t[5];
+       if (edid->revision >= 4)
+           vmin += ((t[4] & 0x01) ? 255 : 0);
+       vmax = t[6];
+       if (edid->revision >= 4)
+           vmax += ((t[4] & 0x02) ? 255 : 0);
+       vsync = drm_mode_vrefresh(mode);
+
+       return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+       /* unspecified */
+       if (t[9] == 0 || t[9] == 255)
+               return 0;
+
+       /* 1.4 with CVT support gives us real precision, yay */
+       if (edid->revision >= 4 && t[10] == 0x04)
+               return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+       /* 1.3 is pathetic, so fuzz up a bit */
+       return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+             struct detailed_timing *timing)
+{
+       u32 max_clock;
+       u8 *t = (u8 *)timing;
+
+       if (!mode_in_hsync_range(mode, edid, t))
                return false;
 
-       if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+       if (!mode_in_vsync_range(mode, edid, t))
                return false;
 
-       if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
-               /* be forgiving since it's in units of 10MHz */
-               int max_clock = range->pixel_clock_mhz * 10 + 9;
-               max_clock *= 1000;
+       if ((max_clock = range_pixel_clock(edid, t)))
                if (mode->clock > max_clock)
                        return false;
-       }
+
+       /* 1.4 max horizontal check: byte 13 is max active pixels /8,
+        * with two more high bits in byte 12 */
+       if (edid->revision >= 4 && t[10] == 0x04)
+               if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12] & 0x3))))
+                       return false;
+
+       if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+               return false;
 
        return true;
 }
@@ -919,15 +1245,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
  * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
  * need to account for them.
  */
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
-                                  struct detailed_timing *timing)
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+                       struct detailed_timing *timing)
 {
        int i, modes = 0;
        struct drm_display_mode *newmode;
        struct drm_device *dev = connector->dev;
 
        for (i = 0; i < drm_num_dmt_modes; i++) {
-               if (mode_in_range(drm_dmt_modes + i, timing)) {
+               if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
                        newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
                        if (newmode) {
                                drm_mode_probed_add(connector, newmode);
@@ -988,13 +1315,100 @@ static int drm_cvt_modes(struct drm_connector *connector,
        return modes;
 }
 
+static const struct {
+       short w;
+       short h;
+       short r;
+       short rb;
+} est3_modes[] = {
+       /* byte 6 */
+       { 640, 350, 85, 0 },
+       { 640, 400, 85, 0 },
+       { 720, 400, 85, 0 },
+       { 640, 480, 85, 0 },
+       { 848, 480, 60, 0 },
+       { 800, 600, 85, 0 },
+       { 1024, 768, 85, 0 },
+       { 1152, 864, 75, 0 },
+       /* byte 7 */
+       { 1280, 768, 60, 1 },
+       { 1280, 768, 60, 0 },
+       { 1280, 768, 75, 0 },
+       { 1280, 768, 85, 0 },
+       { 1280, 960, 60, 0 },
+       { 1280, 960, 85, 0 },
+       { 1280, 1024, 60, 0 },
+       { 1280, 1024, 85, 0 },
+       /* byte 8 */
+       { 1360, 768, 60, 0 },
+       { 1440, 900, 60, 1 },
+       { 1440, 900, 60, 0 },
+       { 1440, 900, 75, 0 },
+       { 1440, 900, 85, 0 },
+       { 1400, 1050, 60, 1 },
+       { 1400, 1050, 60, 0 },
+       { 1400, 1050, 75, 0 },
+       /* byte 9 */
+       { 1400, 1050, 85, 0 },
+       { 1680, 1050, 60, 1 },
+       { 1680, 1050, 60, 0 },
+       { 1680, 1050, 75, 0 },
+       { 1680, 1050, 85, 0 },
+       { 1600, 1200, 60, 0 },
+       { 1600, 1200, 65, 0 },
+       { 1600, 1200, 70, 0 },
+       /* byte 10 */
+       { 1600, 1200, 75, 0 },
+       { 1600, 1200, 85, 0 },
+       { 1792, 1344, 60, 0 },
+       { 1792, 1344, 85, 0 },
+       { 1856, 1392, 60, 0 },
+       { 1856, 1392, 75, 0 },
+       { 1920, 1200, 60, 1 },
+       { 1920, 1200, 60, 0 },
+       /* byte 11 */
+       { 1920, 1200, 75, 0 },
+       { 1920, 1200, 85, 0 },
+       { 1920, 1440, 60, 0 },
+       { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = ARRAY_SIZE(est3_modes);
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+       int i, j, m, modes = 0;
+       struct drm_display_mode *mode;
+       u8 *est = ((u8 *)timing) + 6;
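+       /* bytes 6-11 of the EST III descriptor, one mode per bit, MSB first */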
+
+       for (i = 0; i < 6; i++) {
+               for (j = 7; j >= 0; j--) {
+                       m = (i * 8) + (7 - j);
+                       if (m >= num_est3_modes)
+                               break;
+                       if (est[i] & (1 << j)) {
+                               mode = drm_mode_find_dmt(connector->dev,
+                                                        est3_modes[m].w,
+                                                        est3_modes[m].h,
+                                                        est3_modes[m].r
+                                                        /*, est3_modes[m].rb */);
+                               if (mode) {
+                                       drm_mode_probed_add(connector, mode);
+                                       modes++;
+                               }
+                       }
+               }
+       }
+
+       return modes;
+}
+
 static int add_detailed_modes(struct drm_connector *connector,
                              struct detailed_timing *timing,
                              struct edid *edid, u32 quirks, int preferred)
 {
        int i, modes = 0;
        struct detailed_non_pixel *data = &timing->data.other_data;
-       int timing_level = standard_timing_level(edid);
        int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
        struct drm_display_mode *newmode;
        struct drm_device *dev = connector->dev;
@@ -1015,7 +1429,8 @@ static int add_detailed_modes(struct drm_connector *connector,
        switch (data->type) {
        case EDID_DETAIL_MONITOR_RANGE:
                if (gtf)
-                       modes += drm_gtf_modes_for_range(connector, timing);
+                       modes += drm_gtf_modes_for_range(connector, edid,
+                                                        timing);
                break;
        case EDID_DETAIL_STD_MODES:
                /* Six modes per detailed section */
@@ -1024,8 +1439,8 @@ static int add_detailed_modes(struct drm_connector *connector,
                        struct drm_display_mode *newmode;
 
                        std = &data->data.timings[i];
-                       newmode = drm_mode_std(dev, std, edid->revision,
-                                              timing_level);
+                       newmode = drm_mode_std(connector, edid, std,
+                                              edid->revision);
                        if (newmode) {
                                drm_mode_probed_add(connector, newmode);
                                modes++;
@@ -1035,6 +1450,9 @@ static int add_detailed_modes(struct drm_connector *connector,
        case EDID_DETAIL_CVT_3BYTE:
                modes += drm_cvt_modes(connector, timing);
                break;
+       case EDID_DETAIL_EST_TIMINGS:
+               modes += drm_est3_modes(connector, timing);
+               break;
        default:
                break;
        }
@@ -1058,7 +1476,10 @@ static int add_detailed_info(struct drm_connector *connector,
 
        for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
                struct detailed_timing *timing = &edid->detailed_timings[i];
-               int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+               int preferred = (i == 0);
+
+               if (preferred && edid->version == 1 && edid->revision < 4)
+                       preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
 
                /* In 1.0, only timings are allowed */
                if (!timing->pixel_clock && edid->version == 1 &&
@@ -1088,39 +1509,22 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
        int i, modes = 0;
        char *edid_ext = NULL;
        struct detailed_timing *timing;
-       int edid_ext_num;
        int start_offset, end_offset;
-       int timing_level;
 
-       if (edid->version == 1 && edid->revision < 3) {
-               /* If the EDID version is less than 1.3, there is no
-                * extension EDID.
-                */
+       if (edid->version == 1 && edid->revision < 3)
                return 0;
-       }
-       if (!edid->extensions) {
-               /* if there is no extension EDID, it is unnecessary to
-                * parse the E-EDID to get detailed info
-                */
+       if (!edid->extensions)
                return 0;
-       }
-
-       /* Chose real EDID extension number */
-       edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
-               DRM_MAX_EDID_EXT_NUM : edid->extensions;
 
        /* Find CEA extension */
-       for (i = 0; i < edid_ext_num; i++) {
+       for (i = 0; i < edid->extensions; i++) {
                edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
-               /* This block is CEA extension */
                if (edid_ext[0] == 0x02)
                        break;
        }
 
-       if (i == edid_ext_num) {
-               /* if there is no additional timing EDID block, return */
+       if (i == edid->extensions)
                return 0;
-       }
 
        /* Get the start offset of detailed timing block */
        start_offset = edid_ext[2];
@@ -1132,7 +1536,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
                return 0;
        }
 
-       timing_level = standard_timing_level(edid);
        end_offset = EDID_LENGTH;
        end_offset -= sizeof(struct detailed_timing);
        for (i = start_offset; i < end_offset;
@@ -1144,123 +1547,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
        return modes;
 }
 
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf     : EDID data buffer to be filled
- * \param len     : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
-                         unsigned char *buf, int len)
-{
-       unsigned char start = 0x0;
-       struct i2c_msg msgs[] = {
-               {
-                       .addr   = DDC_ADDR,
-                       .flags  = 0,
-                       .len    = 1,
-                       .buf    = &start,
-               }, {
-                       .addr   = DDC_ADDR,
-                       .flags  = I2C_M_RD,
-                       .len    = len,
-                       .buf    = buf,
-               }
-       };
-
-       if (i2c_transfer(adapter, msgs, 2) == 2)
-               return 0;
-
-       return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
-                            struct i2c_adapter *adapter,
-                            char *buf, int len)
-{
-       int i;
-
-       for (i = 0; i < 4; i++) {
-               if (drm_do_probe_ddc_edid(adapter, buf, len))
-                       return -1;
-               if (drm_edid_is_valid((struct edid *)buf))
-                       return 0;
-       }
-
-       /* repeated checksum failures; warn, but carry on */
-       dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
-                drm_get_connector_name(connector));
-       return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
-                         struct i2c_adapter *adapter)
-{
-       int ret;
-       struct edid *edid;
-
-       edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
-                      GFP_KERNEL);
-       if (edid == NULL) {
-               dev_warn(&connector->dev->pdev->dev,
-                        "Failed to allocate EDID\n");
-               goto end;
-       }
-
-       /* Read first EDID block */
-       ret = drm_ddc_read_edid(connector, adapter,
-                               (unsigned char *)edid, EDID_LENGTH);
-       if (ret != 0)
-               goto clean_up;
-
-       /* There are EDID extensions to be read */
-       if (edid->extensions != 0) {
-               int edid_ext_num = edid->extensions;
-
-               if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
-                       dev_warn(&connector->dev->pdev->dev,
-                                "The number of extension(%d) is "
-                                "over max (%d), actually read number (%d)\n",
-                                edid_ext_num, DRM_MAX_EDID_EXT_NUM,
-                                DRM_MAX_EDID_EXT_NUM);
-                       /* Reset EDID extension number to be read */
-                       edid_ext_num = DRM_MAX_EDID_EXT_NUM;
-               }
-               /* Read EDID including extensions too */
-               ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
-                                       EDID_LENGTH * (edid_ext_num + 1));
-               if (ret != 0)
-                       goto clean_up;
-
-       }
-
-       connector->display_info.raw_edid = (char *)edid;
-       goto end;
-
-clean_up:
-       kfree(edid);
-       edid = NULL;
-end:
-       return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
 #define HDMI_IDENTIFIER 0x000C03
 #define VENDOR_BLOCK    0x03
 /**
@@ -1273,7 +1559,7 @@ EXPORT_SYMBOL(drm_get_edid);
 bool drm_detect_hdmi_monitor(struct edid *edid)
 {
        char *edid_ext = NULL;
-       int i, hdmi_id, edid_ext_num;
+       int i, hdmi_id;
        int start_offset, end_offset;
        bool is_hdmi = false;
 
@@ -1281,19 +1567,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
        if (edid == NULL || edid->extensions == 0)
                goto end;
 
-       /* Chose real EDID extension number */
-       edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
-                      DRM_MAX_EDID_EXT_NUM : edid->extensions;
-
        /* Find CEA extension */
-       for (i = 0; i < edid_ext_num; i++) {
+       for (i = 0; i < edid->extensions; i++) {
                edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
                /* This block is CEA extension */
                if (edid_ext[0] == 0x02)
                        break;
        }
 
-       if (i == edid_ext_num)
+       if (i == edid->extensions)
                goto end;
 
        /* Data block offset in CEA extension block */
@@ -1348,10 +1630,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
        quirks = edid_get_quirks(edid);
 
-       num_modes += add_established_modes(connector, edid);
-       num_modes += add_standard_modes(connector, edid);
+       /*
+        * EDID spec says modes should be preferred in this order:
+        * - preferred detailed mode
+        * - other detailed modes from base block
+        * - detailed modes from extension blocks
+        * - CVT 3-byte code modes
+        * - standard timing codes
+        * - established timing codes
+        * - modes inferred from GTF or CVT range information
+        *
+        * We don't quite implement this yet, but we're close.
+        *
+        * XXX order for additional mode types in extension blocks?
+        */
        num_modes += add_detailed_info(connector, edid, quirks);
        num_modes += add_detailed_info_eedid(connector, edid, quirks);
+       num_modes += add_standard_modes(connector, edid);
+       num_modes += add_established_modes(connector, edid);
 
        if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
                edid_fixup_preferred(connector, quirks);
index 288ea2f..b3779d2 100644
@@ -42,15 +42,33 @@ MODULE_LICENSE("GPL and additional rights");
 
 static LIST_HEAD(kernel_fb_helper_list);
 
-int drm_fb_helper_add_connector(struct drm_connector *connector)
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
-       connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
-       if (!connector->fb_helper_private)
-               return -ENOMEM;
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_connector *connector;
+       int i;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct drm_fb_helper_connector *fb_helper_connector;
+
+               fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+               if (!fb_helper_connector)
+                       goto fail;
 
+               fb_helper_connector->connector = connector;
+               fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+       }
        return 0;
+fail:
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               kfree(fb_helper->connector_info[i]);
+               fb_helper->connector_info[i] = NULL;
+       }
+       fb_helper->connector_count = 0;
+       return -ENOMEM;
 }
-EXPORT_SYMBOL(drm_fb_helper_add_connector);
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
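A minimal driver-side sketch of the resulting fbdev setup flow, built only
from the helpers above; the my_* names are hypothetical, exact driver entry
points vary, and error paths are trimmed:

	/* driver-specific; returns 1 when it allocated a new fb */
	static int my_fb_probe(struct drm_fb_helper *helper,
			       struct drm_fb_helper_surface_size *sizes);

	static const struct drm_fb_helper_funcs my_fb_helper_funcs = {
		.fb_probe = my_fb_probe,
	};

	static int my_fbdev_init(struct drm_device *dev)
	{
		struct drm_fb_helper *helper;
		int ret;

		helper = kzalloc(sizeof(*helper), GFP_KERNEL);
		if (!helper)
			return -ENOMEM;
		helper->funcs = &my_fb_helper_funcs;

		/* one crtc_info slot per crtc, up to 4 connectors each */
		ret = drm_fb_helper_init(dev, helper,
					 dev->mode_config.num_crtc, 4);
		if (ret) {
			kfree(helper);
			return ret;
		}

		drm_fb_helper_single_add_all_connectors(helper);
		ret = drm_fb_helper_single_fb_probe(helper, 32);
		if (ret) {
			drm_fb_helper_fini(helper);
			kfree(helper);
		}
		return ret;
	}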
 
 /**
  * drm_fb_helper_connector_parse_command_line - parse command line for connector
@@ -65,7 +83,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector);
  *
  * enable/enable Digital/disable bit at the end
  */
-static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
                                                       const char *mode_option)
 {
        const char *name;
@@ -75,13 +93,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
        int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
        int i;
        enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
-       struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
        struct drm_fb_helper_cmdline_mode *cmdline_mode;
+       struct drm_connector *connector = fb_helper_conn->connector;
 
-       if (!fb_help_conn)
+       if (!fb_helper_conn)
                return false;
 
-       cmdline_mode = &fb_help_conn->cmdline_mode;
+       cmdline_mode = &fb_helper_conn->cmdline_mode;
        if (!mode_option)
                mode_option = fb_mode_option;
 
@@ -204,18 +222,21 @@ done:
        return true;
 }
 
-int drm_fb_helper_parse_command_line(struct drm_device *dev)
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
 {
-       struct drm_connector *connector;
+       struct drm_fb_helper_connector *fb_helper_conn;
+       int i;
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       for (i = 0; i < fb_helper->connector_count; i++) {
                char *option = NULL;
 
+               fb_helper_conn = fb_helper->connector_info[i];
+
                /* do something on return - turn off connector maybe */
-               if (fb_get_options(drm_get_connector_name(connector), &option))
+               if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
                        continue;
 
-               drm_fb_helper_connector_parse_command_line(connector, option);
+               drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
        }
        return 0;
 }
@@ -293,6 +314,7 @@ static void drm_fb_helper_on(struct fb_info *info)
        struct drm_fb_helper *fb_helper = info->par;
        struct drm_device *dev = fb_helper->dev;
        struct drm_crtc *crtc;
+       struct drm_crtc_helper_funcs *crtc_funcs;
        struct drm_encoder *encoder;
        int i;
 
@@ -300,33 +322,28 @@ static void drm_fb_helper_on(struct fb_info *info)
         * For each CRTC in this fb, turn the crtc on then,
         * find all associated encoders and turn them on.
         */
+       mutex_lock(&dev->mode_config.mutex);
        for (i = 0; i < fb_helper->crtc_count; i++) {
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       struct drm_crtc_helper_funcs *crtc_funcs =
-                               crtc->helper_private;
+               crtc = fb_helper->crtc_info[i].mode_set.crtc;
+               crtc_funcs = crtc->helper_private;
 
-                       /* Only mess with CRTCs in this fb */
-                       if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
-                           !crtc->enabled)
-                               continue;
+               if (!crtc->enabled)
+                       continue;
 
-                       mutex_lock(&dev->mode_config.mutex);
-                       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-                       mutex_unlock(&dev->mode_config.mutex);
+               crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 
-                       /* Found a CRTC on this fb, now find encoders */
-                       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-                               if (encoder->crtc == crtc) {
-                                       struct drm_encoder_helper_funcs *encoder_funcs;
 
-                                       encoder_funcs = encoder->helper_private;
-                                       mutex_lock(&dev->mode_config.mutex);
-                                       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-                                       mutex_unlock(&dev->mode_config.mutex);
-                               }
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
+
+                               encoder_funcs = encoder->helper_private;
+                               encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
                        }
                }
        }
+       mutex_unlock(&dev->mode_config.mutex);
 }
 
 static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
@@ -334,6 +351,7 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
        struct drm_fb_helper *fb_helper = info->par;
        struct drm_device *dev = fb_helper->dev;
        struct drm_crtc *crtc;
+       struct drm_crtc_helper_funcs *crtc_funcs;
        struct drm_encoder *encoder;
        int i;
 
@@ -341,32 +359,26 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
         * For each CRTC in this fb, find all associated encoders
         * and turn them off, then turn off the CRTC.
         */
+       mutex_lock(&dev->mode_config.mutex);
        for (i = 0; i < fb_helper->crtc_count; i++) {
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       struct drm_crtc_helper_funcs *crtc_funcs =
-                               crtc->helper_private;
+               crtc = fb_helper->crtc_info[i].mode_set.crtc;
+               crtc_funcs = crtc->helper_private;
 
-                       /* Only mess with CRTCs in this fb */
-                       if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
-                           !crtc->enabled)
-                               continue;
+               if (!crtc->enabled)
+                       continue;
 
-                       /* Found a CRTC on this fb, now find encoders */
-                       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-                               if (encoder->crtc == crtc) {
-                                       struct drm_encoder_helper_funcs *encoder_funcs;
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
 
-                                       encoder_funcs = encoder->helper_private;
-                                       mutex_lock(&dev->mode_config.mutex);
-                                       encoder_funcs->dpms(encoder, dpms_mode);
-                                       mutex_unlock(&dev->mode_config.mutex);
-                               }
+                               encoder_funcs = encoder->helper_private;
+                               encoder_funcs->dpms(encoder, dpms_mode);
                        }
-                       mutex_lock(&dev->mode_config.mutex);
-                       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-                       mutex_unlock(&dev->mode_config.mutex);
                }
+               crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
        }
+       mutex_unlock(&dev->mode_config.mutex);
 }
 
 int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -401,50 +413,81 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
 {
        int i;
 
+       for (i = 0; i < helper->connector_count; i++)
+               kfree(helper->connector_info[i]);
+       kfree(helper->connector_info);
        for (i = 0; i < helper->crtc_count; i++)
                kfree(helper->crtc_info[i].mode_set.connectors);
        kfree(helper->crtc_info);
 }
 
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
+int drm_fb_helper_init(struct drm_device *dev,
+                      struct drm_fb_helper *fb_helper,
+                      int crtc_count, int max_conn_count)
 {
-       struct drm_device *dev = helper->dev;
        struct drm_crtc *crtc;
        int ret = 0;
        int i;
 
-       helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
-       if (!helper->crtc_info)
+       fb_helper->dev = dev;
+
+       INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+       fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+       if (!fb_helper->crtc_info)
                return -ENOMEM;
 
-       helper->crtc_count = crtc_count;
+       fb_helper->crtc_count = crtc_count;
+       fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+       if (!fb_helper->connector_info) {
+               kfree(fb_helper->crtc_info);
+               return -ENOMEM;
+       }
+       fb_helper->connector_count = 0;
 
        for (i = 0; i < crtc_count; i++) {
-               helper->crtc_info[i].mode_set.connectors =
+               fb_helper->crtc_info[i].mode_set.connectors =
                        kcalloc(max_conn_count,
                                sizeof(struct drm_connector *),
                                GFP_KERNEL);
 
-               if (!helper->crtc_info[i].mode_set.connectors) {
+               if (!fb_helper->crtc_info[i].mode_set.connectors) {
                        ret = -ENOMEM;
                        goto out_free;
                }
-               helper->crtc_info[i].mode_set.num_connectors = 0;
+               fb_helper->crtc_info[i].mode_set.num_connectors = 0;
        }
 
        i = 0;
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               helper->crtc_info[i].crtc_id = crtc->base.id;
-               helper->crtc_info[i].mode_set.crtc = crtc;
+               fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+               fb_helper->crtc_info[i].mode_set.crtc = crtc;
                i++;
        }
-       helper->conn_limit = max_conn_count;
+       fb_helper->conn_limit = max_conn_count;
        return 0;
 out_free:
-       drm_fb_helper_crtc_free(helper);
+       drm_fb_helper_crtc_free(fb_helper);
        return -ENOMEM;
 }
-EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
+EXPORT_SYMBOL(drm_fb_helper_init);
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+       if (!list_empty(&fb_helper->kernel_fb_list)) {
+               list_del(&fb_helper->kernel_fb_list);
+               if (list_empty(&kernel_fb_helper_list)) {
+                       printk(KERN_INFO "drm: unregistered panic notifier\n");
+                       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                                        &paniced);
+                       unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+               }
+       }
+
+       drm_fb_helper_crtc_free(fb_helper);
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
 
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
                     u16 blue, u16 regno, struct fb_info *info)
@@ -508,20 +551,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 {
        struct drm_fb_helper *fb_helper = info->par;
-       struct drm_device *dev = fb_helper->dev;
+       struct drm_crtc_helper_funcs *crtc_funcs;
        u16 *red, *green, *blue, *transp;
        struct drm_crtc *crtc;
        int i, rc = 0;
        int start;
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-               for (i = 0; i < fb_helper->crtc_count; i++) {
-                       if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-                               break;
-               }
-               if (i == fb_helper->crtc_count)
-                       continue;
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               crtc = fb_helper->crtc_info[i].mode_set.crtc;
+               crtc_funcs = crtc->helper_private;
 
                red = cmap->red;
                green = cmap->green;
@@ -549,41 +587,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 }
 EXPORT_SYMBOL(drm_fb_helper_setcmap);
 
-int drm_fb_helper_setcolreg(unsigned regno,
-                           unsigned red,
-                           unsigned green,
-                           unsigned blue,
-                           unsigned transp,
-                           struct fb_info *info)
-{
-       struct drm_fb_helper *fb_helper = info->par;
-       struct drm_device *dev = fb_helper->dev;
-       struct drm_crtc *crtc;
-       int i;
-       int ret;
-
-       if (regno > 255)
-               return 1;
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-               for (i = 0; i < fb_helper->crtc_count; i++) {
-                       if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-                               break;
-               }
-               if (i == fb_helper->crtc_count)
-                       continue;
-
-               ret = setcolreg(crtc, red, green, blue, regno, info);
-               if (ret)
-                       return ret;
-
-               crtc_funcs->load_lut(crtc);
-       }
-       return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_setcolreg);
-
 int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info)
 {
@@ -687,23 +690,21 @@ int drm_fb_helper_set_par(struct fb_info *info)
                return -EINVAL;
        }
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
-               for (i = 0; i < fb_helper->crtc_count; i++) {
-                       if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-                               break;
-               }
-               if (i == fb_helper->crtc_count)
-                       continue;
-
-               if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
-                       mutex_lock(&dev->mode_config.mutex);
-                       ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+       mutex_lock(&dev->mode_config.mutex);
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               crtc = fb_helper->crtc_info[i].mode_set.crtc;
+               ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+               if (ret) {
                        mutex_unlock(&dev->mode_config.mutex);
-                       if (ret)
-                               return ret;
+                       return ret;
                }
        }
+       mutex_unlock(&dev->mode_config.mutex);
+
+       if (fb_helper->delayed_hotplug) {
+               fb_helper->delayed_hotplug = false;
+               drm_fb_helper_hotplug_event(fb_helper);
+       }
        return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -718,14 +719,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
        int ret = 0;
        int i;
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               for (i = 0; i < fb_helper->crtc_count; i++) {
-                       if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-                               break;
-               }
-
-               if (i == fb_helper->crtc_count)
-                       continue;
+       mutex_lock(&dev->mode_config.mutex);
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               crtc = fb_helper->crtc_info[i].mode_set.crtc;
 
                modeset = &fb_helper->crtc_info[i].mode_set;
 
@@ -733,209 +729,138 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
                modeset->y = var->yoffset;
 
                if (modeset->num_connectors) {
-                       mutex_lock(&dev->mode_config.mutex);
                        ret = crtc->funcs->set_config(modeset);
-                       mutex_unlock(&dev->mode_config.mutex);
                        if (!ret) {
                                info->var.xoffset = var->xoffset;
                                info->var.yoffset = var->yoffset;
                        }
                }
        }
+       mutex_unlock(&dev->mode_config.mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_fb_helper_pan_display);
 
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
-                                 int preferred_bpp,
-                                 int (*fb_create)(struct drm_device *dev,
-                                                  uint32_t fb_width,
-                                                  uint32_t fb_height,
-                                                  uint32_t surface_width,
-                                                  uint32_t surface_height,
-                                                  uint32_t surface_depth,
-                                                  uint32_t surface_bpp,
-                                                  struct drm_framebuffer **fb_ptr))
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+                                 int preferred_bpp)
 {
-       struct drm_crtc *crtc;
-       struct drm_connector *connector;
-       unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
-       unsigned int surface_width = 0, surface_height = 0;
        int new_fb = 0;
        int crtc_count = 0;
-       int ret, i, conn_count = 0;
+       int i;
        struct fb_info *info;
-       struct drm_framebuffer *fb;
-       struct drm_mode_set *modeset = NULL;
-       struct drm_fb_helper *fb_helper;
-       uint32_t surface_depth = 24, surface_bpp = 32;
+       struct drm_fb_helper_surface_size sizes;
+       int gamma_size = 0;
+
+       memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+       sizes.surface_depth = 24;
+       sizes.surface_bpp = 32;
+       sizes.fb_width = (unsigned)-1;
+       sizes.fb_height = (unsigned)-1;
 
        /* if driver picks 8 or 16 by default use that
           for both depth/bpp */
-       if (preferred_bpp != surface_bpp) {
-               surface_depth = surface_bpp = preferred_bpp;
+       if (preferred_bpp != sizes.surface_bpp) {
+               sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
        }
        /* first up get a count of crtcs now in use and new min/maxes width/heights */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
                struct drm_fb_helper_cmdline_mode *cmdline_mode;
 
-               if (!fb_help_conn)
-                       continue;
-               
-               cmdline_mode = &fb_help_conn->cmdline_mode;
+               cmdline_mode = &fb_helper_conn->cmdline_mode;
 
                if (cmdline_mode->bpp_specified) {
                        switch (cmdline_mode->bpp) {
                        case 8:
-                               surface_depth = surface_bpp = 8;
+                               sizes.surface_depth = sizes.surface_bpp = 8;
                                break;
                        case 15:
-                               surface_depth = 15;
-                               surface_bpp = 16;
+                               sizes.surface_depth = 15;
+                               sizes.surface_bpp = 16;
                                break;
                        case 16:
-                               surface_depth = surface_bpp = 16;
+                               sizes.surface_depth = sizes.surface_bpp = 16;
                                break;
                        case 24:
-                               surface_depth = surface_bpp = 24;
+                               sizes.surface_depth = sizes.surface_bpp = 24;
                                break;
                        case 32:
-                               surface_depth = 24;
-                               surface_bpp = 32;
+                               sizes.surface_depth = 24;
+                               sizes.surface_bpp = 32;
                                break;
                        }
                        break;
                }
        }
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (drm_helper_crtc_in_use(crtc)) {
-                       if (crtc->desired_mode) {
-                               if (crtc->desired_mode->hdisplay < fb_width)
-                                       fb_width = crtc->desired_mode->hdisplay;
-
-                               if (crtc->desired_mode->vdisplay < fb_height)
-                                       fb_height = crtc->desired_mode->vdisplay;
-
-                               if (crtc->desired_mode->hdisplay > surface_width)
-                                       surface_width = crtc->desired_mode->hdisplay;
-
-                               if (crtc->desired_mode->vdisplay > surface_height)
-                                       surface_height = crtc->desired_mode->vdisplay;
-                       }
+       crtc_count = 0;
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               struct drm_display_mode *desired_mode;
+               desired_mode = fb_helper->crtc_info[i].desired_mode;
+
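+               /* fb_{width,height} shrink to the smallest desired mode
+                * so the console fits every head; surface_{width,height}
+                * grow to the largest so the backing fb covers them all */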
+               if (desired_mode) {
+                       if (gamma_size == 0)
+                               gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+                       if (desired_mode->hdisplay < sizes.fb_width)
+                               sizes.fb_width = desired_mode->hdisplay;
+                       if (desired_mode->vdisplay < sizes.fb_height)
+                               sizes.fb_height = desired_mode->vdisplay;
+                       if (desired_mode->hdisplay > sizes.surface_width)
+                               sizes.surface_width = desired_mode->hdisplay;
+                       if (desired_mode->vdisplay > sizes.surface_height)
+                               sizes.surface_height = desired_mode->vdisplay;
                        crtc_count++;
                }
        }
 
-       if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+       if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
                /* hmm everyone went away - assume VGA cable just fell out
                   and will come back later. */
-               return 0;
+               DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+               sizes.fb_width = sizes.surface_width = 1024;
+               sizes.fb_height = sizes.surface_height = 768;
        }
 
-       /* do we have an fb already? */
-       if (list_empty(&dev->mode_config.fb_kernel_list)) {
-               ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
-                                  surface_height, surface_depth, surface_bpp,
-                                  &fb);
-               if (ret)
-                       return -EINVAL;
-               new_fb = 1;
-       } else {
-               fb = list_first_entry(&dev->mode_config.fb_kernel_list,
-                                     struct drm_framebuffer, filp_head);
-
-               /* if someone hotplugs something bigger than we have already allocated, we are pwned.
-                  As really we can't resize an fbdev that is in the wild currently due to fbdev
-                  not really being designed for the lower layers moving stuff around under it.
-                  - so in the grand style of things - punt. */
-               if ((fb->width < surface_width) ||
-                   (fb->height < surface_height)) {
-                       DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
-                       return -EINVAL;
-               }
-       }
-
-       info = fb->fbdev;
-       fb_helper = info->par;
-
-       crtc_count = 0;
-       /* okay we need to setup new connector sets in the crtcs */
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               modeset = &fb_helper->crtc_info[crtc_count].mode_set;
-               modeset->fb = fb;
-               conn_count = 0;
-               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                       if (connector->encoder)
-                               if (connector->encoder->crtc == modeset->crtc) {
-                                       modeset->connectors[conn_count] = connector;
-                                       conn_count++;
-                                       if (conn_count > fb_helper->conn_limit)
-                                               BUG();
-                               }
-               }
-
-               for (i = conn_count; i < fb_helper->conn_limit; i++)
-                       modeset->connectors[i] = NULL;
+       /* push down into drivers */
+       new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+       if (new_fb < 0)
+               return new_fb;
 
-               modeset->crtc = crtc;
-               crtc_count++;
+       info = fb_helper->fbdev;
 
-               modeset->num_connectors = conn_count;
-               if (modeset->crtc->desired_mode) {
-                       if (modeset->mode)
-                               drm_mode_destroy(dev, modeset->mode);
-                       modeset->mode = drm_mode_duplicate(dev,
-                                                          modeset->crtc->desired_mode);
-               }
+       /* set the fb pointer */
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
        }
-       fb_helper->crtc_count = crtc_count;
-       fb_helper->fb = fb;
 
        if (new_fb) {
                info->var.pixclock = 0;
-               ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
-               if (ret)
-                       return ret;
                if (register_framebuffer(info) < 0) {
-                       fb_dealloc_cmap(&info->cmap);
                        return -EINVAL;
                }
+
+               printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+                      info->fix.id);
+
        } else {
                drm_fb_helper_set_par(info);
        }
-       printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-              info->fix.id);
 
        /* Switch back to kernel console on panic */
        /* multi card linked list maybe */
        if (list_empty(&kernel_fb_helper_list)) {
-               printk(KERN_INFO "registered panic notifier\n");
+               printk(KERN_INFO "drm: registered panic notifier\n");
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &paniced);
                register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
        }
-       list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+       if (new_fb)
+               list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
        return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
 
-void drm_fb_helper_free(struct drm_fb_helper *helper)
-{
-       list_del(&helper->kernel_fb_list);
-       if (list_empty(&kernel_fb_helper_list)) {
-               printk(KERN_INFO "unregistered panic notifier\n");
-               atomic_notifier_chain_unregister(&panic_notifier_list,
-                                                &paniced);
-               unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
-       }
-       drm_fb_helper_crtc_free(helper);
-       fb_dealloc_cmap(&helper->fb->fbdev->cmap);
-}
-EXPORT_SYMBOL(drm_fb_helper_free);
-
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
                            uint32_t depth)
 {
@@ -954,10 +879,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 }
 EXPORT_SYMBOL(drm_fb_helper_fill_fix);
 
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
                            uint32_t fb_width, uint32_t fb_height)
 {
-       info->pseudo_palette = fb->pseudo_palette;
+       struct drm_framebuffer *fb = fb_helper->fb;
+       info->pseudo_palette = fb_helper->pseudo_palette;
        info->var.xres_virtual = fb->width;
        info->var.yres_virtual = fb->height;
        info->var.bits_per_pixel = fb->bits_per_pixel;
@@ -1025,3 +951,457 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
        info->var.yres = fb_height;
 }
 EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+                                              uint32_t maxX,
+                                              uint32_t maxY)
+{
+       struct drm_connector *connector;
+       int count = 0;
+       int i;
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               connector = fb_helper->connector_info[i]->connector;
+               count += connector->funcs->fill_modes(connector, maxX, maxY);
+       }
+
+       return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+       struct drm_display_mode *mode;
+
+       list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+               if (drm_mode_width(mode) > width ||
+                   drm_mode_height(mode) > height)
+                       continue;
+               if (mode->type & DRM_MODE_TYPE_PREFERRED)
+                       return mode;
+       }
+       return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+       struct drm_fb_helper_cmdline_mode *cmdline_mode;
+       cmdline_mode = &fb_connector->cmdline_mode;
+       return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+                                                     int width, int height)
+{
+       struct drm_fb_helper_cmdline_mode *cmdline_mode;
+       struct drm_display_mode *mode = NULL;
+
+       cmdline_mode = &fb_helper_conn->cmdline_mode;
+       if (cmdline_mode->specified == false)
+               return mode;
+
+       /* attempt to find a matching mode in the list of modes we have
+        * gotten so far; if none matches, add a CVT mode that conforms
+        */
+       if (cmdline_mode->rb || cmdline_mode->margins)
+               goto create_mode;
+
+       list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+               /* check width/height */
+               if (mode->hdisplay != cmdline_mode->xres ||
+                   mode->vdisplay != cmdline_mode->yres)
+                       continue;
+
+               if (cmdline_mode->refresh_specified) {
+                       if (mode->vrefresh != cmdline_mode->refresh)
+                               continue;
+               }
+
+               if (cmdline_mode->interlace) {
+                       if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+                               continue;
+               }
+               return mode;
+       }
+
+create_mode:
+       mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
+                           cmdline_mode->yres,
+                           cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+                           cmdline_mode->rb, cmdline_mode->interlace,
+                           cmdline_mode->margins);
+       drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+       list_add(&mode->head, &fb_helper_conn->connector->modes);
+       return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+       bool enable;
+
+       if (strict) {
+               enable = connector->status == connector_status_connected;
+       } else {
+               enable = connector->status != connector_status_disconnected;
+       }
+       return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+                                 bool *enabled)
+{
+       bool any_enabled = false;
+       struct drm_connector *connector;
+       int i = 0;
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               connector = fb_helper->connector_info[i]->connector;
+               enabled[i] = drm_connector_enabled(connector, true);
+               DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+                         enabled[i] ? "yes" : "no");
+               any_enabled |= enabled[i];
+       }
+
+       if (any_enabled)
+               return;
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               connector = fb_helper->connector_info[i]->connector;
+               enabled[i] = drm_connector_enabled(connector, false);
+       }
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+                             struct drm_display_mode **modes,
+                             bool *enabled, int width, int height)
+{
+       int count, i, j;
+       bool can_clone = false;
+       struct drm_fb_helper_connector *fb_helper_conn;
+       struct drm_display_mode *dmt_mode, *mode;
+
+       /* only contemplate cloning in the single crtc case */
+       if (fb_helper->crtc_count > 1)
+               return false;
+
+       count = 0;
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               if (enabled[i])
+                       count++;
+       }
+
+       /* only contemplate cloning if more than one connector is enabled */
+       if (count <= 1)
+               return false;
+
+       /* check the command line first; failing that, 1024x768 is tried below */
+       can_clone = true;
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               if (!enabled[i])
+                       continue;
+               fb_helper_conn = fb_helper->connector_info[i];
+               modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+               if (!modes[i]) {
+                       can_clone = false;
+                       break;
+               }
+               for (j = 0; j < i; j++) {
+                       if (!enabled[j])
+                               continue;
+                       if (!drm_mode_equal(modes[j], modes[i]))
+                               can_clone = false;
+               }
+       }
+
+       if (can_clone) {
+               DRM_DEBUG_KMS("can clone using command line\n");
+               return true;
+       }
+
+       /* try and find a 1024x768 mode on each connector */
+       can_clone = true;
+       dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+
+               if (!enabled[i])
+                       continue;
+
+               fb_helper_conn = fb_helper->connector_info[i];
+               list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+                       if (drm_mode_equal(mode, dmt_mode))
+                               modes[i] = mode;
+               }
+               if (!modes[i])
+                       can_clone = false;
+       }
+
+       if (can_clone) {
+               DRM_DEBUG_KMS("can clone using 1024x768\n");
+               return true;
+       }
+       DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+       return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+                                struct drm_display_mode **modes,
+                                bool *enabled, int width, int height)
+{
+       struct drm_fb_helper_connector *fb_helper_conn;
+       int i;
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               fb_helper_conn = fb_helper->connector_info[i];
+
+               if (enabled[i] == false)
+                       continue;
+
+               DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+                             fb_helper_conn->connector->base.id);
+
+               /* go for the command line mode first */
+               modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+               if (!modes[i]) {
+                       DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+                                     fb_helper_conn->connector->base.id);
+                       modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+               }
+               /* No preferred modes, pick one off the list */
+               if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+                       list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+                               break;
+               }
+               DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+                         "none");
+       }
+       return true;
+}
+
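+/*
+ * Recursively try every crtc assignment for connectors n..count-1,
+ * scoring each candidate (base 1, plus one each for being connected,
+ * having a cmdline mode, and having a preferred mode) and keeping the
+ * best-scoring combination in best_crtcs.
+ */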
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+                         struct drm_fb_helper_crtc **best_crtcs,
+                         struct drm_display_mode **modes,
+                         int n, int width, int height)
+{
+       int c, o;
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_connector *connector;
+       struct drm_connector_helper_funcs *connector_funcs;
+       struct drm_encoder *encoder;
+       struct drm_fb_helper_crtc *best_crtc;
+       int my_score, best_score, score;
+       struct drm_fb_helper_crtc **crtcs, *crtc;
+       struct drm_fb_helper_connector *fb_helper_conn;
+
+       if (n == fb_helper->connector_count)
+               return 0;
+
+       fb_helper_conn = fb_helper->connector_info[n];
+       connector = fb_helper_conn->connector;
+
+       best_crtcs[n] = NULL;
+       best_crtc = NULL;
+       best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+       if (modes[n] == NULL)
+               return best_score;
+
+       crtcs = kzalloc(dev->mode_config.num_connector *
+                       sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+       if (!crtcs)
+               return best_score;
+
+       my_score = 1;
+       if (connector->status == connector_status_connected)
+               my_score++;
+       if (drm_has_cmdline_mode(fb_helper_conn))
+               my_score++;
+       if (drm_has_preferred_mode(fb_helper_conn, width, height))
+               my_score++;
+
+       connector_funcs = connector->helper_private;
+       encoder = connector_funcs->best_encoder(connector);
+       if (!encoder)
+               goto out;
+
+       /* select a crtc for this connector and then attempt to configure
+          remaining connectors */
+       for (c = 0; c < fb_helper->crtc_count; c++) {
+               crtc = &fb_helper->crtc_info[c];
+
+               if ((encoder->possible_crtcs & (1 << c)) == 0) {
+                       continue;
+               }
+
+               for (o = 0; o < n; o++)
+                       if (best_crtcs[o] == crtc)
+                               break;
+
+               if (o < n) {
+                       /* ignore cloning unless only a single crtc */
+                       if (fb_helper->crtc_count > 1)
+                               continue;
+
+                       if (!drm_mode_equal(modes[o], modes[n]))
+                               continue;
+               }
+
+               crtcs[n] = crtc;
+               memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+               score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+                                                 width, height);
+               if (score > best_score) {
+                       best_crtc = crtc;
+                       best_score = score;
+                       memcpy(best_crtcs, crtcs,
+                              dev->mode_config.num_connector *
+                              sizeof(struct drm_fb_helper_crtc *));
+               }
+       }
+out:
+       kfree(crtcs);
+       return best_score;
+}
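
drm_pick_crtcs() above is essentially a backtracking search: for each connector it either leaves the connector unassigned or tries every CRTC the encoder can reach, skipping CRTCs already taken, and keeps the highest-scoring complete assignment. A minimal self-contained sketch of that idea, with a fixed score table standing in for the connected/cmdline/preferred scoring and with the single-CRTC cloning special case omitted:

#include <stdio.h>
#include <string.h>

#define N_CONN 2
#define N_CRTC 2

/* score[i][j]: how well CRTC j suits connector i; 0 = cannot drive it */
static const int score[N_CONN][N_CRTC] = { { 2, 1 }, { 0, 3 } };

static int best_total;
static int best_assign[N_CONN];

static void search(int n, int *assign, int total)
{
	int c, o;

	if (n == N_CONN) {
		if (total > best_total) {
			best_total = total;
			memcpy(best_assign, assign, sizeof(best_assign));
		}
		return;
	}

	assign[n] = -1;			/* option 1: leave connector n off */
	search(n + 1, assign, total);

	for (c = 0; c < N_CRTC; c++) {	/* option 2: try each usable CRTC */
		if (!score[n][c])
			continue;
		for (o = 0; o < n; o++)
			if (assign[o] == c)
				break;
		if (o < n)		/* CRTC already taken */
			continue;
		assign[n] = c;
		search(n + 1, assign, total + score[n][c]);
		assign[n] = -1;
	}
}

int main(void)
{
	int assign[N_CONN];

	search(0, assign, 0);
	printf("best score %d: connector 0 -> %d, connector 1 -> %d\n",
	       best_total, best_assign[0], best_assign[1]);
	return 0;
}
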
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_fb_helper_crtc **crtcs;
+       struct drm_display_mode **modes;
+       struct drm_encoder *encoder;
+       struct drm_mode_set *modeset;
+       bool *enabled;
+       int width, height;
+       int i, ret;
+
+       DRM_DEBUG_KMS("\n");
+
+       width = dev->mode_config.max_width;
+       height = dev->mode_config.max_height;
+
+       /* clean out all the encoder/crtc combos */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               encoder->crtc = NULL;
+       }
+
+       crtcs = kcalloc(dev->mode_config.num_connector,
+                       sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+       modes = kcalloc(dev->mode_config.num_connector,
+                       sizeof(struct drm_display_mode *), GFP_KERNEL);
+       enabled = kcalloc(dev->mode_config.num_connector,
+                         sizeof(bool), GFP_KERNEL);
+
+       drm_enable_connectors(fb_helper, enabled);
+
+       ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
+       if (!ret) {
+               ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+               if (!ret)
+                       DRM_ERROR("Unable to find initial modes\n");
+       }
+
+       DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+       drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+       /* need to set the modesets up here for use later */
+       /* fill out the connector<->crtc mappings into the modesets */
+       for (i = 0; i < fb_helper->crtc_count; i++) {
+               modeset = &fb_helper->crtc_info[i].mode_set;
+               modeset->num_connectors = 0;
+       }
+
+       for (i = 0; i < fb_helper->connector_count; i++) {
+               struct drm_display_mode *mode = modes[i];
+               struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+
+               if (mode && fb_crtc) {
+                       modeset = &fb_crtc->mode_set;
+                       DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+                                     mode->name, fb_crtc->mode_set.crtc->base.id);
+                       fb_crtc->desired_mode = mode;
+                       if (modeset->mode)
+                               drm_mode_destroy(dev, modeset->mode);
+                       modeset->mode = drm_mode_duplicate(dev,
+                                                          fb_crtc->desired_mode);
+                       modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+               }
+       }
+
+       kfree(crtcs);
+       kfree(modes);
+       kfree(enabled);
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+       struct drm_device *dev = fb_helper->dev;
+       int count = 0;
+
+       /* disable all the possible outputs/crtcs before entering KMS mode */
+       drm_helper_disable_unused_functions(fb_helper->dev);
+
+       drm_fb_helper_parse_command_line(fb_helper);
+
+       count = drm_fb_helper_probe_connector_modes(fb_helper,
+                                                   dev->mode_config.max_width,
+                                                   dev->mode_config.max_height);
+       /*
+        * we shouldn't end up with no modes here.
+        */
+       if (count == 0) {
+               printk(KERN_INFO "No connectors reported connected with modes\n");
+       }
+       drm_setup_crtcs(fb_helper);
+
+       return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+       int count = 0;
+       u32 max_width, max_height, bpp_sel;
+       bool bound = false, crtcs_bound = false;
+       struct drm_crtc *crtc;
+
+       if (!fb_helper->fb)
+               return false;
+
+       list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
+               if (crtc->fb)
+                       crtcs_bound = true;
+               if (crtc->fb == fb_helper->fb)
+                       bound = true;
+       }
+
+       if (!bound && crtcs_bound) {
+               fb_helper->delayed_hotplug = true;
+               return false;
+       }
+       DRM_DEBUG_KMS("\n");
+
+       max_width = fb_helper->fb->width;
+       max_height = fb_helper->fb->height;
+       bpp_sel = fb_helper->fb->bits_per_pixel;
+
+       count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+                                                   max_height);
+       drm_setup_crtcs(fb_helper);
+
+       return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
index 9d532d7..e7aace2 100644 (file)
@@ -243,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
        DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
 
-       priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
-       memset(priv, 0, sizeof(*priv));
        filp->private_data = priv;
        priv->filp = filp;
        priv->uid = current_euid();
index aa89d4b..33dad3f 100644 (file)
@@ -123,6 +123,31 @@ drm_gem_destroy(struct drm_device *dev)
        dev->mm_private = NULL;
 }
 
+/**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+                       struct drm_gem_object *obj, size_t size)
+{
+       BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+       obj->dev = dev;
+       obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+       if (IS_ERR(obj->filp))
+               return -ENOMEM;
+
+       kref_init(&obj->refcount);
+       kref_init(&obj->handlecount);
+       obj->size = size;
+
+       atomic_inc(&dev->object_count);
+       atomic_add(obj->size, &dev->object_memory);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
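
The point of splitting initialization out of drm_gem_object_alloc() is that a driver may now embed struct drm_gem_object inside its own, larger object and initialize it in place (the i915 changes later in this diff do exactly that). A standalone sketch of the embed-and-init pattern, with invented names, not DRM code:

#include <stdio.h>
#include <stdlib.h>

struct core_obj { size_t size; };

static int core_obj_init(struct core_obj *obj, size_t size)
{
	obj->size = size;	/* shmem setup, refcounts etc. would live here */
	return 0;
}

struct driver_obj {
	struct core_obj base;	/* embedded, not pointed to */
	int driver_state;
};

static struct driver_obj *driver_obj_alloc(size_t size)
{
	struct driver_obj *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return NULL;
	if (core_obj_init(&obj->base, size)) {	/* two-phase: alloc, then init */
		free(obj);
		return NULL;
	}
	obj->driver_state = 42;
	return obj;
}

int main(void)
{
	struct driver_obj *obj = driver_obj_alloc(4096);

	printf("size=%zu state=%d\n", obj->base.size, obj->driver_state);
	free(obj);
	return 0;
}
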
+
 /**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 {
        struct drm_gem_object *obj;
 
-       BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                goto free;
 
-       obj->dev = dev;
-       obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
-       if (IS_ERR(obj->filp))
+       if (drm_gem_object_init(dev, obj, size) != 0)
                goto free;
 
-       kref_init(&obj->refcount);
-       kref_init(&obj->handlecount);
-       obj->size = size;
        if (dev->driver->gem_init_object != NULL &&
            dev->driver->gem_init_object(obj) != 0) {
                goto fput;
        }
-       atomic_inc(&dev->object_count);
-       atomic_add(obj->size, &dev->object_memory);
        return obj;
 fput:
+       /* drm_gem_object_init() bumped the global counters - readjust them. */
+       atomic_dec(&dev->object_count);
+       atomic_sub(obj->size, &dev->object_memory);
        fput(obj->filp);
 free:
        kfree(obj);
@@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
        idr_destroy(&file_private->object_idr);
 }
 
-static void
-drm_gem_object_free_common(struct drm_gem_object *obj)
+void
+drm_gem_object_release(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        fput(obj->filp);
        atomic_dec(&dev->object_count);
        atomic_sub(obj->size, &dev->object_memory);
-       kfree(obj);
 }
+EXPORT_SYMBOL(drm_gem_object_release);
 
 /**
  * Called after the last reference to the object has been lost.
@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref)
 
        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
-
-       drm_gem_object_free_common(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
@@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref)
                dev->driver->gem_free_object(obj);
                mutex_unlock(&dev->struct_mutex);
        }
-
-       drm_gem_object_free_common(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free_unlocked);
 
index 76d6339..f1f473e 100644 (file)
@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
        drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
        /* 18/16. Find actual vertical frame frequency */
        /* ignore - just set the mode flag for interlaced */
-       if (interlaced)
+       if (interlaced) {
                drm_mode->vtotal *= 2;
+               drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       }
        /* Fill the mode line name */
        drm_mode_set_name(drm_mode);
        if (reduced)
@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
        else
                drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
                                        DRM_MODE_FLAG_NHSYNC);
-       if (interlaced)
-               drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
 
-    return drm_mode;
+       return drm_mode;
 }
 EXPORT_SYMBOL(drm_cvt_mode);
 
 /**
- * drm_gtf_mode - create the modeline based on GTF algorithm
+ * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
  *
  * @dev                :drm device
  * @hdisplay   :hdisplay size
  * @vdisplay   :vdisplay size
  * @vrefresh   :vrefresh rate.
  * @interlaced :whether the interlace is supported
- * @margins    :whether the margin is supported
+ * @margins    :desired margin size
+ * @GTF_[MCKJ]  :extended GTF formula parameters
  *
  * LOCKING.
  * none.
  *
- * return the modeline based on GTF algorithm
- *
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- *     GTF Spreadsheet by Andy Morrish (1/5/97)
- *     available at http://www.vesa.org
+ * Return the modeline based on the full GTF algorithm.
  *
- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
- * What I have done is to translate it by using integer calculation.
- * I also refer to the function of fb_get_mode in the file of
- * drivers/video/fbmon.c
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two.  For a C of 40, pass in 80.
  */
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
-                                     int vdisplay, int vrefresh,
-                                     bool interlaced, int margins)
-{
-       /* 1) top/bottom margin size (% of height) - default: 1.8, */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+                    int vrefresh, bool interlaced, int margins,
+                    int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{      /* 1) top/bottom margin size (% of height) - default: 1.8, */
 #define        GTF_MARGIN_PERCENTAGE           18
        /* 2) character cell horizontal granularity (pixels) - default 8 */
 #define        GTF_CELL_GRAN                   8
@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
 #define H_SYNC_PERCENT                 8
        /* min time of vsync + back porch (microsec) */
 #define MIN_VSYNC_PLUS_BP              550
-       /* blanking formula gradient */
-#define GTF_M                          600
-       /* blanking formula offset */
-#define GTF_C                          40
-       /* blanking formula scaling factor */
-#define GTF_K                          128
-       /* blanking formula scaling factor */
-#define GTF_J                          20
        /* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME            (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME            (GTF_K * GTF_M / 256)
+#define GTF_C_PRIME    ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME    (GTF_K * GTF_M / 256)
        struct drm_display_mode *drm_mode;
        unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
        int top_margin, bottom_margin;
@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
 
        drm_mode->clock = pixel_freq;
 
-       drm_mode_set_name(drm_mode);
-       drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
        if (interlaced) {
                drm_mode->vtotal *= 2;
                drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
        }
 
+       drm_mode_set_name(drm_mode);
+       if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+               drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+       else
+               drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
        return drm_mode;
 }
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+/**
+ * drm_gtf_mode - create the modeline based on the GTF algorithm
+ *
+ * @dev                :drm device
+ * @hdisplay   :hdisplay size
+ * @vdisplay   :vdisplay size
+ * @vrefresh   :vrefresh rate.
+ * @interlaced :whether the interlace is supported
+ * @margins    :whether the margin is supported
+ *
+ * LOCKING:
+ * none.
+ *
+ * Return the modeline based on the GTF algorithm.
+ *
+ * This function creates the modeline based on the GTF algorithm.
+ * The Generalized Timing Formula is derived from:
+ *     GTF Spreadsheet by Andy Morrish (1/5/97)
+ *     available at http://www.vesa.org
+ *
+ * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c, translated to
+ * integer arithmetic, with fb_get_mode() from drivers/video/fbmon.c
+ * used as an additional reference.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+            bool lace, int margins)
+{
+       return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+                                   margins, 600, 40 * 2, 128, 20 * 2);
+}
 EXPORT_SYMBOL(drm_gtf_mode);
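
A quick arithmetic check that the doubled-parameter formula above reduces to the classic GTF constants: with M=600, K=128 and 2C=80, 2J=40 (i.e. C=40, J=20), C' and M' come out to 30 and 300, matching ((C - J) * K / 256) + J and K * M / 256. As a runnable check:

#include <stdio.h>

int main(void)
{
	int M = 600, K = 128, C2 = 80, J2 = 40;	/* 2C and 2J, as passed in */
	int c_prime = (((C2 - J2) * K / 256) + J2) / 2;
	int m_prime = K * M / 256;

	/* with the standard parameters this matches classic GTF: C'=30, M'=300 */
	printf("C'=%d M'=%d\n", c_prime, m_prime);
	return 0;
}
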
+
 /**
  * drm_mode_set_name - set the name on a mode
  * @mode: name will be set in this mode
@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode);
  */
 void drm_mode_set_name(struct drm_display_mode *mode)
 {
-       snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
-                mode->vdisplay);
+       bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+       snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+                mode->hdisplay, mode->vdisplay,
+                interlaced ? "i" : "");
 }
 EXPORT_SYMBOL(drm_mode_set_name);
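
With the change above, an interlaced mode now gets an "i" suffix in its name. A standalone illustration of the resulting format (the flag value here is a stand-in, not taken from the DRM headers):

#include <stdio.h>

#define DRM_DISPLAY_MODE_LEN 32
#define FLAG_INTERLACE (1 << 4)	/* stand-in for DRM_MODE_FLAG_INTERLACE */

int main(void)
{
	char name[DRM_DISPLAY_MODE_LEN];
	int hdisplay = 1920, vdisplay = 1080, flags = FLAG_INTERLACE;

	snprintf(name, sizeof(name), "%dx%d%s", hdisplay, vdisplay,
		 (flags & FLAG_INTERLACE) ? "i" : "");
	printf("%s\n", name);	/* prints "1920x1080i" */
	return 0;
}
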
 
index 387166d..101d381 100644 (file)
@@ -334,7 +334,7 @@ static struct device_attribute connector_attrs_opt1[] = {
 static struct bin_attribute edid_attr = {
        .attr.name = "edid",
        .attr.mode = 0444,
-       .size = 128,
+       .size = 0,
        .read = edid_show,
 };
 
index 9929f84..9563901 100644 (file)
@@ -33,3 +33,5 @@ i915-$(CONFIG_ACPI)   += i915_opregion.o
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 
 obj-$(CONFIG_DRM_I915)  += i915.o
+
+CFLAGS_i915_trace_points.o := -I$(src)
index 288fc50..0d6ff64 100644 (file)
@@ -69,16 +69,6 @@ struct intel_dvo_dev_ops {
         */
        void (*dpms)(struct intel_dvo_device *dvo, int mode);
 
-       /*
-        * Saves the output's state for restoration on VT switch.
-        */
-       void (*save)(struct intel_dvo_device *dvo);
-
-       /*
-        * Restore's the output's state at VT switch.
-        */
-       void (*restore)(struct intel_dvo_device *dvo);
-
        /*
         * Callback for testing a video mode for a given output.
         *
index 1184c14..14d5980 100644 (file)
 #define CH7017_BANG_LIMIT_CONTROL      0x7f
 
 struct ch7017_priv {
-       uint8_t save_hapi;
-       uint8_t save_vali;
-       uint8_t save_valo;
-       uint8_t save_ailo;
-       uint8_t save_lvds_pll_vco;
-       uint8_t save_feedback_div;
-       uint8_t save_lvds_control_2;
-       uint8_t save_outputs_enable;
-       uint8_t save_lvds_power_down;
-       uint8_t save_power_management;
+       uint8_t dummy;
 };
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -401,39 +392,6 @@ do {                                                       \
        DUMP(CH7017_LVDS_POWER_DOWN);
 }
 
-static void ch7017_save(struct intel_dvo_device *dvo)
-{
-       struct ch7017_priv *priv = dvo->dev_priv;
-
-       ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
-       ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
-       ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
-       ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
-       ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
-       ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
-       ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
-       ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
-       ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
-}
-
-static void ch7017_restore(struct intel_dvo_device *dvo)
-{
-       struct ch7017_priv *priv = dvo->dev_priv;
-
-       /* Power down before changing mode */
-       ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
-
-       ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
-       ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
-       ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
-       ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
-       ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
-       ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
-       ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
-       ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
-       ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
-}
-
 static void ch7017_destroy(struct intel_dvo_device *dvo)
 {
        struct ch7017_priv *priv = dvo->dev_priv;
@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = {
        .mode_set = ch7017_mode_set,
        .dpms = ch7017_dpms,
        .dump_regs = ch7017_dump_regs,
-       .save = ch7017_save,
-       .restore = ch7017_restore,
        .destroy = ch7017_destroy,
 };
index d56ff5c..6f1944b 100644 (file)
@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct {
        { CH7301_VID, "CH7301" },
 };
 
-struct ch7xxx_reg_state {
-    uint8_t regs[CH7xxx_NUM_REGS];
-};
-
 struct ch7xxx_priv {
        bool quiet;
-
-       struct ch7xxx_reg_state save_reg;
-       struct ch7xxx_reg_state mode_reg;
-       uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
-       uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
 };
 
-static void ch7xxx_save(struct intel_dvo_device *dvo);
-
 static char *ch7xxx_get_id(uint8_t vid)
 {
        int i;
@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
 
 static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 {
-       struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
        int i;
 
        for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+               uint8_t val;
                if ((i % 8) == 0 )
                        DRM_LOG_KMS("\n %02X: ", i);
-               DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
+               ch7xxx_readb(dvo, i, &val);
+               DRM_LOG_KMS("%02X ", val);
        }
 }
 
-static void ch7xxx_save(struct intel_dvo_device *dvo)
-{
-       struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
-
-       ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
-       ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
-       ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
-       ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
-       ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
-       ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
-       ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
-}
-
-static void ch7xxx_restore(struct intel_dvo_device *dvo)
-{
-       struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
-
-       ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
-       ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
-       ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
-       ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
-       ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
-       ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
-       ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
-}
-
 static void ch7xxx_destroy(struct intel_dvo_device *dvo)
 {
        struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
        .mode_set = ch7xxx_mode_set,
        .dpms = ch7xxx_dpms,
        .dump_regs = ch7xxx_dump_regs,
-       .save = ch7xxx_save,
-       .restore = ch7xxx_restore,
        .destroy = ch7xxx_destroy,
 };
index 24169e5..a2ec3f4 100644 (file)
@@ -153,9 +153,6 @@ struct ivch_priv {
        bool quiet;
 
        uint16_t width, height;
-
-       uint16_t save_VR01;
-       uint16_t save_VR40;
 };
 
 
@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
        DRM_LOG_KMS("VR8F: 0x%04x\n", val);
 }
 
-static void ivch_save(struct intel_dvo_device *dvo)
-{
-       struct ivch_priv *priv = dvo->dev_priv;
-
-       ivch_read(dvo, VR01, &priv->save_VR01);
-       ivch_read(dvo, VR40, &priv->save_VR40);
-}
-
-static void ivch_restore(struct intel_dvo_device *dvo)
-{
-       struct ivch_priv *priv = dvo->dev_priv;
-
-       ivch_write(dvo, VR01, priv->save_VR01);
-       ivch_write(dvo, VR40, priv->save_VR40);
-}
-
 static void ivch_destroy(struct intel_dvo_device *dvo)
 {
        struct ivch_priv *priv = dvo->dev_priv;
@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
 struct intel_dvo_dev_ops ivch_ops= {
        .init = ivch_init,
        .dpms = ivch_dpms,
-       .save = ivch_save,
-       .restore = ivch_restore,
        .mode_valid = ivch_mode_valid,
        .mode_set = ivch_mode_set,
        .detect = ivch_detect,
index 0001c13..9b8e676 100644 (file)
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #define SIL164_REGC 0x0c
 
-struct sil164_save_rec {
-       uint8_t reg8;
-       uint8_t reg9;
-       uint8_t regc;
-};
-
 struct sil164_priv {
        //I2CDevRec d;
        bool quiet;
-       struct sil164_save_rec save_regs;
-       struct sil164_save_rec mode_regs;
 };
 
 #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
        DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
 }
 
-static void sil164_save(struct intel_dvo_device *dvo)
-{
-       struct sil164_priv *sil= dvo->dev_priv;
-
-       if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
-               return;
-
-       if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
-               return;
-
-       if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
-               return;
-
-       return;
-}
-
-static void sil164_restore(struct intel_dvo_device *dvo)
-{
-       struct sil164_priv *sil = dvo->dev_priv;
-
-       /* Restore it powered down initially */
-       sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
-
-       sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
-       sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
-       sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
-}
-
 static void sil164_destroy(struct intel_dvo_device *dvo)
 {
        struct sil164_priv *sil = dvo->dev_priv;
@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = {
        .mode_set = sil164_mode_set,
        .dpms = sil164_dpms,
        .dump_regs = sil164_dump_regs,
-       .save = sil164_save,
-       .restore = sil164_restore,
        .destroy = sil164_destroy,
 };
index c7c391b..66c697b 100644 (file)
 #define TFP410_V_RES_LO                0x3C
 #define TFP410_V_RES_HI                0x3D
 
-struct tfp410_save_rec {
-       uint8_t ctl1;
-       uint8_t ctl2;
-};
-
 struct tfp410_priv {
        bool quiet;
-
-       struct tfp410_save_rec saved_reg;
-       struct tfp410_save_rec mode_reg;
 };
 
 static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
        DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
 }
 
-static void tfp410_save(struct intel_dvo_device *dvo)
-{
-       struct tfp410_priv *tfp = dvo->dev_priv;
-
-       if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
-               return;
-
-       if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
-               return;
-}
-
-static void tfp410_restore(struct intel_dvo_device *dvo)
-{
-       struct tfp410_priv *tfp = dvo->dev_priv;
-
-       /* Restore it powered down initially */
-       tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
-
-       tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
-       tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
-}
-
 static void tfp410_destroy(struct intel_dvo_device *dvo)
 {
        struct tfp410_priv *tfp = dvo->dev_priv;
@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = {
        .mode_set = tfp410_mode_set,
        .dpms = tfp410_dpms,
        .dump_regs = tfp410_dump_regs,
-       .save = tfp410_save,
-       .restore = tfp410_restore,
        .destroy = tfp410_destroy,
 };
index a0b8447..322070c 100644 (file)
@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
                spin_lock(lock);
        list_for_each_entry(obj_priv, head, list)
        {
-               struct drm_gem_object *obj = obj_priv->obj;
-
                seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
-                          obj,
+                          &obj_priv->base,
                           get_pin_flag(obj_priv),
-                          obj->size,
-                          obj->read_domains, obj->write_domain,
+                          obj_priv->base.size,
+                          obj_priv->base.read_domains,
+                          obj_priv->base.write_domain,
                           obj_priv->last_rendering_seqno,
                           obj_priv->dirty ? " dirty" : "",
                           obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 
-               if (obj->name)
-                       seq_printf(m, " (name: %d)", obj->name);
+               if (obj_priv->base.name)
+                       seq_printf(m, " (name: %d)", obj_priv->base.name);
                if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                        seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
                if (obj_priv->gtt_space != NULL)
@@ -289,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
        spin_lock(&dev_priv->mm.active_list_lock);
 
        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-               obj = obj_priv->obj;
+               obj = &obj_priv->base;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                    ret = i915_gem_object_get_pages(obj, 0);
                    if (ret) {
@@ -567,23 +566,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_crtc *crtc;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       bool fbc_enabled = false;
 
-       if (!dev_priv->display.fbc_enabled) {
+       if (!I915_HAS_FBC(dev)) {
                seq_printf(m, "FBC unsupported on this chipset\n");
                return 0;
        }
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (!crtc->enabled)
-                       continue;
-               if (dev_priv->display.fbc_enabled(crtc))
-                       fbc_enabled = true;
-       }
-
-       if (fbc_enabled) {
+       if (intel_fbc_enabled(dev)) {
                seq_printf(m, "FBC enabled\n");
        } else {
                seq_printf(m, "FBC disabled: ");
index c3cfafc..2a6b5de 100644 (file)
@@ -1357,13 +1357,12 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
        dev_priv->cfb_size = size;
 
+       intel_disable_fbc(dev);
        dev_priv->compressed_fb = compressed_fb;
 
        if (IS_GM45(dev)) {
-               g4x_disable_fbc(dev);
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
-               i8xx_disable_fbc(dev);
                I915_WRITE(FBC_CFB_BASE, cfb_base);
                I915_WRITE(FBC_LL_BASE, ll_base);
                dev_priv->compressed_llb = compressed_llb;
@@ -1504,8 +1503,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
 
        I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
 
-       drm_helper_initial_config(dev);
-
+       intel_fbdev_init(dev);
+       drm_kms_helper_poll_init(dev);
        return 0;
 
 destroy_ringbuffer:
@@ -1591,7 +1590,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
  */
 int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv;
        resource_size_t base, size;
        int ret = 0, mmio_bar;
        uint32_t agp_size, prealloc_size, prealloc_start;
@@ -1723,6 +1722,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        /* Start out suspended */
        dev_priv->mm.suspended = 1;
 
+       intel_detect_pch(dev);
+
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = i915_load_modeset_init(dev, prealloc_start,
                                             prealloc_size, agp_size);
@@ -1769,6 +1770,8 @@ int i915_driver_unload(struct drm_device *dev)
        }
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               intel_modeset_cleanup(dev);
+
                /*
                 * free the memory space allocated for the child device
                 * config parsed from VBT
@@ -1792,8 +1795,6 @@ int i915_driver_unload(struct drm_device *dev)
        intel_opregion_free(dev, 0);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               intel_modeset_cleanup(dev);
-
                i915_gem_free_all_phys_object(dev);
 
                mutex_lock(&dev->struct_mutex);
index cc03537..5c51e45 100644 (file)
@@ -188,6 +188,35 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
+#define INTEL_PCH_DEVICE_ID_MASK       0xff00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE   0x1c00
+
+void intel_detect_pch (struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct pci_dev *pch;
+
+       /*
+        * We probe the ISA bridge instead of Dev31:Fun0 because it makes
+        * graphics device passthrough easier for a VMM, which then only
+        * needs to expose the ISA bridge for the driver to identify the
+        * real hardware underneath. This is a requirement from the
+        * virtualization team.
+        */
+       pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+       if (pch) {
+               if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+                       int id;
+                       id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+                       if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_CPT;
+                               DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+                       }
+               }
+               pci_dev_put(pch);
+       }
+}
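
Only the upper byte of the ISA bridge's PCI device ID identifies the PCH generation, hence the 0xff00 mask above. A standalone illustration with made-up device IDs:

#include <stdio.h>

#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00

int main(void)
{
	/* hypothetical ISA-bridge PCI device IDs */
	unsigned short ids[] = { 0x1c10, 0x3b00 };
	int i;

	for (i = 0; i < 2; i++) {
		int id = ids[i] & INTEL_PCH_DEVICE_ID_MASK;

		printf("0x%04x -> %s\n", ids[i],
		       id == INTEL_PCH_CPT_DEVICE_ID_TYPE ?
		       "CougarPoint PCH" : "some other PCH");
	}
	return 0;
}
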
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
index 6e47900..7f797ef 100644 (file)
@@ -128,6 +128,7 @@ struct drm_i915_master_private {
 
 struct drm_i915_fence_reg {
        struct drm_gem_object *obj;
+       struct list_head lru_list;
 };
 
 struct sdvo_device_mapping {
@@ -135,6 +136,7 @@ struct sdvo_device_mapping {
        u8 slave_addr;
        u8 dvo_wiring;
        u8 initialized;
+       u8 ddc_pin;
 };
 
 struct drm_i915_error_state {
@@ -175,7 +177,7 @@ struct drm_i915_error_state {
 
 struct drm_i915_display_funcs {
        void (*dpms)(struct drm_crtc *crtc, int mode);
-       bool (*fbc_enabled)(struct drm_crtc *crtc);
+       bool (*fbc_enabled)(struct drm_device *dev);
        void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
        void (*disable_fbc)(struct drm_device *dev);
        int (*get_display_clock_speed)(struct drm_device *dev);
@@ -222,6 +224,13 @@ enum no_fbc_reason {
        FBC_NOT_TILED, /* buffer not tiled */
 };
 
+enum intel_pch {
+       PCH_IBX,        /* Ibexpeak PCH */
+       PCH_CPT,        /* Cougarpoint PCH */
+};
+
+struct intel_fbdev;
+
 typedef struct drm_i915_private {
        struct drm_device *dev;
 
@@ -335,6 +344,9 @@ typedef struct drm_i915_private {
        /* Display functions */
        struct drm_i915_display_funcs display;
 
+       /* PCH chipset type */
+       enum intel_pch pch_type;
+
        /* Register state */
        bool modeset_on_lid;
        u8 saveLBB;
@@ -637,11 +649,14 @@ typedef struct drm_i915_private {
 
        struct drm_mm_node *compressed_fb;
        struct drm_mm_node *compressed_llb;
+
+       /* fbdev registered on this device */
+       struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
-       struct drm_gem_object *obj;
+       struct drm_gem_object base;
 
        /** Current space allocated to this object in the GTT, if any. */
        struct drm_mm_node *gtt_space;
@@ -651,9 +666,6 @@ struct drm_i915_gem_object {
        /** This object's place on GPU write list */
        struct list_head gpu_write_list;
 
-       /** This object's place on the fenced object LRU */
-       struct list_head fence_list;
-
        /**
         * This is set if the object is on the active or flushing lists
         * (has pending rendering), and is not set if it's on inactive (ready
@@ -740,7 +752,7 @@ struct drm_i915_gem_object {
        atomic_t pending_flip;
 };
 
-#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
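
The new to_intel_bo() recovers the wrapper object by pointer arithmetic instead of chasing a driver_private pointer. A standalone sketch of how container_of() does this (simplified macro, invented struct names):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int size; };

struct derived {
	struct base base;	/* embedded base object, as in drm_i915_gem_object */
	int private_state;
};

int main(void)
{
	struct derived d = { { 4096 }, 7 };
	struct base *b = &d.base;		/* what core code hands around */
	struct derived *back = container_of(b, struct derived, base);

	printf("%d %d\n", back->base.size, back->private_state);
	return 0;
}
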
 
 /**
  * Request queue structure.
@@ -902,6 +914,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+                                             size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
 void i915_gem_object_unpin(struct drm_gem_object *obj);
@@ -998,6 +1012,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void i8xx_disable_fbc(struct drm_device *dev);
 extern void g4x_disable_fbc(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+
+extern void intel_detect_pch (struct drm_device *dev);
+extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -1130,7 +1150,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define SUPPORTS_INTEGRATED_DP(dev)    (IS_G4X(dev) || IS_IRONLAKE(dev))
 #define SUPPORTS_EDP(dev)              (IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)               (IS_I9XX(dev) && IS_MOBILE(dev) && \
-                                       !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
+                                       !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+                                       !IS_GEN6(dev))
 #define I915_HAS_HOTPLUG(dev)           (INTEL_INFO(dev)->has_hotplug)
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@@ -1144,6 +1165,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
                            IS_GEN6(dev))
 #define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
 
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
 #endif
index ef3d91d..112699f 100644 (file)
@@ -124,7 +124,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        args->size = roundup(args->size, PAGE_SIZE);
 
        /* Allocate the new object */
-       obj = drm_gem_object_alloc(dev, args->size);
+       obj = i915_gem_alloc_object(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;
 
@@ -1051,7 +1051,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                 * about to occur.
                 */
                if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-                       list_move_tail(&obj_priv->fence_list,
+                       struct drm_i915_fence_reg *reg =
+                               &dev_priv->fence_regs[obj_priv->fence_reg];
+                       list_move_tail(&reg->lru_list,
                                       &dev_priv->mm.fence_list);
                }
 
@@ -1566,7 +1568,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
        list_for_each_entry_safe(obj_priv, next,
                                 &dev_priv->mm.gpu_write_list,
                                 gpu_write_list) {
-               struct drm_gem_object *obj = obj_priv->obj;
+               struct drm_gem_object *obj = &obj_priv->base;
 
                if ((obj->write_domain & flush_domains) ==
                    obj->write_domain) {
@@ -1577,9 +1579,12 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                        i915_gem_object_move_to_active(obj, seqno);
 
                        /* update the fence lru list */
-                       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-                               list_move_tail(&obj_priv->fence_list,
+                       if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                               struct drm_i915_fence_reg *reg =
+                                       &dev_priv->fence_regs[obj_priv->fence_reg];
+                               list_move_tail(&reg->lru_list,
                                                &dev_priv->mm.fence_list);
+                       }
 
                        trace_i915_gem_object_change_domain(obj,
                                                            obj->read_domains,
@@ -1745,7 +1750,7 @@ i915_gem_retire_request(struct drm_device *dev,
                obj_priv = list_first_entry(&dev_priv->mm.active_list,
                                            struct drm_i915_gem_object,
                                            list);
-               obj = obj_priv->obj;
+               obj = &obj_priv->base;
 
                /* If the seqno being retired doesn't match the oldest in the
                 * list, then the oldest in the list must still be newer than
@@ -2119,7 +2124,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
 
        /* Try to find the smallest clean object */
        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-               struct drm_gem_object *obj = obj_priv->obj;
+               struct drm_gem_object *obj = &obj_priv->base;
                if (obj->size >= min_size) {
                        if ((!obj_priv->dirty ||
                             i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2253,7 +2258,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 
                        /* Find an object that we can immediately reuse */
                        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-                               obj = obj_priv->obj;
+                               obj = &obj_priv->base;
                                if (obj->size >= min_size)
                                        break;
 
@@ -2485,9 +2490,10 @@ static int i915_find_fence_reg(struct drm_device *dev)
 
        /* None available, try to steal one or wait for a user to finish */
        i = I915_FENCE_REG_NONE;
-       list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
-                           fence_list) {
-               obj = obj_priv->obj;
+       list_for_each_entry(reg, &dev_priv->mm.fence_list,
+                           lru_list) {
+               obj = reg->obj;
+               obj_priv = to_intel_bo(obj);
 
                if (obj_priv->pin_count)
                        continue;
@@ -2536,7 +2542,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
        /* Just update our place in the LRU if our fence is getting used. */
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-               list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+               reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+               list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
                return 0;
        }
 
@@ -2566,7 +2573,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
        obj_priv->fence_reg = ret;
        reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-       list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+       list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 
        reg->obj = obj;
 
@@ -2598,6 +2605,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct drm_i915_fence_reg *reg =
+               &dev_priv->fence_regs[obj_priv->fence_reg];
 
        if (IS_GEN6(dev)) {
                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2616,9 +2625,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
                I915_WRITE(fence_reg, 0);
        }
 
-       dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+       reg->obj = NULL;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
-       list_del_init(&obj_priv->fence_list);
+       list_del_init(&reg->lru_list);
 }
 
 /**
@@ -4471,34 +4480,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-int i915_gem_init_object(struct drm_gem_object *obj)
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+                                             size_t size)
 {
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
 
-       obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
-       if (obj_priv == NULL)
-               return -ENOMEM;
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (obj == NULL)
+               return NULL;
 
-       /*
-        * We've just allocated pages from the kernel,
-        * so they've just been written by the CPU with
-        * zeros. They'll need to be clflushed before we
-        * use them with the GPU.
-        */
-       obj->write_domain = I915_GEM_DOMAIN_CPU;
-       obj->read_domains = I915_GEM_DOMAIN_CPU;
+       if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+               kfree(obj);
+               return NULL;
+       }
 
-       obj_priv->agp_type = AGP_USER_MEMORY;
+       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-       obj->driver_private = obj_priv;
-       obj_priv->obj = obj;
-       obj_priv->fence_reg = I915_FENCE_REG_NONE;
-       INIT_LIST_HEAD(&obj_priv->list);
-       INIT_LIST_HEAD(&obj_priv->gpu_write_list);
-       INIT_LIST_HEAD(&obj_priv->fence_list);
-       obj_priv->madv = I915_MADV_WILLNEED;
+       obj->agp_type = AGP_USER_MEMORY;
+       obj->base.driver_private = NULL;
+       obj->fence_reg = I915_FENCE_REG_NONE;
+       INIT_LIST_HEAD(&obj->list);
+       INIT_LIST_HEAD(&obj->gpu_write_list);
+       obj->madv = I915_MADV_WILLNEED;
 
-       trace_i915_gem_object_create(obj);
+       trace_i915_gem_object_create(&obj->base);
+
+       return &obj->base;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+       BUG();
 
        return 0;
 }
@@ -4521,9 +4534,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
        if (obj_priv->mmap_offset)
                i915_gem_free_mmap_offset(obj);
 
+       drm_gem_object_release(obj);
+
        kfree(obj_priv->page_cpu_valid);
        kfree(obj_priv->bit_17);
-       kfree(obj->driver_private);
+       kfree(obj_priv);
 }
 
 /** Unbinds all inactive objects. */
@@ -4536,9 +4551,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
                struct drm_gem_object *obj;
                int ret;
 
-               obj = list_first_entry(&dev_priv->mm.inactive_list,
-                                      struct drm_i915_gem_object,
-                                      list)->obj;
+               obj = &list_first_entry(&dev_priv->mm.inactive_list,
+                                       struct drm_i915_gem_object,
+                                       list)->base;
 
                ret = i915_gem_object_unbind(obj);
                if (ret != 0) {
@@ -4608,7 +4623,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
        struct drm_i915_gem_object *obj_priv;
        int ret;
 
-       obj = drm_gem_object_alloc(dev, 4096);
+       obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
@@ -4653,7 +4668,7 @@ i915_gem_init_hws(struct drm_device *dev)
        if (!I915_NEED_GFX_HWS(dev))
                return 0;
 
-       obj = drm_gem_object_alloc(dev, 4096);
+       obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
@@ -4764,7 +4779,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        if (ret != 0)
                return ret;
 
-       obj = drm_gem_object_alloc(dev, 128 * 1024);
+       obj = i915_gem_alloc_object(dev, 128 * 1024);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                i915_gem_cleanup_hws(dev);
@@ -4957,6 +4972,8 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       for (i = 0; i < 16; i++)
+               INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
@@ -5184,6 +5201,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
        mutex_unlock(&dev->struct_mutex);
 }
 
+static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int lists_empty;
+
+       spin_lock(&dev_priv->mm.active_list_lock);
+       lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+                     list_empty(&dev_priv->mm.active_list);
+       spin_unlock(&dev_priv->mm.active_list_lock);
+
+       return !lists_empty;
+}
+
 static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
@@ -5213,6 +5244,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 
        spin_lock(&shrink_list_lock);
 
+rescan:
        /* first scan for clean buffers */
        list_for_each_entry_safe(dev_priv, next_dev,
                                 &shrink_list, mm.shrink_list) {
@@ -5229,7 +5261,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
                                         &dev_priv->mm.inactive_list,
                                         list) {
                        if (i915_gem_object_is_purgeable(obj_priv)) {
-                               i915_gem_object_unbind(obj_priv->obj);
+                               i915_gem_object_unbind(&obj_priv->base);
                                if (--nr_to_scan <= 0)
                                        break;
                        }
@@ -5258,7 +5290,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
                                         &dev_priv->mm.inactive_list,
                                         list) {
                        if (nr_to_scan > 0) {
-                               i915_gem_object_unbind(obj_priv->obj);
+                               i915_gem_object_unbind(&obj_priv->base);
                                nr_to_scan--;
                        } else
                                cnt++;
@@ -5270,6 +5302,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
                would_deadlock = 0;
        }
 
+       if (nr_to_scan) {
+               int active = 0;
+
+               /*
+                * We are desperate for pages, so as a last resort, wait
+                * for the GPU to finish and discard whatever we can.
+                * This dramatically reduces the number of OOM-killer
+                * events while running the GPU aggressively.
+                */
+               list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+                       struct drm_device *dev = dev_priv->dev;
+
+                       if (!mutex_trylock(&dev->struct_mutex))
+                               continue;
+
+                       spin_unlock(&shrink_list_lock);
+
+                       if (i915_gpu_is_active(dev)) {
+                               i915_gpu_idle(dev);
+                               active++;
+                       }
+
+                       spin_lock(&shrink_list_lock);
+                       mutex_unlock(&dev->struct_mutex);
+               }
+
+               if (active)
+                       goto rescan;
+       }
+
        spin_unlock(&shrink_list_lock);
 
        if (would_deadlock)
index 35507cf..80f380b 100644 (file)
@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
        struct drm_i915_gem_object *obj_priv;
 
        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-               obj = obj_priv->obj;
+               obj = &obj_priv->base;
                if (obj_priv->pin_count || obj_priv->active ||
                    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
                                           I915_GEM_DOMAIN_GTT)))
index 4bdccef..4b7c49d 100644 (file)
@@ -283,6 +283,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
+       if (obj_priv->pin_count) {
+               drm_gem_object_unreference_unlocked(obj);
+               return -EBUSY;
+       }
+
        if (args->tiling_mode == I915_TILING_NONE) {
                args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
                args->stride = 0;
index df6a9cd..8c3f080 100644 (file)
@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev)
 
        if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, DE_GSE);
-       else
+       else {
                i915_enable_pipestat(dev_priv, 1,
                                     I915_LEGACY_BLC_EVENT_ENABLE);
+               if (IS_I965G(dev))
+                       i915_enable_pipestat(dev_priv, 0,
+                                            I915_LEGACY_BLC_EVENT_ENABLE);
+       }
 }
 
 /**
@@ -256,18 +260,18 @@ static void i915_hotplug_work_func(struct work_struct *work)
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
 
-       if (mode_config->num_connector) {
-               list_for_each_entry(connector, &mode_config->connector_list, head) {
-                       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       if (mode_config->num_encoder) {
+               list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+                       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        
                        if (intel_encoder->hot_plug)
                                (*intel_encoder->hot_plug) (intel_encoder);
                }
        }
        /* Just fire off a uevent and let userspace tell us what to do */
-       drm_sysfs_hotplug_event(dev);
+       drm_helper_hpd_irq_event(dev);
 }
 
 static void i915_handle_rps_change(struct drm_device *dev)
@@ -612,7 +616,7 @@ static void i915_capture_error_state(struct drm_device *dev)
        batchbuffer[1] = NULL;
        count = 0;
        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-               struct drm_gem_object *obj = obj_priv->obj;
+               struct drm_gem_object *obj = &obj_priv->base;
 
                if (batchbuffer[0] == NULL &&
                    bbaddr >= obj_priv->gtt_offset &&
@@ -648,7 +652,7 @@ static void i915_capture_error_state(struct drm_device *dev)
        if (error->active_bo) {
                int i = 0;
                list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-                       struct drm_gem_object *obj = obj_priv->obj;
+                       struct drm_gem_object *obj = &obj_priv->base;
 
                        error->active_bo[i].size = obj->size;
                        error->active_bo[i].name = obj->name;
@@ -950,7 +954,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                        intel_finish_page_flip(dev, 1);
                }
 
-               if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+               if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+                   (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
                    (iir & I915_ASLE_INTERRUPT))
                        opregion_asle_intr(dev);
 
index 4cbc521..f3e39cc 100644 (file)
 #define   DP_LINK_TRAIN_MASK           (3 << 28)
 #define   DP_LINK_TRAIN_SHIFT          28
 
+/* CPT Link training mode */
+#define   DP_LINK_TRAIN_PAT_1_CPT      (0 << 8)
+#define   DP_LINK_TRAIN_PAT_2_CPT      (1 << 8)
+#define   DP_LINK_TRAIN_PAT_IDLE_CPT   (2 << 8)
+#define   DP_LINK_TRAIN_OFF_CPT                (3 << 8)
+#define   DP_LINK_TRAIN_MASK_CPT       (7 << 8)
+#define   DP_LINK_TRAIN_SHIFT_CPT      8
+
 /* Signal voltages. These are mostly controlled by the other end */
 #define   DP_VOLTAGE_0_4               (0 << 25)
 #define   DP_VOLTAGE_0_6               (1 << 25)
 /* Display & cursor control */
 
 /* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER     (1 << 4)
+#define PIPE_ENABLE_DITHER             (1 << 4)
+#define PIPE_DITHER_TYPE_MASK          (3 << 2)
+#define PIPE_DITHER_TYPE_SPATIAL       (0 << 2)
+#define PIPE_DITHER_TYPE_ST01          (1 << 2)
 /* Pipe A */
 #define PIPEADSL               0x70000
 #define PIPEACONF              0x70008
 
 #define DSPFW1                 0x70034
 #define   DSPFW_SR_SHIFT       23
+#define   DSPFW_SR_MASK        (0x1ff<<23)
 #define   DSPFW_CURSORB_SHIFT  16
+#define   DSPFW_CURSORB_MASK   (0x3f<<16)
 #define   DSPFW_PLANEB_SHIFT   8
+#define   DSPFW_PLANEB_MASK    (0x7f<<8)
+#define   DSPFW_PLANEA_MASK    (0x7f)
 #define DSPFW2                 0x70038
 #define   DSPFW_CURSORA_MASK   0x00003f00
 #define   DSPFW_CURSORA_SHIFT  8
+#define   DSPFW_PLANEC_MASK    (0x7f)
 #define DSPFW3                 0x7003c
 #define   DSPFW_HPLL_SR_EN     (1<<31)
 #define   DSPFW_CURSOR_SR_SHIFT        24
 #define   PINEVIEW_SELF_REFRESH_EN     (1<<30)
+#define   DSPFW_CURSOR_SR_MASK         (0x3f<<24)
+#define   DSPFW_HPLL_CURSOR_SHIFT      16
+#define   DSPFW_HPLL_CURSOR_MASK       (0x3f<<16)
+#define   DSPFW_HPLL_SR_MASK           (0x1ff)
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE     64
 #define PINEVIEW_CURSOR_DFT_WM 0
 #define PINEVIEW_CURSOR_GUARD_WM       5
 
+
+/* Watermark registers on Ironlake */
+#define WM0_PIPEA_ILK          0x45100
+#define  WM0_PIPE_PLANE_MASK   (0x7f<<16)
+#define  WM0_PIPE_PLANE_SHIFT  16
+#define  WM0_PIPE_SPRITE_MASK  (0x3f<<8)
+#define  WM0_PIPE_SPRITE_SHIFT 8
+#define  WM0_PIPE_CURSOR_MASK  (0x1f)
+
+#define WM0_PIPEB_ILK          0x45104
+#define WM1_LP_ILK             0x45108
+#define  WM1_LP_SR_EN          (1<<31)
+#define  WM1_LP_LATENCY_SHIFT  24
+#define  WM1_LP_LATENCY_MASK   (0x7f<<24)
+#define  WM1_LP_SR_MASK                (0x1ff<<8)
+#define  WM1_LP_SR_SHIFT       8
+#define  WM1_LP_CURSOR_MASK    (0x3f)
+
+/* Memory latency timer register */
+#define MLTR_ILK               0x11222
+/* Memory self-refresh latency is expressed in units of 0.5 us */
+#define  ILK_SRLT_MASK         0x3f
+
+/* FIFO sizes on Ironlake */
+#define ILK_DISPLAY_FIFO       128
+#define ILK_DISPLAY_MAXWM      64
+#define ILK_DISPLAY_DFTWM      8
+
+#define ILK_DISPLAY_SR_FIFO    512
+#define ILK_DISPLAY_MAX_SRWM   0x1ff
+#define ILK_DISPLAY_DFT_SRWM   0x3f
+#define ILK_CURSOR_SR_FIFO     64
+#define ILK_CURSOR_MAX_SRWM    0x3f
+#define ILK_CURSOR_DFT_SRWM    8
+
+#define ILK_FIFO_LINE_SIZE     64
+
 /*
  * The two pipe frame counter registers are not synchronized, so
  * reading a stable value is somewhat tricky. The following code
 #define GTIIR   0x44018
 #define GTIER   0x4401c
 
+#define ILK_DISPLAY_CHICKEN2   0x42004
+#define  ILK_DPARB_GATE        (1<<22)
+#define  ILK_VSDPFD_FULL       (1<<21)
+#define ILK_DSPCLK_GATE                0x42020
+#define  ILK_DPARB_CLK_GATE    (1<<5)
+
 #define DISP_ARB_CTL   0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
+#define  DISP_FBC_WM_DIS               (1<<15)
 
 /* PCH */
 
 #define SDE_PORTB_HOTPLUG       (1 << 8)
 #define SDE_SDVOB_HOTPLUG       (1 << 6)
 #define SDE_HOTPLUG_MASK       (0xf << 8)
+/* CPT */
+#define SDE_CRT_HOTPLUG_CPT    (1 << 19)
+#define SDE_PORTD_HOTPLUG_CPT  (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT  (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT  (1 << 21)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
 #define PCH_SSC4_PARMS          0xc6210
 #define PCH_SSC4_AUX_PARMS      0xc6214
 
+#define PCH_DPLL_SEL           0xc7000
+#define  TRANSA_DPLL_ENABLE    (1<<3)
+#define         TRANSA_DPLLB_SEL       (1<<0)
+#define         TRANSA_DPLLA_SEL       0
+#define  TRANSB_DPLL_ENABLE    (1<<7)
+#define         TRANSB_DPLLB_SEL       (1<<4)
+#define         TRANSB_DPLLA_SEL       (0)
+#define  TRANSC_DPLL_ENABLE    (1<<11)
+#define         TRANSC_DPLLB_SEL       (1<<8)
+#define         TRANSC_DPLLA_SEL       (0)
+
 /* transcoder */
 
 #define TRANS_HTOTAL_A          0xe0000
 #define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
 #define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X   (2<<22)
 #define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X   (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+   SNB has different settings. */
+/* SNB A-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A                (0x38<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A                (0x02<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A                (0x0<<22)
+/* SNB B-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B                (0x0<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B                (0x3a<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B      (0x39<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B                (0x38<<22)
+#define  FDI_LINK_TRAIN_VOL_EMP_MASK           (0x3f<<22)
 #define  FDI_DP_PORT_WIDTH_X1           (0<<19)
 #define  FDI_DP_PORT_WIDTH_X2           (1<<19)
 #define  FDI_DP_PORT_WIDTH_X3           (2<<19)
 #define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
 #define  FDI_SEL_RAWCLK                 (0<<4)
 #define  FDI_SEL_PCDCLK                 (1<<4)
+/* CPT */
+#define  FDI_AUTO_TRAINING                     (1<<10)
+#define  FDI_LINK_TRAIN_PATTERN_1_CPT          (0<<8)
+#define  FDI_LINK_TRAIN_PATTERN_2_CPT          (1<<8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT       (2<<8)
+#define  FDI_LINK_TRAIN_NORMAL_CPT             (3<<8)
+#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT       (3<<8)
 
 #define FDI_RXA_MISC            0xf0010
 #define FDI_RXB_MISC            0xf1010
 #define  HSYNC_ACTIVE_HIGH      (1 << 3)
 #define  PORT_DETECTED          (1 << 2)
 
+/* PCH SDVOB is multiplexed with HDMIB */
+#define PCH_SDVOB      HDMIB
+
 #define HDMIC   0xe1150
 #define HDMID   0xe1160
 
 #define PCH_DPD_AUX_CH_DATA4   0xe4320
 #define PCH_DPD_AUX_CH_DATA5   0xe4324
 
+/* CPT */
+#define  PORT_TRANS_A_SEL_CPT  0
+#define  PORT_TRANS_B_SEL_CPT  (1<<29)
+#define  PORT_TRANS_C_SEL_CPT  (2<<29)
+#define  PORT_TRANS_SEL_MASK   (3<<29)
+
+#define TRANS_DP_CTL_A         0xe0300
+#define TRANS_DP_CTL_B         0xe1300
+#define TRANS_DP_CTL_C         0xe2300
+#define  TRANS_DP_OUTPUT_ENABLE        (1<<31)
+#define  TRANS_DP_PORT_SEL_B   (0<<29)
+#define  TRANS_DP_PORT_SEL_C   (1<<29)
+#define  TRANS_DP_PORT_SEL_D   (2<<29)
+#define  TRANS_DP_PORT_SEL_MASK        (3<<29)
+#define  TRANS_DP_AUDIO_ONLY   (1<<26)
+#define  TRANS_DP_ENH_FRAMING  (1<<18)
+#define  TRANS_DP_8BPC         (0<<9)
+#define  TRANS_DP_10BPC                (1<<9)
+#define  TRANS_DP_6BPC         (2<<9)
+#define  TRANS_DP_12BPC                (3<<9)
+#define  TRANS_DP_VSYNC_ACTIVE_HIGH    (1<<4)
+#define  TRANS_DP_VSYNC_ACTIVE_LOW     0
+#define  TRANS_DP_HSYNC_ACTIVE_HIGH    (1<<3)
+#define  TRANS_DP_HSYNC_ACTIVE_LOW     0
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_A                (0x38<<22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_A                (0x02<<22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01<<22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_A                (0x0<<22)
+/* SNB B-stepping */
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_B                (0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_B                (0x3a<<22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_B      (0x39<<22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_B                (0x38<<22)
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB       (0x3f<<22)
+
 #endif /* _I915_REG_H_ */
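
The new DSPFW_*/WM*_ILK mask-and-shift pairs added above exist so that callers can update a single watermark field with a read-modify-write instead of clobbering neighbouring fields. A minimal sketch of that pattern, using the DSPFW1 self-refresh field; the register value below is a stand-in, not a real MMIO read.

#include <stdint.h>
#include <stdio.h>

#define DSPFW_SR_SHIFT  23
#define DSPFW_SR_MASK   (0x1ffu << 23)

/* Read-modify-write one field of a packed register image. */
static uint32_t set_sr_wm(uint32_t reg, uint32_t wm)
{
        reg &= ~DSPFW_SR_MASK;                  /* clear the old field */
        reg |= (wm << DSPFW_SR_SHIFT) & DSPFW_SR_MASK;
        return reg;
}

int main(void)
{
        uint32_t reg = 0x12345678u;             /* stand-in for I915_READ(DSPFW1) */

        reg = set_sr_wm(reg, 0x40);
        printf("DSPFW1 image: 0x%08x, SR field: 0x%x\n",
               reg, (reg & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT);
        return 0;
}
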
index ac0d1a7..60a5800 100644 (file)
@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev)
        }
        /* FIXME: save TV & SDVO state */
 
-       /* FBC state */
-       if (IS_GM45(dev)) {
-               dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
-       } else {
-               dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-               dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-               dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-               dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+       /* Only save FBC state on platforms that support FBC */
+       if (I915_HAS_FBC(dev)) {
+               if (IS_GM45(dev)) {
+                       dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+               } else {
+                       dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+                       dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+                       dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+                       dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+               }
        }
 
        /* VGA state */
@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev)
        }
        /* FIXME: restore TV & SDVO state */
 
-       /* FBC info */
-       if (IS_GM45(dev)) {
-               g4x_disable_fbc(dev);
-               I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
-       } else {
-               i8xx_disable_fbc(dev);
-               I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
-               I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
-               I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
-               I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+       /* Only restore FBC state on platforms that support FBC */
+       if (I915_HAS_FBC(dev)) {
+               if (IS_GM45(dev)) {
+                       g4x_disable_fbc(dev);
+                       I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+               } else {
+                       i8xx_disable_fbc(dev);
+                       I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+                       I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+                       I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+                       I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+               }
        }
-
        /* VGA state */
        if (IS_IRONLAKE(dev))
                I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
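
Both the save and restore paths are now gated on I915_HAS_FBC(), so FBC registers are never touched on hardware that lacks the feature, with the GM45/legacy split handled inside the guard. A small sketch of that guard-then-dispatch shape, with hypothetical types:

#include <stdbool.h>
#include <stdio.h>

struct device { bool has_fbc; bool is_gm45; };

/* Only touch feature-specific state when the feature exists at all;
 * the per-variant split comes second. */
static void save_fbc(const struct device *dev)
{
        if (!dev->has_fbc)
                return;                 /* nothing to save on this hardware */

        if (dev->is_gm45)
                puts("saving DPFC (GM45-style) FBC registers");
        else
                puts("saving legacy FBC registers");
}

int main(void)
{
        struct device no_fbc = { false, false };
        struct device gm45   = { true, true };

        save_fbc(&no_fbc);              /* prints nothing */
        save_fbc(&gm45);
        return 0;
}
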
index 01840d9..9e4c45f 100644 (file)
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence,
                      __entry->obj, __entry->fence, __entry->tiling_mode)
 );
 
-TRACE_EVENT(i915_gem_object_unbind,
+DECLARE_EVENT_CLASS(i915_gem_object,
 
            TP_PROTO(struct drm_gem_object *obj),
 
@@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind,
            TP_printk("obj=%p", __entry->obj)
 );
 
-TRACE_EVENT(i915_gem_object_destroy,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
 
            TP_PROTO(struct drm_gem_object *obj),
 
-           TP_ARGS(obj),
+           TP_ARGS(obj)
+);
 
-           TP_STRUCT__entry(
-                            __field(struct drm_gem_object *, obj)
-                            ),
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 
-           TP_fast_assign(
-                          __entry->obj = obj;
-                          ),
+           TP_PROTO(struct drm_gem_object *obj),
 
-           TP_printk("obj=%p", __entry->obj)
+           TP_ARGS(obj)
 );
 
 /* batch tracing */
@@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush,
                      __entry->flush_domains, __entry->invalidate_domains)
 );
 
-
-TRACE_EVENT(i915_gem_request_complete,
+DECLARE_EVENT_CLASS(i915_gem_request,
 
            TP_PROTO(struct drm_device *dev, u32 seqno),
 
@@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete,
            TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
 );
 
-TRACE_EVENT(i915_gem_request_retire,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
 
            TP_PROTO(struct drm_device *dev, u32 seqno),
 
-           TP_ARGS(dev, seqno),
-
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(u32, seqno)
-                            ),
-
-           TP_fast_assign(
-                          __entry->dev = dev->primary->index;
-                          __entry->seqno = seqno;
-                          ),
-
-           TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+           TP_ARGS(dev, seqno)
 );
 
-TRACE_EVENT(i915_gem_request_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
 
            TP_PROTO(struct drm_device *dev, u32 seqno),
 
-           TP_ARGS(dev, seqno),
-
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(u32, seqno)
-                            ),
-
-           TP_fast_assign(
-                          __entry->dev = dev->primary->index;
-                          __entry->seqno = seqno;
-                          ),
-
-           TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+           TP_ARGS(dev, seqno)
 );
 
-TRACE_EVENT(i915_gem_request_wait_end,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
 
            TP_PROTO(struct drm_device *dev, u32 seqno),
 
-           TP_ARGS(dev, seqno),
+           TP_ARGS(dev, seqno)
+);
 
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(u32, seqno)
-                            ),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
 
-           TP_fast_assign(
-                          __entry->dev = dev->primary->index;
-                          __entry->seqno = seqno;
-                          ),
+           TP_PROTO(struct drm_device *dev, u32 seqno),
 
-           TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+           TP_ARGS(dev, seqno)
 );
 
-TRACE_EVENT(i915_ring_wait_begin,
+DECLARE_EVENT_CLASS(i915_ring,
 
            TP_PROTO(struct drm_device *dev),
 
@@ -291,26 +258,23 @@ TRACE_EVENT(i915_ring_wait_begin,
            TP_printk("dev=%u", __entry->dev)
 );
 
-TRACE_EVENT(i915_ring_wait_end,
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
 
            TP_PROTO(struct drm_device *dev),
 
-           TP_ARGS(dev),
+           TP_ARGS(dev)
+);
 
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            ),
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
 
-           TP_fast_assign(
-                          __entry->dev = dev->primary->index;
-                          ),
+           TP_PROTO(struct drm_device *dev),
 
-           TP_printk("dev=%u", __entry->dev)
+           TP_ARGS(dev)
 );
 
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_PATH .
 #include <trace/define_trace.h>
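
This hunk collapses several identical TRACE_EVENT definitions into one DECLARE_EVENT_CLASS plus per-name DEFINE_EVENT instances, so the TP_STRUCT__entry/TP_fast_assign/TP_printk bodies are written once per class. A rough user-space analogue of the dedup idea follows; it is an illustration only, not the kernel tracepoint API.

#include <stdio.h>

/* The "class" defines the payload and printing once; each "event"
 * only supplies a name. */
#define DECLARE_EVENT_CLASS_SKETCH(cls)                                 \
        static void cls##_emit(const char *name, unsigned dev,         \
                               unsigned seqno)                          \
        {                                                               \
                printf("%s: dev=%u, seqno=%u\n", name, dev, seqno);    \
        }

#define DEFINE_EVENT_SKETCH(cls, name)                                  \
        static void trace_##name(unsigned dev, unsigned seqno)         \
        {                                                               \
                cls##_emit(#name, dev, seqno);                          \
        }

DECLARE_EVENT_CLASS_SKETCH(i915_gem_request)
DEFINE_EVENT_SKETCH(i915_gem_request, i915_gem_request_retire)
DEFINE_EVENT_SKETCH(i915_gem_request, i915_gem_request_complete)

int main(void)
{
        trace_i915_gem_request_retire(0, 42);
        trace_i915_gem_request_complete(0, 43);
        return 0;
}
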
index f9ba452..4c748d8 100644 (file)
@@ -366,6 +366,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                        p_mapping->dvo_port = p_child->dvo_port;
                        p_mapping->slave_addr = p_child->slave_addr;
                        p_mapping->dvo_wiring = p_child->dvo_wiring;
+                       p_mapping->ddc_pin = p_child->ddc_pin;
                        p_mapping->initialized = 1;
                } else {
                        DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
index 759c2ef..e16ac5a 100644 (file)
@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
                adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
        if (intel_crtc->pipe == 0) {
-               adpa |= ADPA_PIPE_A_SELECT;
+               if (HAS_PCH_CPT(dev))
+                       adpa |= PORT_TRANS_A_SEL_CPT;
+               else
+                       adpa |= ADPA_PIPE_A_SELECT;
                if (!HAS_PCH_SPLIT(dev))
                        I915_WRITE(BCLRPAT_A, 0);
        } else {
-               adpa |= ADPA_PIPE_B_SELECT;
+               if (HAS_PCH_CPT(dev))
+                       adpa |= PORT_TRANS_B_SEL_CPT;
+               else
+                       adpa |= ADPA_PIPE_B_SELECT;
                if (!HAS_PCH_SPLIT(dev))
                        I915_WRITE(BCLRPAT_B, 0);
        }
@@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 adpa;
+       u32 adpa, temp;
        bool ret;
 
-       adpa = I915_READ(PCH_ADPA);
+       temp = adpa = I915_READ(PCH_ADPA);
 
-       adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-       /* disable HPD first */
-       I915_WRITE(PCH_ADPA, adpa);
-       (void)I915_READ(PCH_ADPA);
+       if (HAS_PCH_CPT(dev)) {
+               /* Disable DAC before force detect */
+               I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
+               (void)I915_READ(PCH_ADPA);
+       } else {
+               adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+               /* disable HPD first */
+               I915_WRITE(PCH_ADPA, adpa);
+               (void)I915_READ(PCH_ADPA);
+       }
 
        adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
                        ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
        while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
                ;
 
+       if (HAS_PCH_CPT(dev)) {
+               I915_WRITE(PCH_ADPA, temp);
+               (void)I915_READ(PCH_ADPA);
+       }
+
        /* Check the status to see if both blue and green are on now */
        adpa = I915_READ(PCH_ADPA);
        adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@@ -245,9 +262,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
        return false;
 }
 
-static bool intel_crt_detect_ddc(struct drm_connector *connector)
+static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 
        /* CRT should always be at 0, but check anyway */
        if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
@@ -387,8 +404,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
 static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct drm_encoder *encoder = &intel_encoder->enc;
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct drm_crtc *crtc;
        int dpms_mode;
        enum drm_connector_status status;
@@ -400,18 +417,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
                        return connector_status_disconnected;
        }
 
-       if (intel_crt_detect_ddc(connector))
+       if (intel_crt_detect_ddc(encoder))
                return connector_status_connected;
 
        /* for pre-945g platforms use load detect */
        if (encoder->crtc && encoder->crtc->enabled) {
                status = intel_crt_load_detect(encoder->crtc, intel_encoder);
        } else {
-               crtc = intel_get_load_detect_pipe(intel_encoder,
+               crtc = intel_get_load_detect_pipe(intel_encoder, connector,
                                                  NULL, &dpms_mode);
                if (crtc) {
                        status = intel_crt_load_detect(crtc, intel_encoder);
-                       intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+                       intel_release_load_detect_pipe(intel_encoder,
+                                                      connector, dpms_mode);
                } else
                        status = connector_status_unknown;
        }
@@ -421,9 +439,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
 
 static void intel_crt_destroy(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
-       intel_i2c_destroy(intel_encoder->ddc_bus);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
@@ -432,29 +447,27 @@ static void intel_crt_destroy(struct drm_connector *connector)
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
        int ret;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct i2c_adapter *ddcbus;
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct i2c_adapter *ddc_bus;
        struct drm_device *dev = connector->dev;
 
 
-       ret = intel_ddc_get_modes(intel_encoder);
+       ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
        if (ret || !IS_G4X(dev))
                goto end;
 
-       ddcbus = intel_encoder->ddc_bus;
        /* Try to probe digital port for output in DVI-I -> VGA mode. */
-       intel_encoder->ddc_bus =
-               intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+       ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
 
-       if (!intel_encoder->ddc_bus) {
-               intel_encoder->ddc_bus = ddcbus;
+       if (!ddc_bus) {
                dev_printk(KERN_ERR, &connector->dev->pdev->dev,
                           "DDC bus registration failed for CRTDDC_D.\n");
                goto end;
        }
        /* Try to get modes by GPIOD port */
-       ret = intel_ddc_get_modes(intel_encoder);
-       intel_i2c_destroy(ddcbus);
+       ret = intel_ddc_get_modes(connector, ddc_bus);
+       intel_i2c_destroy(ddc_bus);
 
 end:
        return ret;
@@ -491,12 +504,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
        .mode_valid = intel_crt_mode_valid,
        .get_modes = intel_crt_get_modes,
-       .best_encoder = intel_best_encoder,
+       .best_encoder = intel_attached_encoder,
 };
 
 static void intel_crt_enc_destroy(struct drm_encoder *encoder)
 {
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+       intel_i2c_destroy(intel_encoder->ddc_bus);
        drm_encoder_cleanup(encoder);
+       kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -507,6 +524,7 @@ void intel_crt_init(struct drm_device *dev)
 {
        struct drm_connector *connector;
        struct intel_encoder *intel_encoder;
+       struct intel_connector *intel_connector;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 i2c_reg;
 
@@ -514,14 +532,20 @@ void intel_crt_init(struct drm_device *dev)
        if (!intel_encoder)
                return;
 
-       connector = &intel_encoder->base;
-       drm_connector_init(dev, &intel_encoder->base,
+       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       if (!intel_connector) {
+               kfree(intel_encoder);
+               return;
+       }
+
+       connector = &intel_connector->base;
+       drm_connector_init(dev, &intel_connector->base,
                           &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
        drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
                         DRM_MODE_ENCODER_DAC);
 
-       drm_mode_connector_attach_encoder(&intel_encoder->base,
+       drm_mode_connector_attach_encoder(&intel_connector->base,
                                          &intel_encoder->enc);
 
        /* Set up the DDC bus. */
@@ -553,5 +577,10 @@ void intel_crt_init(struct drm_device *dev)
 
        drm_sysfs_connector_add(connector);
 
+       if (I915_HAS_HOTPLUG(dev))
+               connector->polled = DRM_CONNECTOR_POLL_HPD;
+       else
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
        dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
 }
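
intel_crt_init() now allocates the encoder and the connector separately, freeing the encoder if the connector allocation fails, and DDC bus teardown moves to the encoder destroy path. A minimal sketch of that paired-allocation unwind, with hypothetical types:

#include <stdio.h>
#include <stdlib.h>

struct encoder   { int id; };
struct connector { int id; };

/* Allocate two paired objects; on a partial failure, free what
 * already succeeded so nothing leaks. */
static int init_output(struct encoder **enc, struct connector **con)
{
        *enc = calloc(1, sizeof(**enc));
        if (!*enc)
                return -1;

        *con = calloc(1, sizeof(**con));
        if (!*con) {
                free(*enc);             /* unwind the first allocation */
                *enc = NULL;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct encoder *e;
        struct connector *c;

        if (init_output(&e, &c) == 0) {
                puts("encoder + connector initialized");
                free(c);
                free(e);
        }
        return 0;
}
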
index f27e370..f469a84 100644 (file)
@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
 {
     struct drm_device *dev = crtc->dev;
     struct drm_mode_config *mode_config = &dev->mode_config;
-    struct drm_connector *l_entry;
+    struct drm_encoder *l_entry;
 
-    list_for_each_entry(l_entry, &mode_config->connector_list, head) {
-           if (l_entry->encoder &&
-               l_entry->encoder->crtc == crtc) {
-                   struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+    list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+           if (l_entry && l_entry->crtc == crtc) {
+                   struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
                    if (intel_encoder->type == type)
                            return true;
            }
@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
     return false;
 }
 
-static struct drm_connector *
-intel_pipe_get_connector (struct drm_crtc *crtc)
-{
-    struct drm_device *dev = crtc->dev;
-    struct drm_mode_config *mode_config = &dev->mode_config;
-    struct drm_connector *l_entry, *ret = NULL;
-
-    list_for_each_entry(l_entry, &mode_config->connector_list, head) {
-           if (l_entry->encoder &&
-               l_entry->encoder->crtc == crtc) {
-                   ret = l_entry;
-                   break;
-           }
-    }
-    return ret;
-}
-
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
 /**
  * Returns whether the given set of divisors are valid for a given refclk with
@@ -1066,9 +1048,8 @@ void i8xx_disable_fbc(struct drm_device *dev)
        DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+static bool i8xx_fbc_enabled(struct drm_device *dev)
 {
-       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@ -1125,14 +1106,43 @@ void g4x_disable_fbc(struct drm_device *dev)
        DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+static bool g4x_fbc_enabled(struct drm_device *dev)
 {
-       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!dev_priv->display.fbc_enabled)
+               return false;
+
+       return dev_priv->display.fbc_enabled(dev);
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+
+       if (!dev_priv->display.enable_fbc)
+               return;
+
+       dev_priv->display.enable_fbc(crtc, interval);
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!dev_priv->display.disable_fbc)
+               return;
+
+       dev_priv->display.disable_fbc(dev);
+}
+
 /**
  * intel_update_fbc - enable/disable FBC as needed
  * @crtc: CRTC to point the compressor at
@@ -1167,9 +1177,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
        if (!i915_powersave)
                return;
 
-       if (!dev_priv->display.fbc_enabled ||
-           !dev_priv->display.enable_fbc ||
-           !dev_priv->display.disable_fbc)
+       if (!I915_HAS_FBC(dev))
                return;
 
        if (!crtc->fb)
@@ -1216,28 +1224,25 @@ static void intel_update_fbc(struct drm_crtc *crtc,
                goto out_disable;
        }
 
-       if (dev_priv->display.fbc_enabled(crtc)) {
+       if (intel_fbc_enabled(dev)) {
                /* We can re-enable it in this case, but need to update pitch */
-               if (fb->pitch > dev_priv->cfb_pitch)
-                       dev_priv->display.disable_fbc(dev);
-               if (obj_priv->fence_reg != dev_priv->cfb_fence)
-                       dev_priv->display.disable_fbc(dev);
-               if (plane != dev_priv->cfb_plane)
-                       dev_priv->display.disable_fbc(dev);
+               if ((fb->pitch > dev_priv->cfb_pitch) ||
+                   (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+                   (plane != dev_priv->cfb_plane))
+                       intel_disable_fbc(dev);
        }
 
-       if (!dev_priv->display.fbc_enabled(crtc)) {
-               /* Now try to turn it back on if possible */
-               dev_priv->display.enable_fbc(crtc, 500);
-       }
+       /* Now try to turn it back on if possible */
+       if (!intel_fbc_enabled(dev))
+               intel_enable_fbc(crtc, 500);
 
        return;
 
 out_disable:
        DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
        /* Multiple disables should be harmless */
-       if (dev_priv->display.fbc_enabled(crtc))
-               dev_priv->display.disable_fbc(dev);
+       if (intel_fbc_enabled(dev))
+               intel_disable_fbc(dev);
 }
 
 static int
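
The FBC hunks route every call through intel_fbc_enabled()/intel_enable_fbc()/intel_disable_fbc(), which check the per-platform function pointer before dispatching, so I915_HAS_FBC() can simply test whether the hooks were filled in. A minimal sketch of such NULL-safe vtable wrappers, with hypothetical ops:

#include <stdbool.h>
#include <stdio.h>

struct display_ops {
        bool (*fbc_enabled)(void);
        void (*enable_fbc)(unsigned long interval);
        void (*disable_fbc)(void);
};

/* NULL-safe wrappers: platforms that lack FBC simply leave the
 * function pointers unset, and every caller goes through here. */
static bool fbc_enabled(const struct display_ops *ops)
{
        return ops->fbc_enabled ? ops->fbc_enabled() : false;
}

static void enable_fbc(const struct display_ops *ops, unsigned long interval)
{
        if (ops->enable_fbc)
                ops->enable_fbc(interval);
}

static bool g4x_enabled(void) { return true; }

int main(void)
{
        struct display_ops none = { 0 };          /* no FBC on this platform */
        struct display_ops g4x  = { g4x_enabled, NULL, NULL };

        printf("none: %d, g4x: %d\n", fbc_enabled(&none), fbc_enabled(&g4x));
        enable_fbc(&none, 500);                   /* safe no-op */
        return 0;
}
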
@@ -1510,6 +1515,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
        udelay(500);
 }
 
+/* The FDI link training functions for ILK/Ibex Peak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+       int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+       int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+       int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+       u32 temp, tries = 0;
+
+       /* enable CPU FDI TX and PCH FDI RX */
+       temp = I915_READ(fdi_tx_reg);
+       temp |= FDI_TX_ENABLE;
+       temp &= ~(7 << 19);
+       temp |= (intel_crtc->fdi_lanes - 1) << 19;
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       I915_WRITE(fdi_tx_reg, temp);
+       I915_READ(fdi_tx_reg);
+
+       temp = I915_READ(fdi_rx_reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+       I915_READ(fdi_rx_reg);
+       udelay(150);
+
+       /* Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock
+          bits so the training result can be read */
+       temp = I915_READ(fdi_rx_imr_reg);
+       temp &= ~FDI_RX_SYMBOL_LOCK;
+       temp &= ~FDI_RX_BIT_LOCK;
+       I915_WRITE(fdi_rx_imr_reg, temp);
+       I915_READ(fdi_rx_imr_reg);
+       udelay(150);
+
+       for (;;) {
+               temp = I915_READ(fdi_rx_iir_reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+               if ((temp & FDI_RX_BIT_LOCK)) {
+                       DRM_DEBUG_KMS("FDI train 1 done.\n");
+                       I915_WRITE(fdi_rx_iir_reg,
+                                  temp | FDI_RX_BIT_LOCK);
+                       break;
+               }
+
+               tries++;
+
+               if (tries > 5) {
+                       DRM_DEBUG_KMS("FDI train 1 fail!\n");
+                       break;
+               }
+       }
+
+       /* Train 2 */
+       temp = I915_READ(fdi_tx_reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_2;
+       I915_WRITE(fdi_tx_reg, temp);
+
+       temp = I915_READ(fdi_rx_reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_2;
+       I915_WRITE(fdi_rx_reg, temp);
+       udelay(150);
+
+       tries = 0;
+
+       for (;;) {
+               temp = I915_READ(fdi_rx_iir_reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+               if (temp & FDI_RX_SYMBOL_LOCK) {
+                       I915_WRITE(fdi_rx_iir_reg,
+                                  temp | FDI_RX_SYMBOL_LOCK);
+                       DRM_DEBUG_KMS("FDI train 2 done.\n");
+                       break;
+               }
+
+               tries++;
+
+               if (tries > 5) {
+                       DRM_DEBUG_KMS("FDI train 2 fail!\n");
+                       break;
+               }
+       }
+
+       DRM_DEBUG_KMS("FDI train done\n");
+}
+
+static int snb_b_fdi_train_param[] = {
+       FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+       FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+       FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+       FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougar Point. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+       int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+       int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+       int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+       u32 temp, i;
+
+       /* enable CPU FDI TX and PCH FDI RX */
+       temp = I915_READ(fdi_tx_reg);
+       temp |= FDI_TX_ENABLE;
+       temp &= ~(7 << 19);
+       temp |= (intel_crtc->fdi_lanes - 1) << 19;
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+       /* SNB-B */
+       temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+       I915_WRITE(fdi_tx_reg, temp);
+       I915_READ(fdi_tx_reg);
+
+       temp = I915_READ(fdi_rx_reg);
+       if (HAS_PCH_CPT(dev)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_1;
+       }
+       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+       I915_READ(fdi_rx_reg);
+       udelay(150);
+
+       /* Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock
+          bits so the training result can be read */
+       temp = I915_READ(fdi_rx_imr_reg);
+       temp &= ~FDI_RX_SYMBOL_LOCK;
+       temp &= ~FDI_RX_BIT_LOCK;
+       I915_WRITE(fdi_rx_imr_reg, temp);
+       I915_READ(fdi_rx_imr_reg);
+       udelay(150);
+
+       for (i = 0; i < 4; i++) {
+               temp = I915_READ(fdi_tx_reg);
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               temp |= snb_b_fdi_train_param[i];
+               I915_WRITE(fdi_tx_reg, temp);
+               udelay(500);
+
+               temp = I915_READ(fdi_rx_iir_reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+               if (temp & FDI_RX_BIT_LOCK) {
+                       I915_WRITE(fdi_rx_iir_reg,
+                                  temp | FDI_RX_BIT_LOCK);
+                       DRM_DEBUG_KMS("FDI train 1 done.\n");
+                       break;
+               }
+       }
+       if (i == 4)
+               DRM_DEBUG_KMS("FDI train 1 fail!\n");
+
+       /* Train 2 */
+       temp = I915_READ(fdi_tx_reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_2;
+       if (IS_GEN6(dev)) {
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               /* SNB-B */
+               temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+       }
+       I915_WRITE(fdi_tx_reg, temp);
+
+       temp = I915_READ(fdi_rx_reg);
+       if (HAS_PCH_CPT(dev)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_2;
+       }
+       I915_WRITE(fdi_rx_reg, temp);
+       udelay(150);
+
+       for (i = 0; i < 4; i++) {
+               temp = I915_READ(fdi_tx_reg);
+               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+               temp |= snb_b_fdi_train_param[i];
+               I915_WRITE(fdi_tx_reg, temp);
+               udelay(500);
+
+               temp = I915_READ(fdi_rx_iir_reg);
+               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+               if (temp & FDI_RX_SYMBOL_LOCK) {
+                       I915_WRITE(fdi_rx_iir_reg,
+                                  temp | FDI_RX_SYMBOL_LOCK);
+                       DRM_DEBUG_KMS("FDI train 2 done.\n");
+                       break;
+               }
+       }
+       if (i == 4)
+               DRM_DEBUG_KMS("FDI train 2 fail!\n");
+
+       DRM_DEBUG_KMS("FDI train done.\n");
+}
+
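
Both training routines above poll the FDI RX IIR register for a lock bit with a bounded number of tries, logging failure rather than spinning forever. A self-contained sketch of that bounded poll; read_iir() is a stand-in that fakes the hardware flipping the bit:

#include <stdbool.h>
#include <stdio.h>

#define BIT_LOCK  (1u << 0)

/* Stand-in for an MMIO read; reports lock after a few polls. */
static unsigned read_iir(void)
{
        static int calls;
        return ++calls >= 3 ? BIT_LOCK : 0;
}

/* Poll for a status bit with a bounded number of tries, as the
 * training loops do with FDI_RX_BIT_LOCK/FDI_RX_SYMBOL_LOCK. */
static bool wait_for_lock(int max_tries)
{
        for (int i = 0; i < max_tries; i++) {
                if (read_iir() & BIT_LOCK) {
                        printf("locked after %d polls\n", i + 1);
                        return true;
                }
        }
        puts("train fail: no lock");
        return false;
}

int main(void)
{
        return wait_for_lock(5) ? 0 : 1;
}
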
 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
        struct drm_device *dev = crtc->dev;
@@ -1523,8 +1741,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
        int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
        int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
        int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-       int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
-       int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
        int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
        int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
        int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
@@ -1541,8 +1757,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
        int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
        int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
        int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+       int trans_dpll_sel = (pipe == 0) ? 0 : 1;
        u32 temp;
-       int tries = 5, j, n;
+       int n;
        u32 pipe_bpc;
 
        temp = I915_READ(pipeconf_reg);
@@ -1569,12 +1786,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                        /* enable eDP PLL */
                        ironlake_enable_pll_edp(crtc);
                } else {
-                       /* enable PCH DPLL */
-                       temp = I915_READ(pch_dpll_reg);
-                       if ((temp & DPLL_VCO_ENABLE) == 0) {
-                               I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
-                               I915_READ(pch_dpll_reg);
-                       }
 
                        /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
                        temp = I915_READ(fdi_rx_reg);
@@ -1584,9 +1795,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                         */
                        temp &= ~(0x7 << 16);
                        temp |= (pipe_bpc << 11);
-                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
-                                       FDI_SEL_PCDCLK |
-                                       FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
+                       temp &= ~(7 << 19);
+                       temp |= (intel_crtc->fdi_lanes - 1) << 19;
+                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+                       I915_READ(fdi_rx_reg);
+                       udelay(200);
+
+                       /* Switch from Rawclk to PCDclk */
+                       temp = I915_READ(fdi_rx_reg);
+                       I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
                        I915_READ(fdi_rx_reg);
                        udelay(200);
 
@@ -1629,91 +1846,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                }
 
                if (!HAS_eDP) {
-                       /* enable CPU FDI TX and PCH FDI RX */
-                       temp = I915_READ(fdi_tx_reg);
-                       temp |= FDI_TX_ENABLE;
-                       temp |= FDI_DP_PORT_WIDTH_X4; /* default */
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       temp |= FDI_LINK_TRAIN_PATTERN_1;
-                       I915_WRITE(fdi_tx_reg, temp);
-                       I915_READ(fdi_tx_reg);
-
-                       temp = I915_READ(fdi_rx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       temp |= FDI_LINK_TRAIN_PATTERN_1;
-                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
-                       I915_READ(fdi_rx_reg);
-
-                       udelay(150);
-
-                       /* Train FDI. */
-                       /* umask FDI RX Interrupt symbol_lock and bit_lock bit
-                          for train result */
-                       temp = I915_READ(fdi_rx_imr_reg);
-                       temp &= ~FDI_RX_SYMBOL_LOCK;
-                       temp &= ~FDI_RX_BIT_LOCK;
-                       I915_WRITE(fdi_rx_imr_reg, temp);
-                       I915_READ(fdi_rx_imr_reg);
-                       udelay(150);
+                       /* For PCH output, train the FDI link */
+                       if (IS_GEN6(dev))
+                               gen6_fdi_link_train(crtc);
+                       else
+                               ironlake_fdi_link_train(crtc);
 
-                       temp = I915_READ(fdi_rx_iir_reg);
-                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-                       if ((temp & FDI_RX_BIT_LOCK) == 0) {
-                               for (j = 0; j < tries; j++) {
-                                       temp = I915_READ(fdi_rx_iir_reg);
-                                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
-                                                               temp);
-                                       if (temp & FDI_RX_BIT_LOCK)
-                                               break;
-                                       udelay(200);
-                               }
-                               if (j != tries)
-                                       I915_WRITE(fdi_rx_iir_reg,
-                                                       temp | FDI_RX_BIT_LOCK);
-                               else
-                                       DRM_DEBUG_KMS("train 1 fail\n");
-                       } else {
-                               I915_WRITE(fdi_rx_iir_reg,
-                                               temp | FDI_RX_BIT_LOCK);
-                               DRM_DEBUG_KMS("train 1 ok 2!\n");
+                       /* enable PCH DPLL */
+                       temp = I915_READ(pch_dpll_reg);
+                       if ((temp & DPLL_VCO_ENABLE) == 0) {
+                               I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+                               I915_READ(pch_dpll_reg);
                        }
-                       temp = I915_READ(fdi_tx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       temp |= FDI_LINK_TRAIN_PATTERN_2;
-                       I915_WRITE(fdi_tx_reg, temp);
-
-                       temp = I915_READ(fdi_rx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       temp |= FDI_LINK_TRAIN_PATTERN_2;
-                       I915_WRITE(fdi_rx_reg, temp);
-
-                       udelay(150);
+                       udelay(200);
 
-                       temp = I915_READ(fdi_rx_iir_reg);
-                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-                       if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
-                               for (j = 0; j < tries; j++) {
-                                       temp = I915_READ(fdi_rx_iir_reg);
-                                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
-                                                               temp);
-                                       if (temp & FDI_RX_SYMBOL_LOCK)
-                                               break;
-                                       udelay(200);
-                               }
-                               if (j != tries) {
-                                       I915_WRITE(fdi_rx_iir_reg,
-                                                       temp | FDI_RX_SYMBOL_LOCK);
-                                       DRM_DEBUG_KMS("train 2 ok 1!\n");
-                               } else
-                                       DRM_DEBUG_KMS("train 2 fail\n");
-                       } else {
-                               I915_WRITE(fdi_rx_iir_reg,
-                                               temp | FDI_RX_SYMBOL_LOCK);
-                               DRM_DEBUG_KMS("train 2 ok 2!\n");
+                       if (HAS_PCH_CPT(dev)) {
+                               /* Be sure PCH DPLL SEL is set */
+                               temp = I915_READ(PCH_DPLL_SEL);
+                               if (trans_dpll_sel == 0 &&
+                                               (temp & TRANSA_DPLL_ENABLE) == 0)
+                                       temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+                               else if (trans_dpll_sel == 1 &&
+                                               (temp & TRANSB_DPLL_ENABLE) == 0)
+                                       temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+                               I915_WRITE(PCH_DPLL_SEL, temp);
+                               I915_READ(PCH_DPLL_SEL);
                        }
-                       DRM_DEBUG_KMS("train done\n");
 
                        /* set transcoder timing */
                        I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1724,6 +1882,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
                        I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
 
+                       /* enable normal train */
+                       temp = I915_READ(fdi_tx_reg);
+                       temp &= ~FDI_LINK_TRAIN_NONE;
+                       I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+                                       FDI_TX_ENHANCE_FRAME_ENABLE);
+                       I915_READ(fdi_tx_reg);
+
+                       temp = I915_READ(fdi_rx_reg);
+                       if (HAS_PCH_CPT(dev)) {
+                               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+                               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+                       } else {
+                               temp &= ~FDI_LINK_TRAIN_NONE;
+                               temp |= FDI_LINK_TRAIN_NONE;
+                       }
+                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+                       I915_READ(fdi_rx_reg);
+
+                       /* wait one idle pattern time */
+                       udelay(100);
+
+                       /* For PCH DP, enable TRANS_DP_CTL */
+                       if (HAS_PCH_CPT(dev) &&
+                           intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+                               int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+                               int reg;
+
+                               reg = I915_READ(trans_dp_ctl);
+                               reg &= ~TRANS_DP_PORT_SEL_MASK;
+                               reg = TRANS_DP_OUTPUT_ENABLE |
+                                     TRANS_DP_ENH_FRAMING |
+                                     TRANS_DP_VSYNC_ACTIVE_HIGH |
+                                     TRANS_DP_HSYNC_ACTIVE_HIGH;
+
+                               switch (intel_trans_dp_port_sel(crtc)) {
+                               case PCH_DP_B:
+                                       reg |= TRANS_DP_PORT_SEL_B;
+                                       break;
+                               case PCH_DP_C:
+                                       reg |= TRANS_DP_PORT_SEL_C;
+                                       break;
+                               case PCH_DP_D:
+                                       reg |= TRANS_DP_PORT_SEL_D;
+                                       break;
+                               default:
+                                       DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+                                       reg |= TRANS_DP_PORT_SEL_B;
+                                       break;
+                               }
+
+                               I915_WRITE(trans_dp_ctl, reg);
+                               POSTING_READ(trans_dp_ctl);
+                       }
+
                        /* enable PCH transcoder */
                        temp = I915_READ(transconf_reg);
                        /*
@@ -1738,23 +1950,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                        while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
                                ;
 
-                       /* enable normal */
-
-                       temp = I915_READ(fdi_tx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
-                                       FDI_TX_ENHANCE_FRAME_ENABLE);
-                       I915_READ(fdi_tx_reg);
-
-                       temp = I915_READ(fdi_rx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
-                                       FDI_RX_ENHANCE_FRAME_ENABLE);
-                       I915_READ(fdi_rx_reg);
-
-                       /* wait one idle pattern time */
-                       udelay(100);
-
                }
 
                intel_crtc_load_lut(crtc);
@@ -1805,6 +2000,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_READ(pf_ctl_reg);
                }
                I915_WRITE(pf_win_size, 0);
+               POSTING_READ(pf_win_size);
 
                /* disable CPU FDI tx and PCH FDI rx */
                temp = I915_READ(fdi_tx_reg);
@@ -1825,11 +2022,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
                I915_WRITE(fdi_tx_reg, temp);
+               POSTING_READ(fdi_tx_reg);
 
                temp = I915_READ(fdi_rx_reg);
-               temp &= ~FDI_LINK_TRAIN_NONE;
-               temp |= FDI_LINK_TRAIN_PATTERN_1;
+               if (HAS_PCH_CPT(dev)) {
+                       temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+               } else {
+                       temp &= ~FDI_LINK_TRAIN_NONE;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1;
+               }
                I915_WRITE(fdi_rx_reg, temp);
+               POSTING_READ(fdi_rx_reg);
 
                udelay(100);
 
@@ -1859,6 +2063,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                                }
                        }
                }
+
                temp = I915_READ(transconf_reg);
                /* BPC in transcoder is consistent with that in pipeconf */
                temp &= ~PIPE_BPC_MASK;
@@ -1867,35 +2072,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                I915_READ(transconf_reg);
                udelay(100);
 
+               if (HAS_PCH_CPT(dev)) {
+                       /* disable TRANS_DP_CTL */
+                       int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+                       int reg;
+
+                       reg = I915_READ(trans_dp_ctl);
+                       reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+                       I915_WRITE(trans_dp_ctl, reg);
+                       POSTING_READ(trans_dp_ctl);
+
+                       /* disable DPLL_SEL */
+                       temp = I915_READ(PCH_DPLL_SEL);
+                       if (trans_dpll_sel == 0)
+                               temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+                       else
+                               temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+                       I915_WRITE(PCH_DPLL_SEL, temp);
+                       I915_READ(PCH_DPLL_SEL);
+
+               }
+
                /* disable PCH DPLL */
                temp = I915_READ(pch_dpll_reg);
-               if ((temp & DPLL_VCO_ENABLE) != 0) {
-                       I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
-                       I915_READ(pch_dpll_reg);
-               }
+               I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+               I915_READ(pch_dpll_reg);
 
                if (HAS_eDP) {
                        ironlake_disable_pll_edp(crtc);
                }
 
+               /* Switch from PCDclk to Rawclk */
                temp = I915_READ(fdi_rx_reg);
                temp &= ~FDI_SEL_PCDCLK;
                I915_WRITE(fdi_rx_reg, temp);
                I915_READ(fdi_rx_reg);
 
+               /* Disable CPU FDI TX PLL */
+               temp = I915_READ(fdi_tx_reg);
+               I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+               I915_READ(fdi_tx_reg);
+               udelay(100);
+
                temp = I915_READ(fdi_rx_reg);
                temp &= ~FDI_RX_PLL_ENABLE;
                I915_WRITE(fdi_rx_reg, temp);
                I915_READ(fdi_rx_reg);
 
-               /* Disable CPU FDI TX PLL */
-               temp = I915_READ(fdi_tx_reg);
-               if ((temp & FDI_TX_PLL_ENABLE) != 0) {
-                       I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
-                       I915_READ(fdi_tx_reg);
-                       udelay(100);
-               }
-
                /* Wait for the clocks to turn off. */
                udelay(100);
                break;
@@ -2331,6 +2554,30 @@ static struct intel_watermark_params i830_wm_info = {
        I830_FIFO_LINE_SIZE
 };
 
+static struct intel_watermark_params ironlake_display_wm_info = {
+       ILK_DISPLAY_FIFO,
+       ILK_DISPLAY_MAXWM,
+       ILK_DISPLAY_DFTWM,
+       2,
+       ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_display_srwm_info = {
+       ILK_DISPLAY_SR_FIFO,
+       ILK_DISPLAY_MAX_SRWM,
+       ILK_DISPLAY_DFT_SRWM,
+       2,
+       ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_cursor_srwm_info = {
+       ILK_CURSOR_SR_FIFO,
+       ILK_CURSOR_MAX_SRWM,
+       ILK_CURSOR_DFT_SRWM,
+       2,
+       ILK_FIFO_LINE_SIZE
+};
+
 /**
  * intel_calculate_wm - calculate watermark level
  * @clock_in_khz: pixel clock
@@ -2449,66 +2696,6 @@ static void pineview_disable_cxsr(struct drm_device *dev)
        DRM_INFO("Big FIFO is disabled\n");
 }
 
-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
-                                int pixel_size)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 reg;
-       unsigned long wm;
-       struct cxsr_latency *latency;
-
-       latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
-               dev_priv->mem_freq);
-       if (!latency) {
-               DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-               pineview_disable_cxsr(dev);
-               return;
-       }
-
-       /* Display SR */
-       wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
-                               latency->display_sr);
-       reg = I915_READ(DSPFW1);
-       reg &= 0x7fffff;
-       reg |= wm << 23;
-       I915_WRITE(DSPFW1, reg);
-       DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
-       /* cursor SR */
-       wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
-                               latency->cursor_sr);
-       reg = I915_READ(DSPFW3);
-       reg &= ~(0x3f << 24);
-       reg |= (wm & 0x3f) << 24;
-       I915_WRITE(DSPFW3, reg);
-
-       /* Display HPLL off SR */
-       wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
-               latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
-       reg = I915_READ(DSPFW3);
-       reg &= 0xfffffe00;
-       reg |= wm & 0x1ff;
-       I915_WRITE(DSPFW3, reg);
-
-       /* cursor HPLL off SR */
-       wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
-                               latency->cursor_hpll_disable);
-       reg = I915_READ(DSPFW3);
-       reg &= ~(0x3f << 16);
-       reg |= (wm & 0x3f) << 16;
-       I915_WRITE(DSPFW3, reg);
-       DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
-       /* activate cxsr */
-       reg = I915_READ(DSPFW3);
-       reg |= PINEVIEW_SELF_REFRESH_EN;
-       I915_WRITE(DSPFW3, reg);
-
-       DRM_INFO("Big FIFO is enabled\n");
-
-       return;
-}
-
 /*
  * Latency for FIFO fetches is dependent on several factors:
  *   - memory configuration (speed, channels)
@@ -2593,6 +2780,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
        return size;
 }
 
+static void pineview_update_wm(struct drm_device *dev, int planea_clock,
+                         int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 reg;
+       unsigned long wm;
+       struct cxsr_latency *latency;
+       int sr_clock;
+
+       latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
+                                        dev_priv->mem_freq);
+       if (!latency) {
+               DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+               pineview_disable_cxsr(dev);
+               return;
+       }
+
+       if (!planea_clock || !planeb_clock) {
+               sr_clock = planea_clock ? planea_clock : planeb_clock;
+
+               /* Display SR */
+               wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+                                       pixel_size, latency->display_sr);
+               reg = I915_READ(DSPFW1);
+               reg &= ~DSPFW_SR_MASK;
+               reg |= wm << DSPFW_SR_SHIFT;
+               I915_WRITE(DSPFW1, reg);
+               DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+               /* cursor SR */
+               wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+                                       pixel_size, latency->cursor_sr);
+               reg = I915_READ(DSPFW3);
+               reg &= ~DSPFW_CURSOR_SR_MASK;
+               reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+               I915_WRITE(DSPFW3, reg);
+
+               /* Display HPLL off SR */
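+               /* this field occupies bits 8:0 (the old code masked with
+                * 0x1ff), so the value is or'ed in without a shift */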
+               wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+                                       pixel_size, latency->display_hpll_disable);
+               reg = I915_READ(DSPFW3);
+               reg &= ~DSPFW_HPLL_SR_MASK;
+               reg |= wm & DSPFW_HPLL_SR_MASK;
+               I915_WRITE(DSPFW3, reg);
+
+               /* cursor HPLL off SR */
+               wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+                                       pixel_size, latency->cursor_hpll_disable);
+               reg = I915_READ(DSPFW3);
+               reg &= ~DSPFW_HPLL_CURSOR_MASK;
+               reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+               I915_WRITE(DSPFW3, reg);
+               DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+               /* activate cxsr */
+               reg = I915_READ(DSPFW3);
+               reg |= PINEVIEW_SELF_REFRESH_EN;
+               I915_WRITE(DSPFW3, reg);
+               DRM_DEBUG_KMS("Self-refresh is enabled\n");
+       } else {
+               pineview_disable_cxsr(dev);
+               DRM_DEBUG_KMS("Self-refresh is disabled\n");
+       }
+}
+
 static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
                          int planeb_clock, int sr_hdisplay, int pixel_size)
 {
@@ -2813,6 +3065,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
        I915_WRITE(FW_BLC, fwater_lo);
 }
 
+#define ILK_LP0_PLANE_LATENCY          700
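+/* the LP0 latency above is presumably in nanoseconds (see the math below) */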
+
+static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
+                      int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+       int sr_wm, cursor_wm;
+       unsigned long line_time_us;
+       int sr_clock, entries_required;
+       u32 reg_value;
+
+       /* Calculate and update the watermark for plane A */
+       if (planea_clock) {
+               entries_required = ((planea_clock / 1000) * pixel_size *
+                                    ILK_LP0_PLANE_LATENCY) / 1000;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                  ironlake_display_wm_info.cacheline_size);
+               planea_wm = entries_required +
+                           ironlake_display_wm_info.guard_size;
+
+               if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+                       planea_wm = ironlake_display_wm_info.max_wm;
+
+               cursora_wm = 16;
+               reg_value = I915_READ(WM0_PIPEA_ILK);
+               reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+               reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+                            (cursora_wm & WM0_PIPE_CURSOR_MASK);
+               I915_WRITE(WM0_PIPEA_ILK, reg_value);
+               DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, "
+                               "cursor: %d\n", planea_wm, cursora_wm);
+       }
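+       /*
+        * Worked example with hypothetical numbers, assuming
+        * ILK_FIFO_LINE_SIZE is 64 bytes: planea_clock = 148500 and
+        * pixel_size = 4 give entries = 148 * 4 * 700 / 1000 = 414,
+        * DIV_ROUND_UP(414, 64) = 7, plus the guard size of 2, so
+        * planea_wm = 9.
+        */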
+       /* Calculate and update the watermark for plane B */
+       if (planeb_clock) {
+               entries_required = ((planeb_clock / 1000) * pixel_size *
+                                    ILK_LP0_PLANE_LATENCY) / 1000;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                  ironlake_display_wm_info.cacheline_size);
+               planeb_wm = entries_required +
+                           ironlake_display_wm_info.guard_size;
+
+               if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+                       planeb_wm = ironlake_display_wm_info.max_wm;
+
+               cursorb_wm = 16;
+               reg_value = I915_READ(WM0_PIPEB_ILK);
+               reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+               reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+                            (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+               I915_WRITE(WM0_PIPEB_ILK, reg_value);
+               DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, "
+                               "cursor: %d\n", planeb_wm, cursorb_wm);
+       }
+
+       /*
+        * Calculate and update the self-refresh watermark only when one
+        * display plane is used.
+        */
+       if (!planea_clock || !planeb_clock) {
+               int line_count;
+               /* Read the self-refresh latency. The unit is 0.5 us */
+               int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+
+               sr_clock = planea_clock ? planea_clock : planeb_clock;
+               line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
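+               /*
+                * ilk_sr_latency is in units of 0.5 us, so latency * 500
+                * is in ns while line_time_us is in us.
+                */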
+               /* Use ns/us then divide to preserve precision */
+               line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+                              / 1000;
+
+               /* calculate the self-refresh watermark for display plane */
+               entries_required = line_count * sr_hdisplay * pixel_size;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                  ironlake_display_srwm_info.cacheline_size);
+               sr_wm = entries_required +
+                       ironlake_display_srwm_info.guard_size;
+
+               /* calculate the self-refresh watermark for display cursor */
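+               /* (the 64 below is the assumed cursor width in pixels) */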
+               entries_required = line_count * pixel_size * 64;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                  ironlake_cursor_srwm_info.cacheline_size);
+               cursor_wm = entries_required +
+                           ironlake_cursor_srwm_info.guard_size;
+
+               /* configure watermark and enable self-refresh */
+               reg_value = I915_READ(WM1_LP_ILK);
+               reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+                              WM1_LP_CURSOR_MASK);
+               reg_value |= WM1_LP_SR_EN |
+                            (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+                            (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+
+               I915_WRITE(WM1_LP_ILK, reg_value);
+               DRM_DEBUG_KMS("self-refresh watermark: display plane %d, "
+                               "cursor %d\n", sr_wm, cursor_wm);
+
+       } else {
+               /* Turn off self refresh if both pipes are enabled */
+               I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+       }
+}
+
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -2882,12 +3236,6 @@ static void intel_update_watermarks(struct drm_device *dev)
        if (enabled <= 0)
                return;
 
-       /* Single plane configs can enable self refresh */
-       if (enabled == 1 && IS_PINEVIEW(dev))
-               pineview_enable_cxsr(dev, sr_clock, pixel_size);
-       else if (IS_PINEVIEW(dev))
-               pineview_disable_cxsr(dev);
-
        dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
                                    sr_hdisplay, pixel_size);
 }
@@ -2924,7 +3272,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
        bool is_edp = false;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct intel_encoder *intel_encoder = NULL;
        const intel_limit_t *limit;
        int ret;
        struct fdi_m_n m_n = {0};
@@ -2935,6 +3284,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
        int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
        int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+       int trans_dpll_sel = (pipe == 0) ? 0 : 1;
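+       /* trans_dpll_sel: 0 selects transcoder DPLL A, 1 selects B (CPT) */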
        int lvds_reg = LVDS;
        u32 temp;
        int sdvo_pixel_multiply;
@@ -2942,12 +3293,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
        drm_vblank_pre_modeset(dev, pipe);
 
-       list_for_each_entry(connector, &mode_config->connector_list, head) {
-               struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       list_for_each_entry(encoder, &mode_config->encoder_list, head) {
 
-               if (!connector->encoder || connector->encoder->crtc != crtc)
+               if (!encoder || encoder->crtc != crtc)
                        continue;
 
+               intel_encoder = enc_to_intel_encoder(encoder);
+
                switch (intel_encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
@@ -3043,14 +3395,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
        /* FDI link */
        if (HAS_PCH_SPLIT(dev)) {
-               int lane, link_bw, bpp;
+               int lane = 0, link_bw, bpp;
                /* eDP doesn't require FDI link, so just set DP M/N
                   according to current link config */
                if (is_edp) {
-                       struct drm_connector *edp;
                        target_clock = mode->clock;
-                       edp = intel_pipe_get_connector(crtc);
-                       intel_edp_link_config(to_intel_encoder(edp),
+                       intel_edp_link_config(intel_encoder,
                                        &lane, &link_bw);
                } else {
                        /* DP over FDI requires target mode clock
@@ -3059,7 +3409,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                                target_clock = mode->clock;
                        else
                                target_clock = adjusted_mode->clock;
-                       lane = 4;
                        link_bw = 270000;
                }
 
@@ -3111,6 +3460,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        bpp = 24;
                }
 
+               if (!lane) {
+                       /*
+                        * Account for spread spectrum to avoid
+                        * oversubscribing the link. Max center spread
+                        * is 2.5%; use 5% for safety's sake.
+                        */
+                       u32 bps = target_clock * bpp * 21 / 20;
+                       lane = bps / (link_bw * 8) + 1;
+               }
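+               /*
+                * Hypothetical example: a 154000 kHz target clock at
+                * bpp = 24 gives bps = 154000 * 24 * 21 / 20 = 3880800,
+                * so with link_bw = 270000 this picks
+                * 3880800 / (270000 * 8) + 1 = 2 lanes.
+                */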
+
+               intel_crtc->fdi_lanes = lane;
+
                ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
        }
 
@@ -3265,11 +3626,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
        }
 
-       dspcntr |= DISPLAY_PLANE_ENABLE;
-       pipeconf |= PIPEACONF_ENABLE;
-       dpll |= DPLL_VCO_ENABLE;
-
-
        /* Disable the panel fitter if it was on our pipe */
        if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
                I915_WRITE(PFIT_CONTROL, 0);
@@ -3292,6 +3648,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                udelay(150);
        }
 
+       /* enable transcoder DPLL */
+       if (HAS_PCH_CPT(dev)) {
+               temp = I915_READ(PCH_DPLL_SEL);
+               if (trans_dpll_sel == 0)
+                       temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+               else
+                       temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+               I915_WRITE(PCH_DPLL_SEL, temp);
+               I915_READ(PCH_DPLL_SEL);
+               udelay(150);
+       }
+
        /* The LVDS pin pair needs to be on before the DPLLs are enabled.
         * This is an exception to the general rule that mode_set doesn't turn
         * things on.
@@ -3303,7 +3671,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        lvds_reg = PCH_LVDS;
 
                lvds = I915_READ(lvds_reg);
-               lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+               lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+               if (pipe == 1) {
+                       if (HAS_PCH_CPT(dev))
+                               lvds |= PORT_TRANS_B_SEL_CPT;
+                       else
+                               lvds |= LVDS_PIPEB_SELECT;
+               } else {
+                       if (HAS_PCH_CPT(dev))
+                               lvds &= ~PORT_TRANS_SEL_MASK;
+                       else
+                               lvds &= ~LVDS_PIPEB_SELECT;
+               }
                /* set the corresponding LVDS_BORDER bit */
                lvds |= dev_priv->lvds_border_bits;
                /* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -3321,14 +3700,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                /* set the dithering flag */
                if (IS_I965G(dev)) {
                        if (dev_priv->lvds_dither) {
-                               if (HAS_PCH_SPLIT(dev))
+                               if (HAS_PCH_SPLIT(dev)) {
                                        pipeconf |= PIPE_ENABLE_DITHER;
-                               else
+                                       pipeconf |= PIPE_DITHER_TYPE_ST01;
+                               } else
                                        lvds |= LVDS_ENABLE_DITHER;
                        } else {
-                               if (HAS_PCH_SPLIT(dev))
+                               if (HAS_PCH_SPLIT(dev)) {
                                        pipeconf &= ~PIPE_ENABLE_DITHER;
-                               else
+                                       pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+                               } else
                                        lvds &= ~LVDS_ENABLE_DITHER;
                        }
                }
@@ -3337,6 +3718,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        }
        if (is_dp)
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
+       else if (HAS_PCH_SPLIT(dev)) {
+               /* For non-DP output, clear any transcoder DP clock recovery setting. */
+               if (pipe == 0) {
+                       I915_WRITE(TRANSA_DATA_M1, 0);
+                       I915_WRITE(TRANSA_DATA_N1, 0);
+                       I915_WRITE(TRANSA_DP_LINK_M1, 0);
+                       I915_WRITE(TRANSA_DP_LINK_N1, 0);
+               } else {
+                       I915_WRITE(TRANSB_DATA_M1, 0);
+                       I915_WRITE(TRANSB_DATA_N1, 0);
+                       I915_WRITE(TRANSB_DP_LINK_M1, 0);
+                       I915_WRITE(TRANSB_DP_LINK_N1, 0);
+               }
+       }
 
        if (!is_edp) {
                I915_WRITE(fp_reg, fp);
@@ -3411,6 +3806,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        /* enable FDI RX PLL too */
                        temp = I915_READ(fdi_rx_reg);
                        I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+                       I915_READ(fdi_rx_reg);
+                       udelay(200);
+
+                       /* enable FDI TX PLL too */
+                       temp = I915_READ(fdi_tx_reg);
+                       I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+                       I915_READ(fdi_tx_reg);
+
+                       /* enable FDI RX PCDCLK */
+                       temp = I915_READ(fdi_rx_reg);
+                       I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+                       I915_READ(fdi_rx_reg);
                        udelay(200);
                }
        }
@@ -3671,6 +4078,7 @@ static struct drm_display_mode load_detect_mode = {
 };
 
 struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+                                           struct drm_connector *connector,
                                            struct drm_display_mode *mode,
                                            int *dpms_mode)
 {
@@ -3729,7 +4137,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
        }
 
        encoder->crtc = crtc;
-       intel_encoder->base.encoder = encoder;
+       connector->encoder = encoder;
        intel_encoder->load_detect_temp = true;
 
        intel_crtc = to_intel_crtc(crtc);
@@ -3755,7 +4163,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
        return crtc;
 }
 
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+                                   struct drm_connector *connector, int dpms_mode)
 {
        struct drm_encoder *encoder = &intel_encoder->enc;
        struct drm_device *dev = encoder->dev;
@@ -3765,7 +4174,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpm
 
        if (intel_encoder->load_detect_temp) {
                encoder->crtc = NULL;
-               intel_encoder->base.encoder = NULL;
+               connector->encoder = NULL;
                intel_encoder->load_detect_temp = false;
                crtc->enabled = drm_helper_crtc_in_use(crtc);
                drm_helper_disable_unused_functions(dev);
@@ -4392,14 +4801,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
        return crtc;
 }
 
-static int intel_connector_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct drm_device *dev, int type_mask)
 {
        int index_mask = 0;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
        int entry = 0;
 
-        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
                if (type_mask & intel_encoder->clone_mask)
                        index_mask |= (1 << entry);
                entry++;
@@ -4411,7 +4820,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
 static void intel_setup_outputs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
 
        intel_crt_init(dev);
 
@@ -4426,9 +4835,8 @@ static void intel_setup_outputs(struct drm_device *dev)
                        intel_dp_init(dev, DP_A);
 
                if (I915_READ(HDMIB) & PORT_DETECTED) {
-                       /* check SDVOB */
-                       /* found = intel_sdvo_init(dev, HDMIB); */
-                       found = 0;
+                       /* PCH SDVOB is multiplexed with HDMIB */
+                       found = intel_sdvo_init(dev, PCH_SDVOB);
                        if (!found)
                                intel_hdmi_init(dev, HDMIB);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -4494,12 +4902,11 @@ static void intel_setup_outputs(struct drm_device *dev)
        if (SUPPORTS_TV(dev))
                intel_tv_init(dev);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-               struct drm_encoder *encoder = &intel_encoder->enc;
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 
                encoder->possible_crtcs = intel_encoder->crtc_mask;
-               encoder->possible_clones = intel_connector_clones(dev,
+               encoder->possible_clones = intel_encoder_clones(dev,
                                                intel_encoder->clone_mask);
        }
 }
@@ -4507,10 +4914,6 @@ static void intel_setup_outputs(struct drm_device *dev)
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_device *dev = fb->dev;
-
-       if (fb->fbdev)
-               intelfb_remove(dev, fb);
 
        drm_framebuffer_cleanup(fb);
        drm_gem_object_unreference_unlocked(intel_fb->obj);
@@ -4533,18 +4936,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .create_handle = intel_user_framebuffer_create_handle,
 };
 
-int intel_framebuffer_create(struct drm_device *dev,
-                            struct drm_mode_fb_cmd *mode_cmd,
-                            struct drm_framebuffer **fb,
-                            struct drm_gem_object *obj)
+int intel_framebuffer_init(struct drm_device *dev,
+                          struct intel_framebuffer *intel_fb,
+                          struct drm_mode_fb_cmd *mode_cmd,
+                          struct drm_gem_object *obj)
 {
-       struct intel_framebuffer *intel_fb;
        int ret;
 
-       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-       if (!intel_fb)
-               return -ENOMEM;
-
        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -4552,40 +4950,41 @@ int intel_framebuffer_create(struct drm_device *dev,
        }
 
        drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
-
        intel_fb->obj = obj;
-
-       *fb = &intel_fb->base;
-
        return 0;
 }
 
-
 static struct drm_framebuffer *
 intel_user_framebuffer_create(struct drm_device *dev,
                              struct drm_file *filp,
                              struct drm_mode_fb_cmd *mode_cmd)
 {
        struct drm_gem_object *obj;
-       struct drm_framebuffer *fb;
+       struct intel_framebuffer *intel_fb;
        int ret;
 
        obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
        if (!obj)
                return NULL;
 
-       ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+       if (!intel_fb)
+               return NULL;
+
+       ret = intel_framebuffer_init(dev, intel_fb,
+                                    mode_cmd, obj);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
+               kfree(intel_fb);
                return NULL;
        }
 
-       return fb;
+       return &intel_fb->base;
 }
 
 static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
-       .fb_changed = intelfb_probe,
+       .output_poll_changed = intel_fb_output_poll_changed,
 };
 
 static struct drm_gem_object *
@@ -4594,7 +4993,7 @@ intel_alloc_power_context(struct drm_device *dev)
        struct drm_gem_object *pwrctx;
        int ret;
 
-       pwrctx = drm_gem_object_alloc(dev, 4096);
+       pwrctx = i915_gem_alloc_object(dev, 4096);
        if (!pwrctx) {
                DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
                return NULL;
@@ -4732,6 +5131,25 @@ void intel_init_clock_gating(struct drm_device *dev)
                }
 
                I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+               /*
+                * According to the spec, the following bits should be set
+                * to enable memory self-refresh:
+                *   bits 22/21 of 0x42004
+                *   bit 5 of 0x42020
+                *   bit 15 of 0x45000
+                */
+               if (IS_IRONLAKE(dev)) {
+                       I915_WRITE(ILK_DISPLAY_CHICKEN2,
+                                       (I915_READ(ILK_DISPLAY_CHICKEN2) |
+                                       ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+                       I915_WRITE(ILK_DSPCLK_GATE,
+                                       (I915_READ(ILK_DSPCLK_GATE) |
+                                               ILK_DPARB_CLK_GATE));
+                       I915_WRITE(DISP_ARB_CTL,
+                                       (I915_READ(DISP_ARB_CTL) |
+                                               DISP_FBC_WM_DIS));
+               }
                return;
        } else if (IS_G4X(dev)) {
                uint32_t dspclk_gate;
@@ -4809,8 +5227,7 @@ static void intel_init_display(struct drm_device *dev)
        else
                dev_priv->display.dpms = i9xx_crtc_dpms;
 
-       /* Only mobile has FBC, leave pointers NULL for other chips */
-       if (IS_MOBILE(dev)) {
+       if (I915_HAS_FBC(dev)) {
                if (IS_GM45(dev)) {
                        dev_priv->display.fbc_enabled = g4x_fbc_enabled;
                        dev_priv->display.enable_fbc = g4x_enable_fbc;
@@ -4847,9 +5264,31 @@ static void intel_init_display(struct drm_device *dev)
                        i830_get_display_clock_speed;
 
        /* For FIFO watermark updates */
-       if (HAS_PCH_SPLIT(dev))
-               dev_priv->display.update_wm = NULL;
-       else if (IS_G4X(dev))
+       if (HAS_PCH_SPLIT(dev)) {
+               if (IS_IRONLAKE(dev)) {
+                       if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+                               dev_priv->display.update_wm = ironlake_update_wm;
+                       else {
+                               DRM_DEBUG_KMS("Failed to get proper latency, "
+                                             "disabling CxSR\n");
+                               dev_priv->display.update_wm = NULL;
+                       }
+               } else
+                       dev_priv->display.update_wm = NULL;
+       } else if (IS_PINEVIEW(dev)) {
+               if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+                                           dev_priv->fsb_freq,
+                                           dev_priv->mem_freq)) {
+                       DRM_INFO("failed to find known CxSR latency "
+                                "(found fsb freq %d, mem freq %d), "
+                                "disabling CxSR\n",
+                                dev_priv->fsb_freq, dev_priv->mem_freq);
+                       /* Disable CxSR and never update its watermark again */
+                       pineview_disable_cxsr(dev);
+                       dev_priv->display.update_wm = NULL;
+               } else
+                       dev_priv->display.update_wm = pineview_update_wm;
+       } else if (IS_G4X(dev))
                dev_priv->display.update_wm = g4x_update_wm;
        else if (IS_I965G(dev))
                dev_priv->display.update_wm = i965_update_wm;
@@ -4923,13 +5362,6 @@ void intel_modeset_init(struct drm_device *dev)
                    (unsigned long)dev);
 
        intel_setup_overlay(dev);
-
-       if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
-                                                       dev_priv->fsb_freq,
-                                                       dev_priv->mem_freq))
-               DRM_INFO("failed to find known CxSR latency "
-                        "(found fsb freq %d, mem freq %d), disabling CxSR\n",
-                        dev_priv->fsb_freq, dev_priv->mem_freq);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -4940,6 +5372,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        mutex_lock(&dev->struct_mutex);
 
+       drm_kms_helper_poll_fini(dev);
+       intel_fbdev_fini(dev);
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                /* Skip inactive CRTCs */
                if (!crtc->fb)
@@ -4974,14 +5409,29 @@ void intel_modeset_cleanup(struct drm_device *dev)
 }
 
 
-/* current intel driver doesn't take advantage of encoders
-   always give back the encoder for the connector
-*/
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+/*
+ * Return the encoder currently attached to the connector.
+ */
+struct drm_encoder *intel_attached_encoder(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+       int i;
 
-       return &intel_encoder->enc;
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == 0)
+                       break;
+
+               obj = drm_mode_object_find(connector->dev,
+                                           connector->encoder_ids[i],
+                                           DRM_MODE_OBJECT_ENCODER);
+               if (!obj)
+                       continue;
+
+               encoder = obj_to_encoder(obj);
+               return encoder;
+       }
+       return NULL;
 }
 
 /*
index 77e40cf..6b1c9a2 100644
@@ -48,8 +48,6 @@ struct intel_dp_priv {
        uint32_t output_reg;
        uint32_t DP;
        uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
-       uint32_t save_DP;
-       uint8_t  save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
        bool has_audio;
        int dpms_mode;
        uint8_t link_bw;
@@ -141,7 +139,8 @@ static int
 intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
        int max_lanes = intel_dp_max_lane_count(intel_encoder);
 
@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
 {
        struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
        uint32_t output_reg = dp_priv->output_reg;
-       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_device *dev = intel_encoder->enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = output_reg + 0x10;
        uint32_t ch_data = ch_ctl + 4;
@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
        uint32_t ctl;
        uint32_t status;
        uint32_t aux_clock_divider;
-       int try;
+       int try, precharge;
 
        /* The clock divider is based on the hrawclk,
         * and would like to run at 2 MHz. So, take the
         * hrawclk value and divide by 2 and use that
         */
-       if (IS_eDP(intel_encoder))
-               aux_clock_divider = 225; /* eDP input clock at 450Mhz */
-       else if (HAS_PCH_SPLIT(dev))
+       if (IS_eDP(intel_encoder)) {
+               if (IS_GEN6(dev))
+                       aux_clock_divider = 200; /* SNB eDP input clock at 400 MHz */
+               else
+                       aux_clock_divider = 225; /* eDP input clock at 450 MHz */
+       } else if (HAS_PCH_SPLIT(dev))
                aux_clock_divider = 62; /* ILK input clock fixed at 125 MHz */
        else
                aux_clock_divider = intel_hrawclk(dev) / 2;
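+       /* e.g. the fixed 125 MHz ILK input clock above divided by 62
+        * yields the desired ~2 MHz AUX bit clock */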
 
+       if (IS_GEN6(dev))
+               precharge = 3;
+       else
+               precharge = 5;
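+       /* the precharge values above are in 2 us units (note the _2US_
+        * field name below): 6 us on Gen6, 10 us otherwise */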
+
        /* Must try at least 3 times according to DP spec */
        for (try = 0; try < 5; try++) {
                /* Load the send data into the aux channel data registers */
@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
                ctl = (DP_AUX_CH_CTL_SEND_BUSY |
                       DP_AUX_CH_CTL_TIME_OUT_400us |
                       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-                      (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+                      (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
                       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
                       DP_AUX_CH_CTL_DONE |
                       DP_AUX_CH_CTL_TIME_OUT_ERROR |
@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 }
 
 static int
-intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+                 struct intel_connector *intel_connector, const char *name)
 {
        struct intel_dp_priv   *dp_priv = intel_encoder->dev_priv;
 
@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
        strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
        dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
        dp_priv->adapter.algo_data = &dp_priv->algo;
-       dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
+       dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
        
        return i2c_dp_aux_add_bus(&dp_priv->adapter);
 }
@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int lane_count = 4;
@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
        /*
         * Find the lane count in the intel_encoder private
         */
-       list_for_each_entry(connector, &mode_config->connector_list, head) {
-               struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-               struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+       list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+               struct intel_encoder *intel_encoder;
+               struct intel_dp_priv *dp_priv;
 
-               if (!connector->encoder || connector->encoder->crtc != crtc)
+               if (!encoder || encoder->crtc != crtc)
                        continue;
 
+               intel_encoder = enc_to_intel_encoder(encoder);
+               dp_priv = intel_encoder->dev_priv;
+
                if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
                        lane_count = dp_priv->lane_count;
                        break;
@@ -626,16 +637,24 @@ static void
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode)
 {
+       struct drm_device *dev = encoder->dev;
        struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
        struct drm_crtc *crtc = intel_encoder->enc.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       dp_priv->DP = (DP_LINK_TRAIN_OFF |
-                       DP_VOLTAGE_0_4 |
-                       DP_PRE_EMPHASIS_0 |
-                       DP_SYNC_VS_HIGH |
-                       DP_SYNC_HS_HIGH);
+       dp_priv->DP = (DP_VOLTAGE_0_4 |
+                      DP_PRE_EMPHASIS_0);
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+               dp_priv->DP |= DP_SYNC_HS_HIGH;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+               dp_priv->DP |= DP_SYNC_VS_HIGH;
+
+       if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+               dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+       else
+               dp_priv->DP |= DP_LINK_TRAIN_OFF;
 
        switch (dp_priv->lane_count) {
        case 1:
@@ -664,7 +683,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                dp_priv->DP |= DP_ENHANCED_FRAMING;
        }
 
-       if (intel_crtc->pipe == 1)
+       /* On CPT, DP pipe selection is done via TRANS_DP_CTL */
+       if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
                dp_priv->DP |= DP_PIPEB_SELECT;
 
        if (IS_eDP(intel_encoder)) {
@@ -704,7 +724,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 {
        struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dp_reg = I915_READ(dp_priv->output_reg);
 
@@ -749,20 +769,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
        return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static void
-intel_dp_save(struct drm_connector *connector)
-{
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
-       dp_priv->save_DP = I915_READ(dp_priv->output_reg);
-       intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
-                                dp_priv->save_link_configuration,
-                                sizeof (dp_priv->save_link_configuration));
-}
-
 static uint8_t
 intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
                                 int lane)
@@ -892,6 +898,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
        return signal_levels;
 }
 
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+       switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+               return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+       default:
+               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
+               return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+       }
+}
+
 static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
                      int lane)
@@ -948,7 +973,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
                        uint8_t train_set[4],
                        bool first)
 {
-       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_device *dev = intel_encoder->enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
        int ret;
@@ -974,7 +999,7 @@ static void
 intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
                    uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
 {
-       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_device *dev = intel_encoder->enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
        uint8_t train_set[4];
@@ -985,23 +1010,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
        bool channel_eq = false;
        bool first = true;
        int tries;
+       u32 reg;
 
        /* Write the link configuration data */
-       intel_dp_aux_native_write(intel_encoder, 0x100,
+       intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
                                  link_configuration, DP_LINK_CONFIGURATION_SIZE);
 
        DP |= DP_PORT_EN;
-       DP &= ~DP_LINK_TRAIN_MASK;
+       if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+               DP &= ~DP_LINK_TRAIN_MASK_CPT;
+       else
+               DP &= ~DP_LINK_TRAIN_MASK;
        memset(train_set, 0, 4);
        voltage = 0xff;
        tries = 0;
        clock_recovery = false;
        for (;;) {
                /* Use train_set[0] to set the voltage and pre emphasis values */
-               uint32_t    signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
-               DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+               uint32_t    signal_levels;
+               if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+                       signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+               } else {
+                       signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+                       DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+               }
 
-               if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
+               if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+                       reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+               else
+                       reg = DP | DP_LINK_TRAIN_PAT_1;
+
+               if (!intel_dp_set_link_train(intel_encoder, reg,
                                             DP_TRAINING_PATTERN_1, train_set, first))
                        break;
                first = false;
@@ -1041,11 +1081,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
        channel_eq = false;
        for (;;) {
                /* Use train_set[0] to set the voltage and pre emphasis values */
-               uint32_t    signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
-               DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+               uint32_t    signal_levels;
+
+               if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+                       signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+               } else {
+                       signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+                       DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+               }
+
+               if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+                       reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+               else
+                       reg = DP | DP_LINK_TRAIN_PAT_2;
 
                /* channel eq pattern */
-               if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
+               if (!intel_dp_set_link_train(intel_encoder, reg,
                                             DP_TRAINING_PATTERN_2, train_set,
                                             false))
                        break;
@@ -1068,7 +1120,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
                ++tries;
        }
 
-       I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+       if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+               reg = DP | DP_LINK_TRAIN_OFF_CPT;
+       else
+               reg = DP | DP_LINK_TRAIN_OFF;
+
+       I915_WRITE(dp_priv->output_reg, reg);
        POSTING_READ(dp_priv->output_reg);
        intel_dp_aux_native_write_1(intel_encoder,
                                    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
@@ -1077,7 +1134,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
 static void
 intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
 {
-       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_device *dev = intel_encoder->enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
@@ -1090,9 +1147,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
                udelay(100);
        }
 
-       DP &= ~DP_LINK_TRAIN_MASK;
-       I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
-       POSTING_READ(dp_priv->output_reg);
+       if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+               DP &= ~DP_LINK_TRAIN_MASK_CPT;
+               I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+               POSTING_READ(dp_priv->output_reg);
+       } else {
+               DP &= ~DP_LINK_TRAIN_MASK;
+               I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+               POSTING_READ(dp_priv->output_reg);
+       }
 
        udelay(17000);
 
@@ -1102,18 +1165,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
        POSTING_READ(dp_priv->output_reg);
 }
 
-static void
-intel_dp_restore(struct drm_connector *connector)
-{
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
-       if (dp_priv->save_DP & DP_PORT_EN)
-               intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
-       else
-               intel_dp_link_down(intel_encoder,  dp_priv->save_DP);
-}
-
 /*
  * According to DP spec
  * 5.1.2:
@@ -1144,7 +1195,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder)
 static enum drm_connector_status
 ironlake_dp_detect(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
        enum drm_connector_status status;
 
@@ -1168,8 +1220,9 @@ ironlake_dp_detect(struct drm_connector *connector)
 static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct drm_device *dev = intel_encoder->enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
        uint32_t temp, bit;
@@ -1180,16 +1233,6 @@ intel_dp_detect(struct drm_connector *connector)
        if (HAS_PCH_SPLIT(dev))
                return ironlake_dp_detect(connector);
 
-       temp = I915_READ(PORT_HOTPLUG_EN);
-
-       I915_WRITE(PORT_HOTPLUG_EN,
-              temp |
-              DPB_HOTPLUG_INT_EN |
-              DPC_HOTPLUG_INT_EN |
-              DPD_HOTPLUG_INT_EN);
-
-       POSTING_READ(PORT_HOTPLUG_EN);
-
        switch (dp_priv->output_reg) {
        case DP_B:
                bit = DPB_HOTPLUG_INT_STATUS;
@@ -1222,15 +1265,16 @@ intel_dp_detect(struct drm_connector *connector)
 
 static int intel_dp_get_modes(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct drm_device *dev = intel_encoder->enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        /* We should parse the EDID data and find out if it has an audio sink
         */
 
-       ret = intel_ddc_get_modes(intel_encoder);
+       ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
        if (ret)
                return ret;
 
@@ -1249,13 +1293,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 static void
 intel_dp_destroy (struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
-       if (intel_encoder->i2c_bus)
-               intel_i2c_destroy(intel_encoder->i2c_bus);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
-       kfree(intel_encoder);
+       kfree(connector);
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1268,8 +1308,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
-       .save = intel_dp_save,
-       .restore = intel_dp_restore,
        .detect = intel_dp_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = intel_dp_destroy,
@@ -1278,12 +1316,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
-       .best_encoder = intel_best_encoder,
+       .best_encoder = intel_attached_encoder,
 };
 
 static void intel_dp_enc_destroy(struct drm_encoder *encoder)
 {
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+       if (intel_encoder->i2c_bus)
+               intel_i2c_destroy(intel_encoder->i2c_bus);
        drm_encoder_cleanup(encoder);
+       kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -1299,12 +1342,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
                intel_dp_check_link_status(intel_encoder);
 }
 
+/* Return which DP port should be selected for transcoder DP control */
+int
+intel_trans_dp_port_sel(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_encoder *encoder;
+       struct intel_encoder *intel_encoder = NULL;
+
+       list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+               if (!encoder || encoder->crtc != crtc)
+                       continue;
+
+               intel_encoder = enc_to_intel_encoder(encoder);
+               if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+                       struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+                       return dp_priv->output_reg;
+               }
+       }
+       return -1;
+}
+
 void
 intel_dp_init(struct drm_device *dev, int output_reg)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_connector *connector;
        struct intel_encoder *intel_encoder;
+       struct intel_connector *intel_connector;
        struct intel_dp_priv *dp_priv;
        const char *name = NULL;
 
@@ -1313,13 +1379,21 @@ intel_dp_init(struct drm_device *dev, int output_reg)
        if (!intel_encoder)
                return;
 
+       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       if (!intel_connector) {
+               kfree(intel_encoder);
+               return;
+       }
+
        dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
 
-       connector = &intel_encoder->base;
+       connector = &intel_connector->base;
        drm_connector_init(dev, connector, &intel_dp_connector_funcs,
                           DRM_MODE_CONNECTOR_DisplayPort);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
+       connector->polled = DRM_CONNECTOR_POLL_HPD;
+
        if (output_reg == DP_A)
                intel_encoder->type = INTEL_OUTPUT_EDP;
        else
@@ -1349,7 +1423,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                         DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
 
-       drm_mode_connector_attach_encoder(&intel_encoder->base,
+       drm_mode_connector_attach_encoder(&intel_connector->base,
                                          &intel_encoder->enc);
        drm_sysfs_connector_add(connector);
 
@@ -1378,7 +1452,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                        break;
        }
 
-       intel_dp_i2c_init(intel_encoder, name);
+       intel_dp_i2c_init(intel_encoder, intel_connector, name);
 
        intel_encoder->ddc_bus = &dp_priv->adapter;
        intel_encoder->hot_plug = intel_dp_hot_plug;
index e302537..df931f7 100644
@@ -96,8 +96,6 @@ struct intel_framebuffer {
 
 
 struct intel_encoder {
-       struct drm_connector base;
-
        struct drm_encoder enc;
        int type;
        struct i2c_adapter *i2c_bus;
@@ -110,6 +108,11 @@ struct intel_encoder {
        int clone_mask;
 };
 
+struct intel_connector {
+       struct drm_connector base;
+       void *dev_priv;
+};
+
 struct intel_crtc;
 struct intel_overlay {
        struct drm_device *dev;
@@ -149,17 +152,18 @@ struct intel_crtc {
        bool lowfreq_avail;
        struct intel_overlay *overlay;
        struct intel_unpin_work *unpin_work;
+       int fdi_lanes;
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 
 struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
                                     const char *name);
 void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
 void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
 void intel_i2c_reset_gmbus(struct drm_device *dev);
@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
 extern void intel_encoder_commit (struct drm_encoder *encoder);
 
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
 
 extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                                    struct drm_crtc *crtc);
@@ -192,17 +196,16 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 extern void intel_wait_for_vblank(struct drm_device *dev);
 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+                                                  struct drm_connector *connector,
                                                   struct drm_display_mode *mode,
                                                   int *dpms_mode);
 extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+                                          struct drm_connector *connector,
                                           int dpms_mode);
 
 extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
 extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
 extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
-extern int intelfb_probe(struct drm_device *dev);
-extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
 extern void intelfb_restore(void);
 extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                    u16 blue, int regno);
@@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 
-extern int intel_framebuffer_create(struct drm_device *dev,
-                                   struct drm_mode_fb_cmd *mode_cmd,
-                                   struct drm_framebuffer **fb,
-                                   struct drm_gem_object *obj);
+extern int intel_framebuffer_init(struct drm_device *dev,
+                                 struct intel_framebuffer *ifb,
+                                 struct drm_mode_fb_cmd *mode_cmd,
+                                 struct drm_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
 
 extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
 extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
@@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
 extern int intel_overlay_attrs(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
 #endif /* __INTEL_DRV_H__ */
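
Taken together, these header changes split what used to be one object in two:
struct intel_encoder keeps the drm_encoder, and the new struct intel_connector
carries the drm_connector. Connector callbacks therefore reach driver state in
two hops, a pattern repeated in every file below; sketched with a hypothetical
helper name:

	/* Sketch of the two-hop lookup used by the converted callbacks:
	 * drm_connector -> attached drm_encoder -> wrapping intel_encoder. */
	static struct intel_encoder *
	sketch_connector_to_encoder(struct drm_connector *connector)
	{
		struct drm_encoder *encoder = intel_attached_encoder(connector);

		return enc_to_intel_encoder(encoder);
	}
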
index ebf213c..227feca 100644
@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
        }
 }
 
-static void intel_dvo_save(struct drm_connector *connector)
-{
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
-       /* Each output should probably just save the registers it touches,
-        * but for now, use more overkill.
-        */
-       dev_priv->saveDVOA = I915_READ(DVOA);
-       dev_priv->saveDVOB = I915_READ(DVOB);
-       dev_priv->saveDVOC = I915_READ(DVOC);
-
-       dvo->dev_ops->save(dvo);
-}
-
-static void intel_dvo_restore(struct drm_connector *connector)
-{
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
-       dvo->dev_ops->restore(dvo);
-
-       I915_WRITE(DVOA, dev_priv->saveDVOA);
-       I915_WRITE(DVOB, dev_priv->saveDVOB);
-       I915_WRITE(DVOC, dev_priv->saveDVOC);
-}
-
 static int intel_dvo_mode_valid(struct drm_connector *connector,
                                struct drm_display_mode *mode)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
  */
 static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
        return dvo->dev_ops->detect(dvo);
@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
 
 static int intel_dvo_get_modes(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
        /* We should probably have an i2c driver get_modes function for those
@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
         * (TV-out, for example), but for now with just TMDS and LVDS,
         * that's not the case.
         */
-       intel_ddc_get_modes(intel_encoder);
+       intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
        if (!list_empty(&connector->probed_modes))
                return 1;
 
@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
 
 static void intel_dvo_destroy (struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
-       if (dvo) {
-               if (dvo->dev_ops->destroy)
-                       dvo->dev_ops->destroy(dvo);
-               if (dvo->panel_fixed_mode)
-                       kfree(dvo->panel_fixed_mode);
-               /* no need, in intel_dvo_devices[] now */
-               //kfree(dvo);
-       }
-       if (intel_encoder->i2c_bus)
-               intel_i2c_destroy(intel_encoder->i2c_bus);
-       if (intel_encoder->ddc_bus)
-               intel_i2c_destroy(intel_encoder->ddc_bus);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
-       kfree(intel_encoder);
-}
-
-#ifdef RANDR_GET_CRTC_INTERFACE
-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-       int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
-
-       return intel_pipe_to_crtc(pScrn, pipe);
+       kfree(connector);
 }
-#endif
 
 static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
        .dpms = intel_dvo_dpms,
@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
 
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
-       .save = intel_dvo_save,
-       .restore = intel_dvo_restore,
        .detect = intel_dvo_detect,
        .destroy = intel_dvo_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
        .mode_valid = intel_dvo_mode_valid,
        .get_modes = intel_dvo_get_modes,
-       .best_encoder = intel_best_encoder,
+       .best_encoder = intel_attached_encoder,
 };
 
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
 {
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+
+       if (dvo) {
+               if (dvo->dev_ops->destroy)
+                       dvo->dev_ops->destroy(dvo);
+               if (dvo->panel_fixed_mode)
+                       kfree(dvo->panel_fixed_mode);
+       }
+       if (intel_encoder->i2c_bus)
+               intel_i2c_destroy(intel_encoder->i2c_bus);
+       if (intel_encoder->ddc_bus)
+               intel_i2c_destroy(intel_encoder->ddc_bus);
        drm_encoder_cleanup(encoder);
+       kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_dvo_device *dvo = intel_encoder->dev_priv;
        uint32_t dvo_reg = dvo->dvo_reg;
        uint32_t dvo_val = I915_READ(dvo_reg);
@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
 void intel_dvo_init(struct drm_device *dev)
 {
        struct intel_encoder *intel_encoder;
+       struct intel_connector *intel_connector;
        struct intel_dvo_device *dvo;
        struct i2c_adapter *i2cbus = NULL;
        int ret = 0;
@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev)
        if (!intel_encoder)
                return;
 
+       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       if (!intel_connector) {
+               kfree(intel_encoder);
+               return;
+       }
+
        /* Set up the DDC bus */
        intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
        if (!intel_encoder->ddc_bus)
@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev)
 
        /* Now, try to find a controller */
        for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
-               struct drm_connector *connector = &intel_encoder->base;
+               struct drm_connector *connector = &intel_connector->base;
                int gpio;
 
                dvo = &intel_dvo_devices[i];
@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev)
                drm_encoder_helper_add(&intel_encoder->enc,
                                       &intel_dvo_helper_funcs);
 
-               drm_mode_connector_attach_encoder(&intel_encoder->base,
+               drm_mode_connector_attach_encoder(&intel_connector->base,
                                                  &intel_encoder->enc);
                if (dvo->type == INTEL_DVO_CHIP_LVDS) {
                        /* For our LVDS chipsets, we should hopefully be able
@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev)
                intel_i2c_destroy(i2cbus);
 free_intel:
        kfree(intel_encoder);
+       kfree(intel_connector);
 }
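
With the split, connector and encoder are separate allocations with separate
lifetimes: hardware teardown (DVO device ops, i2c buses) moves from
intel_dvo_destroy() to intel_dvo_enc_destroy(), and the init error path above
has to unwind both objects. The paired-allocation pattern, condensed into a
sketch (hypothetical function name, registration steps elided):

	static void sketch_output_init(void)
	{
		struct intel_encoder *enc = kzalloc(sizeof(*enc), GFP_KERNEL);
		struct intel_connector *con;

		if (!enc)
			return;
		con = kzalloc(sizeof(*con), GFP_KERNEL);
		if (!con) {
			kfree(enc);	/* unwind the first allocation */
			return;
		}
		/* ... drm_encoder_init(), drm_connector_init(), attach ... */
	}
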
index 8a0b3bc..6f53cf7 100644
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-struct intelfb_par {
+struct intel_fbdev {
        struct drm_fb_helper helper;
-       struct intel_framebuffer *intel_fb;
+       struct intel_framebuffer ifb;
+       struct list_head fbdev_list;
        struct drm_display_mode *our_mode;
 };
 
@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
-       .fb_setcolreg = drm_fb_helper_setcolreg,
        .fb_fillrect = cfb_fillrect,
        .fb_copyarea = cfb_copyarea,
        .fb_imageblit = cfb_imageblit,
@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = {
        .fb_setcmap = drm_fb_helper_setcmap,
 };
 
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
-       .gamma_set = intel_crtc_fb_gamma_set,
-       .gamma_get = intel_crtc_fb_gamma_get,
-};
-
-
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
-       struct fb_info *info;
-       struct drm_framebuffer *fb;
-       struct drm_display_mode *mode = crtc->desired_mode;
-
-       fb = crtc->fb;
-       if (!fb)
-               return 1;
-
-       info = fb->fbdev;
-       if (!info)
-               return 1;
-
-       if (!mode)
-               return 1;
-
-       info->var.xres = mode->hdisplay;
-       info->var.right_margin = mode->hsync_start - mode->hdisplay;
-       info->var.hsync_len = mode->hsync_end - mode->hsync_start;
-       info->var.left_margin = mode->htotal - mode->hsync_end;
-       info->var.yres = mode->vdisplay;
-       info->var.lower_margin = mode->vsync_start - mode->vdisplay;
-       info->var.vsync_len = mode->vsync_end - mode->vsync_start;
-       info->var.upper_margin = mode->vtotal - mode->vsync_end;
-       info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
-       /* avoid overflow */
-       info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
-       return 0;
-}
-EXPORT_SYMBOL(intelfb_resize);
-
-static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
-                         uint32_t fb_height, uint32_t surface_width,
-                         uint32_t surface_height,
-                         uint32_t surface_depth, uint32_t surface_bpp,
-                         struct drm_framebuffer **fb_p)
+static int intelfb_create(struct intel_fbdev *ifbdev,
+                         struct drm_fb_helper_surface_size *sizes)
 {
+       struct drm_device *dev = ifbdev->helper.dev;
        struct fb_info *info;
-       struct intelfb_par *par;
        struct drm_framebuffer *fb;
-       struct intel_framebuffer *intel_fb;
        struct drm_mode_fb_cmd mode_cmd;
        struct drm_gem_object *fbo = NULL;
        struct drm_i915_gem_object *obj_priv;
@@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
        int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
 
        /* we don't do packed 24bpp */
-       if (surface_bpp == 24)
-               surface_bpp = 32;
+       if (sizes->surface_bpp == 24)
+               sizes->surface_bpp = 32;
 
-       mode_cmd.width = surface_width;
-       mode_cmd.height = surface_height;
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
 
-       mode_cmd.bpp = surface_bpp;
+       mode_cmd.bpp = sizes->surface_bpp;
        mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
-       mode_cmd.depth = surface_depth;
+       mode_cmd.depth = sizes->surface_depth;
 
        size = mode_cmd.pitch * mode_cmd.height;
        size = ALIGN(size, PAGE_SIZE);
-       fbo = drm_gem_object_alloc(dev, size);
+       fbo = i915_gem_alloc_object(dev, size);
        if (!fbo) {
                DRM_ERROR("failed to allocate framebuffer\n");
                ret = -ENOMEM;
@@ -157,45 +107,37 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
        /* Flush everything out, we'll be doing GTT only from now on */
        i915_gem_object_set_to_gtt_domain(fbo, 1);
 
-       ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
-       if (ret) {
-               DRM_ERROR("failed to allocate fb.\n");
-               goto out_unpin;
-       }
-
-       list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
-       intel_fb = to_intel_framebuffer(fb);
-       *fb_p = fb;
-
-       info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+       info = framebuffer_alloc(0, device);
        if (!info) {
                ret = -ENOMEM;
                goto out_unpin;
        }
 
-       par = info->par;
+       info->par = ifbdev;
 
-       par->helper.funcs = &intel_fb_helper_funcs;
-       par->helper.dev = dev;
-       ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
-                                           INTELFB_CONN_LIMIT);
-       if (ret)
-               goto out_unref;
+       intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+
+       fb = &ifbdev->ifb.base;
+
+       ifbdev->helper.fb = fb;
+       ifbdev->helper.fbdev = info;
 
        strcpy(info->fix.id, "inteldrmfb");
 
        info->flags = FBINFO_DEFAULT;
-
        info->fbops = &intelfb_ops;
 
-
        /* setup aperture base/size for vesafb takeover */
-       info->aperture_base = dev->mode_config.fb_base;
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures) {
+               ret = -ENOMEM;
+               goto out_unpin;
+       }
+       info->apertures->ranges[0].base = dev->mode_config.fb_base;
        if (IS_I9XX(dev))
-               info->aperture_size = pci_resource_len(dev->pdev, 2);
+               info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
        else
-               info->aperture_size = pci_resource_len(dev->pdev, 0);
+               info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
 
        info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
        info->fix.smem_len = size;
@@ -208,12 +150,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
                ret = -ENOSPC;
                goto out_unpin;
        }
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret) {
+               ret = -ENOMEM;
+               goto out_unpin;
+       }
        info->screen_size = size;
 
 //     memset(info->screen_base, 0, size);
 
        drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
-       drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+       drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
        /* FIXME: we really shouldn't expose mmio space at all */
        info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -225,14 +173,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
        info->pixmap.scan_align = 1;
 
-       fb->fbdev = info;
-
-       par->intel_fb = intel_fb;
-
-       /* To allow resizeing without swapping buffers */
        DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
-                       intel_fb->base.width, intel_fb->base.height,
-                       obj_priv->gtt_offset, fbo);
+                     fb->width, fb->height,
+                     obj_priv->gtt_offset, fbo);
+
 
        mutex_unlock(&dev->struct_mutex);
        vga_switcheroo_client_fb_set(dev->pdev, info);
@@ -247,35 +191,86 @@ out:
        return ret;
 }
 
-int intelfb_probe(struct drm_device *dev)
+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+                                         struct drm_fb_helper_surface_size *sizes)
 {
+       struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+       int new_fb = 0;
        int ret;
 
-       DRM_DEBUG_KMS("\n");
-       ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
-       return ret;
+       if (!helper->fb) {
+               ret = intelfb_create(ifbdev, sizes);
+               if (ret)
+                       return ret;
+               new_fb = 1;
+       }
+       return new_fb;
 }
-EXPORT_SYMBOL(intelfb_probe);
 
-int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+       .gamma_set = intel_crtc_fb_gamma_set,
+       .gamma_get = intel_crtc_fb_gamma_get,
+       .fb_probe = intel_fb_find_or_create_single,
+};
+
+int intel_fbdev_destroy(struct drm_device *dev,
+                       struct intel_fbdev *ifbdev)
 {
        struct fb_info *info;
+       struct intel_framebuffer *ifb = &ifbdev->ifb;
 
-       if (!fb)
-               return -EINVAL;
-
-       info = fb->fbdev;
-
-       if (info) {
-               struct intelfb_par *par = info->par;
+       if (ifbdev->helper.fbdev) {
+               info = ifbdev->helper.fbdev;
                unregister_framebuffer(info);
                iounmap(info->screen_base);
-               if (info->par)
-                       drm_fb_helper_free(&par->helper);
+               if (info->cmap.len)
+                       fb_dealloc_cmap(&info->cmap);
                framebuffer_release(info);
        }
 
+       drm_fb_helper_fini(&ifbdev->helper);
+
+       drm_framebuffer_cleanup(&ifb->base);
+       if (ifb->obj)
+               drm_gem_object_unreference_unlocked(ifb->obj);
+
+       return 0;
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+       struct intel_fbdev *ifbdev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+       if (!ifbdev)
+               return -ENOMEM;
+
+       dev_priv->fbdev = ifbdev;
+       ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+       drm_fb_helper_init(dev, &ifbdev->helper, 2,
+                          INTELFB_CONN_LIMIT);
+
+       drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+       drm_fb_helper_initial_config(&ifbdev->helper, 32);
        return 0;
 }
-EXPORT_SYMBOL(intelfb_remove);
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       if (!dev_priv->fbdev)
+               return;
+
+       intel_fbdev_destroy(dev, dev_priv->fbdev);
+       kfree(dev_priv->fbdev);
+       dev_priv->fbdev = NULL;
+}
 MODULE_LICENSE("GPL and additional rights");
+
+void intel_fb_output_poll_changed(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
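
This file inverts control over fbdev setup: instead of the driver probing and
resizing framebuffers itself (intelfb_probe/intelfb_remove/intelfb_resize, all
deleted), the drm_fb_helper core drives allocation through the new .fb_probe
hook, and the driver only supplies intel_fbdev_init()/intel_fbdev_fini(). The
call sites are outside this patch; a sketch of the assumed wiring at driver
load and unload:

	static int sketch_modeset_init(struct drm_device *dev)
	{
		/* Allocates struct intel_fbdev, registers
		 * intel_fb_helper_funcs and builds the initial config. */
		return intel_fbdev_init(dev);
	}

	static void sketch_modeset_cleanup(struct drm_device *dev)
	{
		intel_fbdev_fini(dev);	/* tears down helper and fb, frees it */
	}
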
index 48cade0..65727f0 100644
@@ -39,7 +39,6 @@
 
 struct intel_hdmi_priv {
        u32 sdvox_reg;
-       u32 save_SDVOX;
        bool has_hdmi_sink;
 };
 
@@ -63,8 +62,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
        if (hdmi_priv->has_hdmi_sink)
                sdvox |= SDVO_AUDIO_ENABLE;
 
-       if (intel_crtc->pipe == 1)
-               sdvox |= SDVO_PIPE_B_SELECT;
+       if (intel_crtc->pipe == 1) {
+               if (HAS_PCH_CPT(dev))
+                       sdvox |= PORT_TRANS_B_SEL_CPT;
+               else
+                       sdvox |= SDVO_PIPE_B_SELECT;
+       }
 
        I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
        POSTING_READ(hdmi_priv->sdvox_reg);
@@ -106,27 +109,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
        }
 }
 
-static void intel_hdmi_save(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
-       hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
-}
-
-static void intel_hdmi_restore(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
-       I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
-       POSTING_READ(hdmi_priv->sdvox_reg);
-}
-
 static int intel_hdmi_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
@@ -151,13 +133,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
        struct edid *edid = NULL;
        enum drm_connector_status status = connector_status_disconnected;
 
        hdmi_priv->has_hdmi_sink = false;
-       edid = drm_get_edid(&intel_encoder->base,
+       edid = drm_get_edid(connector,
                            intel_encoder->ddc_bus);
 
        if (edid) {
@@ -165,7 +148,7 @@ intel_hdmi_detect(struct drm_connector *connector)
                        status = connector_status_connected;
                        hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
                }
-               intel_encoder->base.display_info.raw_edid = NULL;
+               connector->display_info.raw_edid = NULL;
                kfree(edid);
        }
 
@@ -174,24 +157,21 @@ intel_hdmi_detect(struct drm_connector *connector)
 
 static int intel_hdmi_get_modes(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 
        /* We should parse the EDID data and find out if it's an HDMI sink so
         * we can send audio to it.
         */
 
-       return intel_ddc_get_modes(intel_encoder);
+       return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
-       if (intel_encoder->i2c_bus)
-               intel_i2c_destroy(intel_encoder->i2c_bus);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
-       kfree(intel_encoder);
+       kfree(connector);
 }
 
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -204,8 +184,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
-       .save = intel_hdmi_save,
-       .restore = intel_hdmi_restore,
        .detect = intel_hdmi_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = intel_hdmi_destroy,
@@ -214,12 +192,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
        .get_modes = intel_hdmi_get_modes,
        .mode_valid = intel_hdmi_mode_valid,
-       .best_encoder = intel_best_encoder,
+       .best_encoder = intel_attached_encoder,
 };
 
 static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
 {
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+       if (intel_encoder->i2c_bus)
+               intel_i2c_destroy(intel_encoder->i2c_bus);
        drm_encoder_cleanup(encoder);
+       kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -231,21 +214,30 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_connector *connector;
        struct intel_encoder *intel_encoder;
+       struct intel_connector *intel_connector;
        struct intel_hdmi_priv *hdmi_priv;
 
        intel_encoder = kcalloc(sizeof(struct intel_encoder) +
                               sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
        if (!intel_encoder)
                return;
+
+       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       if (!intel_connector) {
+               kfree(intel_encoder);
+               return;
+       }
+
        hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
 
-       connector = &intel_encoder->base;
+       connector = &intel_connector->base;
        drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_HDMIA);
        drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
 
        intel_encoder->type = INTEL_OUTPUT_HDMI;
 
+       connector->polled = DRM_CONNECTOR_POLL_HPD;
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -285,7 +277,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
                         DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
 
-       drm_mode_connector_attach_encoder(&intel_encoder->base,
+       drm_mode_connector_attach_encoder(&intel_connector->base,
                                          &intel_encoder->enc);
        drm_sysfs_connector_add(connector);
 
@@ -303,6 +295,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 err_connector:
        drm_connector_cleanup(connector);
        kfree(intel_encoder);
+       kfree(intel_connector);
 
        return;
 }
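
Two HDMI-specific details above: pipe selection needs a different bit on CPT
PCHs (PORT_TRANS_B_SEL_CPT rather than SDVO_PIPE_B_SELECT), and the connector
now advertises hardware hotplug with connector->polled = DRM_CONNECTOR_POLL_HPD.
The latter pairs with intel_fb_output_poll_changed() from the previous file; a
sketch of the assumed hookup (the actual assignment lives outside these hunks,
and the .fb_create name is an assumption):

	static const struct drm_mode_config_funcs sketch_mode_funcs = {
		.fb_create = intel_user_framebuffer_create,	/* assumed name */
		/* poll/hotplug events funnel into the fbdev helper: */
		.output_poll_changed = intel_fb_output_poll_changed,
	};
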
index b66806a..6a1accd 100644
@@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
        /* XXX: We never power down the LVDS pairs. */
 }
 
-static void intel_lvds_save(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
-       u32 pwm_ctl_reg;
-
-       if (HAS_PCH_SPLIT(dev)) {
-               pp_on_reg = PCH_PP_ON_DELAYS;
-               pp_off_reg = PCH_PP_OFF_DELAYS;
-               pp_ctl_reg = PCH_PP_CONTROL;
-               pp_div_reg = PCH_PP_DIVISOR;
-               pwm_ctl_reg = BLC_PWM_CPU_CTL;
-       } else {
-               pp_on_reg = PP_ON_DELAYS;
-               pp_off_reg = PP_OFF_DELAYS;
-               pp_ctl_reg = PP_CONTROL;
-               pp_div_reg = PP_DIVISOR;
-               pwm_ctl_reg = BLC_PWM_CTL;
-       }
-
-       dev_priv->savePP_ON = I915_READ(pp_on_reg);
-       dev_priv->savePP_OFF = I915_READ(pp_off_reg);
-       dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
-       dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
-       dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
-       dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
-                                      BACKLIGHT_DUTY_CYCLE_MASK);
-
-       /*
-        * If the light is off at server startup, just make it full brightness
-        */
-       if (dev_priv->backlight_duty_cycle == 0)
-               dev_priv->backlight_duty_cycle =
-                       intel_lvds_get_max_backlight(dev);
-}
-
-static void intel_lvds_restore(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
-       u32 pwm_ctl_reg;
-
-       if (HAS_PCH_SPLIT(dev)) {
-               pp_on_reg = PCH_PP_ON_DELAYS;
-               pp_off_reg = PCH_PP_OFF_DELAYS;
-               pp_ctl_reg = PCH_PP_CONTROL;
-               pp_div_reg = PCH_PP_DIVISOR;
-               pwm_ctl_reg = BLC_PWM_CPU_CTL;
-       } else {
-               pp_on_reg = PP_ON_DELAYS;
-               pp_off_reg = PP_OFF_DELAYS;
-               pp_ctl_reg = PP_CONTROL;
-               pp_div_reg = PP_DIVISOR;
-               pwm_ctl_reg = BLC_PWM_CTL;
-       }
-
-       I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
-       I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
-       I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
-       I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
-       I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
-       if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
-               intel_lvds_set_power(dev, true);
-       else
-               intel_lvds_set_power(dev, false);
-}
-
 static int intel_lvds_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = 0;
 
        if (dev_priv->lvds_edid_good) {
-               ret = intel_ddc_get_modes(intel_encoder);
+               ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 
                if (ret)
                        return ret;
@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 static void intel_lvds_destroy(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (intel_encoder->ddc_bus)
-               intel_i2c_destroy(intel_encoder->ddc_bus);
        if (dev_priv->lid_notifier.notifier_call)
                acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
        drm_sysfs_connector_remove(connector);
@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector,
                                   uint64_t value)
 {
        struct drm_device *dev = connector->dev;
-       struct intel_encoder *intel_encoder =
-                       to_intel_encoder(connector);
 
        if (property == dev->mode_config.scaling_mode_property &&
                                connector->encoder) {
                struct drm_crtc *crtc = connector->encoder->crtc;
+               struct drm_encoder *encoder = connector->encoder;
+               struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
                struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+
                if (value == DRM_MODE_SCALE_NONE) {
                        DRM_DEBUG_KMS("no scaling not supported\n");
                        return 0;
@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
        .get_modes = intel_lvds_get_modes,
        .mode_valid = intel_lvds_mode_valid,
-       .best_encoder = intel_best_encoder,
+       .best_encoder = intel_attached_encoder,
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
-       .save = intel_lvds_save,
-       .restore = intel_lvds_restore,
        .detect = intel_lvds_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_lvds_set_property,
@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
 
 static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
 {
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+       if (intel_encoder->ddc_bus)
+               intel_i2c_destroy(intel_encoder->ddc_bus);
        drm_encoder_cleanup(encoder);
+       kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
+       struct intel_connector *intel_connector;
        struct drm_connector *connector;
        struct drm_encoder *encoder;
        struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev)
                return;
        }
 
-       connector = &intel_encoder->base;
+       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       if (!intel_connector) {
+               kfree(intel_encoder);
+               return;
+       }
+
+       connector = &intel_connector->base;
        encoder = &intel_encoder->enc;
-       drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
+       drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
 
        drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
                         DRM_MODE_ENCODER_LVDS);
 
-       drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+       drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
        intel_encoder->type = INTEL_OUTPUT_LVDS;
 
        intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
        intel_encoder->crtc_mask = (1 << 1);
+       if (IS_I965G(dev))
+               intel_encoder->crtc_mask |= (1 << 0);
        drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
        drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
         * the initial panel fitting mode will be FULL_SCREEN.
         */
 
-       drm_connector_attach_property(&intel_encoder->base,
+       drm_connector_attach_property(&intel_connector->base,
                                      dev->mode_config.scaling_mode_property,
                                      DRM_MODE_SCALE_FULLSCREEN);
        lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev)
         */
        dev_priv->lvds_edid_good = true;
 
-       if (!intel_ddc_get_modes(intel_encoder))
+       if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
                dev_priv->lvds_edid_good = false;
 
        list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -1151,4 +1093,5 @@ failed:
        drm_connector_cleanup(connector);
        drm_encoder_cleanup(encoder);
        kfree(intel_encoder);
+       kfree(intel_connector);
 }
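
One functional change hides in the init path above: crtc_mask is a bitmask of
the pipes an encoder may drive, and LVDS is no longer pinned to pipe B on
965-class hardware. Read as bits:

	/* crtc_mask worked example (bit n set = pipe n usable):
	 *   (1 << 1)            = 0x2 -> pipe B only (pre-965 LVDS)
	 *   (1 << 1) | (1 << 0) = 0x3 -> pipes A and B (IS_I965G case)
	 */
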
index 8e5c83b..4b1fd3d 100644
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
                }
        };
 
-       intel_i2c_quirk_set(intel_encoder->base.dev, true);
+       intel_i2c_quirk_set(intel_encoder->enc.dev, true);
        ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
-       intel_i2c_quirk_set(intel_encoder->base.dev, false);
+       intel_i2c_quirk_set(intel_encoder->enc.dev, false);
        if (ret == 2)
                return true;
 
@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
 /**
  * intel_ddc_get_modes - get modelist from monitor
  * @connector: DRM connector device to use
+ * @adapter: i2c adapter
  *
  * Fetch the EDID information from @connector using the DDC bus.
  */
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
+int intel_ddc_get_modes(struct drm_connector *connector,
+                       struct i2c_adapter *adapter)
 {
        struct edid *edid;
        int ret = 0;
 
-       intel_i2c_quirk_set(intel_encoder->base.dev, true);
-       edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
-       intel_i2c_quirk_set(intel_encoder->base.dev, false);
+       intel_i2c_quirk_set(connector->dev, true);
+       edid = drm_get_edid(connector, adapter);
+       intel_i2c_quirk_set(connector->dev, false);
        if (edid) {
-               drm_mode_connector_update_edid_property(&intel_encoder->base,
-                                                       edid);
-               ret = drm_add_edid_modes(&intel_encoder->base, edid);
-               intel_encoder->base.display_info.raw_edid = NULL;
+               drm_mode_connector_update_edid_property(connector, edid);
+               ret = drm_add_edid_modes(connector, edid);
+               connector->display_info.raw_edid = NULL;
                kfree(edid);
        }
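
With the new signature, intel_ddc_get_modes() is connector-centric and takes
the DDC bus explicitly, so any caller holding a connector and an i2c adapter
can use it. A sketch of a converted ->get_modes callback, mirroring the HDMI,
LVDS and DVO call sites changed earlier (hypothetical function name):

	static int sketch_get_modes(struct drm_connector *connector)
	{
		struct drm_encoder *encoder = intel_attached_encoder(connector);
		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

		/* Fetch EDID over the encoder's DDC bus, attach the modes to
		 * the connector, and return the number of modes added. */
		return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
	}
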
 
index 6d524a1..b0e17b0 100644
@@ -373,7 +373,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 
        /* never have the overlay hw on without showing a frame */
        BUG_ON(!overlay->vid_bo);
-       obj = overlay->vid_bo->obj;
+       obj = &overlay->vid_bo->base;
 
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
@@ -411,7 +411,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 
        switch (overlay->hw_wedged) {
                case RELEASE_OLD_VID:
-                       obj = overlay->old_vid_bo->obj;
+                       obj = &overlay->old_vid_bo->base;
                        i915_gem_object_unpin(obj);
                        drm_gem_object_unreference(obj);
                        overlay->old_vid_bo = NULL;
@@ -467,7 +467,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
        if (ret != 0)
                return ret;
 
-       obj = overlay->old_vid_bo->obj;
+       obj = &overlay->old_vid_bo->base;
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
        overlay->old_vid_bo = NULL;
@@ -1341,7 +1341,7 @@ void intel_setup_overlay(struct drm_device *dev)
                return;
        overlay->dev = dev;
 
-       reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+       reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
        if (!reg_bo)
                goto out_free;
        overlay->reg_bo = to_intel_bo(reg_bo);
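
The substitutions in this file assume a layout change in the GEM wrapper:
struct drm_i915_gem_object now embeds its struct drm_gem_object by value as
base (allocated through i915_gem_alloc_object()) instead of pointing at it via
->obj. Sketched:

	/* Assumed embedding behind the &...->base rewrites above: */
	struct sketch_i915_gem_object {
		struct drm_gem_object base;	/* was: struct drm_gem_object *obj */
		/* ... driver-private state ... */
	};
	/* so &bo->base replaces bo->obj, and to_intel_bo() is presumably a
	 * container_of(gem_obj, struct drm_i915_gem_object, base). */
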
index 87d9536..aba72c4 100644
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "intel_sdvo_regs.h"
-#include <linux/dmi.h>
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+                         SDVO_TV_MASK)
+
+#define IS_TV(c)       ((c)->output_flag & SDVO_TV_MASK)
+#define IS_LVDS(c)     ((c)->output_flag & SDVO_LVDS_MASK)
+
 
 static char *tv_format_names[] = {
        "NTSC_M"   , "NTSC_J"  , "NTSC_443",
@@ -86,12 +97,6 @@ struct intel_sdvo_priv {
        /* This is for current tv format name */
        char *tv_format_name;
 
-       /* This contains all current supported TV format */
-       char *tv_format_supported[TV_FORMAT_NUM];
-       int   format_supported_num;
-       struct drm_property *tv_format_property;
-       struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
        /**
         * This is set if we treat the device as HDMI, instead of DVI.
         */
@@ -112,12 +117,6 @@ struct intel_sdvo_priv {
         */
        struct drm_display_mode *sdvo_lvds_fixed_mode;
 
-       /**
-        * Returned SDTV resolutions allowed for the current format, if the
-        * device reported it.
-        */
-       struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
-
        /*
         * supported encoding mode, used to determine whether HDMI is
         * supported
@@ -130,11 +129,24 @@ struct intel_sdvo_priv {
        /* Mac mini hack -- use the same DDC as the analog connector */
        struct i2c_adapter *analog_ddc_bus;
 
-       int save_sdvo_mult;
-       u16 save_active_outputs;
-       struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
-       struct intel_sdvo_dtd save_output_dtd[16];
-       u32 save_SDVOX;
+};
+
+struct intel_sdvo_connector {
+       /* Mark the type of connector */
+       uint16_t output_flag;
+
+       /* This contains all current supported TV format */
+       char *tv_format_supported[TV_FORMAT_NUM];
+       int   format_supported_num;
+       struct drm_property *tv_format_property;
+       struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
+       /**
+        * Returned SDTV resolutions allowed for the current format, if the
+        * device reported it.
+        */
+       struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+
        /* add the property for the SDVO-TV */
        struct drm_property *left_property;
        struct drm_property *right_property;
@@ -162,7 +174,12 @@ struct intel_sdvo_priv {
 };
 
 static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
+                       uint16_t flags);
+static void
+intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
+static void
+intel_sdvo_create_enhance_property(struct drm_connector *connector);
 
 /**
  * Writes the SDVOB or SDVOC with the given value, but always writes both
@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
  */
 static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
 {
-       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_device *dev = intel_encoder->enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_sdvo_priv   *sdvo_priv = intel_encoder->dev_priv;
        u32 bval = val, cval = val;
        int i;
 
+       if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
+               I915_WRITE(sdvo_priv->sdvo_reg, val);
+               I915_READ(sdvo_priv->sdvo_reg);
+               return;
+       }
+
        if (sdvo_priv->sdvo_reg == SDVOB) {
                cval = I915_READ(SDVOC);
        } else {
@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name {
     SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
 };
 
-#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
+#define IS_SDVOB(reg)  ((reg) == SDVOB || (reg) == PCH_SDVOB)
+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
 #define SDVO_PRIV(encoder)   ((struct intel_sdvo_priv *) (encoder)->dev_priv)
 
 static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
        return true;
 }
 
-static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
-                                         u16 *outputs)
-{
-       u8 status;
-
-       intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
-       status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
-
-       return (status == SDVO_CMD_STATUS_SUCCESS);
-}
-
 static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
                                          u16 outputs)
 {
@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
        return (status == SDVO_CMD_STATUS_SUCCESS);
 }
 
-static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
-                                 struct intel_sdvo_dtd *dtd)
-{
-       u8 status;
-
-       intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
-       status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
-                                         sizeof(dtd->part1));
-       if (status != SDVO_CMD_STATUS_SUCCESS)
-               return false;
-
-       intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
-       status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
-                                         sizeof(dtd->part2));
-       if (status != SDVO_CMD_STATUS_SUCCESS)
-               return false;
-
-       return true;
-}
-
-static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
-                                        struct intel_sdvo_dtd *dtd)
-{
-       return intel_sdvo_get_timing(intel_encoder,
-                                    SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
-}
-
-static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
-                                        struct intel_sdvo_dtd *dtd)
-{
-       return intel_sdvo_get_timing(intel_encoder,
-                                    SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
-}
-
 static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
                                  struct intel_sdvo_dtd *dtd)
 {
@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en
        return false;
 }
 
-static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
-{
-       u8 response, status;
-
-       intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
-       status = intel_sdvo_read_response(intel_encoder, &response, 1);
-
-       if (status != SDVO_CMD_STATUS_SUCCESS) {
-               DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
-               return SDVO_CLOCK_RATE_MULT_1X;
-       } else {
-               DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
-       }
-
-       return response;
-}
-
 static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
 {
        u8 status;
@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
        memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
                        sizeof(format) : sizeof(format_map));
 
-       intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
+       intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
                             sizeof(format));
 
        status = intel_sdvo_read_response(intel_encoder, NULL, 0);
@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
                /* Set output timings */
                intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
                intel_sdvo_set_target_output(intel_encoder,
-                                            dev_priv->controlled_output);
+                                            dev_priv->attached_output);
                intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
 
                /* Set the input timing to the screen. Assume always input 0. */
@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
                                dev_priv->sdvo_lvds_fixed_mode);
 
                intel_sdvo_set_target_output(intel_encoder,
-                                            dev_priv->controlled_output);
+                                            dev_priv->attached_output);
                intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
 
                /* Set the input timing to the screen. Assume always input 0. */
@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
         * channel on the motherboard.  In a two-input device, the first input
         * will be SDVOB and the second SDVOC.
         */
-       in_out.in0 = sdvo_priv->controlled_output;
+       in_out.in0 = sdvo_priv->attached_output;
        in_out.in1 = 0;
 
        intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
                /* Set the output timing to the screen */
                intel_sdvo_set_target_output(intel_encoder,
-                                            sdvo_priv->controlled_output);
+                                            sdvo_priv->attached_output);
                intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
        }
 
@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 
                if (0)
                        intel_sdvo_set_encoder_power_state(intel_encoder, mode);
-               intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
+               intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
        }
        return;
 }
 
-static void intel_sdvo_save(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-       int o;
-
-       sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
-       intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
-
-       if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
-               intel_sdvo_set_target_input(intel_encoder, true, false);
-               intel_sdvo_get_input_timing(intel_encoder,
-                                           &sdvo_priv->save_input_dtd_1);
-       }
-
-       if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
-               intel_sdvo_set_target_input(intel_encoder, false, true);
-               intel_sdvo_get_input_timing(intel_encoder,
-                                           &sdvo_priv->save_input_dtd_2);
-       }
-
-       for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
-       {
-               u16  this_output = (1 << o);
-               if (sdvo_priv->caps.output_flags & this_output)
-               {
-                       intel_sdvo_set_target_output(intel_encoder, this_output);
-                       intel_sdvo_get_output_timing(intel_encoder,
-                                                    &sdvo_priv->save_output_dtd[o]);
-               }
-       }
-       if (sdvo_priv->is_tv) {
-               /* XXX: Save TV format/enhancements. */
-       }
-
-       sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
-}
-
-static void intel_sdvo_restore(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-       int o;
-       int i;
-       bool input1, input2;
-       u8 status;
-
-       intel_sdvo_set_active_outputs(intel_encoder, 0);
-
-       for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
-       {
-               u16  this_output = (1 << o);
-               if (sdvo_priv->caps.output_flags & this_output) {
-                       intel_sdvo_set_target_output(intel_encoder, this_output);
-                       intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
-               }
-       }
-
-       if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
-               intel_sdvo_set_target_input(intel_encoder, true, false);
-               intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
-       }
-
-       if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
-               intel_sdvo_set_target_input(intel_encoder, false, true);
-               intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
-       }
-
-       intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
-
-       if (sdvo_priv->is_tv) {
-               /* XXX: Restore TV format/enhancements. */
-       }
-
-       intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
-
-       if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
-       {
-               for (i = 0; i < 2; i++)
-                       intel_wait_for_vblank(dev);
-               status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
-               if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
-                       DRM_DEBUG_KMS("First %s output reported failure to "
-                                       "sync\n", SDVO_NAME(sdvo_priv));
-       }
-
-       intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
-}
-
 static int intel_sdvo_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
        return true;
 }
 
+/* Currently unused. */
+#if 0
 struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
 {
        struct drm_connector *connector = NULL;
@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
        intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
        intel_sdvo_read_response(intel_encoder, &response, 2);
 }
+#endif
 
 static bool
 intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
@@ -1598,12 +1472,17 @@ static struct drm_connector *
 intel_find_analog_connector(struct drm_device *dev)
 {
        struct drm_connector *connector;
+       struct drm_encoder *encoder;
        struct intel_encoder *intel_encoder;
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               intel_encoder = to_intel_encoder(connector);
-               if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
-                       return connector;
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               intel_encoder = enc_to_intel_encoder(encoder);
+               if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+                       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                               if (connector && encoder == intel_attached_encoder(connector))
+                                       return connector;
+                       }
+               }
        }
        return NULL;
 }
@@ -1625,15 +1504,17 @@ intel_analog_is_connected(struct drm_device *dev)
 }
 
 enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
        enum drm_connector_status status = connector_status_connected;
        struct edid *edid = NULL;
 
-       edid = drm_get_edid(&intel_encoder->base,
-                           intel_encoder->ddc_bus);
+       edid = drm_get_edid(connector, intel_encoder->ddc_bus);
 
        /* This is only applied to SDVO cards with multiple outputs */
        if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
@@ -1646,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
                 */
                while(temp_ddc > 1) {
                        sdvo_priv->ddc_bus = temp_ddc;
-                       edid = drm_get_edid(&intel_encoder->base,
-                               intel_encoder->ddc_bus);
+                       edid = drm_get_edid(connector, intel_encoder->ddc_bus);
                        if (edid) {
                                /*
                                 * When we can get the EDID, maybe it is the
@@ -1664,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
        /* when there is no edid and no monitor is connected with VGA
         * port, try to use the CRT ddc to read the EDID for DVI-connector
         */
-       if (edid == NULL &&
-           sdvo_priv->analog_ddc_bus &&
-           !intel_analog_is_connected(intel_encoder->base.dev))
-               edid = drm_get_edid(&intel_encoder->base,
-                                   sdvo_priv->analog_ddc_bus);
+       if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+           !intel_analog_is_connected(connector->dev))
+               edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+
        if (edid != NULL) {
-               /* Don't report the output as connected if it's a DVI-I
-                * connector with a non-digital EDID coming out.
-                */
-               if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
-                       if (edid->input & DRM_EDID_INPUT_DIGITAL)
-                               sdvo_priv->is_hdmi =
-                                       drm_detect_hdmi_monitor(edid);
-                       else
-                               status = connector_status_disconnected;
-               }
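+               /* Note whether the EDID and the connector are digital, and
+                * require the two to agree. */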
+               bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+               bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
 
-               kfree(edid);
-               intel_encoder->base.display_info.raw_edid = NULL;
+               /* DDC bus is shared, match EDID to connector type */
+               if (is_digital && need_digital)
+                       sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+               else if (is_digital != need_digital)
+                       status = connector_status_disconnected;
 
-       } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+               connector->display_info.raw_edid = NULL;
+       } else
                status = connector_status_disconnected;
+
+       kfree(edid);
 
        return status;
 }
@@ -1694,8 +1571,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 {
        uint16_t response;
        u8 status;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+       struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+       enum drm_connector_status ret;
 
        intel_sdvo_write_cmd(intel_encoder,
                             SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
@@ -1713,24 +1594,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
        if (response == 0)
                return connector_status_disconnected;
 
-       if (intel_sdvo_multifunc_encoder(intel_encoder) &&
-               sdvo_priv->attached_output != response) {
-               if (sdvo_priv->controlled_output != response &&
-                       intel_sdvo_output_setup(intel_encoder, response) != true)
-                       return connector_status_unknown;
-               sdvo_priv->attached_output = response;
+       sdvo_priv->attached_output = response;
+
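+       /* Classify the response: not one of this connector's outputs,
+        * digital (needs an EDID check), or plainly connected. */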
+       if ((sdvo_connector->output_flag & response) == 0)
+               ret = connector_status_disconnected;
+       else if (response & SDVO_TMDS_MASK)
+               ret = intel_sdvo_hdmi_sink_detect(connector);
+       else
+               ret = connector_status_connected;
+
+       /* May need to update encoder flags, e.g. the TV clock requirement for SDVO TV. */
+       if (ret == connector_status_connected) {
+               sdvo_priv->is_tv = false;
+               sdvo_priv->is_lvds = false;
+               intel_encoder->needs_tv_clock = false;
+
+               if (response & SDVO_TV_MASK) {
+                       sdvo_priv->is_tv = true;
+                       intel_encoder->needs_tv_clock = true;
+               }
+               if (response & SDVO_LVDS_MASK)
+                       sdvo_priv->is_lvds = true;
        }
-       return intel_sdvo_hdmi_sink_detect(connector, response);
+
+       return ret;
 }
 
 static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
        int num_modes;
 
        /* set the bus switch and get the modes */
-       num_modes = intel_ddc_get_modes(intel_encoder);
+       num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 
        /*
         * Mac mini hack.  On this device, the DVI-I connector shares one DDC
@@ -1740,17 +1638,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
         */
        if (num_modes == 0 &&
            sdvo_priv->analog_ddc_bus &&
-           !intel_analog_is_connected(intel_encoder->base.dev)) {
-               struct i2c_adapter *digital_ddc_bus;
-
+           !intel_analog_is_connected(connector->dev)) {
                /* Switch to the analog ddc bus and try that
                 */
-               digital_ddc_bus = intel_encoder->ddc_bus;
-               intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
-
-               (void) intel_ddc_get_modes(intel_encoder);
-
-               intel_encoder->ddc_bus = digital_ddc_bus;
+               (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
        }
 }
 
@@ -1821,8 +1712,9 @@ struct drm_display_mode sdvo_tv_modes[] = {
 
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
-       struct intel_encoder *output = to_intel_encoder(connector);
-       struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
        struct intel_sdvo_sdtv_resolution_request tv_res;
        uint32_t reply = 0, format_map = 0;
        int i;
@@ -1842,11 +1734,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
               sizeof(format_map) ? sizeof(format_map) :
               sizeof(struct intel_sdvo_sdtv_resolution_request));
 
-       intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+       intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
 
-       intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+       intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
                             &tv_res, sizeof(tv_res));
-       status = intel_sdvo_read_response(output, &reply, 3);
+       status = intel_sdvo_read_response(intel_encoder, &reply, 3);
        if (status != SDVO_CMD_STATUS_SUCCESS)
                return;
 
@@ -1863,7 +1755,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
        struct drm_display_mode *newmode;
@@ -1873,7 +1766,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
         * Assume that the preferred modes are
         * arranged in priority order.
         */
-       intel_ddc_get_modes(intel_encoder);
+       intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
        if (list_empty(&connector->probed_modes) == false)
                goto end;
 
@@ -1902,12 +1795,12 @@ end:
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
 {
-       struct intel_encoder *output = to_intel_encoder(connector);
-       struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 
-       if (sdvo_priv->is_tv)
+       if (IS_TV(sdvo_connector))
                intel_sdvo_get_tv_modes(connector);
-       else if (sdvo_priv->is_lvds == true)
+       else if (IS_LVDS(sdvo_connector))
                intel_sdvo_get_lvds_modes(connector);
        else
                intel_sdvo_get_ddc_modes(connector);
@@ -1920,11 +1813,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
 static
 void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
        struct drm_device *dev = connector->dev;
 
-       if (sdvo_priv->is_tv) {
+       if (IS_TV(sdvo_priv)) {
                if (sdvo_priv->left_property)
                        drm_property_destroy(dev, sdvo_priv->left_property);
                if (sdvo_priv->right_property)
@@ -1937,8 +1830,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
                        drm_property_destroy(dev, sdvo_priv->hpos_property);
                if (sdvo_priv->vpos_property)
                        drm_property_destroy(dev, sdvo_priv->vpos_property);
-       }
-       if (sdvo_priv->is_tv) {
                if (sdvo_priv->saturation_property)
                        drm_property_destroy(dev,
                                        sdvo_priv->saturation_property);
@@ -1948,7 +1839,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
                if (sdvo_priv->hue_property)
                        drm_property_destroy(dev, sdvo_priv->hue_property);
        }
-       if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+       if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
                if (sdvo_priv->brightness_property)
                        drm_property_destroy(dev,
                                        sdvo_priv->brightness_property);
@@ -1958,31 +1849,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 
 static void intel_sdvo_destroy(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-
-       if (intel_encoder->i2c_bus)
-               intel_i2c_destroy(intel_encoder->i2c_bus);
-       if (intel_encoder->ddc_bus)
-               intel_i2c_destroy(intel_encoder->ddc_bus);
-       if (sdvo_priv->analog_ddc_bus)
-               intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
-       if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
-               drm_mode_destroy(connector->dev,
-                                sdvo_priv->sdvo_lvds_fixed_mode);
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 
-       if (sdvo_priv->tv_format_property)
+       if (sdvo_connector->tv_format_property)
                drm_property_destroy(connector->dev,
-                                    sdvo_priv->tv_format_property);
-
-       if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
-               intel_sdvo_destroy_enhance_property(connector);
+                                    sdvo_connector->tv_format_property);
 
+       intel_sdvo_destroy_enhance_property(connector);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
-
-       kfree(intel_encoder);
+       kfree(connector);
 }
 
 static int
@@ -1990,9 +1867,11 @@ intel_sdvo_set_property(struct drm_connector *connector,
                        struct drm_property *property,
                        uint64_t val)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-       struct drm_encoder *encoder = &intel_encoder->enc;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
        struct drm_crtc *crtc = encoder->crtc;
        int ret = 0;
        bool changed = false;
@@ -2003,101 +1882,101 @@ intel_sdvo_set_property(struct drm_connector *connector,
        if (ret < 0)
                goto out;
 
-       if (property == sdvo_priv->tv_format_property) {
+       if (property == sdvo_connector->tv_format_property) {
                if (val >= TV_FORMAT_NUM) {
                        ret = -EINVAL;
                        goto out;
                }
                if (sdvo_priv->tv_format_name ==
-                   sdvo_priv->tv_format_supported[val])
+                   sdvo_connector->tv_format_supported[val])
                        goto out;
 
-               sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+               sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
                changed = true;
        }
 
-       if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+       if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
                cmd = 0;
                temp_value = val;
-               if (sdvo_priv->left_property == property) {
+               if (sdvo_connector->left_property == property) {
                        drm_connector_property_set_value(connector,
-                               sdvo_priv->right_property, val);
-                       if (sdvo_priv->left_margin == temp_value)
+                               sdvo_connector->right_property, val);
+                       if (sdvo_connector->left_margin == temp_value)
                                goto out;
 
-                       sdvo_priv->left_margin = temp_value;
-                       sdvo_priv->right_margin = temp_value;
-                       temp_value = sdvo_priv->max_hscan -
-                                       sdvo_priv->left_margin;
+                       sdvo_connector->left_margin = temp_value;
+                       sdvo_connector->right_margin = temp_value;
+                       temp_value = sdvo_connector->max_hscan -
+                                       sdvo_connector->left_margin;
                        cmd = SDVO_CMD_SET_OVERSCAN_H;
-               } else if (sdvo_priv->right_property == property) {
+               } else if (sdvo_connector->right_property == property) {
                        drm_connector_property_set_value(connector,
-                               sdvo_priv->left_property, val);
-                       if (sdvo_priv->right_margin == temp_value)
+                               sdvo_connector->left_property, val);
+                       if (sdvo_connector->right_margin == temp_value)
                                goto out;
 
-                       sdvo_priv->left_margin = temp_value;
-                       sdvo_priv->right_margin = temp_value;
-                       temp_value = sdvo_priv->max_hscan -
-                               sdvo_priv->left_margin;
+                       sdvo_connector->left_margin = temp_value;
+                       sdvo_connector->right_margin = temp_value;
+                       temp_value = sdvo_connector->max_hscan -
+                               sdvo_connector->left_margin;
                        cmd = SDVO_CMD_SET_OVERSCAN_H;
-               } else if (sdvo_priv->top_property == property) {
+               } else if (sdvo_connector->top_property == property) {
                        drm_connector_property_set_value(connector,
-                               sdvo_priv->bottom_property, val);
-                       if (sdvo_priv->top_margin == temp_value)
+                               sdvo_connector->bottom_property, val);
+                       if (sdvo_connector->top_margin == temp_value)
                                goto out;
 
-                       sdvo_priv->top_margin = temp_value;
-                       sdvo_priv->bottom_margin = temp_value;
-                       temp_value = sdvo_priv->max_vscan -
-                                       sdvo_priv->top_margin;
+                       sdvo_connector->top_margin = temp_value;
+                       sdvo_connector->bottom_margin = temp_value;
+                       temp_value = sdvo_connector->max_vscan -
+                                       sdvo_connector->top_margin;
                        cmd = SDVO_CMD_SET_OVERSCAN_V;
-               } else if (sdvo_priv->bottom_property == property) {
+               } else if (sdvo_connector->bottom_property == property) {
                        drm_connector_property_set_value(connector,
-                               sdvo_priv->top_property, val);
-                       if (sdvo_priv->bottom_margin == temp_value)
+                               sdvo_connector->top_property, val);
+                       if (sdvo_connector->bottom_margin == temp_value)
                                goto out;
-                       sdvo_priv->top_margin = temp_value;
-                       sdvo_priv->bottom_margin = temp_value;
-                       temp_value = sdvo_priv->max_vscan -
-                                       sdvo_priv->top_margin;
+                       sdvo_connector->top_margin = temp_value;
+                       sdvo_connector->bottom_margin = temp_value;
+                       temp_value = sdvo_connector->max_vscan -
+                                       sdvo_connector->top_margin;
                        cmd = SDVO_CMD_SET_OVERSCAN_V;
-               } else if (sdvo_priv->hpos_property == property) {
-                       if (sdvo_priv->cur_hpos == temp_value)
+               } else if (sdvo_connector->hpos_property == property) {
+                       if (sdvo_connector->cur_hpos == temp_value)
                                goto out;
 
                        cmd = SDVO_CMD_SET_POSITION_H;
-                       sdvo_priv->cur_hpos = temp_value;
-               } else if (sdvo_priv->vpos_property == property) {
-                       if (sdvo_priv->cur_vpos == temp_value)
+                       sdvo_connector->cur_hpos = temp_value;
+               } else if (sdvo_connector->vpos_property == property) {
+                       if (sdvo_connector->cur_vpos == temp_value)
                                goto out;
 
                        cmd = SDVO_CMD_SET_POSITION_V;
-                       sdvo_priv->cur_vpos = temp_value;
-               } else if (sdvo_priv->saturation_property == property) {
-                       if (sdvo_priv->cur_saturation == temp_value)
+                       sdvo_connector->cur_vpos = temp_value;
+               } else if (sdvo_connector->saturation_property == property) {
+                       if (sdvo_connector->cur_saturation == temp_value)
                                goto out;
 
                        cmd = SDVO_CMD_SET_SATURATION;
-                       sdvo_priv->cur_saturation = temp_value;
-               } else if (sdvo_priv->contrast_property == property) {
-                       if (sdvo_priv->cur_contrast == temp_value)
+                       sdvo_connector->cur_saturation = temp_value;
+               } else if (sdvo_connector->contrast_property == property) {
+                       if (sdvo_connector->cur_contrast == temp_value)
                                goto out;
 
                        cmd = SDVO_CMD_SET_CONTRAST;
-                       sdvo_priv->cur_contrast = temp_value;
-               } else if (sdvo_priv->hue_property == property) {
-                       if (sdvo_priv->cur_hue == temp_value)
+                       sdvo_connector->cur_contrast = temp_value;
+               } else if (sdvo_connector->hue_property == property) {
+                       if (sdvo_connector->cur_hue == temp_value)
                                goto out;
 
                        cmd = SDVO_CMD_SET_HUE;
-                       sdvo_priv->cur_hue = temp_value;
-               } else if (sdvo_priv->brightness_property == property) {
-                       if (sdvo_priv->cur_brightness == temp_value)
+                       sdvo_connector->cur_hue = temp_value;
+               } else if (sdvo_connector->brightness_property == property) {
+                       if (sdvo_connector->cur_brightness == temp_value)
                                goto out;
 
                        cmd = SDVO_CMD_SET_BRIGHTNESS;
-                       sdvo_priv->cur_brightness = temp_value;
+                       sdvo_connector->cur_brightness = temp_value;
                }
                if (cmd) {
                        intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
@@ -2127,8 +2006,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
-       .save = intel_sdvo_save,
-       .restore = intel_sdvo_restore,
        .detect = intel_sdvo_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_sdvo_set_property,
@@ -2138,12 +2015,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
        .get_modes = intel_sdvo_get_modes,
        .mode_valid = intel_sdvo_mode_valid,
-       .best_encoder = intel_best_encoder,
+       .best_encoder = intel_attached_encoder,
 };
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
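+       /* The i2c/DDC buses and the fixed LVDS mode are owned by the
+        * encoder, so release them here rather than in connector destroy. */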
+       if (intel_encoder->i2c_bus)
+               intel_i2c_destroy(intel_encoder->i2c_bus);
+       if (intel_encoder->ddc_bus)
+               intel_i2c_destroy(intel_encoder->ddc_bus);
+       if (sdvo_priv->analog_ddc_bus)
+               intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+
+       if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+               drm_mode_destroy(encoder->dev,
+                                sdvo_priv->sdvo_lvds_fixed_mode);
+
        drm_encoder_cleanup(encoder);
+       kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2159,49 +2051,29 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
  * outputs, then LVDS outputs.
  */
 static void
-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+                         struct intel_sdvo_priv *sdvo, u32 reg)
 {
-       uint16_t mask = 0;
-       unsigned int num_bits;
-
-       /* Make a mask of outputs less than or equal to our own priority in the
-        * list.
-        */
-       switch (dev_priv->controlled_output) {
-       case SDVO_OUTPUT_LVDS1:
-               mask |= SDVO_OUTPUT_LVDS1;
-       case SDVO_OUTPUT_LVDS0:
-               mask |= SDVO_OUTPUT_LVDS0;
-       case SDVO_OUTPUT_TMDS1:
-               mask |= SDVO_OUTPUT_TMDS1;
-       case SDVO_OUTPUT_TMDS0:
-               mask |= SDVO_OUTPUT_TMDS0;
-       case SDVO_OUTPUT_RGB1:
-               mask |= SDVO_OUTPUT_RGB1;
-       case SDVO_OUTPUT_RGB0:
-               mask |= SDVO_OUTPUT_RGB0;
-               break;
-       }
+       struct sdvo_device_mapping *mapping;
 
-       /* Count bits to find what number we are in the priority list. */
-       mask &= dev_priv->caps.output_flags;
-       num_bits = hweight16(mask);
-       if (num_bits > 3) {
-               /* if more than 3 outputs, default to DDC bus 3 for now */
-               num_bits = 3;
-       }
+       if (IS_SDVOB(reg))
+               mapping = &(dev_priv->sdvo_mappings[0]);
+       else
+               mapping = &(dev_priv->sdvo_mappings[1]);
 
-       /* Corresponds to SDVO_CONTROL_BUS_DDCx */
-       dev_priv->ddc_bus = 1 << num_bits;
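+       /* The BIOS/VBT device mapping stores the DDC bus number in the high
+        * nibble of ddc_pin; it selects SDVO_CONTROL_BUS_DDCx. */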
+       sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
 }
 
 static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
 {
        struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
        uint8_t status;
 
-       intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+       if (device == 0)
+               intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+       else
+               intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
 
        intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
        status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
@@ -2214,15 +2086,13 @@ static struct intel_encoder *
 intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
 {
        struct drm_device *dev = chan->drm_dev;
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
        struct intel_encoder *intel_encoder = NULL;
 
-       list_for_each_entry(connector,
-                       &dev->mode_config.connector_list, head) {
-               if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
-                       intel_encoder = to_intel_encoder(connector);
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               intel_encoder = enc_to_intel_encoder(encoder);
+               if (intel_encoder->ddc_bus == &chan->adapter)
                        break;
-               }
        }
        return intel_encoder;
 }
@@ -2259,7 +2129,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sdvo_device_mapping *my_mapping, *other_mapping;
 
-       if (sdvo_reg == SDVOB) {
+       if (IS_SDVOB(sdvo_reg)) {
                my_mapping = &dev_priv->sdvo_mappings[0];
                other_mapping = &dev_priv->sdvo_mappings[1];
        } else {
@@ -2284,120 +2154,237 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
        /* No SDVO device info is found for another DVO port,
         * so use mapping assumption we had before BIOS parsing.
         */
-       if (sdvo_reg == SDVOB)
+       if (IS_SDVOB(sdvo_reg))
                return 0x70;
        else
                return 0x72;
 }
 
-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+static bool
+intel_sdvo_connector_alloc(struct intel_connector **ret)
 {
-       DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
-       return 1;
+       struct intel_connector *intel_connector;
+       struct intel_sdvo_connector *sdvo_connector;
+
+       *ret = kzalloc(sizeof(*intel_connector) +
+                       sizeof(*sdvo_connector), GFP_KERNEL);
+       if (!*ret)
+               return false;
+
+       intel_connector = *ret;
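+       /* sdvo_connector lives in the tail of the single allocation,
+        * directly after intel_connector. */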
+       sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+       intel_connector->dev_priv = sdvo_connector;
+
+       return true;
 }
 
-static struct dmi_system_id intel_sdvo_bad_tv[] = {
-       {
-               .callback = intel_sdvo_bad_tv_callback,
-               .ident = "IntelG45/ICH10R/DME1737",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
-               },
-       },
+static void
+intel_sdvo_connector_create(struct drm_encoder *encoder,
+                           struct drm_connector *connector)
+{
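+       /* Common connector setup shared by the DVI, TV, analog and LVDS init paths. */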
+       drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+                          connector->connector_type);
 
-       { }     /* terminating entry */
-};
+       drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_sysfs_connector_add(connector);
+}
 
 static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
 {
-       struct drm_connector *connector = &intel_encoder->base;
        struct drm_encoder *encoder = &intel_encoder->enc;
        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-       bool ret = true, registered = false;
+       struct drm_connector *connector;
+       struct intel_connector *intel_connector;
+       struct intel_sdvo_connector *sdvo_connector;
+
+       if (!intel_sdvo_connector_alloc(&intel_connector))
+               return false;
+
+       sdvo_connector = intel_connector->dev_priv;
+
+       if (device == 0) {
+               sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+               sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+       } else if (device == 1) {
+               sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+               sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+       }
+
+       connector = &intel_connector->base;
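+       /* DVI/HDMI can hotplug, so poll for both connect and disconnect. */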
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+       encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+       connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+       if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+               && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+               && sdvo_priv->is_hdmi) {
+               /* enable hdmi encoding mode if supported */
+               intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+               intel_sdvo_set_colorimetry(intel_encoder,
+                                          SDVO_COLORIMETRY_RGB256);
+               connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+       }
+       intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                   (1 << INTEL_ANALOG_CLONE_BIT);
+
+       intel_sdvo_connector_create(encoder, connector);
+
+       return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+{
+       struct drm_encoder *encoder = &intel_encoder->enc;
+       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+       struct drm_connector *connector;
+       struct intel_connector *intel_connector;
+       struct intel_sdvo_connector *sdvo_connector;
+
+       if (!intel_sdvo_connector_alloc(&intel_connector))
+               return false;
+
+       connector = &intel_connector->base;
+       encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+       sdvo_connector = intel_connector->dev_priv;
+
+       sdvo_priv->controlled_output |= type;
+       sdvo_connector->output_flag = type;
+
+       sdvo_priv->is_tv = true;
+       intel_encoder->needs_tv_clock = true;
+       intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+       intel_sdvo_connector_create(encoder, connector);
+
+       intel_sdvo_tv_create_property(connector, type);
+
+       intel_sdvo_create_enhance_property(connector);
+
+       return true;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+{
+       struct drm_encoder *encoder = &intel_encoder->enc;
+       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+       struct drm_connector *connector;
+       struct intel_connector *intel_connector;
+       struct intel_sdvo_connector *sdvo_connector;
+
+       if (!intel_sdvo_connector_alloc(&intel_connector))
+               return false;
+
+       connector = &intel_connector->base;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+       sdvo_connector = intel_connector->dev_priv;
+
+       if (device == 0) {
+               sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+               sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+       } else if (device == 1) {
+               sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+               sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+       }
+
+       intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                   (1 << INTEL_ANALOG_CLONE_BIT);
+
+       intel_sdvo_connector_create(encoder, connector);
+       return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+{
+       struct drm_encoder *encoder = &intel_encoder->enc;
+       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+       struct drm_connector *connector;
+       struct intel_connector *intel_connector;
+       struct intel_sdvo_connector *sdvo_connector;
+
+       if (!intel_sdvo_connector_alloc(&intel_connector))
+               return false;
+
+       connector = &intel_connector->base;
+       encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+       connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+       sdvo_connector = intel_connector->dev_priv;
+
+       sdvo_priv->is_lvds = true;
+
+       if (device == 0) {
+               sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+               sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+       } else if (device == 1) {
+               sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+               sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+       }
+
+       intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+                                   (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+
+       intel_sdvo_connector_create(encoder, connector);
+       intel_sdvo_create_enhance_property(connector);
+       return true;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+{
+       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 
        sdvo_priv->is_tv = false;
        intel_encoder->needs_tv_clock = false;
        sdvo_priv->is_lvds = false;
 
-       if (device_is_registered(&connector->kdev)) {
-               drm_sysfs_connector_remove(connector);
-               registered = true;
-       }
+       /* SDVO: an XXX1 function block may exist only if the corresponding XXX0 block does. */
 
-       if (flags &
-           (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
-               if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
-                       sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
-               else
-                       sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-
-               encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
-               connector->connector_type = DRM_MODE_CONNECTOR_DVID;
-
-               if (intel_sdvo_get_supp_encode(intel_encoder,
-                                              &sdvo_priv->encode) &&
-                   intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
-                   sdvo_priv->is_hdmi) {
-                       /* enable hdmi encoding mode if supported */
-                       intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
-                       intel_sdvo_set_colorimetry(intel_encoder,
-                                                  SDVO_COLORIMETRY_RGB256);
-                       connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-                       intel_encoder->clone_mask =
-                                       (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-                                       (1 << INTEL_ANALOG_CLONE_BIT);
-               }
-       } else if ((flags & SDVO_OUTPUT_SVID0) &&
-                  !dmi_check_system(intel_sdvo_bad_tv)) {
-
-               sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
-               encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-               connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-               sdvo_priv->is_tv = true;
-               intel_encoder->needs_tv_clock = true;
-               intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
-       } else if (flags & SDVO_OUTPUT_RGB0) {
-
-               sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
-               encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-               connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-               intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-                                       (1 << INTEL_ANALOG_CLONE_BIT);
-       } else if (flags & SDVO_OUTPUT_RGB1) {
-
-               sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
-               encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-               connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-               intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-                                       (1 << INTEL_ANALOG_CLONE_BIT);
-       } else if (flags & SDVO_OUTPUT_CVBS0) {
-
-               sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
-               encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-               connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-               sdvo_priv->is_tv = true;
-               intel_encoder->needs_tv_clock = true;
-               intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
-       } else if (flags & SDVO_OUTPUT_LVDS0) {
-
-               sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
-               encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-               connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-               sdvo_priv->is_lvds = true;
-               intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
-                                       (1 << INTEL_SDVO_LVDS_CLONE_BIT);
-       } else if (flags & SDVO_OUTPUT_LVDS1) {
-
-               sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
-               encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-               connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-               sdvo_priv->is_lvds = true;
-               intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
-                                       (1 << INTEL_SDVO_LVDS_CLONE_BIT);
-       } else {
+       if (flags & SDVO_OUTPUT_TMDS0)
+               if (!intel_sdvo_dvi_init(intel_encoder, 0))
+                       return false;
+
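+       /* TMDS1 is initialized only when both TMDS outputs are present. */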
+       if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+               if (!intel_sdvo_dvi_init(intel_encoder, 1))
+                       return false;
+
+       /* TV has no XXX1 function block */
+       if (flags & SDVO_OUTPUT_SVID0)
+               if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+                       return false;
+
+       if (flags & SDVO_OUTPUT_CVBS0)
+               if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+                       return false;
+
+       if (flags & SDVO_OUTPUT_RGB0)
+               if (!intel_sdvo_analog_init(intel_encoder, 0))
+                       return false;
+
+       if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+               if (!intel_sdvo_analog_init(intel_encoder, 1))
+                       return false;
+
+       if (flags & SDVO_OUTPUT_LVDS0)
+               if (!intel_sdvo_lvds_init(intel_encoder, 0))
+                       return false;
 
+       if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+               if (!intel_sdvo_lvds_init(intel_encoder, 1))
+                       return false;
+
+       if ((flags & SDVO_OUTPUT_MASK) == 0) {
                unsigned char bytes[2];
 
                sdvo_priv->controlled_output = 0;
@@ -2405,28 +2392,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
                DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
                              SDVO_NAME(sdvo_priv),
                              bytes[0], bytes[1]);
-               ret = false;
+               return false;
        }
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 
-       if (ret && registered)
-               ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
-
-
-       return ret;
-
+       return true;
 }
 
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
 {
-      struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
        struct intel_sdvo_tv_format format;
        uint32_t format_map, i;
        uint8_t status;
 
-       intel_sdvo_set_target_output(intel_encoder,
-                                    sdvo_priv->controlled_output);
+       intel_sdvo_set_target_output(intel_encoder, type);
 
        intel_sdvo_write_cmd(intel_encoder,
                             SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
@@ -2441,35 +2425,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
        if (format_map == 0)
                return;
 
-       sdvo_priv->format_supported_num = 0;
+       sdvo_connector->format_supported_num = 0;
        for (i = 0 ; i < TV_FORMAT_NUM; i++)
                if (format_map & (1 << i)) {
-                       sdvo_priv->tv_format_supported
-                       [sdvo_priv->format_supported_num++] =
+                       sdvo_connector->tv_format_supported
+                       [sdvo_connector->format_supported_num++] =
                        tv_format_names[i];
                }
 
 
-       sdvo_priv->tv_format_property =
+       sdvo_connector->tv_format_property =
                        drm_property_create(
                                connector->dev, DRM_MODE_PROP_ENUM,
-                               "mode", sdvo_priv->format_supported_num);
+                               "mode", sdvo_connector->format_supported_num);
 
-       for (i = 0; i < sdvo_priv->format_supported_num; i++)
+       for (i = 0; i < sdvo_connector->format_supported_num; i++)
                drm_property_add_enum(
-                               sdvo_priv->tv_format_property, i,
-                               i, sdvo_priv->tv_format_supported[i]);
+                               sdvo_connector->tv_format_property, i,
+                               i, sdvo_connector->tv_format_supported[i]);
 
-       sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+       sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
        drm_connector_attach_property(
-                       connector, sdvo_priv->tv_format_property, 0);
+                       connector, sdvo_connector->tv_format_property, 0);
 
 }
 
 static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
        struct intel_sdvo_enhancements_reply sdvo_data;
        struct drm_device *dev = connector->dev;
        uint8_t status;
@@ -2488,7 +2474,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
                DRM_DEBUG_KMS("No enhancement is supported\n");
                return;
        }
-       if (sdvo_priv->is_tv) {
+       if (IS_TV(sdvo_priv)) {
                /* when horizontal overscan is supported, Add the left/right
                 * property
                 */
@@ -2636,8 +2622,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
                                        "default %d, current %d\n",
                                        data_value[0], data_value[1], response);
                }
-       }
-       if (sdvo_priv->is_tv) {
                if (sdvo_data.saturation) {
                        intel_sdvo_write_cmd(intel_encoder,
                                SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
@@ -2733,7 +2717,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
                                        data_value[0], data_value[1], response);
                }
        }
-       if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+       if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
                if (sdvo_data.brightness) {
                        intel_sdvo_write_cmd(intel_encoder,
                                SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
@@ -2773,12 +2757,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
 bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_connector *connector;
        struct intel_encoder *intel_encoder;
        struct intel_sdvo_priv *sdvo_priv;
-
        u8 ch[0x40];
        int i;
+       u32 i2c_reg, ddc_reg, analog_ddc_reg;
 
        intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
        if (!intel_encoder) {
@@ -2791,11 +2774,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
        intel_encoder->dev_priv = sdvo_priv;
        intel_encoder->type = INTEL_OUTPUT_SDVO;
 
+       if (HAS_PCH_SPLIT(dev)) {
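+               /* PCH-split platforms keep these GPIO pins in the PCH. */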
+               i2c_reg = PCH_GPIOE;
+               ddc_reg = PCH_GPIOE;
+               analog_ddc_reg = PCH_GPIOA;
+       } else {
+               i2c_reg = GPIOE;
+               ddc_reg = GPIOE;
+               analog_ddc_reg = GPIOA;
+       }
+
        /* setup the DDC bus. */
-       if (sdvo_reg == SDVOB)
-               intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+       if (IS_SDVOB(sdvo_reg))
+               intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
        else
-               intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+               intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
 
        if (!intel_encoder->i2c_bus)
                goto err_inteloutput;
@@ -2809,20 +2802,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
        for (i = 0; i < 0x40; i++) {
                if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
                        DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
-                                       sdvo_reg == SDVOB ? 'B' : 'C');
+                                     IS_SDVOB(sdvo_reg) ? 'B' : 'C');
                        goto err_i2c;
                }
        }
 
        /* setup the DDC bus. */
-       if (sdvo_reg == SDVOB) {
-               intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
-               sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+       if (IS_SDVOB(sdvo_reg)) {
+               intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+               sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
                                                "SDVOB/VGA DDC BUS");
                dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
        } else {
-               intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
-               sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+               intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+               sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
                                                "SDVOC/VGA DDC BUS");
                dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
        }
@@ -2833,41 +2826,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
        /* Wrap with our custom algo which switches to DDC mode */
        intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
 
+       /* encoder type will be decided later */
+       drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+       drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+
        /* In default case sdvo lvds is false */
        intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
 
        if (intel_sdvo_output_setup(intel_encoder,
                                    sdvo_priv->caps.output_flags) != true) {
                DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
-                         sdvo_reg == SDVOB ? 'B' : 'C');
+                             IS_SDVOB(sdvo_reg) ? 'B' : 'C');
                goto err_i2c;
        }
 
-
-       connector = &intel_encoder->base;
-       drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
-                          connector->connector_type);
-
-       drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
-       connector->interlace_allowed = 0;
-       connector->doublescan_allowed = 0;
-       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
-       drm_encoder_init(dev, &intel_encoder->enc,
-                       &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
-
-       drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
-
-       drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
-       if (sdvo_priv->is_tv)
-               intel_sdvo_tv_create_property(connector);
-
-       if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
-               intel_sdvo_create_enhance_property(connector);
-
-       drm_sysfs_connector_add(connector);
-
-       intel_sdvo_select_ddc_bus(sdvo_priv);
+       intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
 
        /* Set the input timing to the screen. Assume always input 0. */
        intel_sdvo_set_target_input(intel_encoder, true, false);
index d7d39b2..6d553c2 100644 (file)
@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
        }
 }
 
-static void
-intel_tv_save(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-       int i;
-
-       tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
-       tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
-       tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
-       tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
-       tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
-       tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
-       tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
-       tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
-       tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
-       tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
-       tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
-       tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
-       tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
-       tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
-       tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
-       tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
-       tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
-       tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
-       tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
-       tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
-       tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
-       tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
-       tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
-       tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
-       tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
-       tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
-
-       for (i = 0; i < 60; i++)
-               tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
-       for (i = 0; i < 60; i++)
-               tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
-       for (i = 0; i < 43; i++)
-               tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
-       for (i = 0; i < 43; i++)
-               tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
-
-       tv_priv->save_TV_DAC = I915_READ(TV_DAC);
-       tv_priv->save_TV_CTL = I915_READ(TV_CTL);
-}
-
-static void
-intel_tv_restore(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-       struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-       struct drm_crtc *crtc = connector->encoder->crtc;
-       struct intel_crtc *intel_crtc;
-       int i;
-
-       /* FIXME: No CRTC? */
-       if (!crtc)
-               return;
-
-       intel_crtc = to_intel_crtc(crtc);
-       I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
-       I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
-       I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
-       I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
-       I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
-       I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
-       I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
-       I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
-       I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
-       I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
-       I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
-       I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
-       I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
-       I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
-       I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
-       I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
-       I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
-       I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
-       I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
-       I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
-       I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
-
-       {
-               int pipeconf_reg = (intel_crtc->pipe == 0) ?
-                       PIPEACONF : PIPEBCONF;
-               int dspcntr_reg = (intel_crtc->plane == 0) ?
-                       DSPACNTR : DSPBCNTR;
-               int pipeconf = I915_READ(pipeconf_reg);
-               int dspcntr = I915_READ(dspcntr_reg);
-               int dspbase_reg = (intel_crtc->plane == 0) ?
-                       DSPAADDR : DSPBADDR;
-               /* Pipe must be off here */
-               I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-               /* Flush the plane changes */
-               I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
-               if (!IS_I9XX(dev)) {
-                       /* Wait for vblank for the disable to take effect */
-                       intel_wait_for_vblank(dev);
-               }
-
-               I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
-               /* Wait for vblank for the disable to take effect. */
-               intel_wait_for_vblank(dev);
-
-               /* Filter ctl must be set before TV_WIN_SIZE */
-               I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
-               I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
-               I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
-               I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
-               I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
-               I915_WRITE(pipeconf_reg, pipeconf);
-               I915_WRITE(dspcntr_reg, dspcntr);
-               /* Flush the plane changes */
-               I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-       }
-
-       for (i = 0; i < 60; i++)
-               I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
-       for (i = 0; i < 60; i++)
-               I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
-       for (i = 0; i < 43; i++)
-               I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
-       for (i = 0; i < 43; i++)
-               I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
-
-       I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
-       I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
-}
-
 static const struct tv_mode *
 intel_tv_mode_lookup (char *tv_format)
 {
@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder)
 static enum drm_mode_status
 intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 
        /* Ensure TV refresh is close to desired refresh */
@@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
  */
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
        int i;
@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector)
 {
        struct drm_crtc *crtc;
        struct drm_display_mode mode;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-       struct drm_encoder *encoder = &intel_encoder->enc;
        int dpms_mode;
        int type = tv_priv->type;
 
@@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector)
        if (encoder->crtc && encoder->crtc->enabled) {
                type = intel_tv_detect_type(encoder->crtc, intel_encoder);
        } else {
-               crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
+               crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+                                                 &mode, &dpms_mode);
                if (crtc) {
                        type = intel_tv_detect_type(crtc, intel_encoder);
-                       intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+                       intel_release_load_detect_pipe(intel_encoder, connector,
+                                                      dpms_mode);
                } else
                        type = -1;
        }
@@ -1525,7 +1392,8 @@ static void
 intel_tv_chose_preferred_modes(struct drm_connector *connector,
                               struct drm_display_mode *mode_ptr)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 
        if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1550,7 +1418,8 @@ static int
 intel_tv_get_modes(struct drm_connector *connector)
 {
        struct drm_display_mode *mode_ptr;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
        int j, count = 0;
        u64 tmp;
@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector)
 static void
 intel_tv_destroy (struct drm_connector *connector)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
-       kfree(intel_encoder);
+       kfree(connector);
 }
 
 
@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
                      uint64_t val)
 {
        struct drm_device *dev = connector->dev;
-       struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+       struct drm_encoder *encoder = intel_attached_encoder(connector);
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-       struct drm_encoder *encoder = &intel_encoder->enc;
        struct drm_crtc *crtc = encoder->crtc;
        int ret = 0;
        bool changed = false;
@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
 
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
-       .save = intel_tv_save,
-       .restore = intel_tv_restore,
        .detect = intel_tv_detect,
        .destroy = intel_tv_destroy,
        .set_property = intel_tv_set_property,
@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
        .mode_valid = intel_tv_mode_valid,
        .get_modes = intel_tv_get_modes,
-       .best_encoder = intel_best_encoder,
+       .best_encoder = intel_attached_encoder,
 };
 
 static void intel_tv_enc_destroy(struct drm_encoder *encoder)
 {
+       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
        drm_encoder_cleanup(encoder);
+       kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_connector *connector;
        struct intel_encoder *intel_encoder;
+       struct intel_connector *intel_connector;
        struct intel_tv_priv *tv_priv;
        u32 tv_dac_on, tv_dac_off, save_tv_dac;
        char **tv_format_names;
@@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev)
                return;
        }
 
-       connector = &intel_encoder->base;
+       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       if (!intel_connector) {
+               kfree(intel_encoder);
+               return;
+       }
+
+       connector = &intel_connector->base;
 
        drm_connector_init(dev, connector, &intel_tv_connector_funcs,
                           DRM_MODE_CONNECTOR_SVIDEO);
@@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev)
        drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
                         DRM_MODE_ENCODER_TVDAC);
 
-       drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+       drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
        tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
        intel_encoder->type = INTEL_OUTPUT_TVOUT;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
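
The thrust of the intel_tv.c changes above: the connector and the encoder are no longer one allocation. Connectors get their own struct intel_connector, every connector callback now reaches its encoder through intel_attached_encoder()/enc_to_intel_encoder(), and teardown is split accordingly (intel_tv_destroy() frees the connector, intel_tv_enc_destroy() frees the intel_encoder). A sketch of the accessor pattern this relies on, with the caveat that the real helpers live elsewhere in the i915 tree:

/* Sketch only: the actual helpers are defined in the i915 driver.
 * Assumes exactly one encoder id is attached to the connector. */
static struct drm_encoder *
sketch_attached_encoder(struct drm_connector *connector)
{
	struct drm_mode_object *obj;

	obj = drm_mode_object_find(connector->dev,
				   connector->encoder_ids[0],
				   DRM_MODE_OBJECT_ENCODER);
	return obj ? obj_to_encoder(obj) : NULL;
}

#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
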
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 453df3f..acd31ed 100644 (file)
@@ -22,7 +22,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv50_cursor.o nv50_display.o nv50_fbcon.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
-             nv17_gpio.o nv50_gpio.o
+             nv17_gpio.o nv50_gpio.o \
+            nv50_calc.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index abc382a..e7e69cc 100644 (file)
@@ -26,6 +26,7 @@
 #define NV_DEBUG_NOTRACE
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
+#include "nouveau_encoder.h"
 
 /* these defines are made up */
 #define NV_CIO_CRE_44_HEADA 0x0
@@ -256,6 +257,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
 struct init_tbl_entry {
        char *name;
        uint8_t id;
+       /* Return:
+        *  > 0: success, length of opcode
+        *    0: success, but abort further parsing of table (INIT_DONE etc)
+        *  < 0: failure, table parsing will be aborted
+        */
        int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
 };
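
The comment added to struct init_tbl_entry pins down the contract between the opcode handlers and parse_init_table() (reworked in a later hunk of this file): a positive return value advances the parser by that many bytes, zero ends the table cleanly, and a negative errno aborts parsing with a logged error. A conforming handler might look like this sketch (hypothetical opcode, invented payload):

static int
init_sketch_op(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/* bios->data[offset] is the opcode, one payload byte follows */
	uint8_t arg = bios->data[offset + 1];

	if (!iexec->execute)
		return 2;		/* skipped, but still consume 2 bytes */

	if (arg == 0xff)
		return -EINVAL;		/* parse_init_table() logs and stops */

	/* ... program the hardware here ... */
	return 2;			/* success: total length of the opcode */
}
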
 
@@ -709,6 +715,83 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
        return dcb_entry;
 }
 
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+       uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+       int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+       int recordoffset = 0, rdofs = 1, wrofs = 0;
+       uint8_t port_type = 0;
+
+       if (!i2ctable)
+               return -EINVAL;
+
+       if (dcb_version >= 0x30) {
+               if (i2ctable[0] != dcb_version) /* necessary? */
+                       NV_WARN(dev,
+                               "DCB I2C table version mismatch (%02X vs %02X)\n",
+                               i2ctable[0], dcb_version);
+               dcb_i2c_ver = i2ctable[0];
+               headerlen = i2ctable[1];
+               if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+                       i2c_entries = i2ctable[2];
+               else
+                       NV_WARN(dev,
+                               "DCB I2C table has more entries than indexable "
+                               "(%d entries, max %d)\n", i2ctable[2],
+                               DCB_MAX_NUM_I2C_ENTRIES);
+               entry_len = i2ctable[3];
+               /* [4] is i2c_default_indices, read in parse_dcb_table() */
+       }
+       /*
+        * It's your own fault if you call this function on a DCB 1.1 BIOS --
+        * the test below is for DCB 1.2
+        */
+       if (dcb_version < 0x14) {
+               recordoffset = 2;
+               rdofs = 0;
+               wrofs = 1;
+       }
+
+       if (index == 0xf)
+               return 0;
+       if (index >= i2c_entries) {
+               NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+                        index, i2ctable[2]);
+               return -ENOENT;
+       }
+       if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+               NV_ERROR(dev, "DCB I2C entry invalid\n");
+               return -EINVAL;
+       }
+
+       if (dcb_i2c_ver >= 0x30) {
+               port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+               /*
+                * Fixup for chips using same address offset for read and
+                * write.
+                */
+               if (port_type == 4)     /* seen on C51 */
+                       rdofs = wrofs = 1;
+               if (port_type >= 5)     /* G80+ */
+                       rdofs = wrofs = 0;
+       }
+
+       if (dcb_i2c_ver >= 0x40) {
+               if (port_type != 5 && port_type != 6)
+                       NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+               i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
+       }
+
+       i2c->port_type = port_type;
+       i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+       i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+       return 0;
+}
+
 static struct nouveau_i2c_chan *
 init_i2c_device_find(struct drm_device *dev, int i2c_index)
 {
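
read_dcb_i2c_entry(), moved up here so that init_i2c_device_find() can parse entries on demand, walks a table whose DCB 3.0+ form carries its own mini-header: [0] version, [1] header length, [2] entry count, [3] entry length. The indexing it performs boils down to this sketch:

/* Byte offset of entry `index` in a DCB 3.0+ I2C table (sketch). */
static inline int dcb_i2c_entry_offset(const uint8_t *i2ctable, int index)
{
	uint8_t headerlen = i2ctable[1];	/* bytes before the first entry */
	uint8_t entry_len = i2ctable[3];	/* typically 4 */

	/* byte +3 of an entry is 0xff for an unused slot and encodes
	 * the port type on DCB 3.0+ */
	return headerlen + entry_len * index;
}
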
@@ -727,6 +810,20 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index)
        }
        if (i2c_index == 0x80)  /* g80+ */
                i2c_index = dcb->i2c_default_indices & 0xf;
+       else
+       if (i2c_index == 0x81)
+               i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
+
+       if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
+               NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
+               return NULL;
+       }
+
+       /* Make sure i2c table entry has been parsed, it may not
+        * have been if this is a bus not referenced by a DCB encoder
+        */
+       read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+                          i2c_index, &dcb->i2c[i2c_index]);
 
        return nouveau_i2c_find(dev, i2c_index);
 }
@@ -818,7 +915,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
                NV_ERROR(bios->dev,
                         "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
                         offset, config, count);
-               return 0;
+               return -EINVAL;
        }
 
        configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -920,7 +1017,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
                NV_ERROR(bios->dev,
                         "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
                         offset, config, count);
-               return 0;
+               return -EINVAL;
        }
 
        freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -1066,6 +1163,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
        return 2;
 }
 
+static int
+init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+       /*
+        * INIT_DP_CONDITION   opcode: 0x3A ('')
+        *
+        * offset      (8 bit): opcode
+        * offset + 1  (8 bit): "sub" opcode
+        * offset + 2  (8 bit): unknown
+        *
+        */
+
+       struct bit_displayport_encoder_table *dpe = NULL;
+       struct dcb_entry *dcb = bios->display.output;
+       struct drm_device *dev = bios->dev;
+       uint8_t cond = bios->data[offset + 1];
+       int dummy;
+
+       BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
+
+       if (!iexec->execute)
+               return 3;
+
+       dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
+       if (!dpe) {
+               NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
+               return -EINVAL;
+       }
+
+       switch (cond) {
+       case 0:
+       {
+               struct dcb_connector_table_entry *ent =
+                       &bios->dcb.connector.entry[dcb->connector];
+
+               if (ent->type != DCB_CONNECTOR_eDP)
+                       iexec->execute = false;
+       }
+               break;
+       case 1:
+       case 2:
+               if (!(dpe->unknown & cond))
+                       iexec->execute = false;
+               break;
+       case 5:
+       {
+               struct nouveau_i2c_chan *auxch;
+               int ret;
+
+               auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
+               if (!auxch)
+                       return -ENODEV;
+
+               ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
+               if (ret)
+                       return ret;
+
+               if (cond & 1)
+                       iexec->execute = false;
+       }
+               break;
+       default:
+               NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
+               break;
+       }
+
+       if (iexec->execute)
+               BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
+       else
+               BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
+
+       return 3;
+}
+
+static int
+init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+       /*
+        * INIT_3B   opcode: 0x3B ('')
+        *
+        * offset      (8 bit): opcode
+        * offset + 1  (8 bit): crtc index
+        *
+        */
+
+       uint8_t or = ffs(bios->display.output->or) - 1;
+       uint8_t index = bios->data[offset + 1];
+       uint8_t data;
+
+       if (!iexec->execute)
+               return 2;
+
+       data = bios_idxprt_rd(bios, 0x3d4, index);
+       bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
+       return 2;
+}
+
+static int
+init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+       /*
+        * INIT_3C   opcode: 0x3C ('')
+        *
+        * offset      (8 bit): opcode
+        * offset + 1  (8 bit): crtc index
+        *
+        */
+
+       uint8_t or = ffs(bios->display.output->or) - 1;
+       uint8_t index = bios->data[offset + 1];
+       uint8_t data;
+
+       if (!iexec->execute)
+               return 2;
+
+       data = bios_idxprt_rd(bios, 0x3d4, index);
+       bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
+       return 2;
+}
+
 static int
 init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
                      struct init_exec *iexec)
@@ -1170,7 +1387,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
                NV_ERROR(bios->dev,
                         "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
                         offset, config, count);
-               return 0;
+               return -EINVAL;
        }
 
        freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1231,12 +1448,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
         */
 
        uint8_t i2c_index = bios->data[offset + 1];
-       uint8_t i2c_address = bios->data[offset + 2];
+       uint8_t i2c_address = bios->data[offset + 2] >> 1;
        uint8_t count = bios->data[offset + 3];
-       int len = 4 + count * 3;
        struct nouveau_i2c_chan *chan;
-       struct i2c_msg msg;
-       int i;
+       int len = 4 + count * 3;
+       int ret, i;
 
        if (!iexec->execute)
                return len;
@@ -1247,35 +1463,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
        chan = init_i2c_device_find(bios->dev, i2c_index);
        if (!chan)
-               return 0;
+               return -ENODEV;
 
        for (i = 0; i < count; i++) {
-               uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
+               uint8_t reg = bios->data[offset + 4 + i * 3];
                uint8_t mask = bios->data[offset + 5 + i * 3];
                uint8_t data = bios->data[offset + 6 + i * 3];
-               uint8_t value;
+               union i2c_smbus_data val;
 
-               msg.addr = i2c_address;
-               msg.flags = I2C_M_RD;
-               msg.len = 1;
-               msg.buf = &value;
-               if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
-                       return 0;
+               ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+                                    I2C_SMBUS_READ, reg,
+                                    I2C_SMBUS_BYTE_DATA, &val);
+               if (ret < 0)
+                       return ret;
 
                BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
                              "Mask: 0x%02X, Data: 0x%02X\n",
-                       offset, i2c_reg, value, mask, data);
+                       offset, reg, val.byte, mask, data);
 
-               value = (value & mask) | data;
+               if (!bios->execute)
+                       continue;
 
-               if (bios->execute) {
-                       msg.addr = i2c_address;
-                       msg.flags = 0;
-                       msg.len = 1;
-                       msg.buf = &value;
-                       if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
-                               return 0;
-               }
+               val.byte &= mask;
+               val.byte |= data;
+               ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+                                    I2C_SMBUS_WRITE, reg,
+                                    I2C_SMBUS_BYTE_DATA, &val);
+               if (ret < 0)
+                       return ret;
        }
 
        return len;
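
Two separate fixes land in INIT_I2C_BYTE above. First, the VBIOS stores I2C addresses in shifted 8-bit form, so `bios->data[offset + 2] >> 1` converts them to the 7-bit addresses the kernel I2C core expects (the same shift is applied to INIT_ZM_I2C_BYTE and INIT_ZM_I2C below). Second, the open-coded i2c_msg pairs become SMBus byte-data transactions. The read-modify-write now being performed, restated as a standalone sketch:

#include <linux/i2c.h>

/* Read-modify-write one register over SMBus, mirroring the handler above.
 * addr is a 7-bit address; mask/data come from the init-table payload. */
static int rmw_smbus_byte(struct i2c_adapter *adap, u8 addr,
			  u8 reg, u8 mask, u8 data)
{
	union i2c_smbus_data val;
	int ret;

	ret = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, reg,
			     I2C_SMBUS_BYTE_DATA, &val);
	if (ret < 0)
		return ret;

	val.byte = (val.byte & mask) | data;
	return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, reg,
			      I2C_SMBUS_BYTE_DATA, &val);
}
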
@@ -1301,12 +1516,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
         */
 
        uint8_t i2c_index = bios->data[offset + 1];
-       uint8_t i2c_address = bios->data[offset + 2];
+       uint8_t i2c_address = bios->data[offset + 2] >> 1;
        uint8_t count = bios->data[offset + 3];
-       int len = 4 + count * 2;
        struct nouveau_i2c_chan *chan;
-       struct i2c_msg msg;
-       int i;
+       int len = 4 + count * 2;
+       int ret, i;
 
        if (!iexec->execute)
                return len;
@@ -1317,23 +1531,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
        chan = init_i2c_device_find(bios->dev, i2c_index);
        if (!chan)
-               return 0;
+               return -ENODEV;
 
        for (i = 0; i < count; i++) {
-               uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
-               uint8_t data = bios->data[offset + 5 + i * 2];
+               uint8_t reg = bios->data[offset + 4 + i * 2];
+               union i2c_smbus_data val;
+
+               val.byte = bios->data[offset + 5 + i * 2];
 
                BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
-                       offset, i2c_reg, data);
-
-               if (bios->execute) {
-                       msg.addr = i2c_address;
-                       msg.flags = 0;
-                       msg.len = 1;
-                       msg.buf = &data;
-                       if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
-                               return 0;
-               }
+                       offset, reg, val.byte);
+
+               if (!bios->execute)
+                       continue;
+
+               ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+                                    I2C_SMBUS_WRITE, reg,
+                                    I2C_SMBUS_BYTE_DATA, &val);
+               if (ret < 0)
+                       return ret;
        }
 
        return len;
@@ -1357,7 +1573,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
         */
 
        uint8_t i2c_index = bios->data[offset + 1];
-       uint8_t i2c_address = bios->data[offset + 2];
+       uint8_t i2c_address = bios->data[offset + 2] >> 1;
        uint8_t count = bios->data[offset + 3];
        int len = 4 + count;
        struct nouveau_i2c_chan *chan;
@@ -1374,7 +1590,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
        chan = init_i2c_device_find(bios->dev, i2c_index);
        if (!chan)
-               return 0;
+               return -ENODEV;
 
        for (i = 0; i < count; i++) {
                data[i] = bios->data[offset + 4 + i];
@@ -1388,7 +1604,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
                msg.len = count;
                msg.buf = data;
                if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
-                       return 0;
+                       return -EIO;
        }
 
        return len;
@@ -1427,7 +1643,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
        reg = get_tmds_index_reg(bios->dev, mlv);
        if (!reg)
-               return 0;
+               return -EINVAL;
 
        bios_wr32(bios, reg,
                  tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1471,7 +1687,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
 
        reg = get_tmds_index_reg(bios->dev, mlv);
        if (!reg)
-               return 0;
+               return -EINVAL;
 
        for (i = 0; i < count; i++) {
                uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1946,7 +2162,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
        uint32_t reg, data;
 
        if (bios->major_version > 2)
-               return 0;
+               return -ENODEV;
 
        bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
                       bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2001,7 +2217,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
        int clock;
 
        if (bios->major_version > 2)
-               return 0;
+               return -ENODEV;
 
        clock = ROM16(bios->data[meminitoffs + 4]) * 10;
        setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2034,7 +2250,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
        uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
 
        if (bios->major_version > 2)
-               return 0;
+               return -ENODEV;
 
        bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
                             NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@@ -2656,7 +2872,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
                NV_ERROR(bios->dev,
                         "0x%04X: Zero block length - has the M table "
                         "been parsed?\n", offset);
-               return 0;
+               return -EINVAL;
        }
 
        strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2840,14 +3056,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
        if (!bios->display.output) {
                NV_ERROR(dev, "INIT_AUXCH: no active output\n");
-               return 0;
+               return -EINVAL;
        }
 
        auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
        if (!auxch) {
                NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
                         bios->display.output->i2c_index);
-               return 0;
+               return -ENODEV;
        }
 
        if (!iexec->execute)
@@ -2860,7 +3076,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
                ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
                if (ret) {
                        NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
-                       return 0;
+                       return ret;
                }
 
                data &= bios->data[offset + 0];
@@ -2869,7 +3085,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
                ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
                if (ret) {
                        NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
-                       return 0;
+                       return ret;
                }
        }
 
@@ -2899,14 +3115,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
        if (!bios->display.output) {
                NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
-               return 0;
+               return -EINVAL;
        }
 
        auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
        if (!auxch) {
                NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
                         bios->display.output->i2c_index);
-               return 0;
+               return -ENODEV;
        }
 
        if (!iexec->execute)
@@ -2917,7 +3133,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
                ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
                if (ret) {
                        NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
-                       return 0;
+                       return ret;
                }
        }
 
@@ -2934,6 +3150,9 @@ static struct init_tbl_entry itbl_entry[] = {
        { "INIT_COPY"                         , 0x37, init_copy                       },
        { "INIT_NOT"                          , 0x38, init_not                        },
        { "INIT_IO_FLAG_CONDITION"            , 0x39, init_io_flag_condition          },
+       { "INIT_DP_CONDITION"                 , 0x3A, init_dp_condition               },
+       { "INIT_OP_3B"                        , 0x3B, init_op_3b                      },
+       { "INIT_OP_3C"                        , 0x3C, init_op_3c                      },
        { "INIT_INDEX_ADDRESS_LATCHED"        , 0x49, init_idx_addr_latched           },
        { "INIT_IO_RESTRICT_PLL2"             , 0x4A, init_io_restrict_pll2           },
        { "INIT_PLL2"                         , 0x4B, init_pll2                       },
@@ -3001,7 +3220,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
         * is changed back to EXECUTE.
         */
 
-       int count = 0, i, res;
+       int count = 0, i, ret;
        uint8_t id;
 
        /*
@@ -3016,26 +3235,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
                for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
                        ;
 
-               if (itbl_entry[i].name) {
-                       BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
-                               offset, itbl_entry[i].id, itbl_entry[i].name);
-
-                       /* execute eventual command handler */
-                       res = (*itbl_entry[i].handler)(bios, offset, iexec);
-                       if (!res)
-                               break;
-                       /*
-                        * Add the offset of the current command including all data
-                        * of that command. The offset will then be pointing on the
-                        * next op code.
-                        */
-                       offset += res;
-               } else {
+               if (!itbl_entry[i].name) {
                        NV_ERROR(bios->dev,
                                 "0x%04X: Init table command not found: "
                                 "0x%02X\n", offset, id);
                        return -ENOENT;
                }
+
+               BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
+                       itbl_entry[i].id, itbl_entry[i].name);
+
+               /* execute eventual command handler */
+               ret = (*itbl_entry[i].handler)(bios, offset, iexec);
+               if (ret < 0) {
+                       NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
+                                "table opcode: %s %d\n", offset,
+                                itbl_entry[i].name, ret);
+               }
+
+               if (ret <= 0)
+                       break;
+
+               /*
+                * Add the offset of the current command including all data
+                * of that command. The offset will then be pointing on the
+                * next op code.
+                */
+               offset += ret;
        }
 
        if (offset >= bios->length)
@@ -4285,31 +4511,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
                        break;
                }
 
-#if 0 /* for easy debugging */
-       ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
-       ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
-       ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
-       ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
-
-       ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
-       ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
-       ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
-       ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
-
-       ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
-       ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
-       ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
-       ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
-       ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
-       ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
-       ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
-       ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
-
-       ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
-       ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
-
-       ErrorF("pll.refclk: %d\n", pll_lim->refclk);
-#endif
+       NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
+       NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
+       NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
+       NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
+       NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
+       NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
+       NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
+       NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
+       if (pll_lim->vco2.maxfreq) {
+               NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
+               NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
+               NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
+               NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
+               NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
+               NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
+               NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
+               NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
+       }
+       if (!pll_lim->max_p) {
+               NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
+               NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
+       } else {
+               NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
+               NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
+       }
+       NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
 
        return 0;
 }
@@ -4953,79 +5180,6 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
        return 0;
 }
 
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
-       uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
-       int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
-       int recordoffset = 0, rdofs = 1, wrofs = 0;
-       uint8_t port_type = 0;
-
-       if (!i2ctable)
-               return -EINVAL;
-
-       if (dcb_version >= 0x30) {
-               if (i2ctable[0] != dcb_version) /* necessary? */
-                       NV_WARN(dev,
-                               "DCB I2C table version mismatch (%02X vs %02X)\n",
-                               i2ctable[0], dcb_version);
-               dcb_i2c_ver = i2ctable[0];
-               headerlen = i2ctable[1];
-               if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
-                       i2c_entries = i2ctable[2];
-               else
-                       NV_WARN(dev,
-                               "DCB I2C table has more entries than indexable "
-                               "(%d entries, max %d)\n", i2ctable[2],
-                               DCB_MAX_NUM_I2C_ENTRIES);
-               entry_len = i2ctable[3];
-               /* [4] is i2c_default_indices, read in parse_dcb_table() */
-       }
-       /*
-        * It's your own fault if you call this function on a DCB 1.1 BIOS --
-        * the test below is for DCB 1.2
-        */
-       if (dcb_version < 0x14) {
-               recordoffset = 2;
-               rdofs = 0;
-               wrofs = 1;
-       }
-
-       if (index == 0xf)
-               return 0;
-       if (index >= i2c_entries) {
-               NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
-                        index, i2ctable[2]);
-               return -ENOENT;
-       }
-       if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
-               NV_ERROR(dev, "DCB I2C entry invalid\n");
-               return -EINVAL;
-       }
-
-       if (dcb_i2c_ver >= 0x30) {
-               port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
-               /*
-                * Fixup for chips using same address offset for read and
-                * write.
-                */
-               if (port_type == 4)     /* seen on C51 */
-                       rdofs = wrofs = 1;
-               if (port_type >= 5)     /* G80+ */
-                       rdofs = wrofs = 0;
-       }
-
-       if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
-               NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
-       i2c->port_type = port_type;
-       i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
-       i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
-       return 0;
-}
-
 static struct dcb_gpio_entry *
 new_gpio_entry(struct nvbios *bios)
 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index c0d7b0a..adf4ec2 100644 (file)
@@ -35,6 +35,7 @@
 #define DCB_LOC_ON_CHIP 0
 
 struct dcb_i2c_entry {
+       uint32_t entry;
        uint8_t port_type;
        uint8_t read, write;
        struct nouveau_i2c_chan *chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d176..6f3c195 100644 (file)
@@ -160,11 +160,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
-       nvbo->channel = NULL;
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }
+       nvbo->channel = NULL;
 
        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 
        nouveau_bo_placement_set(nvbo, memtype, 0);
 
-       ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+       ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-       ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+       ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                            TTM_MEMTYPE_FLAG_MAPPABLE |
-                            TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+                            TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
-
-               man->io_addr = NULL;
-               man->io_offset = drm_get_resource_start(dev, 1);
-               man->io_size = drm_get_resource_len(dev, 1);
-               if (man->io_size > dev_priv->vram_size)
-                       man->io_size = dev_priv->vram_size;
-
                man->gpu_offset = dev_priv->vm_vram_base;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
-                       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-                                    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+                       man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
-
-               man->io_offset  = dev_priv->gart_info.aper_base;
-               man->io_size    = dev_priv->gart_info.aper_size;
-               man->io_addr   = NULL;
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-                             struct nouveau_bo *nvbo, bool evict, bool no_wait,
+                             struct nouveau_bo *nvbo, bool evict,
+                             bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
        struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                return ret;
 
        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-                                       evict, no_wait, new_mem);
+                                       evict, no_wait_reserve, no_wait_gpu, new_mem);
        if (nvbo->channel && nvbo->channel != chan)
                ret = nouveau_fence_wait(fence, NULL, false, false);
        nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-                    int no_wait, struct ttm_mem_reg *new_mem)
+                    bool no_wait_reserve, bool no_wait_gpu,
+                    struct ttm_mem_reg *new_mem)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                dst_offset += (PAGE_SIZE * line_count);
        }
 
-       return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+       return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait, struct ttm_mem_reg *new_mem)
+                     bool no_wait_reserve, bool no_wait_gpu,
+                     struct ttm_mem_reg *new_mem)
 {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;
 
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait, struct ttm_mem_reg *new_mem)
+                     bool no_wait_reserve, bool no_wait_gpu,
+                     struct ttm_mem_reg *new_mem)
 {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+       ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        if (ret)
                goto out;
 
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-               bool no_wait, struct ttm_mem_reg *new_mem)
+               bool no_wait_reserve, bool no_wait_gpu,
+               struct ttm_mem_reg *new_mem)
 {
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
        /* Software copy if the card isn't up and running yet. */
        if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
            !dev_priv->channel) {
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }
 
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+               ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+               ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
-               ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+               ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 
        if (!ret)
                goto out;
 
        /* Fallback to software copy. */
-       ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
        if (ret)
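
Throughout nouveau_bo.c the single no_wait flag splits into no_wait_reserve and no_wait_gpu, tracking the matching TTM core change to ttm_bo_validate(), ttm_bo_mem_space(), ttm_bo_move_ttm() and ttm_bo_move_memcpy(). A call under the new signature, as a sketch:

/* Blocking validate under the split-flag API (sketch): false for both
 * no_wait flags means the call may sleep on the reservation and on
 * outstanding GPU work; interruptible is false as well. */
static int sketch_validate(struct nouveau_bo *nvbo)
{
	return ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			       false,	/* interruptible */
			       false,	/* no_wait_reserve */
			       false);	/* no_wait_gpu */
}
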
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
        return 0;
 }
 
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+       struct drm_device *dev = dev_priv->dev;
+
+       mem->bus.addr = NULL;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+               return 0;
+       case TTM_PL_TT:
+#if __OS_HAS_AGP
+               if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.base = dev_priv->gart_info.aper_base;
+                       mem->bus.is_iomem = true;
+               }
+#endif
+               break;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               mem->bus.base = drm_get_resource_start(dev, 1);
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+       return 0;
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
+       .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+       .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+       .io_mem_free = &nouveau_ttm_io_mem_free,
 };
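
The new io_mem_reserve/io_mem_free hooks replace the static io_offset/io_size fields deleted from nouveau_bo_init_mem_type() above: aperture information is now produced per buffer, on demand, in mem->bus. A consumer maps the described window roughly like this sketch (assuming mem->bus was filled by io_mem_reserve()):

/* Map the window described by ttm_mem_reg.bus (sketch). */
static void __iomem *map_bus_window(struct ttm_mem_reg *mem)
{
	if (!mem->bus.is_iomem)
		return NULL;	/* system memory: nothing to ioremap */
	return ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
}
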
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 14afe1e..7e663a7 100644 (file)
@@ -843,6 +843,7 @@ nouveau_connector_create(struct drm_device *dev,
 
        switch (dcb->type) {
        case DCB_CONNECTOR_VGA:
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT;
                if (dev_priv->card_type >= NV_50) {
                        drm_connector_attach_property(connector,
                                        dev->mode_config.scaling_mode_property,
@@ -854,6 +855,17 @@ nouveau_connector_create(struct drm_device *dev,
        case DCB_CONNECTOR_TV_3:
                nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
                break;
+       case DCB_CONNECTOR_DP:
+       case DCB_CONNECTOR_eDP:
+       case DCB_CONNECTOR_HDMI_0:
+       case DCB_CONNECTOR_HDMI_1:
+       case DCB_CONNECTOR_DVI_I:
+       case DCB_CONNECTOR_DVI_D:
+               if (dev_priv->card_type >= NV_50)
+                       connector->polled = DRM_CONNECTOR_POLL_HPD;
+               else
+                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+               /* fall-through */
        default:
                nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index a251886..7933de4 100644 (file)
@@ -33,6 +33,8 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 
+#include <ttm/ttm_page_alloc.h>
+
 static int
 nouveau_debugfs_channel_info(struct seq_file *m, void *data)
 {
@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
        { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
        { "memory", nouveau_debugfs_memory_info, 0, NULL },
        { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+       { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cf1c5c0..74e6b4e 100644 (file)
@@ -34,10 +34,6 @@ static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
 {
        struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
-       struct drm_device *dev = drm_fb->dev;
-
-       if (drm_fb->fbdev)
-               nouveau_fbcon_remove(dev, drm_fb);
 
        if (fb->nvbo)
                drm_gem_object_unreference_unlocked(fb->nvbo->gem);
@@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
        .create_handle = nouveau_user_framebuffer_create_handle,
 };
 
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
-                          struct drm_mode_fb_cmd *mode_cmd)
+int
+nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+                        struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
 {
-       struct nouveau_framebuffer *fb;
        int ret;
 
-       fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
-       if (!fb)
-               return NULL;
-
-       ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+       ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
        if (ret) {
-               kfree(fb);
-               return NULL;
+               return ret;
        }
 
-       drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
-
-       fb->nvbo = nvbo;
-       return &fb->base;
+       drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
+       nouveau_fb->nvbo = nvbo;
+       return 0;
 }
 
 static struct drm_framebuffer *
@@ -89,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
                                struct drm_file *file_priv,
                                struct drm_mode_fb_cmd *mode_cmd)
 {
-       struct drm_framebuffer *fb;
+       struct nouveau_framebuffer *nouveau_fb;
        struct drm_gem_object *gem;
+       int ret;
 
        gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
        if (!gem)
                return NULL;
 
-       fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
-       if (!fb) {
+       nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+       if (!nouveau_fb)
+               return NULL;
+
+       ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
+       if (ret) {
                drm_gem_object_unreference(gem);
                return NULL;
        }
 
-       return fb;
+       return &nouveau_fb->base;
 }
 
 const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
        .fb_create = nouveau_user_framebuffer_create,
-       .fb_changed = nouveau_fbcon_probe,
+       .output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 1de974a..c6079e3 100644 (file)
@@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_channel *chan;
        struct drm_crtc *crtc;
-       uint32_t fbdev_flags;
        int ret, i;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
                return 0;
 
        NV_INFO(dev, "Disabling fbcon acceleration...\n");
-       fbdev_flags = dev_priv->fbdev_info->flags;
-       dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+       nouveau_fbcon_save_disable_accel(dev);
 
        NV_INFO(dev, "Unpinning framebuffer(s)...\n");
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -230,9 +228,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
        }
 
        acquire_console_sem();
-       fb_set_suspend(dev_priv->fbdev_info, 1);
+       nouveau_fbcon_set_suspend(dev, 1);
        release_console_sem();
-       dev_priv->fbdev_info->flags = fbdev_flags;
+       nouveau_fbcon_restore_accel(dev);
        return 0;
 
 out_abort:
@@ -250,14 +248,12 @@ nouveau_pci_resume(struct pci_dev *pdev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;
        struct drm_crtc *crtc;
-       uint32_t fbdev_flags;
        int ret, i;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;
 
-       fbdev_flags = dev_priv->fbdev_info->flags;
-       dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+       nouveau_fbcon_save_disable_accel(dev);
 
        NV_INFO(dev, "We're back, enabling device...\n");
        pci_set_power_state(pdev, PCI_D0);
@@ -332,13 +328,14 @@ nouveau_pci_resume(struct pci_dev *pdev)
        }
 
        acquire_console_sem();
-       fb_set_suspend(dev_priv->fbdev_info, 0);
+       nouveau_fbcon_set_suspend(dev, 0);
        release_console_sem();
 
-       nouveau_fbcon_zfill(dev);
+       nouveau_fbcon_zfill_all(dev);
 
        drm_helper_resume_force_mode(dev);
-       dev_priv->fbdev_info->flags = fbdev_flags;
+
+       nouveau_fbcon_restore_accel(dev);
        return 0;
 }
 
index ace630a..5b13443 100644
@@ -535,6 +535,7 @@ struct drm_nouveau_private {
 
        struct fb_info *fbdev_info;
 
+       int fifo_alloc_count;
        struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
 
        struct nouveau_engine engine;
@@ -621,6 +622,9 @@ struct drm_nouveau_private {
        struct {
                struct dentry *channel_root;
        } debugfs;
+
+       struct nouveau_fbdev *nfbdev;
+       struct apertures_struct *apertures;
 };
 
 static inline struct drm_nouveau_private *
@@ -1166,6 +1170,12 @@ int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
 int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
 int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
 
+/* nv50_calc. */
+int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
+                 int *N1, int *M1, int *N2, int *M2, int *P);
+int nv50_calc_pll2(struct drm_device *, struct pll_lims *,
+                  int clk, int *N, int *fN, int *M, int *P);
+
 #ifndef ioread32_native
 #ifdef __BIG_ENDIAN
 #define ioread16_native ioread16be
index 9f28b94..e1df820 100644
@@ -48,6 +48,8 @@ struct nouveau_encoder {
        union {
                struct {
                        int mc_unknown;
+                       uint32_t unk0;
+                       uint32_t unk1;
                        int dpcd_version;
                        int link_nr;
                        int link_bw;
index 4a3f31a..d432134 100644
@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
 
 extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
 
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
-                          struct drm_mode_fb_cmd *);
-
+int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+                            struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
 #endif /* __NOUVEAU_FB_H__ */
index 8e7dc1d..fd4a2df 100644
@@ -52,8 +52,8 @@
 static int
 nouveau_fbcon_sync(struct fb_info *info)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
        int ret, i;
@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
-       .fb_setcolreg = drm_fb_helper_setcolreg,
        .fb_fillrect = cfb_fillrect,
        .fb_copyarea = cfb_copyarea,
        .fb_imageblit = cfb_imageblit,
@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
-       .fb_setcolreg = drm_fb_helper_setcolreg,
        .fb_fillrect = nv04_fbcon_fillrect,
        .fb_copyarea = nv04_fbcon_copyarea,
        .fb_imageblit = nv04_fbcon_imageblit,
@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
-       .fb_setcolreg = drm_fb_helper_setcolreg,
        .fb_fillrect = nv50_fbcon_fillrect,
        .fb_copyarea = nv50_fbcon_copyarea,
        .fb_imageblit = nv50_fbcon_imageblit,
@@ -155,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
        *blue = nv_crtc->lut.b[regno];
 }
 
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
-       .gamma_set = nouveau_fbcon_gamma_set,
-       .gamma_get = nouveau_fbcon_gamma_get
-};
-
-#if defined(__i386__) || defined(__x86_64__)
-static bool
-nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
-{
-       struct pci_dev *pdev = dev->pdev;
-       int ramin;
-
-       if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
-           screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
-               return false;
-
-       if (screen_info.lfb_base < pci_resource_start(pdev, 1))
-               goto not_fb;
-
-       if (screen_info.lfb_base + screen_info.lfb_size >=
-           pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
-               goto not_fb;
-
-       return true;
-not_fb:
-       ramin = 2;
-       if (pci_resource_len(pdev, ramin) == 0) {
-               ramin = 3;
-               if (pci_resource_len(pdev, ramin) == 0)
-                       return false;
-       }
-
-       if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
-               return false;
-
-       if (screen_info.lfb_base + screen_info.lfb_size >=
-           pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
-               return false;
-
-       return true;
-}
-#endif
-
-void
-nouveau_fbcon_zfill(struct drm_device *dev)
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct fb_info *info = dev_priv->fbdev_info;
+       struct fb_info *info = nfbdev->helper.fbdev;
        struct fb_fillrect rect;
 
        /* Clear the entire fbcon.  The drm will program every connector
@@ -218,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev)
 }
 
 static int
-nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
-                    uint32_t fb_height, uint32_t surface_width,
-                    uint32_t surface_height, uint32_t surface_depth,
-                    uint32_t surface_bpp, struct drm_framebuffer **pfb)
+nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+                    struct drm_fb_helper_surface_size *sizes)
 {
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct fb_info *info;
-       struct nouveau_fbcon_par *par;
        struct drm_framebuffer *fb;
        struct nouveau_framebuffer *nouveau_fb;
        struct nouveau_bo *nvbo;
        struct drm_mode_fb_cmd mode_cmd;
-       struct device *device = &dev->pdev->dev;
+       struct pci_dev *pdev = dev->pdev;
+       struct device *device = &pdev->dev;
        int size, ret;
 
-       mode_cmd.width = surface_width;
-       mode_cmd.height = surface_height;
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
 
-       mode_cmd.bpp = surface_bpp;
+       mode_cmd.bpp = sizes->surface_bpp;
        mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
        mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
-       mode_cmd.depth = surface_depth;
+       mode_cmd.depth = sizes->surface_depth;
 
        size = mode_cmd.pitch * mode_cmd.height;
        size = roundup(size, PAGE_SIZE);
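A runnable sketch of the sizing math in this hunk: the pitch is bytes-per-row rounded up to 256, and the buffer size is pitch times height rounded up to the page size. The mode values and PAGE_SIZE below are assumptions for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ROUNDUP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long width = 1280, height = 1024, bpp = 32;
	unsigned long pitch = ROUNDUP(width * (bpp >> 3), 256);	/* 256-byte aligned rows */
	unsigned long size = ROUNDUP(pitch * height, PAGE_SIZE);	/* page-aligned buffer */

	printf("pitch=%lu bytes, size=%lu bytes\n", pitch, size);
	return 0;
}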
@@ -268,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 
        mutex_lock(&dev->struct_mutex);
 
-       fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
-       if (!fb) {
+       info = framebuffer_alloc(0, device);
+       if (!info) {
                ret = -ENOMEM;
-               NV_ERROR(dev, "failed to allocate fb.\n");
                goto out_unref;
        }
 
-       list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
-       nouveau_fb = nouveau_framebuffer(fb);
-       *pfb = fb;
-
-       info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
-       if (!info) {
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret) {
                ret = -ENOMEM;
                goto out_unref;
        }
 
-       par = info->par;
-       par->helper.funcs = &nouveau_fbcon_helper_funcs;
-       par->helper.dev = dev;
-       ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
-       if (ret)
-               goto out_unref;
-       dev_priv->fbdev_info = info;
+       info->par = nfbdev;
+
+       nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+
+       nouveau_fb = &nfbdev->nouveau_fb;
+       fb = &nouveau_fb->base;
+
+       /* setup helper */
+       nfbdev->helper.fb = fb;
+       nfbdev->helper.fbdev = info;
 
        strcpy(info->fix.id, "nouveaufb");
        if (nouveau_nofbaccel)
@@ -310,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
        info->screen_size = size;
 
        drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
-       drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+       drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
        /* FIXME: we really shouldn't expose mmio space at all */
-       info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
-       info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
+       info->fix.mmio_start = pci_resource_start(pdev, 1);
+       info->fix.mmio_len = pci_resource_len(pdev, 1);
 
        /* Set aperture base/size for vesafb takeover */
-#if defined(__i386__) || defined(__x86_64__)
-       if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
-               /* Some NVIDIA VBIOS' are stupid and decide to put the
-                * framebuffer in the middle of the PRAMIN BAR for
-                * whatever reason.  We need to know the exact lfb_base
-                * to get vesafb kicked off, and the only reliable way
-                * we have left is to find out lfb_base the same way
-                * vesafb did.
-                */
-               info->aperture_base = screen_info.lfb_base;
-               info->aperture_size = screen_info.lfb_size;
-               if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
-                       info->aperture_size *= 65536;
-       } else
-#endif
-       {
-               info->aperture_base = info->fix.mmio_start;
-               info->aperture_size = info->fix.mmio_len;
+       info->apertures = dev_priv->apertures;
+       if (!info->apertures) {
+               ret = -ENOMEM;
+               goto out_unref;
        }
 
        info->pixmap.size = 64*1024;
@@ -343,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
        info->pixmap.scan_align = 1;
 
-       fb->fbdev = info;
-
-       par->nouveau_fb = nouveau_fb;
-       par->dev = dev;
-
        if (dev_priv->channel && !nouveau_nofbaccel) {
                switch (dev_priv->card_type) {
                case NV_50:
@@ -361,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
                };
        }
 
-       nouveau_fbcon_zfill(dev);
+       nouveau_fbcon_zfill(dev, nfbdev);
 
        /* To allow resizing without swapping buffers */
        NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@@ -379,44 +309,123 @@ out:
        return ret;
 }
 
-int
-nouveau_fbcon_probe(struct drm_device *dev)
+static int
+nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
+                                   struct drm_fb_helper_surface_size *sizes)
 {
-       NV_DEBUG_KMS(dev, "\n");
+       struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+       int new_fb = 0;
+       int ret;
+
+       if (!helper->fb) {
+               ret = nouveau_fbcon_create(nfbdev, sizes);
+               if (ret)
+                       return ret;
+               new_fb = 1;
+       }
+       return new_fb;
+}
 
-       return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
 }
 
 int
-nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 {
-       struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+       struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
        struct fb_info *info;
 
-       if (!fb)
-               return -EINVAL;
-
-       info = fb->fbdev;
-       if (info) {
-               struct nouveau_fbcon_par *par = info->par;
-
+       if (nfbdev->helper.fbdev) {
+               info = nfbdev->helper.fbdev;
                unregister_framebuffer(info);
+               if (info->cmap.len)
+                       fb_dealloc_cmap(&info->cmap);
+               framebuffer_release(info);
+       }
+
+       if (nouveau_fb->nvbo) {
                nouveau_bo_unmap(nouveau_fb->nvbo);
                drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
                nouveau_fb->nvbo = NULL;
-               if (par)
-                       drm_fb_helper_free(&par->helper);
-               framebuffer_release(info);
        }
-
+       drm_fb_helper_fini(&nfbdev->helper);
+       drm_framebuffer_cleanup(&nouveau_fb->base);
        return 0;
 }
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
 
        NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
        info->flags |= FBINFO_HWACCEL_DISABLED;
 }
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+       .gamma_set = nouveau_fbcon_gamma_set,
+       .gamma_get = nouveau_fbcon_gamma_get,
+       .fb_probe = nouveau_fbcon_find_or_create_single,
+};
+
+
+int nouveau_fbcon_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_fbdev *nfbdev;
+
+       nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+       if (!nfbdev)
+               return -ENOMEM;
+
+       nfbdev->dev = dev;
+       dev_priv->nfbdev = nfbdev;
+       nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+       drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+       drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+       drm_fb_helper_initial_config(&nfbdev->helper, 32);
+       return 0;
+}
+
+void nouveau_fbcon_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       if (!dev_priv->nfbdev)
+               return;
+
+       nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
+       kfree(dev_priv->nfbdev);
+       dev_priv->nfbdev = NULL;
+}
+
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
+       dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+void nouveau_fbcon_restore_accel(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+}
+
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+}
+
+void nouveau_fbcon_zfill_all(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+}
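The save/disable/restore helpers added above encapsulate one idiom: stash fb_info->flags, force FBINFO_HWACCEL_DISABLED while the hardware cannot accelerate, then restore the exact saved value. A plain C mock-up of that idiom (the flag value is made up, not the kernel's):

#include <stdio.h>

#define FBINFO_HWACCEL_DISABLED 0x2	/* illustrative value only */

int main(void)
{
	unsigned int flags = 0x1;	/* stand-in for fb_info->flags */
	unsigned int saved = flags;	/* ...save_disable_accel() */

	flags |= FBINFO_HWACCEL_DISABLED;
	printf("suspended: 0x%x\n", flags);

	flags = saved;			/* ...restore_accel() */
	printf("restored:  0x%x\n", flags);
	return 0;
}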
index f9c34e1..e7e1268 100644
 
 #include "drm_fb_helper.h"
 
-struct nouveau_fbcon_par {
+#include "nouveau_fb.h"
+struct nouveau_fbdev {
        struct drm_fb_helper helper;
+       struct nouveau_framebuffer nouveau_fb;
+       struct list_head fbdev_list;
        struct drm_device *dev;
-       struct nouveau_framebuffer *nouveau_fb;
+       unsigned int saved_flags;
 };
 
-int nouveau_fbcon_probe(struct drm_device *dev);
-int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 void nouveau_fbcon_restore(void);
-void nouveau_fbcon_zfill(struct drm_device *dev);
 
 void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
 void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+
+int nouveau_fbcon_init(struct drm_device *dev);
+void nouveau_fbcon_fini(struct drm_device *dev);
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+void nouveau_fbcon_zfill_all(struct drm_device *dev);
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
+void nouveau_fbcon_restore_accel(struct drm_device *dev);
+
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
 #endif /* __NV50_FBCON_H__ */
 
index 1bc0b38..69c76cf 100644
@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
        }
 
        ttm_bo_unref(&bo);
+
+       drm_gem_object_release(gem);
+       kfree(gem);
 }
 
 int
@@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
                nvbo->channel = chan;
                ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-                                     false, false);
+                                     false, false, false);
                nvbo->channel = NULL;
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail ttm_validate\n");
index 32f0e49..f731c5f 100644
@@ -68,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev)
                        return ret;
                }
 
-               pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+               pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
                if (!pgraph->ctxprog) {
                        NV_ERROR(dev, "OOM copying ctxprog\n");
                        release_firmware(fw);
                        return -ENOMEM;
                }
-               memcpy(pgraph->ctxprog, fw->data, fw->size);
 
                cp = pgraph->ctxprog;
                if (le32_to_cpu(cp->signature) != 0x5043564e ||
@@ -97,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev)
                        return ret;
                }
 
-               pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+               pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
                if (!pgraph->ctxvals) {
                        NV_ERROR(dev, "OOM copying ctxvals\n");
                        release_firmware(fw);
                        nouveau_grctx_fini(dev);
                        return -ENOMEM;
                }
-               memcpy(pgraph->ctxvals, fw->data, fw->size);
 
                cv = (void *)pgraph->ctxvals;
                if (le32_to_cpu(cv->signature) != 0x5643564e ||
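The two hunks above are a straight conversion from kmalloc()+memcpy() to kmemdup(), which allocates and copies in one call. A user-space equivalent for illustration:

#include <stdlib.h>
#include <string.h>

/* what kmemdup() does, minus the GFP flags */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char fw[] = "ctxprog bytes";	/* stand-in for fw->data */
	char *copy = memdup(fw, sizeof(fw));

	if (!copy)
		return 1;
	free(copy);
	return 0;
}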
index 88583e7..316a3c7 100644
@@ -254,16 +254,27 @@ struct nouveau_i2c_chan *
 nouveau_i2c_find(struct drm_device *dev, int index)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
+       struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
 
        if (index >= DCB_MAX_NUM_I2C_ENTRIES)
                return NULL;
 
-       if (!bios->dcb.i2c[index].chan) {
-               if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
-                       return NULL;
+       if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+               uint32_t reg = 0xe500, val;
+
+               if (i2c->port_type == 6) {
+                       reg += i2c->read * 0x50;
+                       val  = 0x2002;
+               } else {
+                       reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+                       val  = 0xe001;
+               }
+
+               nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
        }
 
-       return bios->dcb.i2c[index].chan;
+       if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
+               return NULL;
+       return i2c->chan;
 }
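The NV50 branch above programs an i2c pad register before first use: a 0xe500 base plus a 0x50 stride per pad, with 0x2002 selecting AUX mode (port_type 6) and 0xe001 plain i2c. A standalone sketch of just that register/value selection; the sample entry and field values are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t entry = 0x00000500;	/* hypothetical DCB i2c entry, bit 8 set */
	int port_type = 6;		/* 6 == AUX-capable pad */
	int read_off = 1;		/* stand-in for i2c->read */
	uint32_t reg = 0xe500, val;

	if (port_type == 6) {
		reg += read_off * 0x50;
		val  = 0x2002;		/* AUX mode */
	} else {
		reg += ((entry & 0x1e00) >> 9) * 0x50;
		val  = 0xe001;		/* plain i2c mode */
	}
	printf("reg=0x%04x val=0x%04x\n", reg, val);
	return 0;
}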
 
index 13e73ce..53360f1 100644
@@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *)arg;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       uint32_t status, fbdev_flags = 0;
+       uint32_t status;
        unsigned long flags;
 
        status = nv_rd32(dev, NV03_PMC_INTR_0);
@@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-       if (dev_priv->fbdev_info) {
-               fbdev_flags = dev_priv->fbdev_info->flags;
-               dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
-       }
-
        if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
                nouveau_fifo_irq_handler(dev);
                status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
@@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
        if (status)
                NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
 
-       if (dev_priv->fbdev_info)
-               dev_priv->fbdev_info->flags = fbdev_flags;
-
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
        return IRQ_HANDLED;
index aa9b310..6ca80a3 100644
 #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2                          0x02000000
 #define NV50_SOR_DP_UNK118(i,l)          (0x0061c118 + (i) * 0x800 + (l) * 0x80)
 #define NV50_SOR_DP_UNK120(i,l)          (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK128(i,l)          (0x0061c128 + (i) * 0x800 + (l) * 0x80)
 #define NV50_SOR_DP_UNK130(i,l)          (0x0061c130 + (i) * 0x800 + (l) * 0x80)
 
 #define NV50_PDISPLAY_USER(i)                        ((i) * 0x1000 + 0x00640000)
index e171064..e632339 100644
@@ -34,6 +34,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
+#include "nouveau_fbcon.h"
 #include "nv50_display.h"
 
 static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -515,8 +516,10 @@ nouveau_card_init(struct drm_device *dev)
 
        dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_helper_initial_config(dev);
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               nouveau_fbcon_init(dev);
+               drm_kms_helper_poll_init(dev);
+       }
 
        return 0;
 
@@ -563,6 +566,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
        NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
 
        if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+
                nouveau_backlight_exit(dev);
 
                if (dev_priv->channel) {
@@ -637,6 +641,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
 #endif
 }
 
+static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
+{
+       struct pci_dev *pdev = dev->pdev;
+       struct apertures_struct *aper = alloc_apertures(3);
+       if (!aper)
+               return NULL;
+
+       aper->ranges[0].base = pci_resource_start(pdev, 1);
+       aper->ranges[0].size = pci_resource_len(pdev, 1);
+       aper->count = 1;
+
+       if (pci_resource_len(pdev, 2)) {
+               aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+               aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+               aper->count++;
+       }
+
+       if (pci_resource_len(pdev, 3)) {
+               aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+               aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+               aper->count++;
+       }
+
+       return aper;
+}
+
+static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       bool primary = false;
+       dev_priv->apertures = nouveau_get_apertures(dev);
+       if (!dev_priv->apertures)
+               return -ENOMEM;
+
+#ifdef CONFIG_X86
+       primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+
+       remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
+       return 0;
+}
+
 int nouveau_load(struct drm_device *dev, unsigned long flags)
 {
        struct drm_nouveau_private *dev_priv;
@@ -724,6 +770,12 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
        NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
                dev_priv->card_type, reg0);
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               int ret = nouveau_remove_conflicting_drivers(dev);
+               if (ret)
+                       return ret;
+       }
+
        /* map larger RAMIN aperture on NV40 cards */
        dev_priv->ramin  = NULL;
        if (dev_priv->card_type >= NV_40) {
@@ -794,6 +846,8 @@ int nouveau_unload(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               drm_kms_helper_poll_fini(dev);
+               nouveau_fbcon_fini(dev);
                if (dev_priv->card_type >= NV_50)
                        nv50_display_destroy(dev);
                else
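nouveau_get_apertures() above always records BAR 1 and appends BARs 2 and 3 only when they exist; the result feeds remove_conflicting_framebuffers() so vesafb/efifb get kicked off any of those ranges. A user-space sketch of the same scan, with the BAR table stubbed to made-up values:

#include <stdio.h>

struct range { unsigned long base, size; };

/* stand-ins for pci_resource_start()/pci_resource_len() */
static const unsigned long bar_start[4] = { 0, 0xd0000000, 0xd8000000, 0 };
static const unsigned long bar_len[4]   = { 0, 0x08000000, 0x00800000, 0 };

int main(void)
{
	struct range ranges[3];
	int count = 0, bar;

	ranges[count].base = bar_start[1];	/* BAR 1 is unconditional */
	ranges[count].size = bar_len[1];
	count++;

	for (bar = 2; bar <= 3; bar++) {
		if (!bar_len[bar])
			continue;		/* skip absent BARs */
		ranges[count].base = bar_start[bar];
		ranges[count].size = bar_len[bar];
		count++;
	}
	printf("%d aperture range(s)\n", count);
	return 0;
}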
index 813b25c..1eeac4f 100644
@@ -30,8 +30,8 @@
 void
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
 
@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 void
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
 
@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 void
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
        uint32_t fg;
@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
 int
 nv04_fbcon_accel_init(struct fb_info *info)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
        const int sub = NvSubCtxSurf2D;
@@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
        if (ret)
                return ret;
 
-       ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+       ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
                                   0x009f : 0x005f, NvImageBlit);
        if (ret)
                return ret;
index e260986..618355e 100644
@@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
        return 0;
 }
 
-static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
-                             int mthd, uint32_t data)
+/*
+ * Software methods, why they are needed, and how they all work:
+ *
+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are
+ * 3 words long on both. grobj format on NV04 is:
+ *
+ * word 0:
+ *  - bits 0-7: class
+ *  - bit 12: color key active
+ *  - bit 13: clip rect active
+ *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ *            NV03_CONTEXT_SURFACE_DST].
+ *  - bits 15-17: 2d operation [aka patch config]
+ *  - bit 24: patch valid [enables rendering using this object]
+ *  - bit 25: surf3d valid [for tex_tri and multitex_tri only]
+ * word 1:
+ *  - bits 0-1: mono format
+ *  - bits 8-13: color format
+ *  - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ *  - bits 0-15: DMA_A instance
+ *  - bits 16-31: DMA_B instance
+ *
+ * On NV05 it's:
+ *
+ * word 0:
+ *  - bits 0-7: class
+ *  - bit 12: color key active
+ *  - bit 13: clip rect active
+ *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ *            NV03_CONTEXT_SURFACE_DST].
+ *  - bits 15-17: 2d operation [aka patch config]
+ *  - bits 20-22: dither mode
+ *  - bit 24: patch valid [enables rendering using this object]
+ *  - bit 25: surface_dst/surface_color/surf2d/surf3d valid
+ *  - bit 26: surface_src/surface_zeta valid
+ *  - bit 27: pattern valid
+ *  - bit 28: rop valid
+ *  - bit 29: beta1 valid
+ *  - bit 30: beta4 valid
+ * word 1:
+ *  - bits 0-1: mono format
+ *  - bits 8-13: color format
+ *  - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ *  - bits 0-15: DMA_A instance
+ *  - bits 16-31: DMA_B instance
+ *
+ * NV05 will set/unset the relevant valid bits when you poke the relevant
+ * object-binding methods with object of the proper type, or with the NULL
+ * type. It'll only allow rendering using the grobj if all needed objects
+ * are bound. The needed set of objects depends on the selected operation: for
+ * example, the rop object is needed by ROP_AND, but not by SRCCOPY_AND.
+ *
+ * NV04 doesn't have these methods implemented at all, and doesn't have the
+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
+ * is set. So we have to emulate them in software, internally keeping the
+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
+ * but the last word isn't actually used for anything, we abuse it for this
+ * purpose.
+ *
+ * Actually, NV05 can optionally check bit 24 too, but we disable this since
+ * there's no use for it.
+ *
+ * For unknown reasons, NV04 implements surf3d binding in hardware as an
+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping
+ * methods on the surf3d object, so we have to emulate them too.
+ */
+
+static void
+nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
 {
        struct drm_device *dev = chan->dev;
        uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
@@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
        uint32_t tmp;
 
        tmp  = nv_ri32(dev, instance);
-       tmp &= ~0x00038000;
-       tmp |= ((data & 7) << 15);
+       tmp &= ~mask;
+       tmp |= value;
 
        nv_wi32(dev, instance, tmp);
        nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
        nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+}
+
+static void
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+{
+       struct drm_device *dev = chan->dev;
+       uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+       uint32_t tmp, ctx1;
+       int class, op, valid = 1;
+
+       ctx1 = nv_ri32(dev, instance);
+       class = ctx1 & 0xff;
+       op = (ctx1 >> 15) & 7;
+       tmp  = nv_ri32(dev, instance + 0xc);
+       tmp &= ~mask;
+       tmp |= value;
+       nv_wi32(dev, instance + 0xc, tmp);
+
+       /* check for valid surf2d/surf_dst/surf_color */
+       if (!(tmp & 0x02000000))
+               valid = 0;
+       /* check for valid surf_src/surf_zeta */
+       if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
+               valid = 0;
+
+       switch (op) {
+       /* SRCCOPY_AND, SRCCOPY: no extra objects required */
+       case 0:
+       case 3:
+               break;
+       /* ROP_AND: requires pattern and rop */
+       case 1:
+               if (!(tmp & 0x18000000))
+                       valid = 0;
+               break;
+       /* BLEND_AND: requires beta1 */
+       case 2:
+               if (!(tmp & 0x20000000))
+                       valid = 0;
+               break;
+       /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
+       case 4:
+       case 5:
+               if (!(tmp & 0x40000000))
+                       valid = 0;
+               break;
+       }
+
+       nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       if (data > 5)
+               return 1;
+       /* Old versions of the objects only accept first three operations. */
+       if (data > 2 && grclass < 0x40)
+               return 1;
+       nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
+       /* changing operation changes set of objects needed for validation */
+       nv04_graph_set_ctx_val(chan, 0, 0);
+       return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       uint32_t min = data & 0xffff, max;
+       uint32_t w = data >> 16;
+       if (min & 0x8000)
+               /* too large */
+               return 1;
+       if (w & 0x8000)
+               /* yes, it accepts negative for some reason. */
+               w |= 0xffff0000;
+       max = min + w;
+       max &= 0x3ffff;
+       nv_wr32(chan->dev, 0x40053c, min);
+       nv_wr32(chan->dev, 0x400544, max);
+       return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       uint32_t min = data & 0xffff, max;
+       uint32_t w = data >> 16;
+       if (min & 0x8000)
+               /* too large */
+               return 1;
+       if (w & 0x8000)
+               /* yes, it accepts negative for some reason. */
+               w |= 0xffff0000;
+       max = min + w;
+       max &= 0x3ffff;
+       nv_wr32(chan->dev, 0x400540, min);
+       nv_wr32(chan->dev, 0x400548, max);
        return 0;
 }
 
+static int
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx1(chan, 0x00004000, 0);
+               nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+               return 0;
+       case 0x42:
+               nv04_graph_set_ctx1(chan, 0x00004000, 0);
+               nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx1(chan, 0x00004000, 0);
+               nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+               return 0;
+       case 0x42:
+               nv04_graph_set_ctx1(chan, 0x00004000, 0);
+               nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+               return 0;
+       case 0x52:
+               nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
+               nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+               return 0;
+       case 0x18:
+               nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+               return 0;
+       case 0x44:
+               nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx_val(chan, 0x10000000, 0);
+               return 0;
+       case 0x43:
+               nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx_val(chan, 0x20000000, 0);
+               return 0;
+       case 0x12:
+               nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx_val(chan, 0x40000000, 0);
+               return 0;
+       case 0x72:
+               nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+               return 0;
+       case 0x58:
+               nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+               return 0;
+       case 0x59:
+               nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+               return 0;
+       case 0x5a:
+               nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+               return 0;
+       case 0x5b:
+               nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx1(chan, 0x2000, 0);
+               return 0;
+       case 0x19:
+               nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
+               return 0;
+       }
+       return 1;
+}
+
+static int
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
+                             int mthd, uint32_t data)
+{
+       switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+       case 0x30:
+               nv04_graph_set_ctx1(chan, 0x1000, 0);
+               return 0;
+       /* Yes, for some reason even the old versions of objects
+        * accept 0x57 and not 0x17. Consistency be damned.
+        */
+       case 0x57:
+               nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
+               return 0;
+       }
+       return 1;
+}
+
 static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
        { 0x0150, nv04_graph_mthd_set_ref },
        {}
 };
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
+       { 0x0184, nv04_graph_mthd_bind_nv01_patt },
+       { 0x0188, nv04_graph_mthd_bind_rop },
+       { 0x018c, nv04_graph_mthd_bind_beta1 },
+       { 0x0190, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
+       { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+       { 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
+       { 0x0184, nv04_graph_mthd_bind_chroma },
+       { 0x0188, nv04_graph_mthd_bind_clip },
+       { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+       { 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, nv04_graph_mthd_bind_surf_dst },
+       { 0x019c, nv04_graph_mthd_bind_surf_src },
+       { 0x02fc, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
+       { 0x0184, nv04_graph_mthd_bind_chroma },
+       { 0x0188, nv04_graph_mthd_bind_clip },
+       { 0x018c, nv04_graph_mthd_bind_nv04_patt },
+       { 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, nv04_graph_mthd_bind_beta4 },
+       { 0x019c, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
+       { 0x0188, nv04_graph_mthd_bind_chroma },
+       { 0x018c, nv04_graph_mthd_bind_clip },
+       { 0x0190, nv04_graph_mthd_bind_nv04_patt },
+       { 0x0194, nv04_graph_mthd_bind_rop },
+       { 0x0198, nv04_graph_mthd_bind_beta1 },
+       { 0x019c, nv04_graph_mthd_bind_beta4 },
+       { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+       { 0x03e4, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
+       { 0x0184, nv04_graph_mthd_bind_chroma },
+       { 0x0188, nv04_graph_mthd_bind_clip },
+       { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+       { 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
+       { 0x0184, nv04_graph_mthd_bind_chroma },
+       { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+       { 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, nv04_graph_mthd_bind_surf_dst },
        { 0x02fc, nv04_graph_mthd_set_operation },
        {},
 };
 
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
+       { 0x0184, nv04_graph_mthd_bind_chroma },
+       { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+       { 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
+       { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+       { 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, nv04_graph_mthd_bind_surf_dst },
+       { 0x0304, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
+       { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+       { 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
+       { 0x0304, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
+       { 0x0184, nv04_graph_mthd_bind_clip },
+       { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+       { 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
+       { 0x0184, nv04_graph_mthd_bind_clip },
+       { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+       { 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, nv04_graph_mthd_set_operation },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
+       { 0x0188, nv04_graph_mthd_bind_clip },
+       { 0x018c, nv04_graph_mthd_bind_surf_color },
+       { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+       {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
+       { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+       { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+       {},
+};
+
 struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
-       { 0x0039, false, NULL },
-       { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
-       { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
-       { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
-       { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
+       { 0x0038, false, NULL }, /* dvd subpicture */
+       { 0x0039, false, NULL }, /* m2mf */
+       { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
+       { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
+       { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
+       { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
+       { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
+       { 0x0064, false, NULL }, /* nv05 iifc */
+       { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
+       { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
+       { 0x0065, false, NULL }, /* nv05 ifc */
+       { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
+       { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
+       { 0x0066, false, NULL }, /* nv05 sifc */
+       { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
+       { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
        { 0x0030, false, NULL }, /* null */
        { 0x0042, false, NULL }, /* surf2d */
        { 0x0043, false, NULL }, /* rop */
        { 0x0012, false, NULL }, /* beta1 */
        { 0x0072, false, NULL }, /* beta4 */
        { 0x0019, false, NULL }, /* cliprect */
-       { 0x0044, false, NULL }, /* pattern */
+       { 0x0018, false, NULL }, /* nv01 pattern */
+       { 0x0044, false, NULL }, /* nv04 pattern */
        { 0x0052, false, NULL }, /* swzsurf */
-       { 0x0053, false, NULL }, /* surf3d */
+       { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
+       { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
        { 0x0054, false, NULL }, /* tex_tri */
        { 0x0055, false, NULL }, /* multitex_tri */
+       { 0x0017, false, NULL }, /* nv01 chroma */
+       { 0x0057, false, NULL }, /* nv04 chroma */
+       { 0x0058, false, NULL }, /* surf_dst */
+       { 0x0059, false, NULL }, /* surf_src */
+       { 0x005a, false, NULL }, /* surf_color */
+       { 0x005b, false, NULL }, /* surf_zeta */
+       { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
+       { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
+       { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
+       { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
+       { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
+       { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
        { 0x506e, true, nv04_graph_mthds_sw },
        {}
 };
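The long comment earlier in this file documents the grobj word-0 layout that the new nv04_graph_set_ctx1()/nv04_graph_set_ctx_val() helpers manipulate: class in bits 0-7, the 2d operation in bits 15-17, patch-valid in bit 24. A tiny decoder for that layout; the sample word is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ctx1 = 0x0100805f;		/* hypothetical grobj word 0 */
	unsigned int cls   = ctx1 & 0xff;	/* bits 0-7: class */
	unsigned int op    = (ctx1 >> 15) & 7;	/* bits 15-17: 2d operation */
	unsigned int valid = (ctx1 >> 24) & 1;	/* bit 24: patch valid */

	printf("class=0x%02x op=%u patch_valid=%u\n", cls, op, valid);
	return 0;
}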
index 11b11c3..9b5c974 100644
 
 /* TODO:
  *  - get vs count from 0x1540
- *  - document unimplemented bits compared to nvidia
- *    - nsource handling
- *    - R0 & 0x0200 handling
- *    - single-vs handling
- *    - 400314 bit 0
  */
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
new file mode 100644
index 0000000..2cdc2bf
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm_fixed.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+int
+nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
+             int *N1, int *M1, int *N2, int *M2, int *P)
+{
+       struct nouveau_pll_vals pll_vals;
+       int ret;
+
+       ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
+       if (ret <= 0)
+               return ret;
+
+       *N1 = pll_vals.N1;
+       *M1 = pll_vals.M1;
+       *N2 = pll_vals.N2;
+       *M2 = pll_vals.M2;
+       *P = pll_vals.log2P;
+       return ret;
+}
+
+int
+nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
+              int *N, int *fN, int *M, int *P)
+{
+       fixed20_12 fb_div, a, b;
+
+       *P = pll->vco1.maxfreq / clk;
+       if (*P > pll->max_p)
+               *P = pll->max_p;
+       if (*P < pll->min_p)
+               *P = pll->min_p;
+
+       /* *M = ceil(refclk / pll->vco.max_inputfreq); */
+       a.full = dfixed_const(pll->refclk);
+       b.full = dfixed_const(pll->vco1.max_inputfreq);
+       a.full = dfixed_div(a, b);
+       a.full = dfixed_ceil(a);
+       *M = dfixed_trunc(a);
+
+       /* fb_div = (vco * *M) / refclk; */
+       fb_div.full = dfixed_const(clk * *P);
+       fb_div.full = dfixed_mul(fb_div, a);
+       a.full = dfixed_const(pll->refclk);
+       fb_div.full = dfixed_div(fb_div, a);
+
+       /* *N = floor(fb_div); */
+       a.full = dfixed_floor(fb_div);
+       *N = dfixed_trunc(fb_div);
+
+       /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */
+       b.full = dfixed_const(8192);
+       a.full = dfixed_mul(a, b);
+       fb_div.full = dfixed_mul(fb_div, b);
+       fb_div.full = fb_div.full - a.full;
+       *fN = dfixed_trunc(fb_div) - 4096;
+       *fN &= 0xffff;
+
+       return clk;
+}
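The dfixed_* sequence in nv50_calc_pll2() is fixed-point arithmetic; the commented pseudo-math spells out each step. A floating-point reference that computes the same P, M, N and fN, with the clock and PLL limits as made-up example numbers (not taken from any VBIOS):

#include <stdio.h>
#include <math.h>

int main(void)
{
	int clk = 135000, refclk = 27000;		/* kHz, illustrative */
	int vco_max = 2700000, in_max = 400000;		/* illustrative limits */
	int p_min = 1, p_max = 10;

	int P = vco_max / clk;				/* post-divider, clamped */
	if (P > p_max) P = p_max;
	if (P < p_min) P = p_min;

	int M = (int)ceil((double)refclk / in_max);	/* *M = ceil(refclk / max_input) */
	double fb_div = (double)clk * P * M / refclk;	/* feedback divider */
	int N = (int)floor(fb_div);			/* *N = floor(fb_div) */
	int fN = (int)(fb_div * 8192) - N * 8192 - 4096; /* fractional part, biased */

	printf("P=%d M=%d N=%d fN=0x%04x\n", P, M, N, fN & 0xffff);
	return 0;
}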
index cfabeb9..b4e4a3b 100644
@@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
 int
 nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 {
-       uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
-       struct nouveau_pll_vals pll;
-       struct pll_lims limits;
+       uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+       struct pll_lims pll;
        uint32_t reg1, reg2;
-       int ret;
+       int ret, N1, M1, N2, M2, P;
 
-       ret = get_pll_limits(dev, pll_reg, &limits);
+       ret = get_pll_limits(dev, reg, &pll);
        if (ret)
                return ret;
 
-       ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
-       if (ret <= 0)
-               return ret;
+       if (pll.vco2.maxfreq) {
+               ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
+               if (ret <= 0)
+                       return 0;
+
+               NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
+                        pclk, ret, N1, M1, N2, M2, P);
 
-       if (limits.vco2.maxfreq) {
-               reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
-               reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
-               nv_wr32(dev, pll_reg, 0x10000611);
-               nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
-               nv_wr32(dev, pll_reg + 8,
-                       reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
+               reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
+               reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
+               nv_wr32(dev, reg, 0x10000611);
+               nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
+               nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
        } else {
-               reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
-               nv_wr32(dev, pll_reg, 0x50000610);
-               nv_wr32(dev, pll_reg + 4, reg1 |
-                       (pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
+               ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
+               if (ret <= 0)
+                       return 0;
+
+               NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
+                        pclk, ret, N1, N2, M1, P);
+
+               reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
+               nv_wr32(dev, reg, 0x50000610);
+               nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+               nv_wr32(dev, reg + 8, N2);
        }
 
        return 0;
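
Both branches follow the same read-modify-write idiom: mask off the divider fields while preserving reserved bits, then OR in the new N/M/P values. The idiom in isolation (the accessors are placeholders, not the nouveau API):

    #include <stdint.h>

    /* Placeholder MMIO accessors standing in for nv_rd32()/nv_wr32(). */
    extern uint32_t mmio_read(uint32_t reg);
    extern void mmio_write(uint32_t reg, uint32_t val);

    /* Program a single-stage PLL: keep reserved bits, replace N/M/P. */
    static void pll_program(uint32_t reg, int N, int M, int P)
    {
            uint32_t v = mmio_read(reg) & 0xffc00000;  /* reserved bits only */

            mmio_write(reg, v | (P << 16) | (M << 8) | N);
    }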
index 649db4c..580a5d1 100644
@@ -29,6 +29,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
 #include "drm_crtc_helper.h"
 
 static void
@@ -782,6 +783,37 @@ ack:
        nv_wr32(dev, 0x610030, 0x80000000);
 }
 
+static void
+nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
+{
+       int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+       struct drm_encoder *encoder;
+       uint32_t tmp, unk0 = 0, unk1 = 0;
+
+       if (dcb->type != OUTPUT_DP)
+               return;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+               if (nv_encoder->dcb == dcb) {
+                       unk0 = nv_encoder->dp.unk0;
+                       unk1 = nv_encoder->dp.unk1;
+                       break;
+               }
+       }
+
+       if (unk0 || unk1) {
+               tmp  = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+               tmp &= 0xfffffe03;
+               nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
+
+               tmp  = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+               tmp &= 0xfef080c0;
+               nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
+       }
+}
+
 static void
 nv50_display_unk20_handler(struct drm_device *dev)
 {
@@ -805,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
 
        nouveau_bios_run_display_table(dev, dcbent, script, pclk);
 
+       nv50_display_unk20_dp_hack(dev, dcbent);
+
        tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
        tmp &= ~0x000000f;
        nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
@@ -945,6 +979,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
        nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
        if (dev_priv->chipset >= 0x90)
                nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+
+       drm_helper_hpd_irq_event(dev);
 }
 
 void
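
The added drm_helper_hpd_irq_event() call defers hotplug handling to the DRM helper layer once the interrupt is acked. Loosely, the helper re-runs connector detection and raises the uevent userspace listens for; a rough sketch, not the helper's actual body:

    /* Loose sketch of the helper's effect (2.6.34-era detect signature). */
    static void hpd_event_sketch(struct drm_device *dev)
    {
            struct drm_connector *connector;

            /* re-detect every connector ... */
            list_for_each_entry(connector, &dev->mode_config.connector_list, head)
                    connector->status = connector->funcs->detect(connector);

            /* ... then send the uevent userspace listens for */
            drm_sysfs_hotplug_event(dev);
    }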
index a8c70e7..6bf025c 100644
@@ -6,8 +6,8 @@
 void
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
 
@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 void
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
 
@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 void
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
        uint32_t width, dwords, *data = (uint32_t *)image->data;
@@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 int
 nv50_fbcon_accel_init(struct fb_info *info)
 {
-       struct nouveau_fbcon_par *par = info->par;
-       struct drm_device *dev = par->dev;
+       struct nouveau_fbdev *nfbdev = info->par;
+       struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
        struct nouveau_gpuobj *eng2d = NULL;
index 0c68698..b11eaf9 100644
@@ -321,18 +321,23 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
        encoder->possible_clones = 0;
 
        if (nv_encoder->dcb->type == OUTPUT_DP) {
-               uint32_t mc, or = nv_encoder->or;
+               int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
+               uint32_t tmp;
 
                if (dev_priv->chipset < 0x90 ||
                    dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
-                       mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
+                       tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
                else
-                       mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
+                       tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
 
-               switch ((mc & 0x00000f00) >> 8) {
+               switch ((tmp & 0x00000f00) >> 8) {
                case 8:
                case 9:
-                       nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16;
+                       nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
+                       tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+                       nv_encoder->dp.unk0 = tmp & 0x000001fc;
+                       tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+                       nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
                        break;
                default:
                        break;
index 5319d9e..1bc72c3 100644
@@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
 #define ATOM_PP_THERMALCONTROLLER_RV6xx     7
 #define ATOM_PP_THERMALCONTROLLER_RV770     8
 #define ATOM_PP_THERMALCONTROLLER_ADT7473   9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
 
 typedef struct _ATOM_PPLIB_STATE
 {
@@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE
     UCHAR ucClockStateIndices[1]; // variable-sized
 } ATOM_PPLIB_STATE;
 
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+    UCHAR   ucFanTableFormat;                // Change this if the table format or version changes, so that the other fields are not misread.
+    UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
+    USHORT  usTMin;                          // The temperature, in units of 0.01 degrees C, below which we just run at the minimal PWM.
+    USHORT  usTMed;                          // The middle temperature where we change slopes.
+    USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
+    USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
+    USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
+    USHORT  usPWMHigh;                       // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
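
Note the mixed units in the fan table: temperatures are stored in 0.01 degC steps and PWM duty cycles in 0.01% steps. A small decode sketch using those units (the caller is assumed to have located the table via usFanTableOffset):

    #include <stdio.h>

    /* Sketch: print one fan table using the units documented above. */
    static void print_fan_table(const ATOM_PPLIB_FANTABLE *f)
    {
            printf("Tmin %u.%02u C (hyst %u C), PWM %u.%02u%% .. %u.%02u%%\n",
                   f->usTMin / 100, f->usTMin % 100, f->ucTHyst,
                   f->usPWMMin / 100, f->usPWMMin % 100,
                   f->usPWMHigh / 100, f->usPWMHigh % 100);
    }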
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+    USHORT  usSize;
+    ULONG   ulMaxEngineClock;   // For Overdrive.
+    ULONG   ulMaxMemoryClock;   // For Overdrive.
+    // Add extra system parameters here, always adjust size to include all fields.
+} ATOM_PPLIB_EXTENDEDHEADER;
+
 //// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
 #define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
 #define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE
 #define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
 #define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
 #define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Does the driver control VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Whether the driver supports the BACO state.
 
 typedef struct _ATOM_PPLIB_POWERPLAYTABLE
 {
@@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
 
 } ATOM_PPLIB_POWERPLAYTABLE;
 
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+    ATOM_PPLIB_POWERPLAYTABLE basicTable;
+    UCHAR   ucNumCustomThermalPolicy;
+    USHORT  usCustomThermalPolicyArrayOffset;
+} ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+    USHORT                     usFormatID;                      // To be used ONLY by PPGen.
+    USHORT                     usFanTableOffset;
+    USHORT                     usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
 //// ATOM_PPLIB_NONCLOCK_INFO::usClassification
 #define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
 #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
@@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
 #define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
 #define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
 #define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
-// remaining 3 bits are reserved
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
 
 //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
 #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
@@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
 
 #define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
 #define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
+#define ATOM_PPLIB_DISALLOW_ON_DC                        0x00004000
 #define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
 
-#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
+// memory-related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x00010000
+
+// M3 arbiter: 2 bits; currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK                       0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT                      17
 
 // Contained in an array starting at the offset
 // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
 // Contained in an array starting at the offset
 // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
 // referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
+
 typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
 {
       USHORT usEngineClockLow;
@@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
 #define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
 #define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
 #define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF    16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      USHORT usUnused;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
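
As with the other *_CLOCK_INFO layouts, each 24-bit clock is split into a 16-bit low word plus an 8-bit high byte. Reassembling one, as a sketch (the 10 kHz unit is the usual PowerPlay convention, assumed here):

    /* Reassemble a 24-bit clock; 10 kHz units assumed per PowerPlay convention. */
    static unsigned int eg_engine_clock(const ATOM_PPLIB_EVERGREEN_CLOCK_INFO *ci)
    {
            return ((unsigned int)ci->ucEngineClockHigh << 16) | ci->usEngineClockLow;
    }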
 
 typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 
index a87990b..03dd6c4 100644
@@ -26,7 +26,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
 #include "radeon.h"
 #include "atom.h"
 #include "atom-bits.h"
@@ -245,25 +245,27 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
+               radeon_crtc->enabled = true;
+               /* adjust pm to dpms changes BEFORE enabling crtcs */
+               radeon_pm_compute_clocks(rdev);
                atombios_enable_crtc(crtc, ATOM_ENABLE);
                if (ASIC_IS_DCE3(rdev))
                        atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
                atombios_blank_crtc(crtc, ATOM_DISABLE);
-               /* XXX re-enable when interrupt support is added */
-               if (!ASIC_IS_DCE4(rdev))
-                       drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+               drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
                radeon_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               /* XXX re-enable when interrupt support is added */
-               if (!ASIC_IS_DCE4(rdev))
-                       drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+               drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
                atombios_blank_crtc(crtc, ATOM_ENABLE);
                if (ASIC_IS_DCE3(rdev))
                        atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
                atombios_enable_crtc(crtc, ATOM_DISABLE);
+               radeon_crtc->enabled = false;
+               /* adjust pm to dpms changes AFTER disabling crtcs */
+               radeon_pm_compute_clocks(rdev);
                break;
        }
 }
@@ -1160,6 +1162,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
                                     struct drm_display_mode *mode,
                                     struct drm_display_mode *adjusted_mode)
 {
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
+       /* adjust pm to upcoming mode change */
+       radeon_pm_compute_clocks(rdev);
+
        if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
                return false;
        return true;
index 28b31c6..abffb14 100644
@@ -351,7 +351,7 @@ retry:
        args.v1.ucChannelID = chan->rec.i2c_id;
        args.v1.ucDelay = delay / 10;
        if (ASIC_IS_DCE4(rdev))
-               args.v2.ucHPD_ID = chan->rec.hpd_id;
+               args.v2.ucHPD_ID = chan->rec.hpd;
 
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
index e8f447e..8c8e4d3 100644
 #include "radeon.h"
 #include "radeon_asic.h"
 #include "radeon_drm.h"
-#include "rv770d.h"
+#include "evergreend.h"
 #include "atom.h"
 #include "avivod.h"
 #include "evergreen_reg.h"
 
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+       struct drm_device *ddev = rdev->ddev;
+       struct drm_crtc *crtc;
+       struct radeon_crtc *radeon_crtc;
+       u32 tmp;
+
+       /* stall memory read requests from any active CRTCs */
+       list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+               radeon_crtc = to_radeon_crtc(crtc);
+               if (radeon_crtc->enabled) {
+                       tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+                       tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                       WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+               }
+       }
+}
+
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+       struct drm_device *ddev = rdev->ddev;
+       struct drm_crtc *crtc;
+       struct radeon_crtc *radeon_crtc;
+       u32 tmp;
+
+       /* re-enable memory read requests on any active CRTCs */
+       list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+               radeon_crtc = to_radeon_crtc(crtc);
+               if (radeon_crtc->enabled) {
+                       tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+                       tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                       WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+               }
+       }
+}
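
evergreen_pm_prepare()/evergreen_pm_finish() bracket a reclock: scanout memory read requests are stalled, clocks change, then fetches resume. The intended call order, with the reclock step left as a placeholder:

    /* Sketch of the sequence a reclock would follow; the middle step is
     * a placeholder, not a real function in this patch.
     */
    static void reclock_sketch(struct radeon_device *rdev)
    {
            evergreen_pm_prepare(rdev);     /* stall CRTC memory read requests */
            /* ... program new engine/memory clocks here ... */
            evergreen_pm_finish(rdev);      /* resume scanout fetches */
    }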
+
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
        bool connected = false;
-       /* XXX */
+
+       switch (hpd) {
+       case RADEON_HPD_1:
+               if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case RADEON_HPD_2:
+               if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case RADEON_HPD_3:
+               if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case RADEON_HPD_4:
+               if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case RADEON_HPD_5:
+               if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case RADEON_HPD_6:
+               if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       default:
+               break;
+       }
+
        return connected;
 }
 
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
                                enum radeon_hpd_id hpd)
 {
-       /* XXX */
+       u32 tmp;
+       bool connected = evergreen_hpd_sense(rdev, hpd);
+
+       switch (hpd) {
+       case RADEON_HPD_1:
+               tmp = RREG32(DC_HPD1_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD1_INT_CONTROL, tmp);
+               break;
+       case RADEON_HPD_2:
+               tmp = RREG32(DC_HPD2_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD2_INT_CONTROL, tmp);
+               break;
+       case RADEON_HPD_3:
+               tmp = RREG32(DC_HPD3_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD3_INT_CONTROL, tmp);
+               break;
+       case RADEON_HPD_4:
+               tmp = RREG32(DC_HPD4_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD4_INT_CONTROL, tmp);
+               break;
+       case RADEON_HPD_5:
+               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD5_INT_CONTROL, tmp);
+               break;
+       case RADEON_HPD_6:
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD6_INT_CONTROL, tmp);
+               break;
+       default:
+               break;
+       }
 }
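
Every case above performs the same three steps on a different register, and the polarity bit is always set opposite to the current sense so the next state change fires an interrupt. Factored out as a sketch (this helper is hypothetical; int_ctl_reg would be the matching DC_HPDn_INT_CONTROL):

    /* Hypothetical helper covering the repeated per-pin pattern above. */
    static void hpd_set_polarity_one(struct radeon_device *rdev,
                                     u32 int_ctl_reg, bool connected)
    {
            u32 tmp = RREG32(int_ctl_reg);

            if (connected)
                    tmp &= ~DC_HPDx_INT_POLARITY;   /* interrupt on unplug */
            else
                    tmp |= DC_HPDx_INT_POLARITY;    /* interrupt on plug */
            WREG32(int_ctl_reg, tmp);
    }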
 
 void evergreen_hpd_init(struct radeon_device *rdev)
 {
-       /* XXX */
+       struct drm_device *dev = rdev->ddev;
+       struct drm_connector *connector;
+       u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+               DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               switch (radeon_connector->hpd.hpd) {
+               case RADEON_HPD_1:
+                       WREG32(DC_HPD1_CONTROL, tmp);
+                       rdev->irq.hpd[0] = true;
+                       break;
+               case RADEON_HPD_2:
+                       WREG32(DC_HPD2_CONTROL, tmp);
+                       rdev->irq.hpd[1] = true;
+                       break;
+               case RADEON_HPD_3:
+                       WREG32(DC_HPD3_CONTROL, tmp);
+                       rdev->irq.hpd[2] = true;
+                       break;
+               case RADEON_HPD_4:
+                       WREG32(DC_HPD4_CONTROL, tmp);
+                       rdev->irq.hpd[3] = true;
+                       break;
+               case RADEON_HPD_5:
+                       WREG32(DC_HPD5_CONTROL, tmp);
+                       rdev->irq.hpd[4] = true;
+                       break;
+               case RADEON_HPD_6:
+                       WREG32(DC_HPD6_CONTROL, tmp);
+                       rdev->irq.hpd[5] = true;
+                       break;
+               default:
+                       break;
+               }
+       }
+       if (rdev->irq.installed)
+               evergreen_irq_set(rdev);
 }
 
-
-void evergreen_bandwidth_update(struct radeon_device *rdev)
+void evergreen_hpd_fini(struct radeon_device *rdev)
 {
-       /* XXX */
+       struct drm_device *dev = rdev->ddev;
+       struct drm_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               switch (radeon_connector->hpd.hpd) {
+               case RADEON_HPD_1:
+                       WREG32(DC_HPD1_CONTROL, 0);
+                       rdev->irq.hpd[0] = false;
+                       break;
+               case RADEON_HPD_2:
+                       WREG32(DC_HPD2_CONTROL, 0);
+                       rdev->irq.hpd[1] = false;
+                       break;
+               case RADEON_HPD_3:
+                       WREG32(DC_HPD3_CONTROL, 0);
+                       rdev->irq.hpd[2] = false;
+                       break;
+               case RADEON_HPD_4:
+                       WREG32(DC_HPD4_CONTROL, 0);
+                       rdev->irq.hpd[3] = false;
+                       break;
+               case RADEON_HPD_5:
+                       WREG32(DC_HPD5_CONTROL, 0);
+                       rdev->irq.hpd[4] = false;
+                       break;
+               case RADEON_HPD_6:
+                       WREG32(DC_HPD6_CONTROL, 0);
+                       rdev->irq.hpd[5] = false;
+                       break;
+               default:
+                       break;
+               }
+       }
 }
 
-void evergreen_hpd_fini(struct radeon_device *rdev)
+void evergreen_bandwidth_update(struct radeon_device *rdev)
 {
        /* XXX */
 }
@@ -83,10 +279,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
 /*
  * GART
  */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+       unsigned i;
+       u32 tmp;
+
+       WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               /* read back the request/response status */
+               tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+               tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+               if (tmp == 2) {
+                       printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
+                       return;
+               }
+               if (tmp) {
+                       return;
+               }
+               udelay(1);
+       }
+}
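
The flush writes REQUEST_TYPE(1) and then polls RESPONSE_TYPE, treating 2 as failure and any other non-zero value as done, bounded by usec_timeout. The bounded-poll idiom on its own (a generic sketch, not a driver API):

    /* Generic bounded poll: 0 on success, -1 if the condition never held. */
    static int poll_until(struct radeon_device *rdev,
                          bool (*done)(struct radeon_device *))
    {
            unsigned int i;

            for (i = 0; i < rdev->usec_timeout; i++) {
                    if (done(rdev))
                            return 0;
                    udelay(1);
            }
            return -1;
    }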
+
 int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 {
        u32 tmp;
-       int r, i;
+       int r;
 
        if (rdev->gart.table.vram.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -121,10 +338,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
-       for (i = 1; i < 7; i++)
-               WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+       WREG32(VM_CONTEXT1_CNTL, 0);
 
-       r600_pcie_gart_tlb_flush(rdev);
+       evergreen_pcie_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
 }
@@ -132,11 +348,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 {
        u32 tmp;
-       int i, r;
+       int r;
 
        /* Disable all tables */
-       for (i = 0; i < 7; i++)
-               WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+       WREG32(VM_CONTEXT0_CNTL, 0);
+       WREG32(VM_CONTEXT1_CNTL, 0);
 
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -173,7 +389,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
 void evergreen_agp_enable(struct radeon_device *rdev)
 {
        u32 tmp;
-       int i;
 
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -193,8 +408,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
-       for (i = 0; i < 7; i++)
-               WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+       WREG32(VM_CONTEXT0_CNTL, 0);
+       WREG32(VM_CONTEXT1_CNTL, 0);
 }
 
 static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -400,40 +615,656 @@ static void evergreen_mc_program(struct radeon_device *rdev)
        rv515_vga_render_disable(rdev);
 }
 
-#if 0
 /*
  * CP.
  */
-static void evergreen_cp_stop(struct radeon_device *rdev)
-{
-       /* XXX */
-}
-
 
 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 {
-       /* XXX */
+       const __be32 *fw_data;
+       int i;
 
+       if (!rdev->me_fw || !rdev->pfp_fw)
+               return -EINVAL;
+
+       r700_cp_stop(rdev);
+       WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+
+       fw_data = (const __be32 *)rdev->pfp_fw->data;
+       WREG32(CP_PFP_UCODE_ADDR, 0);
+       for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+               WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+       WREG32(CP_PFP_UCODE_ADDR, 0);
+
+       fw_data = (const __be32 *)rdev->me_fw->data;
+       WREG32(CP_ME_RAM_WADDR, 0);
+       for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+               WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+       WREG32(CP_PFP_UCODE_ADDR, 0);
+       WREG32(CP_ME_RAM_WADDR, 0);
+       WREG32(CP_ME_RAM_RADDR, 0);
        return 0;
 }
 
+int evergreen_cp_resume(struct radeon_device *rdev)
+{
+       u32 tmp;
+       u32 rb_bufsz;
+       int r;
+
+       /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+       WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+                                SOFT_RESET_PA |
+                                SOFT_RESET_SH |
+                                SOFT_RESET_VGT |
+                                SOFT_RESET_SX));
+       RREG32(GRBM_SOFT_RESET);
+       mdelay(15);
+       WREG32(GRBM_SOFT_RESET, 0);
+       RREG32(GRBM_SOFT_RESET);
+
+       /* Set ring buffer size */
+       rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+       tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+       tmp |= BUF_SWAP_32BIT;
+#endif
+       WREG32(CP_RB_CNTL, tmp);
+       WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+       /* Set the write pointer delay */
+       WREG32(CP_RB_WPTR_DELAY, 0);
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+       WREG32(CP_RB_RPTR_WR, 0);
+       WREG32(CP_RB_WPTR, 0);
+       WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
+       WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+       mdelay(1);
+       WREG32(CP_RB_CNTL, tmp);
+
+       WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+       WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+       rdev->cp.rptr = RREG32(CP_RB_RPTR);
+       rdev->cp.wptr = RREG32(CP_RB_WPTR);
+
+       r600_cp_start(rdev);
+       rdev->cp.ready = true;
+       r = radeon_ring_test(rdev);
+       if (r) {
+               rdev->cp.ready = false;
+               return r;
+       }
+       return 0;
+}
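
drm_order() here is assumed to return the smallest power-of-two exponent covering its argument, so rb_bufsz encodes the ring size in 8-byte units as an exponent. A quick standalone check with an illustrative size:

    #include <stdio.h>

    /* Assumed drm_order() contract: smallest n with (1 << n) >= size. */
    static int order_base_2(unsigned long size)
    {
            int order = 0;

            while ((1UL << order) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long ring_size = 1024 * 1024;  /* 1 MiB, illustrative */

            printf("rb_bufsz = %d\n", order_base_2(ring_size / 8));  /* 17 */
            return 0;
    }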
 
 /*
  * Core functions
  */
-static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+                                                 u32 num_tile_pipes,
                                                  u32 num_backends,
                                                  u32 backend_disable_mask)
 {
        u32 backend_map = 0;
+       u32 enabled_backends_mask = 0;
+       u32 enabled_backends_count = 0;
+       u32 cur_pipe;
+       u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
+       u32 cur_backend = 0;
+       u32 i;
+       bool force_no_swizzle;
+
+       if (num_tile_pipes > EVERGREEN_MAX_PIPES)
+               num_tile_pipes = EVERGREEN_MAX_PIPES;
+       if (num_tile_pipes < 1)
+               num_tile_pipes = 1;
+       if (num_backends > EVERGREEN_MAX_BACKENDS)
+               num_backends = EVERGREEN_MAX_BACKENDS;
+       if (num_backends < 1)
+               num_backends = 1;
+
+       for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+               if (((backend_disable_mask >> i) & 1) == 0) {
+                       enabled_backends_mask |= (1 << i);
+                       ++enabled_backends_count;
+               }
+               if (enabled_backends_count == num_backends)
+                       break;
+       }
+
+       if (enabled_backends_count == 0) {
+               enabled_backends_mask = 1;
+               enabled_backends_count = 1;
+       }
+
+       if (enabled_backends_count != num_backends)
+               num_backends = enabled_backends_count;
+
+       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
+       switch (rdev->family) {
+       case CHIP_CEDAR:
+       case CHIP_REDWOOD:
+               force_no_swizzle = false;
+               break;
+       case CHIP_CYPRESS:
+       case CHIP_HEMLOCK:
+       case CHIP_JUNIPER:
+       default:
+               force_no_swizzle = true;
+               break;
+       }
+       if (force_no_swizzle) {
+               bool last_backend_enabled = false;
+
+               force_no_swizzle = false;
+               for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+                       if (((enabled_backends_mask >> i) & 1) == 1) {
+                               if (last_backend_enabled)
+                                       force_no_swizzle = true;
+                               last_backend_enabled = true;
+                       } else
+                               last_backend_enabled = false;
+               }
+       }
+
+       switch (num_tile_pipes) {
+       case 1:
+       case 3:
+       case 5:
+       case 7:
+               DRM_ERROR("odd number of pipes!\n");
+               break;
+       case 2:
+               swizzle_pipe[0] = 0;
+               swizzle_pipe[1] = 1;
+               break;
+       case 4:
+               if (force_no_swizzle) {
+                       swizzle_pipe[0] = 0;
+                       swizzle_pipe[1] = 1;
+                       swizzle_pipe[2] = 2;
+                       swizzle_pipe[3] = 3;
+               } else {
+                       swizzle_pipe[0] = 0;
+                       swizzle_pipe[1] = 2;
+                       swizzle_pipe[2] = 1;
+                       swizzle_pipe[3] = 3;
+               }
+               break;
+       case 6:
+               if (force_no_swizzle) {
+                       swizzle_pipe[0] = 0;
+                       swizzle_pipe[1] = 1;
+                       swizzle_pipe[2] = 2;
+                       swizzle_pipe[3] = 3;
+                       swizzle_pipe[4] = 4;
+                       swizzle_pipe[5] = 5;
+               } else {
+                       swizzle_pipe[0] = 0;
+                       swizzle_pipe[1] = 2;
+                       swizzle_pipe[2] = 4;
+                       swizzle_pipe[3] = 1;
+                       swizzle_pipe[4] = 3;
+                       swizzle_pipe[5] = 5;
+               }
+               break;
+       case 8:
+               if (force_no_swizzle) {
+                       swizzle_pipe[0] = 0;
+                       swizzle_pipe[1] = 1;
+                       swizzle_pipe[2] = 2;
+                       swizzle_pipe[3] = 3;
+                       swizzle_pipe[4] = 4;
+                       swizzle_pipe[5] = 5;
+                       swizzle_pipe[6] = 6;
+                       swizzle_pipe[7] = 7;
+               } else {
+                       swizzle_pipe[0] = 0;
+                       swizzle_pipe[1] = 2;
+                       swizzle_pipe[2] = 4;
+                       swizzle_pipe[3] = 6;
+                       swizzle_pipe[4] = 1;
+                       swizzle_pipe[5] = 3;
+                       swizzle_pipe[6] = 5;
+                       swizzle_pipe[7] = 7;
+               }
+               break;
+       }
+
+       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+               while (((1 << cur_backend) & enabled_backends_mask) == 0)
+                       cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+
+               backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+               cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+       }
 
        return backend_map;
 }
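
The returned map packs one 4-bit backend index per pipe, pipe 0 in the low nibble (see the `(cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)` step above). Decoding an entry back out, as a sketch:

    /* 4 bits per pipe: which render backend does a given pipe map to? */
    static unsigned int backend_for_pipe(u32 backend_map, unsigned int pipe)
    {
            return (backend_map >> (pipe * 4)) & 0xf;
    }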
-#endif
 
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
-       /* XXX */
+       u32 cc_rb_backend_disable = 0;
+       u32 cc_gc_shader_pipe_config;
+       u32 gb_addr_config = 0;
+       u32 mc_shared_chmap, mc_arb_ramcfg;
+       u32 gb_backend_map;
+       u32 grbm_gfx_index;
+       u32 sx_debug_1;
+       u32 smx_dc_ctl0;
+       u32 sq_config;
+       u32 sq_lds_resource_mgmt;
+       u32 sq_gpr_resource_mgmt_1;
+       u32 sq_gpr_resource_mgmt_2;
+       u32 sq_gpr_resource_mgmt_3;
+       u32 sq_thread_resource_mgmt;
+       u32 sq_thread_resource_mgmt_2;
+       u32 sq_stack_resource_mgmt_1;
+       u32 sq_stack_resource_mgmt_2;
+       u32 sq_stack_resource_mgmt_3;
+       u32 vgt_cache_invalidation;
+       u32 hdp_host_path_cntl;
+       int i, j, num_shader_engines, ps_thread_count;
+
+       switch (rdev->family) {
+       case CHIP_CYPRESS:
+       case CHIP_HEMLOCK:
+               rdev->config.evergreen.num_ses = 2;
+               rdev->config.evergreen.max_pipes = 4;
+               rdev->config.evergreen.max_tile_pipes = 8;
+               rdev->config.evergreen.max_simds = 10;
+               rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+               rdev->config.evergreen.max_gprs = 256;
+               rdev->config.evergreen.max_threads = 248;
+               rdev->config.evergreen.max_gs_threads = 32;
+               rdev->config.evergreen.max_stack_entries = 512;
+               rdev->config.evergreen.sx_num_of_sets = 4;
+               rdev->config.evergreen.sx_max_export_size = 256;
+               rdev->config.evergreen.sx_max_export_pos_size = 64;
+               rdev->config.evergreen.sx_max_export_smx_size = 192;
+               rdev->config.evergreen.max_hw_contexts = 8;
+               rdev->config.evergreen.sq_num_cf_insts = 2;
+
+               rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+               rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+               rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               break;
+       case CHIP_JUNIPER:
+               rdev->config.evergreen.num_ses = 1;
+               rdev->config.evergreen.max_pipes = 4;
+               rdev->config.evergreen.max_tile_pipes = 4;
+               rdev->config.evergreen.max_simds = 10;
+               rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+               rdev->config.evergreen.max_gprs = 256;
+               rdev->config.evergreen.max_threads = 248;
+               rdev->config.evergreen.max_gs_threads = 32;
+               rdev->config.evergreen.max_stack_entries = 512;
+               rdev->config.evergreen.sx_num_of_sets = 4;
+               rdev->config.evergreen.sx_max_export_size = 256;
+               rdev->config.evergreen.sx_max_export_pos_size = 64;
+               rdev->config.evergreen.sx_max_export_smx_size = 192;
+               rdev->config.evergreen.max_hw_contexts = 8;
+               rdev->config.evergreen.sq_num_cf_insts = 2;
+
+               rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+               rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+               rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               break;
+       case CHIP_REDWOOD:
+               rdev->config.evergreen.num_ses = 1;
+               rdev->config.evergreen.max_pipes = 4;
+               rdev->config.evergreen.max_tile_pipes = 4;
+               rdev->config.evergreen.max_simds = 5;
+               rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+               rdev->config.evergreen.max_gprs = 256;
+               rdev->config.evergreen.max_threads = 248;
+               rdev->config.evergreen.max_gs_threads = 32;
+               rdev->config.evergreen.max_stack_entries = 256;
+               rdev->config.evergreen.sx_num_of_sets = 4;
+               rdev->config.evergreen.sx_max_export_size = 256;
+               rdev->config.evergreen.sx_max_export_pos_size = 64;
+               rdev->config.evergreen.sx_max_export_smx_size = 192;
+               rdev->config.evergreen.max_hw_contexts = 8;
+               rdev->config.evergreen.sq_num_cf_insts = 2;
+
+               rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+               rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+               rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               break;
+       case CHIP_CEDAR:
+       default:
+               rdev->config.evergreen.num_ses = 1;
+               rdev->config.evergreen.max_pipes = 2;
+               rdev->config.evergreen.max_tile_pipes = 2;
+               rdev->config.evergreen.max_simds = 2;
+               rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+               rdev->config.evergreen.max_gprs = 256;
+               rdev->config.evergreen.max_threads = 192;
+               rdev->config.evergreen.max_gs_threads = 16;
+               rdev->config.evergreen.max_stack_entries = 256;
+               rdev->config.evergreen.sx_num_of_sets = 4;
+               rdev->config.evergreen.sx_max_export_size = 128;
+               rdev->config.evergreen.sx_max_export_pos_size = 32;
+               rdev->config.evergreen.sx_max_export_smx_size = 96;
+               rdev->config.evergreen.max_hw_contexts = 4;
+               rdev->config.evergreen.sq_num_cf_insts = 1;
+
+               rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+               rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+               rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               break;
+       }
+
+       /* Initialize HDP */
+       for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+               WREG32((0x2c14 + j), 0x00000000);
+               WREG32((0x2c18 + j), 0x00000000);
+               WREG32((0x2c1c + j), 0x00000000);
+               WREG32((0x2c20 + j), 0x00000000);
+               WREG32((0x2c24 + j), 0x00000000);
+       }
+
+       WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
+
+       cc_gc_shader_pipe_config |=
+               INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
+                                 & EVERGREEN_MAX_PIPES_MASK);
+       cc_gc_shader_pipe_config |=
+               INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
+                              & EVERGREEN_MAX_SIMDS_MASK);
+
+       cc_rb_backend_disable =
+               BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
+                               & EVERGREEN_MAX_BACKENDS_MASK);
+
+
+       mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+       mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+       switch (rdev->config.evergreen.max_tile_pipes) {
+       case 1:
+       default:
+               gb_addr_config |= NUM_PIPES(0);
+               break;
+       case 2:
+               gb_addr_config |= NUM_PIPES(1);
+               break;
+       case 4:
+               gb_addr_config |= NUM_PIPES(2);
+               break;
+       case 8:
+               gb_addr_config |= NUM_PIPES(3);
+               break;
+       }
+
+       gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+       gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
+       gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
+       gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
+       gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
+       gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+
+       if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
+               gb_addr_config |= ROW_SIZE(2);
+       else
+               gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
+
+       if (rdev->ddev->pdev->device == 0x689e) {
+               u32 efuse_straps_4;
+               u32 efuse_straps_3;
+               u8 efuse_box_bit_131_124;
+
+               WREG32(RCU_IND_INDEX, 0x204);
+               efuse_straps_4 = RREG32(RCU_IND_DATA);
+               WREG32(RCU_IND_INDEX, 0x203);
+               efuse_straps_3 = RREG32(RCU_IND_DATA);
+               efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
+
+               switch(efuse_box_bit_131_124) {
+               case 0x00:
+                       gb_backend_map = 0x76543210;
+                       break;
+               case 0x55:
+                       gb_backend_map = 0x77553311;
+                       break;
+               case 0x56:
+                       gb_backend_map = 0x77553300;
+                       break;
+               case 0x59:
+                       gb_backend_map = 0x77552211;
+                       break;
+               case 0x66:
+                       gb_backend_map = 0x77443300;
+                       break;
+               case 0x99:
+                       gb_backend_map = 0x66552211;
+                       break;
+               case 0x5a:
+                       gb_backend_map = 0x77552200;
+                       break;
+               case 0xaa:
+                       gb_backend_map = 0x66442200;
+                       break;
+               case 0x95:
+                       gb_backend_map = 0x66553311;
+                       break;
+               default:
+                       DRM_ERROR("bad backend map, using default\n");
+                       gb_backend_map =
+                               evergreen_get_tile_pipe_to_backend_map(rdev,
+                                                                      rdev->config.evergreen.max_tile_pipes,
+                                                                      rdev->config.evergreen.max_backends,
+                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
+                                                                  rdev->config.evergreen.max_backends) &
+                                                                       EVERGREEN_MAX_BACKENDS_MASK));
+                       break;
+               }
+       } else if (rdev->ddev->pdev->device == 0x68b9) {
+               u32 efuse_straps_3;
+               u8 efuse_box_bit_127_124;
+
+               WREG32(RCU_IND_INDEX, 0x203);
+               efuse_straps_3 = RREG32(RCU_IND_DATA);
+               efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
+
+               switch(efuse_box_bit_127_124) {
+               case 0x0:
+                       gb_backend_map = 0x00003210;
+                       break;
+               case 0x5:
+               case 0x6:
+               case 0x9:
+               case 0xa:
+                       gb_backend_map = 0x00003311;
+                       break;
+               default:
+                       DRM_ERROR("bad backend map, using default\n");
+                       gb_backend_map =
+                               evergreen_get_tile_pipe_to_backend_map(rdev,
+                                                                      rdev->config.evergreen.max_tile_pipes,
+                                                                      rdev->config.evergreen.max_backends,
+                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
+                                                                  rdev->config.evergreen.max_backends) &
+                                                                       EVERGREEN_MAX_BACKENDS_MASK));
+                       break;
+               }
+       } else
+               gb_backend_map =
+                       evergreen_get_tile_pipe_to_backend_map(rdev,
+                                                              rdev->config.evergreen.max_tile_pipes,
+                                                              rdev->config.evergreen.max_backends,
+                                                              ((EVERGREEN_MAX_BACKENDS_MASK <<
+                                                                rdev->config.evergreen.max_backends) &
+                                                               EVERGREEN_MAX_BACKENDS_MASK));
+
+       WREG32(GB_BACKEND_MAP, gb_backend_map);
+       WREG32(GB_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+       WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+       num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+       grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+
+       for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+               u32 rb = cc_rb_backend_disable | (0xf0 << 16);
+               u32 sp = cc_gc_shader_pipe_config;
+               u32 gfx = grbm_gfx_index | SE_INDEX(i);
+
+               if (i == num_shader_engines) {
+                       rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
+                       sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+               }
+
+               WREG32(GRBM_GFX_INDEX, gfx);
+               WREG32(RLC_GFX_INDEX, gfx);
+
+               WREG32(CC_RB_BACKEND_DISABLE, rb);
+               WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+               WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+               WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+       }
+
+       grbm_gfx_index |= SE_BROADCAST_WRITES;
+       WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+       WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+
+       WREG32(CGTS_SYS_TCC_DISABLE, 0);
+       WREG32(CGTS_TCC_DISABLE, 0);
+       WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+       WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+       /* set HW defaults for 3D engine */
+       WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+                                    ROQ_IB2_START(0x2b)));
+
+       WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+       WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+                            SYNC_GRADIENT |
+                            SYNC_WALKER |
+                            SYNC_ALIGNER));
+
+       sx_debug_1 = RREG32(SX_DEBUG_1);
+       sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+       WREG32(SX_DEBUG_1, sx_debug_1);
+
+
+       smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+       smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+       smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+       WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+       WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+                                       POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+                                       SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+       WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+                                SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+                                SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+       WREG32(VGT_NUM_INSTANCES, 1);
+       WREG32(SPI_CONFIG_CNTL, 0);
+       WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+       WREG32(CP_PERFMON_CNTL, 0);
+
+       WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+                                 FETCH_FIFO_HIWATER(0x4) |
+                                 DONE_FIFO_HIWATER(0xe0) |
+                                 ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+       sq_config = RREG32(SQ_CONFIG);
+       sq_config &= ~(PS_PRIO(3) |
+                      VS_PRIO(3) |
+                      GS_PRIO(3) |
+                      ES_PRIO(3));
+       sq_config |= (VC_ENABLE |
+                     EXPORT_SRC_C |
+                     PS_PRIO(0) |
+                     VS_PRIO(1) |
+                     GS_PRIO(2) |
+                     ES_PRIO(3));
+
+       if (rdev->family == CHIP_CEDAR)
+               /* no vertex cache */
+               sq_config &= ~VC_ENABLE;
+
+       sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
+       sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
+       sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+       sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+       sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+       sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+       sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+       sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+       if (rdev->family == CHIP_CEDAR)
+               ps_thread_count = 96;
+       else
+               ps_thread_count = 128;
+
+       sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+       sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+       sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+       sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+       sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+       sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+
+       sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+       sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+       sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+       sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+       sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+       sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+       WREG32(SQ_CONFIG, sq_config);
+       WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+       WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+       WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+       WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+       WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+       WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+       WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+       WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+       WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+       WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+       WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+                                         FORCE_EOV_MAX_REZ_CNT(255)));
+
+       if (rdev->family == CHIP_CEDAR)
+               vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+       else
+               vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+       vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+       WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+       WREG32(VGT_GS_VERTEX_REUSE, 16);
+       WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+       WREG32(CB_PERF_CTR0_SEL_0, 0);
+       WREG32(CB_PERF_CTR0_SEL_1, 0);
+       WREG32(CB_PERF_CTR1_SEL_0, 0);
+       WREG32(CB_PERF_CTR1_SEL_1, 0);
+       WREG32(CB_PERF_CTR2_SEL_0, 0);
+       WREG32(CB_PERF_CTR2_SEL_1, 0);
+       WREG32(CB_PERF_CTR3_SEL_0, 0);
+       WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+       hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+       WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+       WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+       udelay(50);
+
 }
 
 int evergreen_mc_init(struct radeon_device *rdev)
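
The GPR split in evergreen_gpu_init() hands out max_gprs minus the 8 clause-temp registers in 32nds: 12/32 to PS, 6/32 to VS, 4/32 each to GS and ES, 3/32 each to HS and LS (the shares sum to exactly 32/32). Worked out for max_gprs = 256:

    #include <stdio.h>

    int main(void)
    {
            int avail = 256 - (4 * 2);      /* max_gprs minus clause-temp GPRs */

            /* 12 + 6 + 4 + 4 + 3 + 3 = 32, so the pool partitions exactly */
            printf("PS %d VS %d GS %d ES %d HS %d LS %d\n",
                   avail * 12 / 32, avail * 6 / 32, avail * 4 / 32,
                   avail * 4 / 32, avail * 3 / 32, avail * 3 / 32);
            return 0;       /* prints PS 93 VS 46 GS 31 ES 31 HS 23 LS 23 */
    }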
@@ -476,26 +1307,627 @@ int evergreen_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
        r600_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 
        return 0;
 }
 
-int evergreen_gpu_reset(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
 {
        /* FIXME: implement for evergreen */
+       return false;
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+{
+       struct evergreen_mc_save save;
+       u32 srbm_reset = 0;
+       u32 grbm_reset = 0;
+
+       dev_info(rdev->dev, "GPU softreset\n");
+       dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+               RREG32(GRBM_STATUS));
+       dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+               RREG32(GRBM_STATUS_SE0));
+       dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+               RREG32(GRBM_STATUS_SE1));
+       dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+               RREG32(SRBM_STATUS));
+       evergreen_mc_stop(rdev, &save);
+       if (evergreen_mc_wait_for_idle(rdev)) {
+               dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+       }
+       /* Disable CP parsing/prefetching */
+       WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+       /* reset all the gfx blocks */
+       grbm_reset = (SOFT_RESET_CP |
+                     SOFT_RESET_CB |
+                     SOFT_RESET_DB |
+                     SOFT_RESET_PA |
+                     SOFT_RESET_SC |
+                     SOFT_RESET_SPI |
+                     SOFT_RESET_SH |
+                     SOFT_RESET_SX |
+                     SOFT_RESET_TC |
+                     SOFT_RESET_TA |
+                     SOFT_RESET_VC |
+                     SOFT_RESET_VGT);
+
+       dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+       WREG32(GRBM_SOFT_RESET, grbm_reset);
+       (void)RREG32(GRBM_SOFT_RESET);
+       udelay(50);
+       WREG32(GRBM_SOFT_RESET, 0);
+       (void)RREG32(GRBM_SOFT_RESET);
+
+       /* reset all the system blocks */
+       srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
+
+       dev_info(rdev->dev, "  SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+       WREG32(SRBM_SOFT_RESET, srbm_reset);
+       (void)RREG32(SRBM_SOFT_RESET);
+       udelay(50);
+       WREG32(SRBM_SOFT_RESET, 0);
+       (void)RREG32(SRBM_SOFT_RESET);
+       /* Wait a little for things to settle down */
+       udelay(50);
+       dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+               RREG32(GRBM_STATUS));
+       dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+               RREG32(GRBM_STATUS_SE0));
+       dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+               RREG32(GRBM_STATUS_SE1));
+       dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+               RREG32(SRBM_STATUS));
+       /* After reset we need to reinitialize the asic as the GPU often ends
+        * up in an incoherent state.
+        */
+       atom_asic_init(rdev->mode_info.atom_context);
+       evergreen_mc_resume(rdev, &save);
+       return 0;
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+       return evergreen_gpu_soft_reset(rdev);
+}
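
The GRBM and SRBM sequences above share one MMIO idiom: write the reset mask,
read the register back to flush the posted write, delay while the blocks
reset, then clear the mask and flush again. A minimal sketch of that pattern,
assuming only the WREG32()/RREG32() accessors and udelay() already used in
this file (soft_reset_block itself is illustrative, not driver API):

	/* Posted-write soft-reset handshake, as used for GRBM/SRBM above */
	static void soft_reset_block(struct radeon_device *rdev, u32 reg, u32 mask)
	{
		WREG32(reg, mask);
		(void)RREG32(reg);	/* flush the posted write */
		udelay(50);		/* give the blocks time to reset */
		WREG32(reg, 0);		/* release the blocks from reset */
		(void)RREG32(reg);
	}
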
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+       switch (crtc) {
+       case 0:
+               return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
+       case 1:
+               return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
+       case 2:
+               return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
+       case 3:
+               return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
+       case 4:
+               return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
+       case 5:
+               return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
+       default:
+               return 0;
+       }
+}
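
Each display controller's register bank sits at a fixed offset from the
first, so the switch above could equally be a table lookup; a sketch of that
form (crtc_offsets and the _tbl variant are illustrative):

	static const u32 crtc_offsets[6] = {
		EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_CRTC5_REGISTER_OFFSET,
	};

	u32 evergreen_get_vblank_counter_tbl(struct radeon_device *rdev, int crtc)
	{
		if (crtc < 0 || crtc > 5)
			return 0;
		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
	}
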
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+       u32 tmp;
+
+       WREG32(CP_INT_CNTL, 0);
+       WREG32(GRBM_INT_CNTL, 0);
+       WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+       WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+       WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+       WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+       WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+       WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+       WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+       tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+       WREG32(DC_HPD1_INT_CONTROL, tmp);
+       tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+       WREG32(DC_HPD2_INT_CONTROL, tmp);
+       tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+       WREG32(DC_HPD3_INT_CONTROL, tmp);
+       tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+       WREG32(DC_HPD4_INT_CONTROL, tmp);
+       tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+       WREG32(DC_HPD5_INT_CONTROL, tmp);
+       tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+       WREG32(DC_HPD6_INT_CONTROL, tmp);
+}
+
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+       u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+       u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+       u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+       u32 grbm_int_cntl = 0;
+
+       if (!rdev->irq.installed) {
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               return -EINVAL;
+       }
+       /* don't enable anything if the ih is disabled */
+       if (!rdev->ih.enabled) {
+               r600_disable_interrupts(rdev);
+               /* force the active interrupt state to all disabled */
+               evergreen_disable_interrupt_state(rdev);
+               return 0;
+       }
+
+       hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+       if (rdev->irq.sw_int) {
+               DRM_DEBUG("evergreen_irq_set: sw int\n");
+               cp_int_cntl |= RB_INT_ENABLE;
+       }
+       if (rdev->irq.crtc_vblank_int[0]) {
+               DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+               crtc1 |= VBLANK_INT_MASK;
+       }
+       if (rdev->irq.crtc_vblank_int[1]) {
+               DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+               crtc2 |= VBLANK_INT_MASK;
+       }
+       if (rdev->irq.crtc_vblank_int[2]) {
+               DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+               crtc3 |= VBLANK_INT_MASK;
+       }
+       if (rdev->irq.crtc_vblank_int[3]) {
+               DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+               crtc4 |= VBLANK_INT_MASK;
+       }
+       if (rdev->irq.crtc_vblank_int[4]) {
+               DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+               crtc5 |= VBLANK_INT_MASK;
+       }
+       if (rdev->irq.crtc_vblank_int[5]) {
+               DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+               crtc6 |= VBLANK_INT_MASK;
+       }
+       if (rdev->irq.hpd[0]) {
+               DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+               hpd1 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[1]) {
+               DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+               hpd2 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[2]) {
+               DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+               hpd3 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[3]) {
+               DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+               hpd4 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[4]) {
+               DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+               hpd5 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.hpd[5]) {
+               DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+               hpd6 |= DC_HPDx_INT_EN;
+       }
+       if (rdev->irq.gui_idle) {
+               DRM_DEBUG("gui idle\n");
+               grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+       }
+
+       WREG32(CP_INT_CNTL, cp_int_cntl);
+       WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+       WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+       WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+       WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+       WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+       WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+       WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+
+       WREG32(DC_HPD1_INT_CONTROL, hpd1);
+       WREG32(DC_HPD2_INT_CONTROL, hpd2);
+       WREG32(DC_HPD3_INT_CONTROL, hpd3);
+       WREG32(DC_HPD4_INT_CONTROL, hpd4);
+       WREG32(DC_HPD5_INT_CONTROL, hpd5);
+       WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
        return 0;
 }
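
Note the read-modify-write idiom on the DC_HPDx_INT_CONTROL registers above:
the current value is read with the enable bit masked off so the polarity and
other settings survive, and the bit is OR-ed back in only for connectors with
hotplug detect enabled. The same idiom in isolation (hpd_int_enable is an
illustrative helper):

	/* Enable/disable one HPD interrupt, preserving the other bits
	 * (e.g. DC_HPDx_INT_POLARITY) */
	static void hpd_int_enable(struct radeon_device *rdev, u32 reg, bool on)
	{
		u32 tmp = RREG32(reg) & ~DC_HPDx_INT_EN;

		if (on)
			tmp |= DC_HPDx_INT_EN;
		WREG32(reg, tmp);
	}
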
 
+static inline void evergreen_irq_ack(struct radeon_device *rdev,
+                                    u32 *disp_int,
+                                    u32 *disp_int_cont,
+                                    u32 *disp_int_cont2,
+                                    u32 *disp_int_cont3,
+                                    u32 *disp_int_cont4,
+                                    u32 *disp_int_cont5)
+{
+       u32 tmp;
+
+       *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+       *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+       *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+       *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+       *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+       *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+
+       if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+               WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+       if (*disp_int & LB_D1_VLINE_INTERRUPT)
+               WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+
+       if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+               WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+       if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+               WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+       if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+               WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+       if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+               WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+
+       if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+               WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+       if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+               WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+
+       if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+               WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+       if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+               WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+
+       if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+               WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+       if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+               WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+
+       if (*disp_int & DC_HPD1_INTERRUPT) {
+               tmp = RREG32(DC_HPD1_INT_CONTROL);
+               tmp |= DC_HPDx_INT_ACK;
+               WREG32(DC_HPD1_INT_CONTROL, tmp);
+       }
+       if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+               tmp = RREG32(DC_HPD2_INT_CONTROL);
+               tmp |= DC_HPDx_INT_ACK;
+               WREG32(DC_HPD2_INT_CONTROL, tmp);
+       }
+       if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+               tmp = RREG32(DC_HPD3_INT_CONTROL);
+               tmp |= DC_HPDx_INT_ACK;
+               WREG32(DC_HPD3_INT_CONTROL, tmp);
+       }
+       if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+               tmp = RREG32(DC_HPD4_INT_CONTROL);
+               tmp |= DC_HPDx_INT_ACK;
+               WREG32(DC_HPD4_INT_CONTROL, tmp);
+       }
+       if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp |= DC_HPDx_INT_ACK;
+               WREG32(DC_HPD5_INT_CONTROL, tmp);
+       }
+       if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
+               tmp |= DC_HPDx_INT_ACK;
+               WREG32(DC_HPD6_INT_CONTROL, tmp);
+       }
+}
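
These display sources are acknowledged by writing the ACK bit back: directly
to the status register for vblank/vline (VBLANK_ACK, VLINE_ACK), and by
setting DC_HPDx_INT_ACK in the control register for hotplug, as above. The
hotplug half of the idiom in isolation (ack_hpd is illustrative):

	/* Write-1-to-acknowledge for one HPD interrupt source */
	static void ack_hpd(struct radeon_device *rdev, u32 int_control_reg)
	{
		u32 tmp = RREG32(int_control_reg);

		tmp |= DC_HPDx_INT_ACK;
		WREG32(int_control_reg, tmp);
	}
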
+
+void evergreen_irq_disable(struct radeon_device *rdev)
+{
+       u32 disp_int, disp_int_cont, disp_int_cont2;
+       u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+
+       r600_disable_interrupts(rdev);
+       /* Wait and acknowledge irq */
+       mdelay(1);
+       evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+                         &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+       evergreen_disable_interrupt_state(rdev);
+}
+
+static void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+       evergreen_irq_disable(rdev);
+       r600_rlc_stop(rdev);
+}
+
+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+       u32 wptr, tmp;
+
+       /* XXX use writeback */
+       wptr = RREG32(IH_RB_WPTR);
+
+       if (wptr & RB_OVERFLOW) {
+               /* When a ring buffer overflow happens, start parsing the
+                * interrupts from the last vector that was not overwritten
+                * (wptr + 16). Hopefully this allows us to catch up.
+                */
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+                       wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+               rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+               tmp = RREG32(IH_RB_CNTL);
+               tmp |= IH_WPTR_OVERFLOW_CLEAR;
+               WREG32(IH_RB_CNTL, tmp);
+       }
+       return (wptr & rdev->ih.ptr_mask);
+}
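
rdev->ih.ptr_mask is ring_size - 1 for the power-of-two IH ring, so the AND
above is a cheap modulo, and adding 16 resumes one whole vector past the
overwritten slot. The arithmetic on its own (the helper name is illustrative):

	/* For a power-of-two ring, (p & (size - 1)) == p % size; skip one
	 * 16-byte vector past the write pointer after an overflow */
	static u32 rptr_after_overflow(u32 wptr, u32 ptr_mask)
	{
		return (wptr + 16) & ptr_mask;
	}
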
+
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+       u32 wptr = evergreen_get_ih_wptr(rdev);
+       u32 rptr = rdev->ih.rptr;
+       u32 src_id, src_data;
+       u32 ring_index;
+       u32 disp_int, disp_int_cont, disp_int_cont2;
+       u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+       unsigned long flags;
+       bool queue_hotplug = false;
+
+       DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+       if (!rdev->ih.enabled)
+               return IRQ_NONE;
+
+       spin_lock_irqsave(&rdev->ih.lock, flags);
+
+       if (rptr == wptr) {
+               spin_unlock_irqrestore(&rdev->ih.lock, flags);
+               return IRQ_NONE;
+       }
+       if (rdev->shutdown) {
+               spin_unlock_irqrestore(&rdev->ih.lock, flags);
+               return IRQ_NONE;
+       }
+
+restart_ih:
+       /* display interrupts */
+       evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+                         &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+
+       rdev->ih.wptr = wptr;
+       while (rptr != wptr) {
+               /* wptr/rptr are in bytes! */
+               ring_index = rptr / 4;
+               src_id = rdev->ih.ring[ring_index] & 0xff;
+               src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+               switch (src_id) {
+               case 1: /* D1 vblank/vline */
+                       switch (src_data) {
+                       case 0: /* D1 vblank */
+                               if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       wake_up(&rdev->irq.vblank_queue);
+                                       disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                                       DRM_DEBUG("IH: D1 vblank\n");
+                               }
+                               break;
+                       case 1: /* D1 vline */
+                               if (disp_int & LB_D1_VLINE_INTERRUPT) {
+                                       disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                                       DRM_DEBUG("IH: D1 vline\n");
+                               }
+                               break;
+                       default:
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 2: /* D2 vblank/vline */
+                       switch (src_data) {
+                       case 0: /* D2 vblank */
+                               if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       wake_up(&rdev->irq.vblank_queue);
+                                       disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+                                       DRM_DEBUG("IH: D2 vblank\n");
+                               }
+                               break;
+                       case 1: /* D2 vline */
+                               if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+                                       disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+                                       DRM_DEBUG("IH: D2 vline\n");
+                               }
+                               break;
+                       default:
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 3: /* D3 vblank/vline */
+                       switch (src_data) {
+                       case 0: /* D3 vblank */
+                               if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+                                       drm_handle_vblank(rdev->ddev, 2);
+                                       wake_up(&rdev->irq.vblank_queue);
+                                       disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+                                       DRM_DEBUG("IH: D3 vblank\n");
+                               }
+                               break;
+                       case 1: /* D3 vline */
+                               if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+                                       disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+                                       DRM_DEBUG("IH: D3 vline\n");
+                               }
+                               break;
+                       default:
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 4: /* D4 vblank/vline */
+                       switch (src_data) {
+                       case 0: /* D4 vblank */
+                               if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+                                       drm_handle_vblank(rdev->ddev, 3);
+                                       wake_up(&rdev->irq.vblank_queue);
+                                       disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+                                       DRM_DEBUG("IH: D4 vblank\n");
+                               }
+                               break;
+                       case 1: /* D4 vline */
+                               if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+                                       disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+                                       DRM_DEBUG("IH: D4 vline\n");
+                               }
+                               break;
+                       default:
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 5: /* D5 vblank/vline */
+                       switch (src_data) {
+                       case 0: /* D5 vblank */
+                               if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+                                       drm_handle_vblank(rdev->ddev, 4);
+                                       wake_up(&rdev->irq.vblank_queue);
+                                       disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+                                       DRM_DEBUG("IH: D5 vblank\n");
+                               }
+                               break;
+                       case 1: /* D5 vline */
+                               if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+                                       disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+                                       DRM_DEBUG("IH: D5 vline\n");
+                               }
+                               break;
+                       default:
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 6: /* D6 vblank/vline */
+                       switch (src_data) {
+                       case 0: /* D6 vblank */
+                               if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+                                       drm_handle_vblank(rdev->ddev, 5);
+                                       wake_up(&rdev->irq.vblank_queue);
+                                       disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+                                       DRM_DEBUG("IH: D6 vblank\n");
+                               }
+                               break;
+                       case 1: /* D6 vline */
+                               if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+                                       disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+                                       DRM_DEBUG("IH: D6 vline\n");
+                               }
+                               break;
+                       default:
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 42: /* HPD hotplug */
+                       switch (src_data) {
+                       case 0:
+                               if (disp_int & DC_HPD1_INTERRUPT) {
+                                       disp_int &= ~DC_HPD1_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD1\n");
+                               }
+                               break;
+                       case 1:
+                               if (disp_int_cont & DC_HPD2_INTERRUPT) {
+                                       disp_int_cont &= ~DC_HPD2_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD2\n");
+                               }
+                               break;
+                       case 2:
+                               if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
+                                       disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD3\n");
+                               }
+                               break;
+                       case 3:
+                               if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
+                                       disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD4\n");
+                               }
+                               break;
+                       case 4:
+                               if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
+                                       disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD5\n");
+                               }
+                               break;
+                       case 5:
+                               if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
+                                       disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+                                       queue_hotplug = true;
+                                       DRM_DEBUG("IH: HPD6\n");
+                               }
+                               break;
+                       default:
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               break;
+                       }
+                       break;
+               case 176: /* CP_INT in ring buffer */
+               case 177: /* CP_INT in IB1 */
+               case 178: /* CP_INT in IB2 */
+                       DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+                       radeon_fence_process(rdev);
+                       break;
+               case 181: /* CP EOP event */
+                       DRM_DEBUG("IH: CP EOP\n");
+                       break;
+               case 233: /* GUI IDLE */
+                       DRM_DEBUG("IH: GUI idle\n");
+                       rdev->pm.gui_idle = true;
+                       wake_up(&rdev->irq.idle_queue);
+                       break;
+               default:
+                       DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+                       break;
+               }
+
+               /* wptr/rptr are in bytes! */
+               rptr += 16;
+               rptr &= rdev->ih.ptr_mask;
+       }
+       /* make sure wptr hasn't changed while processing */
+       wptr = evergreen_get_ih_wptr(rdev);
+       if (wptr != rdev->ih.wptr)
+               goto restart_ih;
+       if (queue_hotplug)
+               queue_work(rdev->wq, &rdev->hotplug_work);
+       rdev->ih.rptr = rptr;
+       WREG32(IH_RB_RPTR, rdev->ih.rptr);
+       spin_unlock_irqrestore(&rdev->ih.lock, flags);
+       return IRQ_HANDLED;
+}
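
Each IH ring entry is a 16-byte vector; the loop above takes the source id
from the low byte of the first dword and the source data from the low 28 bits
of the second, with rptr kept in bytes throughout. The decode step on its own
(struct ih_vec and ih_decode are illustrative):

	struct ih_vec {
		u32 src_id;
		u32 src_data;
	};

	/* ring[] is the u32 view of the IH buffer, rptr a byte offset */
	static struct ih_vec ih_decode(const u32 *ring, u32 rptr)
	{
		u32 i = rptr / 4;	/* byte offset -> dword index */
		struct ih_vec v = {
			.src_id   = ring[i] & 0xff,
			.src_data = ring[i + 1] & 0xfffffff,
		};
		return v;
	}
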
+
 static int evergreen_startup(struct radeon_device *rdev)
 {
-#if 0
        int r;
 
        if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -505,17 +1937,15 @@ static int evergreen_startup(struct radeon_device *rdev)
                        return r;
                }
        }
-#endif
+
        evergreen_mc_program(rdev);
-#if 0
        if (rdev->flags & RADEON_IS_AGP) {
-               evergreem_agp_enable(rdev);
+               evergreen_agp_enable(rdev);
        } else {
                r = evergreen_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
-#endif
        evergreen_gpu_init(rdev);
 #if 0
        if (!rdev->r600_blit.shader_obj) {
@@ -536,6 +1966,7 @@ static int evergreen_startup(struct radeon_device *rdev)
                DRM_ERROR("failed to pin blit object %d\n", r);
                return r;
        }
+#endif
 
        /* Enable IRQ */
        r = r600_irq_init(rdev);
@@ -544,7 +1975,7 @@ static int evergreen_startup(struct radeon_device *rdev)
                radeon_irq_kms_fini(rdev);
                return r;
        }
-       r600_irq_set(rdev);
+       evergreen_irq_set(rdev);
 
        r = radeon_ring_init(rdev, rdev->cp.ring_size);
        if (r)
@@ -552,12 +1983,12 @@ static int evergreen_startup(struct radeon_device *rdev)
        r = evergreen_cp_load_microcode(rdev);
        if (r)
                return r;
-       r = r600_cp_resume(rdev);
+       r = evergreen_cp_resume(rdev);
        if (r)
                return r;
        /* write back buffers are not vital so don't worry about failure */
        r600_wb_enable(rdev);
-#endif
+
        return 0;
 }
 
@@ -582,13 +2013,13 @@ int evergreen_resume(struct radeon_device *rdev)
                DRM_ERROR("evergreen startup failed on resume\n");
                return r;
        }
-#if 0
+
        r = r600_ib_test(rdev);
        if (r) {
                DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                return r;
        }
-#endif
+
        return r;
 
 }
@@ -597,12 +2028,14 @@ int evergreen_suspend(struct radeon_device *rdev)
 {
 #if 0
        int r;
-
+#endif
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
        rdev->cp.ready = false;
+       evergreen_irq_suspend(rdev);
        r600_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
+#if 0
        /* unpin shaders bo */
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (likely(r == 0)) {
@@ -682,8 +2115,6 @@ int evergreen_init(struct radeon_device *rdev)
        r = radeon_clocks_init(rdev);
        if (r)
                return r;
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@ -702,7 +2133,7 @@ int evergreen_init(struct radeon_device *rdev)
        r = radeon_bo_init(rdev);
        if (r)
                return r;
-#if 0
+
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
@@ -716,14 +2147,16 @@ int evergreen_init(struct radeon_device *rdev)
        r = r600_pcie_gart_init(rdev);
        if (r)
                return r;
-#endif
+
        rdev->accel_working = false;
        r = evergreen_startup(rdev);
        if (r) {
-               evergreen_suspend(rdev);
-               /*r600_wb_fini(rdev);*/
-               /*radeon_ring_fini(rdev);*/
-               /*evergreen_pcie_gart_fini(rdev);*/
+               dev_err(rdev->dev, "disabling GPU acceleration\n");
+               r700_cp_fini(rdev);
+               r600_wb_fini(rdev);
+               r600_irq_fini(rdev);
+               radeon_irq_kms_fini(rdev);
+               evergreen_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
        if (rdev->accel_working) {
@@ -743,16 +2176,12 @@ int evergreen_init(struct radeon_device *rdev)
 
 void evergreen_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
-       evergreen_suspend(rdev);
-#if 0
-       r600_blit_fini(rdev);
+       /*r600_blit_fini(rdev);*/
+       r700_cp_fini(rdev);
+       r600_wb_fini(rdev);
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
-       radeon_ring_fini(rdev);
-       r600_wb_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
-#endif
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_clocks_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f7c7c96..af86af8 100644
 #define EVERGREEN_CRTC5_REGISTER_OFFSET                 (0x129f0 - 0x6df0)
 
 /* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x6e34
 #define EVERGREEN_CRTC_CONTROL                          0x6e70
 #       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_STATUS                           0x6e8c
+#define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 
 #define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 0000000..93e9e17
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS           256
+#define EVERGREEN_MAX_TEMP_GPRS         16
+#define EVERGREEN_MAX_SH_THREADS        256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES  4096
+#define EVERGREEN_MAX_FRC_EOV_CNT       16384
+#define EVERGREEN_MAX_BACKENDS          8
+#define EVERGREEN_MAX_BACKENDS_MASK     0xFF
+#define EVERGREEN_MAX_SIMDS             16
+#define EVERGREEN_MAX_SIMDS_MASK        0xFFFF
+#define EVERGREEN_MAX_PIPES             8
+#define EVERGREEN_MAX_PIPES_MASK        0xFF
+#define EVERGREEN_MAX_LDS_NUM           0xFFFF
+
+/* Registers */
+
+#define RCU_IND_INDEX                                  0x100
+#define RCU_IND_DATA                                   0x104
+
+#define GRBM_GFX_INDEX                                 0x802C
+#define                INSTANCE_INDEX(x)                       ((x) << 0)
+#define                SE_INDEX(x)                             ((x) << 16)
+#define                INSTANCE_BROADCAST_WRITES               (1 << 30)
+#define                SE_BROADCAST_WRITES                     (1 << 31)
+#define RLC_GFX_INDEX                                  0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG                       0x8950
+#define                WRITE_DIS                               (1 << 0)
+#define CC_RB_BACKEND_DISABLE                          0x98F4
+#define                BACKEND_DISABLE(x)                      ((x) << 16)
+#define GB_ADDR_CONFIG                                 0x98F8
+#define                NUM_PIPES(x)                            ((x) << 0)
+#define                PIPE_INTERLEAVE_SIZE(x)                 ((x) << 4)
+#define                BANK_INTERLEAVE_SIZE(x)                 ((x) << 8)
+#define                NUM_SHADER_ENGINES(x)                   ((x) << 12)
+#define                SHADER_ENGINE_TILE_SIZE(x)              ((x) << 16)
+#define                NUM_GPUS(x)                             ((x) << 20)
+#define                MULTI_GPU_TILE_SIZE(x)                  ((x) << 24)
+#define                ROW_SIZE(x)                             ((x) << 28)
+#define GB_BACKEND_MAP                                 0x98FC
+#define DMIF_ADDR_CONFIG                               0xBD4
+#define HDP_ADDR_CONFIG                                0x2F48
+
+#define        CC_SYS_RB_BACKEND_DISABLE                       0x3F88
+#define        GC_USER_RB_BACKEND_DISABLE                      0x9B7C
+
+#define        CGTS_SYS_TCC_DISABLE                            0x3F90
+#define        CGTS_TCC_DISABLE                                0x9148
+#define        CGTS_USER_SYS_TCC_DISABLE                       0x3F94
+#define        CGTS_USER_TCC_DISABLE                           0x914C
+
+#define        CONFIG_MEMSIZE                                  0x5428
+
+#define CP_ME_CNTL                                     0x86D8
+#define                CP_ME_HALT                                      (1 << 28)
+#define                CP_PFP_HALT                                     (1 << 26)
+#define        CP_ME_RAM_DATA                                  0xC160
+#define        CP_ME_RAM_RADDR                                 0xC158
+#define        CP_ME_RAM_WADDR                                 0xC15C
+#define CP_MEQ_THRESHOLDS                              0x8764
+#define                STQ_SPLIT(x)                                    ((x) << 0)
+#define        CP_PERFMON_CNTL                                 0x87FC
+#define        CP_PFP_UCODE_ADDR                               0xC150
+#define        CP_PFP_UCODE_DATA                               0xC154
+#define        CP_QUEUE_THRESHOLDS                             0x8760
+#define                ROQ_IB1_START(x)                                ((x) << 0)
+#define                ROQ_IB2_START(x)                                ((x) << 8)
+#define        CP_RB_BASE                                      0xC100
+#define        CP_RB_CNTL                                      0xC104
+#define                RB_BUFSZ(x)                                     ((x) << 0)
+#define                RB_BLKSZ(x)                                     ((x) << 8)
+#define                RB_NO_UPDATE                                    (1 << 27)
+#define                RB_RPTR_WR_ENA                                  (1 << 31)
+#define                BUF_SWAP_32BIT                                  (2 << 16)
+#define        CP_RB_RPTR                                      0x8700
+#define        CP_RB_RPTR_ADDR                                 0xC10C
+#define        CP_RB_RPTR_ADDR_HI                              0xC110
+#define        CP_RB_RPTR_WR                                   0xC108
+#define        CP_RB_WPTR                                      0xC114
+#define        CP_RB_WPTR_ADDR                                 0xC118
+#define        CP_RB_WPTR_ADDR_HI                              0xC11C
+#define        CP_RB_WPTR_DELAY                                0x8704
+#define        CP_SEM_WAIT_TIMER                               0x85BC
+#define        CP_DEBUG                                        0xC1FC
+
+
+#define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
+#define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
+#define                INACTIVE_QD_PIPES_MASK                          0x0000FF00
+#define                INACTIVE_SIMDS(x)                               ((x) << 16)
+#define                INACTIVE_SIMDS_MASK                             0x00FF0000
+
+#define        GRBM_CNTL                                       0x8000
+#define                GRBM_READ_TIMEOUT(x)                            ((x) << 0)
+#define        GRBM_SOFT_RESET                                 0x8020
+#define                SOFT_RESET_CP                                   (1 << 0)
+#define                SOFT_RESET_CB                                   (1 << 1)
+#define                SOFT_RESET_DB                                   (1 << 3)
+#define                SOFT_RESET_PA                                   (1 << 5)
+#define                SOFT_RESET_SC                                   (1 << 6)
+#define                SOFT_RESET_SPI                                  (1 << 8)
+#define                SOFT_RESET_SH                                   (1 << 9)
+#define                SOFT_RESET_SX                                   (1 << 10)
+#define                SOFT_RESET_TC                                   (1 << 11)
+#define                SOFT_RESET_TA                                   (1 << 12)
+#define                SOFT_RESET_VC                                   (1 << 13)
+#define                SOFT_RESET_VGT                                  (1 << 14)
+
+#define        GRBM_STATUS                                     0x8010
+#define                CMDFIFO_AVAIL_MASK                              0x0000000F
+#define                SRBM_RQ_PENDING                                 (1 << 5)
+#define                CF_RQ_PENDING                                   (1 << 7)
+#define                PF_RQ_PENDING                                   (1 << 8)
+#define                GRBM_EE_BUSY                                    (1 << 10)
+#define                SX_CLEAN                                        (1 << 11)
+#define                DB_CLEAN                                        (1 << 12)
+#define                CB_CLEAN                                        (1 << 13)
+#define                TA_BUSY                                         (1 << 14)
+#define                VGT_BUSY_NO_DMA                                 (1 << 16)
+#define                VGT_BUSY                                        (1 << 17)
+#define                SX_BUSY                                         (1 << 20)
+#define                SH_BUSY                                         (1 << 21)
+#define                SPI_BUSY                                        (1 << 22)
+#define                SC_BUSY                                         (1 << 24)
+#define                PA_BUSY                                         (1 << 25)
+#define                DB_BUSY                                         (1 << 26)
+#define                CP_COHERENCY_BUSY                               (1 << 28)
+#define                CP_BUSY                                         (1 << 29)
+#define                CB_BUSY                                         (1 << 30)
+#define                GUI_ACTIVE                                      (1 << 31)
+#define        GRBM_STATUS_SE0                                 0x8014
+#define        GRBM_STATUS_SE1                                 0x8018
+#define                SE_SX_CLEAN                                     (1 << 0)
+#define                SE_DB_CLEAN                                     (1 << 1)
+#define                SE_CB_CLEAN                                     (1 << 2)
+#define                SE_TA_BUSY                                      (1 << 25)
+#define                SE_SX_BUSY                                      (1 << 26)
+#define                SE_SPI_BUSY                                     (1 << 27)
+#define                SE_SH_BUSY                                      (1 << 28)
+#define                SE_SC_BUSY                                      (1 << 29)
+#define                SE_DB_BUSY                                      (1 << 30)
+#define                SE_CB_BUSY                                      (1 << 31)
+
+#define        HDP_HOST_PATH_CNTL                              0x2C00
+#define        HDP_NONSURFACE_BASE                             0x2C04
+#define        HDP_NONSURFACE_INFO                             0x2C08
+#define        HDP_NONSURFACE_SIZE                             0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL                   0x54A0
+#define        HDP_TILING_CONFIG                               0x2F3C
+
+#define MC_SHARED_CHMAP                                                0x2004
+#define                NOOFCHAN_SHIFT                                  12
+#define                NOOFCHAN_MASK                                   0x00003000
+
+#define        MC_ARB_RAMCFG                                   0x2760
+#define                NOOFBANK_SHIFT                                  0
+#define                NOOFBANK_MASK                                   0x00000003
+#define                NOOFRANK_SHIFT                                  2
+#define                NOOFRANK_MASK                                   0x00000004
+#define                NOOFROWS_SHIFT                                  3
+#define                NOOFROWS_MASK                                   0x00000038
+#define                NOOFCOLS_SHIFT                                  6
+#define                NOOFCOLS_MASK                                   0x000000C0
+#define                CHANSIZE_SHIFT                                  8
+#define                CHANSIZE_MASK                                   0x00000100
+#define                BURSTLENGTH_SHIFT                               9
+#define                BURSTLENGTH_MASK                                0x00000200
+#define                CHANSIZE_OVERRIDE                               (1 << 11)
+#define        MC_VM_AGP_TOP                                   0x2028
+#define        MC_VM_AGP_BOT                                   0x202C
+#define        MC_VM_AGP_BASE                                  0x2030
+#define        MC_VM_FB_LOCATION                               0x2024
+#define        MC_VM_MB_L1_TLB0_CNTL                           0x2234
+#define        MC_VM_MB_L1_TLB1_CNTL                           0x2238
+#define        MC_VM_MB_L1_TLB2_CNTL                           0x223C
+#define        MC_VM_MB_L1_TLB3_CNTL                           0x2240
+#define                ENABLE_L1_TLB                                   (1 << 0)
+#define                ENABLE_L1_FRAGMENT_PROCESSING                   (1 << 1)
+#define                SYSTEM_ACCESS_MODE_PA_ONLY                      (0 << 3)
+#define                SYSTEM_ACCESS_MODE_USE_SYS_MAP                  (1 << 3)
+#define                SYSTEM_ACCESS_MODE_IN_SYS                       (2 << 3)
+#define                SYSTEM_ACCESS_MODE_NOT_IN_SYS                   (3 << 3)
+#define                SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU       (0 << 5)
+#define                EFFECTIVE_L1_TLB_SIZE(x)                        ((x)<<15)
+#define                EFFECTIVE_L1_QUEUE_SIZE(x)                      ((x)<<18)
+#define        MC_VM_MD_L1_TLB0_CNTL                           0x2654
+#define        MC_VM_MD_L1_TLB1_CNTL                           0x2658
+#define        MC_VM_MD_L1_TLB2_CNTL                           0x265C
+#define        MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR              0x203C
+#define        MC_VM_SYSTEM_APERTURE_HIGH_ADDR                 0x2038
+#define        MC_VM_SYSTEM_APERTURE_LOW_ADDR                  0x2034
+
+#define        PA_CL_ENHANCE                                   0x8A14
+#define                CLIP_VTX_REORDER_ENA                            (1 << 0)
+#define                NUM_CLIP_SEQ(x)                                 ((x) << 1)
+#define PA_SC_AA_CONFIG                                        0x28C04
+#define PA_SC_CLIPRECT_RULE                            0x2820C
+#define        PA_SC_EDGERULE                                  0x28230
+#define        PA_SC_FIFO_SIZE                                 0x8BCC
+#define                SC_PRIM_FIFO_SIZE(x)                            ((x) << 0)
+#define                SC_HIZ_TILE_FIFO_SIZE(x)                        ((x) << 12)
+#define                SC_EARLYZ_TILE_FIFO_SIZE(x)                     ((x) << 20)
+#define        PA_SC_FORCE_EOV_MAX_CNTS                        0x8B24
+#define                FORCE_EOV_MAX_CLK_CNT(x)                        ((x) << 0)
+#define                FORCE_EOV_MAX_REZ_CNT(x)                        ((x) << 16)
+#define PA_SC_LINE_STIPPLE                             0x28A0C
+#define        PA_SC_LINE_STIPPLE_STATE                        0x8B10
+
+#define        SCRATCH_REG0                                    0x8500
+#define        SCRATCH_REG1                                    0x8504
+#define        SCRATCH_REG2                                    0x8508
+#define        SCRATCH_REG3                                    0x850C
+#define        SCRATCH_REG4                                    0x8510
+#define        SCRATCH_REG5                                    0x8514
+#define        SCRATCH_REG6                                    0x8518
+#define        SCRATCH_REG7                                    0x851C
+#define        SCRATCH_UMSK                                    0x8540
+#define        SCRATCH_ADDR                                    0x8544
+
+#define        SMX_DC_CTL0                                     0xA020
+#define                USE_HASH_FUNCTION                               (1 << 0)
+#define                NUMBER_OF_SETS(x)                               ((x) << 1)
+#define                FLUSH_ALL_ON_EVENT                              (1 << 10)
+#define                STALL_ON_EVENT                                  (1 << 11)
+#define        SMX_EVENT_CTL                                   0xA02C
+#define                ES_FLUSH_CTL(x)                                 ((x) << 0)
+#define                GS_FLUSH_CTL(x)                                 ((x) << 3)
+#define                ACK_FLUSH_CTL(x)                                ((x) << 6)
+#define                SYNC_FLUSH_CTL                                  (1 << 8)
+
+#define        SPI_CONFIG_CNTL                                 0x9100
+#define                GPR_WRITE_PRIORITY(x)                           ((x) << 0)
+#define        SPI_CONFIG_CNTL_1                               0x913C
+#define                VTX_DONE_DELAY(x)                               ((x) << 0)
+#define                INTERP_ONE_PRIM_PER_ROW                         (1 << 4)
+#define        SPI_INPUT_Z                                     0x286D8
+#define        SPI_PS_IN_CONTROL_0                             0x286CC
+#define                NUM_INTERP(x)                                   ((x)<<0)
+#define                POSITION_ENA                                    (1<<8)
+#define                POSITION_CENTROID                               (1<<9)
+#define                POSITION_ADDR(x)                                ((x)<<10)
+#define                PARAM_GEN(x)                                    ((x)<<15)
+#define                PARAM_GEN_ADDR(x)                               ((x)<<19)
+#define                BARYC_SAMPLE_CNTL(x)                            ((x)<<26)
+#define                PERSP_GRADIENT_ENA                              (1<<28)
+#define                LINEAR_GRADIENT_ENA                             (1<<29)
+#define                POSITION_SAMPLE                                 (1<<30)
+#define                BARYC_AT_SAMPLE_ENA                             (1<<31)
+
+#define        SQ_CONFIG                                       0x8C00
+#define                VC_ENABLE                                       (1 << 0)
+#define                EXPORT_SRC_C                                    (1 << 1)
+#define                CS_PRIO(x)                                      ((x) << 18)
+#define                LS_PRIO(x)                                      ((x) << 20)
+#define                HS_PRIO(x)                                      ((x) << 22)
+#define                PS_PRIO(x)                                      ((x) << 24)
+#define                VS_PRIO(x)                                      ((x) << 26)
+#define                GS_PRIO(x)                                      ((x) << 28)
+#define                ES_PRIO(x)                                      ((x) << 30)
+#define        SQ_GPR_RESOURCE_MGMT_1                          0x8C04
+#define                NUM_PS_GPRS(x)                                  ((x) << 0)
+#define                NUM_VS_GPRS(x)                                  ((x) << 16)
+#define                NUM_CLAUSE_TEMP_GPRS(x)                         ((x) << 28)
+#define        SQ_GPR_RESOURCE_MGMT_2                          0x8C08
+#define                NUM_GS_GPRS(x)                                  ((x) << 0)
+#define                NUM_ES_GPRS(x)                                  ((x) << 16)
+#define        SQ_GPR_RESOURCE_MGMT_3                          0x8C0C
+#define                NUM_HS_GPRS(x)                                  ((x) << 0)
+#define                NUM_LS_GPRS(x)                                  ((x) << 16)
+#define        SQ_THREAD_RESOURCE_MGMT                         0x8C18
+#define                NUM_PS_THREADS(x)                               ((x) << 0)
+#define                NUM_VS_THREADS(x)                               ((x) << 8)
+#define                NUM_GS_THREADS(x)                               ((x) << 16)
+#define                NUM_ES_THREADS(x)                               ((x) << 24)
+#define        SQ_THREAD_RESOURCE_MGMT_2                       0x8C1C
+#define                NUM_HS_THREADS(x)                               ((x) << 0)
+#define                NUM_LS_THREADS(x)                               ((x) << 8)
+#define        SQ_STACK_RESOURCE_MGMT_1                        0x8C20
+#define                NUM_PS_STACK_ENTRIES(x)                         ((x) << 0)
+#define                NUM_VS_STACK_ENTRIES(x)                         ((x) << 16)
+#define        SQ_STACK_RESOURCE_MGMT_2                        0x8C24
+#define                NUM_GS_STACK_ENTRIES(x)                         ((x) << 0)
+#define                NUM_ES_STACK_ENTRIES(x)                         ((x) << 16)
+#define        SQ_STACK_RESOURCE_MGMT_3                        0x8C28
+#define                NUM_HS_STACK_ENTRIES(x)                         ((x) << 0)
+#define                NUM_LS_STACK_ENTRIES(x)                         ((x) << 16)
+#define        SQ_DYN_GPR_CNTL_PS_FLUSH_REQ                    0x8D8C
+#define        SQ_LDS_RESOURCE_MGMT                            0x8E2C
+
+#define        SQ_MS_FIFO_SIZES                                0x8CF0
+#define                CACHE_FIFO_SIZE(x)                              ((x) << 0)
+#define                FETCH_FIFO_HIWATER(x)                           ((x) << 8)
+#define                DONE_FIFO_HIWATER(x)                            ((x) << 16)
+#define                ALU_UPDATE_FIFO_HIWATER(x)                      ((x) << 24)
+
+#define        SX_DEBUG_1                                      0x9058
+#define                ENABLE_NEW_SMX_ADDRESS                          (1 << 16)
+#define        SX_EXPORT_BUFFER_SIZES                          0x900C
+#define                COLOR_BUFFER_SIZE(x)                            ((x) << 0)
+#define                POSITION_BUFFER_SIZE(x)                         ((x) << 8)
+#define                SMX_BUFFER_SIZE(x)                              ((x) << 16)
+#define        SX_MISC                                         0x28350
+
+#define CB_PERF_CTR0_SEL_0                             0x9A20
+#define CB_PERF_CTR0_SEL_1                             0x9A24
+#define CB_PERF_CTR1_SEL_0                             0x9A28
+#define CB_PERF_CTR1_SEL_1                             0x9A2C
+#define CB_PERF_CTR2_SEL_0                             0x9A30
+#define CB_PERF_CTR2_SEL_1                             0x9A34
+#define CB_PERF_CTR3_SEL_0                             0x9A38
+#define CB_PERF_CTR3_SEL_1                             0x9A3C
+
+#define        TA_CNTL_AUX                                     0x9508
+#define                DISABLE_CUBE_WRAP                               (1 << 0)
+#define                DISABLE_CUBE_ANISO                              (1 << 1)
+#define                SYNC_GRADIENT                                   (1 << 24)
+#define                SYNC_WALKER                                     (1 << 25)
+#define                SYNC_ALIGNER                                    (1 << 26)
+
+#define        VGT_CACHE_INVALIDATION                          0x88C4
+#define                CACHE_INVALIDATION(x)                           ((x) << 0)
+#define                        VC_ONLY                                         0
+#define                        TC_ONLY                                         1
+#define                        VC_AND_TC                                       2
+#define                AUTO_INVLD_EN(x)                                ((x) << 6)
+#define                        NO_AUTO                                         0
+#define                        ES_AUTO                                         1
+#define                        GS_AUTO                                         2
+#define                        ES_AND_GS_AUTO                                  3
+#define        VGT_GS_VERTEX_REUSE                             0x88D4
+#define        VGT_NUM_INSTANCES                               0x8974
+#define        VGT_OUT_DEALLOC_CNTL                            0x28C5C
+#define                DEALLOC_DIST_MASK                               0x0000007F
+#define        VGT_VERTEX_REUSE_BLOCK_CNTL                     0x28C58
+#define                VTX_REUSE_DEPTH_MASK                            0x000000FF
+
+#define VM_CONTEXT0_CNTL                               0x1410
+#define                ENABLE_CONTEXT                                  (1 << 0)
+#define                PAGE_TABLE_DEPTH(x)                             (((x) & 3) << 1)
+#define                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 4)
+#define VM_CONTEXT1_CNTL                               0x1414
+#define        VM_CONTEXT0_PAGE_TABLE_BASE_ADDR                0x153C
+#define        VM_CONTEXT0_PAGE_TABLE_END_ADDR                 0x157C
+#define        VM_CONTEXT0_PAGE_TABLE_START_ADDR               0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR      0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE                   0x1470
+#define                REQUEST_TYPE(x)                                 (((x) & 0xf) << 0)
+#define                RESPONSE_TYPE_MASK                              0x000000F0
+#define                RESPONSE_TYPE_SHIFT                             4
+#define VM_L2_CNTL                                     0x1400
+#define                ENABLE_L2_CACHE                                 (1 << 0)
+#define                ENABLE_L2_FRAGMENT_PROCESSING                   (1 << 1)
+#define                ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE         (1 << 9)
+#define                EFFECTIVE_L2_QUEUE_SIZE(x)                      (((x) & 7) << 14)
+#define VM_L2_CNTL2                                    0x1404
+#define                INVALIDATE_ALL_L1_TLBS                          (1 << 0)
+#define                INVALIDATE_L2_CACHE                             (1 << 1)
+#define VM_L2_CNTL3                                    0x1408
+#define                BANK_SELECT(x)                                  ((x) << 0)
+#define                CACHE_UPDATE_MODE(x)                            ((x) << 6)
+#define        VM_L2_STATUS                                    0x140C
+#define                L2_BUSY                                         (1 << 0)
+
+#define        WAIT_UNTIL                                      0x8040
+
+#define        SRBM_STATUS                                     0x0E50
+#define        SRBM_SOFT_RESET                                 0x0E60
+#define                SRBM_SOFT_RESET_ALL_MASK                0x00FEEFA6
+#define                SOFT_RESET_BIF                          (1 << 1)
+#define                SOFT_RESET_CG                           (1 << 2)
+#define                SOFT_RESET_DC                           (1 << 5)
+#define                SOFT_RESET_GRBM                         (1 << 8)
+#define                SOFT_RESET_HDP                          (1 << 9)
+#define                SOFT_RESET_IH                           (1 << 10)
+#define                SOFT_RESET_MC                           (1 << 11)
+#define                SOFT_RESET_RLC                          (1 << 13)
+#define                SOFT_RESET_ROM                          (1 << 14)
+#define                SOFT_RESET_SEM                          (1 << 15)
+#define                SOFT_RESET_VMC                          (1 << 17)
+#define                SOFT_RESET_TST                          (1 << 21)
+#define                SOFT_RESET_REGBB                        (1 << 22)
+#define                SOFT_RESET_ORB                          (1 << 23)
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 2)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+
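Taken together, the IH_* bits above configure the interrupt handler's ring buffer. As a hedged illustration only (not code from this patch), a driver could compose the two control words like this; note that IH_IB_SIZE() and IH_WPTR_WRITEBACK_TIMER() take log2-encoded values:

    /* Illustration only: composing IH setup values from the bits above. */
    u32 ih_rb_cntl = IH_RB_ENABLE |
                     IH_IB_SIZE(10) |          /* 2^10 dwords = 4 KB ring */
                     IH_WPTR_WRITEBACK_ENABLE |
                     IH_WPTR_OVERFLOW_ENABLE;
    u32 ih_cntl = ENABLE_INTR | IH_MC_SWAP(IH_MC_SWAP_NONE) | RPTR_REARM;

    WREG32(IH_RB_CNTL, ih_rb_cntl);
    WREG32(IH_CNTL, ih_cntl);
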
+#define CP_INT_CNTL                                     0xc124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define SCRATCH_INT_ENABLE                       (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define IB2_INT_ENABLE                           (1 << 29)
+#       define IB1_INT_ENABLE                           (1 << 30)
+#       define RB_INT_ENABLE                            (1 << 31)
+#define CP_INT_STATUS                                   0xc128
+#       define SCRATCH_INT_STAT                         (1 << 25)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define IB2_INT_STAT                             (1 << 29)
+#       define IB1_INT_STAT                             (1 << 30)
+#       define RB_INT_STAT                              (1 << 31)
+
+#define GRBM_INT_CNTL                                   0x8060
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x6bb8
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x6bbc
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x6b40
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x60f4
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6050
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define        GRPH_INT_CONTROL                                0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define        DACA_AUTODETECT_INT_CONTROL                     0x66c8
+#define        DACB_AUTODETECT_INT_CONTROL                     0x67c8
+
+#define DC_HPD1_INT_STATUS                              0x601c
+#define DC_HPD2_INT_STATUS                              0x6028
+#define DC_HPD3_INT_STATUS                              0x6034
+#define DC_HPD4_INT_STATUS                              0x6040
+#define DC_HPD5_INT_STATUS                              0x604c
+#define DC_HPD6_INT_STATUS                              0x6058
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x6020
+#define DC_HPD2_INT_CONTROL                             0x602c
+#define DC_HPD3_INT_CONTROL                             0x6038
+#define DC_HPD4_INT_CONTROL                             0x6044
+#define DC_HPD5_INT_CONTROL                             0x6050
+#define DC_HPD6_INT_CONTROL                             0x605c
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x6024
+#define DC_HPD2_CONTROL                                   0x6030
+#define DC_HPD3_CONTROL                                   0x603c
+#define DC_HPD4_CONTROL                                   0x6048
+#define DC_HPD5_CONTROL                                   0x6054
+#define DC_HPD6_CONTROL                                   0x6060
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
+
+#endif
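Most of the values in this header are meant to be OR-ed together when programming a register. As a hedged illustration only (not part of the patch), programming the VGT cache invalidation mode and the VM L2 cache with the macros above might look like:

    /* Illustration only: composing register values from the macros above. */
    WREG32(VGT_CACHE_INVALIDATION,
           CACHE_INVALIDATION(VC_AND_TC) | AUTO_INVLD_EN(ES_AND_GS_AUTO));
    WREG32(VM_L2_CNTL,
           ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
           EFFECTIVE_L2_QUEUE_SIZE(7));
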
index cf60c0b..cc004b0 100644 (file)
@@ -37,6 +37,7 @@
 #include "rs100d.h"
 #include "rv200d.h"
 #include "rv250d.h"
+#include "atom.h"
 
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
@@ -67,6 +68,264 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+       int i;
+       rdev->pm.dynpm_can_upclock = true;
+       rdev->pm.dynpm_can_downclock = true;
+
+       switch (rdev->pm.dynpm_planned_action) {
+       case DYNPM_ACTION_MINIMUM:
+               rdev->pm.requested_power_state_index = 0;
+               rdev->pm.dynpm_can_downclock = false;
+               break;
+       case DYNPM_ACTION_DOWNCLOCK:
+               if (rdev->pm.current_power_state_index == 0) {
+                       rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+                       rdev->pm.dynpm_can_downclock = false;
+               } else {
+                       if (rdev->pm.active_crtc_count > 1) {
+                               for (i = 0; i < rdev->pm.num_power_states; i++) {
+                                       if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+                                               continue;
+                                       else if (i >= rdev->pm.current_power_state_index) {
+                                               rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+                                               break;
+                                       } else {
+                                               rdev->pm.requested_power_state_index = i;
+                                               break;
+                                       }
+                               }
+                       } else
+                               rdev->pm.requested_power_state_index =
+                                       rdev->pm.current_power_state_index - 1;
+               }
+               /* don't use a NO_DISPLAY power state while CRTCs are active */
+               if ((rdev->pm.active_crtc_count > 0) &&
+                   (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+                    RADEON_PM_MODE_NO_DISPLAY)) {
+                       rdev->pm.requested_power_state_index++;
+               }
+               break;
+       case DYNPM_ACTION_UPCLOCK:
+               if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+                       rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+                       rdev->pm.dynpm_can_upclock = false;
+               } else {
+                       if (rdev->pm.active_crtc_count > 1) {
+                               for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+                                       if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+                                               continue;
+                                       else if (i <= rdev->pm.current_power_state_index) {
+                                               rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+                                               break;
+                                       } else {
+                                               rdev->pm.requested_power_state_index = i;
+                                               break;
+                                       }
+                               }
+                       } else
+                               rdev->pm.requested_power_state_index =
+                                       rdev->pm.current_power_state_index + 1;
+               }
+               break;
+       case DYNPM_ACTION_DEFAULT:
+               rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+               rdev->pm.dynpm_can_upclock = false;
+               break;
+       case DYNPM_ACTION_NONE:
+       default:
+               DRM_ERROR("Requested mode for not defined action\n");
+               return;
+       }
+       /* only one clock mode per power state */
+       rdev->pm.requested_clock_mode_index = 0;
+
+       DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+                 rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                 clock_info[rdev->pm.requested_clock_mode_index].sclk,
+                 rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                 clock_info[rdev->pm.requested_clock_mode_index].mclk,
+                 rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                 pcie_lanes);
+}
+
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+       /* default */
+       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+       /* low sh */
+       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+       /* high sh */
+       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+       /* low mh */
+       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+       /* high mh */
+       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+void r100_pm_misc(struct radeon_device *rdev)
+{
+       int requested_index = rdev->pm.requested_power_state_index;
+       struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+       struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+       u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+       if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+               if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+                       tmp = RREG32(voltage->gpio.reg);
+                       if (voltage->active_high)
+                               tmp |= voltage->gpio.mask;
+                       else
+                               tmp &= ~(voltage->gpio.mask);
+                       WREG32(voltage->gpio.reg, tmp);
+                       if (voltage->delay)
+                               udelay(voltage->delay);
+               } else {
+                       tmp = RREG32(voltage->gpio.reg);
+                       if (voltage->active_high)
+                               tmp &= ~voltage->gpio.mask;
+                       else
+                               tmp |= voltage->gpio.mask;
+                       WREG32(voltage->gpio.reg, tmp);
+                       if (voltage->delay)
+                               udelay(voltage->delay);
+               }
+       }
+
+       sclk_cntl = RREG32_PLL(SCLK_CNTL);
+       sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+       sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+       sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+       sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+       if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+               sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+               if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+                       sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+               else
+                       sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+               if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+                       sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+               else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+                       sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+       } else
+               sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+       if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+               sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+               if (voltage->delay) {
+                       sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+                       switch (voltage->delay) {
+                       case 33:
+                               sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+                               break;
+                       case 66:
+                               sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+                               break;
+                       case 99:
+                               sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+                               break;
+                       case 132:
+                               sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+                               break;
+                       }
+               } else
+                       sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+       } else
+               sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+       if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+               sclk_cntl &= ~FORCE_HDP;
+       else
+               sclk_cntl |= FORCE_HDP;
+
+       WREG32_PLL(SCLK_CNTL, sclk_cntl);
+       WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+       WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+       /* set pcie lanes */
+       if ((rdev->flags & RADEON_IS_PCIE) &&
+           !(rdev->flags & RADEON_IS_IGP) &&
+           rdev->asic->set_pcie_lanes &&
+           (ps->pcie_lanes !=
+            rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+               radeon_set_pcie_lanes(rdev,
+                                     ps->pcie_lanes);
+               DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+       }
+}
+
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+       struct drm_device *ddev = rdev->ddev;
+       struct drm_crtc *crtc;
+       struct radeon_crtc *radeon_crtc;
+       u32 tmp;
+
+       /* disable any active CRTCs */
+       list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+               radeon_crtc = to_radeon_crtc(crtc);
+               if (radeon_crtc->enabled) {
+                       if (radeon_crtc->crtc_id) {
+                               tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+                               tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+                               WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+                       } else {
+                               tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+                               tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+                               WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+                       }
+               }
+       }
+}
+
+void r100_pm_finish(struct radeon_device *rdev)
+{
+       struct drm_device *ddev = rdev->ddev;
+       struct drm_crtc *crtc;
+       struct radeon_crtc *radeon_crtc;
+       u32 tmp;
+
+       /* enable any active CRTCs */
+       list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+               radeon_crtc = to_radeon_crtc(crtc);
+               if (radeon_crtc->enabled) {
+                       if (radeon_crtc->crtc_id) {
+                               tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+                               tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+                               WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+                       } else {
+                               tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+                               tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+                               WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+                       }
+               }
+       }
+}
+
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+       if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+               return false;
+       else
+               return true;
+}
+
 /* hpd for digital panel detect/disconnect */
 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
@@ -254,6 +513,9 @@ int r100_irq_set(struct radeon_device *rdev)
        if (rdev->irq.sw_int) {
                tmp |= RADEON_SW_INT_ENABLE;
        }
+       if (rdev->irq.gui_idle) {
+               tmp |= RADEON_GUI_IDLE_MASK;
+       }
        if (rdev->irq.crtc_vblank_int[0]) {
                tmp |= RADEON_CRTC_VBLANK_MASK;
        }
@@ -288,6 +550,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
                RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
                RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
+       /* the interrupt works, but the status bit is permanently asserted */
+       if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+               if (!rdev->irq.gui_idle_acked)
+                       irq_mask |= RADEON_GUI_IDLE_STAT;
+       }
+
        if (irqs) {
                WREG32(RADEON_GEN_INT_STATUS, irqs);
        }
@@ -299,6 +567,9 @@ int r100_irq_process(struct radeon_device *rdev)
        uint32_t status, msi_rearm;
        bool queue_hotplug = false;
 
+       /* reset gui idle ack.  the status bit is broken */
+       rdev->irq.gui_idle_acked = false;
+
        status = r100_irq_ack(rdev);
        if (!status) {
                return IRQ_NONE;
@@ -311,6 +582,12 @@ int r100_irq_process(struct radeon_device *rdev)
                if (status & RADEON_SW_INT_TEST) {
                        radeon_fence_process(rdev);
                }
+               /* gui idle interrupt */
+               if (status & RADEON_GUI_IDLE_STAT) {
+                       rdev->irq.gui_idle_acked = true;
+                       rdev->pm.gui_idle = true;
+                       wake_up(&rdev->irq.idle_queue);
+               }
                /* Vertical blank interrupts */
                if (status & RADEON_CRTC_VBLANK_STAT) {
                        drm_handle_vblank(rdev->ddev, 0);
@@ -332,6 +609,8 @@ int r100_irq_process(struct radeon_device *rdev)
                }
                status = r100_irq_ack(rdev);
        }
+       /* reset gui idle ack.  the status bit is broken */
+       rdev->irq.gui_idle_acked = false;
        if (queue_hotplug)
                queue_work(rdev->wq, &rdev->hotplug_work);
        if (rdev->msi_enabled) {
@@ -663,26 +942,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        if (r100_debugfs_cp_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for CP !\n");
        }
-       /* Reset CP */
-       tmp = RREG32(RADEON_CP_CSQ_STAT);
-       if ((tmp & (1 << 31))) {
-               DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
-               WREG32(RADEON_CP_CSQ_MODE, 0);
-               WREG32(RADEON_CP_CSQ_CNTL, 0);
-               WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
-               tmp = RREG32(RADEON_RBBM_SOFT_RESET);
-               mdelay(2);
-               WREG32(RADEON_RBBM_SOFT_RESET, 0);
-               tmp = RREG32(RADEON_RBBM_SOFT_RESET);
-               mdelay(2);
-               tmp = RREG32(RADEON_CP_CSQ_STAT);
-               if ((tmp & (1 << 31))) {
-                       DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
-               }
-       } else {
-               DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
-       }
-
        if (!rdev->me_fw) {
                r = r100_cp_init_microcode(rdev);
                if (r) {
@@ -787,39 +1046,6 @@ void r100_cp_disable(struct radeon_device *rdev)
        }
 }
 
-int r100_cp_reset(struct radeon_device *rdev)
-{
-       uint32_t tmp;
-       bool reinit_cp;
-       int i;
-
-       reinit_cp = rdev->cp.ready;
-       rdev->cp.ready = false;
-       WREG32(RADEON_CP_CSQ_MODE, 0);
-       WREG32(RADEON_CP_CSQ_CNTL, 0);
-       WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
-       (void)RREG32(RADEON_RBBM_SOFT_RESET);
-       udelay(200);
-       WREG32(RADEON_RBBM_SOFT_RESET, 0);
-       /* Wait to prevent race in RBBM_STATUS */
-       mdelay(1);
-       for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = RREG32(RADEON_RBBM_STATUS);
-               if (!(tmp & (1 << 16))) {
-                       DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
-                                tmp);
-                       if (reinit_cp) {
-                               return r100_cp_init(rdev, rdev->cp.ring_size);
-                       }
-                       return 0;
-               }
-               DRM_UDELAY(1);
-       }
-       tmp = RREG32(RADEON_RBBM_STATUS);
-       DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
-       return -1;
-}
-
 void r100_cp_commit(struct radeon_device *rdev)
 {
        WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1959,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
        return -1;
 }
 
-void r100_gpu_init(struct radeon_device *rdev)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
 {
-       /* TODO: anythings to do here ? pipes ? */
-       r100_hdp_reset(rdev);
+       lockup->last_cp_rptr = cp->rptr;
+       lockup->last_jiffies = jiffies;
+}
+
+/**
+ * r100_gpu_cp_is_lockup() - check whether the CP is locked up by tracking its rptr
+ * @rdev:      radeon device structure
+ * @lockup:    r100_gpu_lockup structure holding CP lockup tracking information
+ * @cp:                radeon_cp structure holding CP information
+ *
+ * The lockup tracking information needs no explicit initialization: on the
+ * first call we will see either a different CP rptr or a jiffies wraparound,
+ * either of which forces the tracking information to be (re)initialized.
+ *
+ * A false positive is possible if we are called again after a long while and
+ * last_cp_rptr happens to equal the current CP rptr. Although unlikely, we
+ * guard against it: if more than 3 seconds have elapsed since the last call,
+ * we return false and refresh the tracking information. Consequently the
+ * caller must invoke r100_gpu_cp_is_lockup() several times within that window
+ * for a lockup to be reported; the fencing code should be cautious about that.
+ *
+ * The caller should also write to the ring to force the CP to do something,
+ * so we don't get a false positive merely because the CP has been given
+ * nothing to do.
+ *
+ **/
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+{
+       unsigned long cjiffies, elapsed;
+
+       cjiffies = jiffies;
+       if (!time_after(cjiffies, lockup->last_jiffies)) {
+               /* likely a wrap around */
+               lockup->last_cp_rptr = cp->rptr;
+               lockup->last_jiffies = jiffies;
+               return false;
+       }
+       if (cp->rptr != lockup->last_cp_rptr) {
+               /* CP is still working, no lockup */
+               lockup->last_cp_rptr = cp->rptr;
+               lockup->last_jiffies = jiffies;
+               return false;
+       }
+       elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
+       if (elapsed >= 3000) {
+               /* Very likely the improbable case where the current
+                * rptr equals an rptr recorded a while ago: treat it
+                * as a false positive and update the tracking
+                * information so that we get called again later.
+                */
+               lockup->last_cp_rptr = cp->rptr;
+               lockup->last_jiffies = jiffies;
+               return false;
+       }
+       if (elapsed >= 1000) {
+               dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+               return true;
+       }
+       /* give a chance to the GPU ... */
+       return false;
 }
 
-void r100_hdp_reset(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
 {
-       uint32_t tmp;
+       u32 rbbm_status;
+       int r;
 
-       tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
-       tmp |= (7 << 28);
-       WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
-       (void)RREG32(RADEON_HOST_PATH_CNTL);
-       udelay(200);
-       WREG32(RADEON_RBBM_SOFT_RESET, 0);
-       WREG32(RADEON_HOST_PATH_CNTL, tmp);
-       (void)RREG32(RADEON_HOST_PATH_CNTL);
+       rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+       if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+               r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+               return false;
+       }
+       /* force CP activities */
+       r = radeon_ring_lock(rdev, 2);
+       if (!r) {
+               /* PACKET2 NOP */
+               radeon_ring_write(rdev, 0x80000000);
+               radeon_ring_write(rdev, 0x80000000);
+               radeon_ring_unlock_commit(rdev);
+       }
+       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
 }
 
-int r100_rb2d_reset(struct radeon_device *rdev)
+void r100_bm_disable(struct radeon_device *rdev)
 {
-       uint32_t tmp;
-       int i;
+       u32 tmp;
 
-       WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
-       (void)RREG32(RADEON_RBBM_SOFT_RESET);
-       udelay(200);
-       WREG32(RADEON_RBBM_SOFT_RESET, 0);
-       /* Wait to prevent race in RBBM_STATUS */
+       /* disable bus mastering */
+       tmp = RREG32(R_000030_BUS_CNTL);
+       WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+       mdelay(1);
+       WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+       mdelay(1);
+       WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+       tmp = RREG32(RADEON_BUS_CNTL);
+       mdelay(1);
+       pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+       pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
        mdelay(1);
-       for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = RREG32(RADEON_RBBM_STATUS);
-               if (!(tmp & (1 << 26))) {
-                       DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
-                                tmp);
-                       return 0;
-               }
-               DRM_UDELAY(1);
-       }
-       tmp = RREG32(RADEON_RBBM_STATUS);
-       DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
-       return -1;
 }
 
-int r100_gpu_reset(struct radeon_device *rdev)
+int r100_asic_reset(struct radeon_device *rdev)
 {
-       uint32_t status;
+       struct r100_mc_save save;
+       u32 status, tmp;
 
-       /* reset order likely matter */
-       status = RREG32(RADEON_RBBM_STATUS);
-       /* reset HDP */
-       r100_hdp_reset(rdev);
-       /* reset rb2d */
-       if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
-               r100_rb2d_reset(rdev);
+       r100_mc_stop(rdev, &save);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       if (!G_000E40_GUI_ACTIVE(status)) {
+               return 0;
        }
-       /* TODO: reset 3D engine */
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+       /* stop CP */
+       WREG32(RADEON_CP_CSQ_CNTL, 0);
+       tmp = RREG32(RADEON_CP_RB_CNTL);
+       WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+       WREG32(RADEON_CP_RB_RPTR_WR, 0);
+       WREG32(RADEON_CP_RB_WPTR, 0);
+       WREG32(RADEON_CP_RB_CNTL, tmp);
+       /* save PCI state */
+       pci_save_state(rdev->pdev);
+       /* disable bus mastering */
+       r100_bm_disable(rdev);
+       WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+                                       S_0000F0_SOFT_RESET_RE(1) |
+                                       S_0000F0_SOFT_RESET_PP(1) |
+                                       S_0000F0_SOFT_RESET_RB(1));
+       RREG32(R_0000F0_RBBM_SOFT_RESET);
+       mdelay(500);
+       WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+       mdelay(1);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* reset CP */
-       status = RREG32(RADEON_RBBM_STATUS);
-       if (status & (1 << 16)) {
-               r100_cp_reset(rdev);
-       }
+       WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+       RREG32(R_0000F0_RBBM_SOFT_RESET);
+       mdelay(500);
+       WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+       mdelay(1);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+       /* restore PCI & busmastering */
+       pci_restore_state(rdev->pdev);
+       r100_enable_bm(rdev);
        /* Check if GPU is idle */
-       status = RREG32(RADEON_RBBM_STATUS);
-       if (status & RADEON_RBBM_ACTIVE) {
-               DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+       if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+               G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+               dev_err(rdev->dev, "failed to reset GPU\n");
+               rdev->gpu_lockup = true;
                return -1;
        }
-       DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+       r100_mc_resume(rdev, &save);
+       dev_info(rdev->dev, "GPU reset succeed\n");
        return 0;
 }
 
@@ -2002,11 +2315,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
                else
                        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
        }
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
 }
 
 void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -2335,53 +2643,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
        fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
        uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
        fixed20_12 memtcas_ff[8] = {
-               fixed_init(1),
-               fixed_init(2),
-               fixed_init(3),
-               fixed_init(0),
-               fixed_init_half(1),
-               fixed_init_half(2),
-               fixed_init(0),
+               dfixed_init(1),
+               dfixed_init(2),
+               dfixed_init(3),
+               dfixed_init(0),
+               dfixed_init_half(1),
+               dfixed_init_half(2),
+               dfixed_init(0),
        };
        fixed20_12 memtcas_rs480_ff[8] = {
-               fixed_init(0),
-               fixed_init(1),
-               fixed_init(2),
-               fixed_init(3),
-               fixed_init(0),
-               fixed_init_half(1),
-               fixed_init_half(2),
-               fixed_init_half(3),
+               dfixed_init(0),
+               dfixed_init(1),
+               dfixed_init(2),
+               dfixed_init(3),
+               dfixed_init(0),
+               dfixed_init_half(1),
+               dfixed_init_half(2),
+               dfixed_init_half(3),
        };
        fixed20_12 memtcas2_ff[8] = {
-               fixed_init(0),
-               fixed_init(1),
-               fixed_init(2),
-               fixed_init(3),
-               fixed_init(4),
-               fixed_init(5),
-               fixed_init(6),
-               fixed_init(7),
+               dfixed_init(0),
+               dfixed_init(1),
+               dfixed_init(2),
+               dfixed_init(3),
+               dfixed_init(4),
+               dfixed_init(5),
+               dfixed_init(6),
+               dfixed_init(7),
        };
        fixed20_12 memtrbs[8] = {
-               fixed_init(1),
-               fixed_init_half(1),
-               fixed_init(2),
-               fixed_init_half(2),
-               fixed_init(3),
-               fixed_init_half(3),
-               fixed_init(4),
-               fixed_init_half(4)
+               dfixed_init(1),
+               dfixed_init_half(1),
+               dfixed_init(2),
+               dfixed_init_half(2),
+               dfixed_init(3),
+               dfixed_init_half(3),
+               dfixed_init(4),
+               dfixed_init_half(4)
        };
        fixed20_12 memtrbs_r4xx[8] = {
-               fixed_init(4),
-               fixed_init(5),
-               fixed_init(6),
-               fixed_init(7),
-               fixed_init(8),
-               fixed_init(9),
-               fixed_init(10),
-               fixed_init(11)
+               dfixed_init(4),
+               dfixed_init(5),
+               dfixed_init(6),
+               dfixed_init(7),
+               dfixed_init(8),
+               dfixed_init(9),
+               dfixed_init(10),
+               dfixed_init(11)
        };
        fixed20_12 min_mem_eff;
        fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2412,7 +2720,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
                }
        }
 
-       min_mem_eff.full = rfixed_const_8(0);
+       min_mem_eff.full = dfixed_const_8(0);
        /* get modes */
        if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
                uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2433,28 +2741,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
        mclk_ff = rdev->pm.mclk;
 
        temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
-       temp_ff.full = rfixed_const(temp);
-       mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+       temp_ff.full = dfixed_const(temp);
+       mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
 
        pix_clk.full = 0;
        pix_clk2.full = 0;
        peak_disp_bw.full = 0;
        if (mode1) {
-               temp_ff.full = rfixed_const(1000);
-               pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
-               pix_clk.full = rfixed_div(pix_clk, temp_ff);
-               temp_ff.full = rfixed_const(pixel_bytes1);
-               peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+               temp_ff.full = dfixed_const(1000);
+               pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+               pix_clk.full = dfixed_div(pix_clk, temp_ff);
+               temp_ff.full = dfixed_const(pixel_bytes1);
+               peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
        }
        if (mode2) {
-               temp_ff.full = rfixed_const(1000);
-               pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
-               pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
-               temp_ff.full = rfixed_const(pixel_bytes2);
-               peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+               temp_ff.full = dfixed_const(1000);
+               pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+               pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+               temp_ff.full = dfixed_const(pixel_bytes2);
+               peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
        }
 
-       mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+       mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
        if (peak_disp_bw.full >= mem_bw.full) {
                DRM_ERROR("You may not have enough display bandwidth for current mode\n"
                          "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@@ -2496,9 +2804,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
                mem_tras = ((temp >> 12) & 0xf) + 4;
        }
        /* convert to FF */
-       trcd_ff.full = rfixed_const(mem_trcd);
-       trp_ff.full = rfixed_const(mem_trp);
-       tras_ff.full = rfixed_const(mem_tras);
+       trcd_ff.full = dfixed_const(mem_trcd);
+       trp_ff.full = dfixed_const(mem_trp);
+       tras_ff.full = dfixed_const(mem_tras);
 
        /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
        temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2516,7 +2824,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
                /* extra cas latency stored in bits 23-25 0-4 clocks */
                data = (temp >> 23) & 0x7;
                if (data < 5)
-                       tcas_ff.full += rfixed_const(data);
+                       tcas_ff.full += dfixed_const(data);
        }
 
        if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2553,72 +2861,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 
        if (rdev->flags & RADEON_IS_AGP) {
                fixed20_12 agpmode_ff;
-               agpmode_ff.full = rfixed_const(radeon_agpmode);
-               temp_ff.full = rfixed_const_666(16);
-               sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+               agpmode_ff.full = dfixed_const(radeon_agpmode);
+               temp_ff.full = dfixed_const_666(16);
+               sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
        }
        /* TODO PCIE lanes may affect this - agpmode == 16?? */
 
        if (ASIC_IS_R300(rdev)) {
-               sclk_delay_ff.full = rfixed_const(250);
+               sclk_delay_ff.full = dfixed_const(250);
        } else {
                if ((rdev->family == CHIP_RV100) ||
                    rdev->flags & RADEON_IS_IGP) {
                        if (rdev->mc.vram_is_ddr)
-                               sclk_delay_ff.full = rfixed_const(41);
+                               sclk_delay_ff.full = dfixed_const(41);
                        else
-                               sclk_delay_ff.full = rfixed_const(33);
+                               sclk_delay_ff.full = dfixed_const(33);
                } else {
                        if (rdev->mc.vram_width == 128)
-                               sclk_delay_ff.full = rfixed_const(57);
+                               sclk_delay_ff.full = dfixed_const(57);
                        else
-                               sclk_delay_ff.full = rfixed_const(41);
+                               sclk_delay_ff.full = dfixed_const(41);
                }
        }
 
-       mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+       mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
 
        if (rdev->mc.vram_is_ddr) {
                if (rdev->mc.vram_width == 32) {
-                       k1.full = rfixed_const(40);
+                       k1.full = dfixed_const(40);
                        c  = 3;
                } else {
-                       k1.full = rfixed_const(20);
+                       k1.full = dfixed_const(20);
                        c  = 1;
                }
        } else {
-               k1.full = rfixed_const(40);
+               k1.full = dfixed_const(40);
                c  = 3;
        }
 
-       temp_ff.full = rfixed_const(2);
-       mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
-       temp_ff.full = rfixed_const(c);
-       mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
-       temp_ff.full = rfixed_const(4);
-       mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
-       mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+       temp_ff.full = dfixed_const(2);
+       mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+       temp_ff.full = dfixed_const(c);
+       mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+       temp_ff.full = dfixed_const(4);
+       mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+       mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
        mc_latency_mclk.full += k1.full;
 
-       mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
-       mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+       mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+       mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
 
        /*
          HW cursor time assuming worst case of full size colour cursor.
        */
-       temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+       temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
        temp_ff.full += trcd_ff.full;
        if (temp_ff.full < tras_ff.full)
                temp_ff.full = tras_ff.full;
-       cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+       cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
 
-       temp_ff.full = rfixed_const(cur_size);
-       cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+       temp_ff.full = dfixed_const(cur_size);
+       cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
        /*
          Find the total latency for the display data.
        */
-       disp_latency_overhead.full = rfixed_const(8);
-       disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+       disp_latency_overhead.full = dfixed_const(8);
+       disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
        mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
        mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
 
@@ -2646,16 +2954,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
                /*
                  Find the drain rate of the display buffer.
                */
-               temp_ff.full = rfixed_const((16/pixel_bytes1));
-               disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+               temp_ff.full = dfixed_const((16/pixel_bytes1));
+               disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
 
                /*
                  Find the critical point of the display buffer.
                */
-               crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
-               crit_point_ff.full += rfixed_const_half(0);
+               crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+               crit_point_ff.full += dfixed_const_half(0);
 
-               critical_point = rfixed_trunc(crit_point_ff);
+               critical_point = dfixed_trunc(crit_point_ff);
 
                if (rdev->disp_priority == 2) {
                        critical_point = 0;
@@ -2726,8 +3034,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
                /*
                  Find the drain rate of the display buffer.
                */
-               temp_ff.full = rfixed_const((16/pixel_bytes2));
-               disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+               temp_ff.full = dfixed_const((16/pixel_bytes2));
+               disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
 
                grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
                grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -2748,8 +3056,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
                        critical_point2 = 0;
                else {
                        temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
-                       temp_ff.full = rfixed_const(temp);
-                       temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+                       temp_ff.full = dfixed_const(temp);
+                       temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
                        if (sclk_ff.full < temp_ff.full)
                                temp_ff.full = sclk_ff.full;
 
@@ -2757,15 +3065,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 
                        if (mode1) {
                                temp_ff.full = read_return_rate.full - disp_drain_rate.full;
-                               time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+                               time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
                        } else {
                                time_disp1_drop_priority.full = 0;
                        }
                        crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
-                       crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
-                       crit_point_ff.full += rfixed_const_half(0);
+                       crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+                       crit_point_ff.full += dfixed_const_half(0);
 
-                       critical_point2 = rfixed_trunc(crit_point_ff);
+                       critical_point2 = dfixed_trunc(crit_point_ff);
 
                        if (rdev->disp_priority == 2) {
                                critical_point2 = 0;
@@ -3399,7 +3707,7 @@ static int r100_startup(struct radeon_device *rdev)
        /* Resume clock */
        r100_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
-       r100_gpu_init(rdev);
+//     r100_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r100_enable_bm(rdev);
@@ -3436,7 +3744,7 @@ int r100_resume(struct radeon_device *rdev)
        /* Resume clock before doing reset */
        r100_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
@@ -3462,7 +3770,6 @@ int r100_suspend(struct radeon_device *rdev)
 
 void r100_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -3505,7 +3812,7 @@ int r100_init(struct radeon_device *rdev)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
@@ -3518,8 +3825,6 @@ int r100_init(struct radeon_device *rdev)
        r100_errata(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
index df29a63..d016b16 100644 (file)
 #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
 
 /* Registers */
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_SE(x)                    (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_SE(x)                    (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_SE                       0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
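The S_ (shift-in), G_ (extract) and C_ (clear-mask) accessors above follow the driver's usual register-header pattern. A hedged read-modify-write example using them:

    /* Illustration only: the S_/G_/C_ read-modify-write idiom. */
    u32 tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);

    tmp = (tmp & C_0000F0_SOFT_RESET_CP) | S_0000F0_SOFT_RESET_CP(1);
    WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);
    /* G_0000F0_SOFT_RESET_CP(tmp) now evaluates to 1 */
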
+#define R_000030_BUS_CNTL                            0x000030
+#define   S_000030_BUS_DBL_RESYNC(x)                   (((x) & 0x1) << 0)
+#define   G_000030_BUS_DBL_RESYNC(x)                   (((x) >> 0) & 0x1)
+#define   C_000030_BUS_DBL_RESYNC                      0xFFFFFFFE
+#define   S_000030_BUS_MSTR_RESET(x)                   (((x) & 0x1) << 1)
+#define   G_000030_BUS_MSTR_RESET(x)                   (((x) >> 1) & 0x1)
+#define   C_000030_BUS_MSTR_RESET                      0xFFFFFFFD
+#define   S_000030_BUS_FLUSH_BUF(x)                    (((x) & 0x1) << 2)
+#define   G_000030_BUS_FLUSH_BUF(x)                    (((x) >> 2) & 0x1)
+#define   C_000030_BUS_FLUSH_BUF                       0xFFFFFFFB
+#define   S_000030_BUS_STOP_REQ_DIS(x)                 (((x) & 0x1) << 3)
+#define   G_000030_BUS_STOP_REQ_DIS(x)                 (((x) >> 3) & 0x1)
+#define   C_000030_BUS_STOP_REQ_DIS                    0xFFFFFFF7
+#define   S_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) & 0x1) << 4)
+#define   G_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) >> 4) & 0x1)
+#define   C_000030_BUS_PM4_READ_COMBINE_EN             0xFFFFFFEF
+#define   S_000030_BUS_WRT_COMBINE_EN(x)               (((x) & 0x1) << 5)
+#define   G_000030_BUS_WRT_COMBINE_EN(x)               (((x) >> 5) & 0x1)
+#define   C_000030_BUS_WRT_COMBINE_EN                  0xFFFFFFDF
+#define   S_000030_BUS_MASTER_DIS(x)                   (((x) & 0x1) << 6)
+#define   G_000030_BUS_MASTER_DIS(x)                   (((x) >> 6) & 0x1)
+#define   C_000030_BUS_MASTER_DIS                      0xFFFFFFBF
+#define   S_000030_BIOS_ROM_WRT_EN(x)                  (((x) & 0x1) << 7)
+#define   G_000030_BIOS_ROM_WRT_EN(x)                  (((x) >> 7) & 0x1)
+#define   C_000030_BIOS_ROM_WRT_EN                     0xFFFFFF7F
+#define   S_000030_BM_DAC_CRIPPLE(x)                   (((x) & 0x1) << 8)
+#define   G_000030_BM_DAC_CRIPPLE(x)                   (((x) >> 8) & 0x1)
+#define   C_000030_BM_DAC_CRIPPLE                      0xFFFFFEFF
+#define   S_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) & 0x1) << 9)
+#define   G_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) >> 9) & 0x1)
+#define   C_000030_BUS_NON_PM4_READ_COMBINE_EN         0xFFFFFDFF
+#define   S_000030_BUS_XFERD_DISCARD_EN(x)             (((x) & 0x1) << 10)
+#define   G_000030_BUS_XFERD_DISCARD_EN(x)             (((x) >> 10) & 0x1)
+#define   C_000030_BUS_XFERD_DISCARD_EN                0xFFFFFBFF
+#define   S_000030_BUS_SGL_READ_DISABLE(x)             (((x) & 0x1) << 11)
+#define   G_000030_BUS_SGL_READ_DISABLE(x)             (((x) >> 11) & 0x1)
+#define   C_000030_BUS_SGL_READ_DISABLE                0xFFFFF7FF
+#define   S_000030_BIOS_DIS_ROM(x)                     (((x) & 0x1) << 12)
+#define   G_000030_BIOS_DIS_ROM(x)                     (((x) >> 12) & 0x1)
+#define   C_000030_BIOS_DIS_ROM                        0xFFFFEFFF
+#define   S_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) & 0x1) << 13)
+#define   G_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) >> 13) & 0x1)
+#define   C_000030_BUS_PCI_READ_RETRY_EN               0xFFFFDFFF
+#define   S_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) & 0x1) << 14)
+#define   G_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) >> 14) & 0x1)
+#define   C_000030_BUS_AGP_AD_STEPPING_EN              0xFFFFBFFF
+#define   S_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) & 0x1) << 15)
+#define   G_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) >> 15) & 0x1)
+#define   C_000030_BUS_PCI_WRT_RETRY_EN                0xFFFF7FFF
+#define   S_000030_BUS_RETRY_WS(x)                     (((x) & 0xF) << 16)
+#define   G_000030_BUS_RETRY_WS(x)                     (((x) >> 16) & 0xF)
+#define   C_000030_BUS_RETRY_WS                        0xFFF0FFFF
+#define   S_000030_BUS_MSTR_RD_MULT(x)                 (((x) & 0x1) << 20)
+#define   G_000030_BUS_MSTR_RD_MULT(x)                 (((x) >> 20) & 0x1)
+#define   C_000030_BUS_MSTR_RD_MULT                    0xFFEFFFFF
+#define   S_000030_BUS_MSTR_RD_LINE(x)                 (((x) & 0x1) << 21)
+#define   G_000030_BUS_MSTR_RD_LINE(x)                 (((x) >> 21) & 0x1)
+#define   C_000030_BUS_MSTR_RD_LINE                    0xFFDFFFFF
+#define   S_000030_BUS_SUSPEND(x)                      (((x) & 0x1) << 22)
+#define   G_000030_BUS_SUSPEND(x)                      (((x) >> 22) & 0x1)
+#define   C_000030_BUS_SUSPEND                         0xFFBFFFFF
+#define   S_000030_LAT_16X(x)                          (((x) & 0x1) << 23)
+#define   G_000030_LAT_16X(x)                          (((x) >> 23) & 0x1)
+#define   C_000030_LAT_16X                             0xFF7FFFFF
+#define   S_000030_BUS_RD_DISCARD_EN(x)                (((x) & 0x1) << 24)
+#define   G_000030_BUS_RD_DISCARD_EN(x)                (((x) >> 24) & 0x1)
+#define   C_000030_BUS_RD_DISCARD_EN                   0xFEFFFFFF
+#define   S_000030_ENFRCWRDY(x)                        (((x) & 0x1) << 25)
+#define   G_000030_ENFRCWRDY(x)                        (((x) >> 25) & 0x1)
+#define   C_000030_ENFRCWRDY                           0xFDFFFFFF
+#define   S_000030_BUS_MSTR_WS(x)                      (((x) & 0x1) << 26)
+#define   G_000030_BUS_MSTR_WS(x)                      (((x) >> 26) & 0x1)
+#define   C_000030_BUS_MSTR_WS                         0xFBFFFFFF
+#define   S_000030_BUS_PARKING_DIS(x)                  (((x) & 0x1) << 27)
+#define   G_000030_BUS_PARKING_DIS(x)                  (((x) >> 27) & 0x1)
+#define   C_000030_BUS_PARKING_DIS                     0xF7FFFFFF
+#define   S_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) & 0x1) << 28)
+#define   G_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) >> 28) & 0x1)
+#define   C_000030_BUS_MSTR_DISCONNECT_EN              0xEFFFFFFF
+#define   S_000030_SERR_EN(x)                          (((x) & 0x1) << 29)
+#define   G_000030_SERR_EN(x)                          (((x) >> 29) & 0x1)
+#define   C_000030_SERR_EN                             0xDFFFFFFF
+#define   S_000030_BUS_READ_BURST(x)                   (((x) & 0x1) << 30)
+#define   G_000030_BUS_READ_BURST(x)                   (((x) >> 30) & 0x1)
+#define   C_000030_BUS_READ_BURST                      0xBFFFFFFF
+#define   S_000030_BUS_RDY_READ_DLY(x)                 (((x) & 0x1) << 31)
+#define   G_000030_BUS_RDY_READ_DLY(x)                 (((x) >> 31) & 0x1)
+#define   C_000030_BUS_RDY_READ_DLY                    0x7FFFFFFF
 #define R_000040_GEN_INT_CNTL                        0x000040
 #define   S_000040_CRTC_VBLANK(x)                      (((x) & 0x1) << 0)
 #define   G_000040_CRTC_VBLANK(x)                      (((x) >> 0) & 0x1)
 #define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
 #define   C_00000D_FORCE_RB                            0xEFFFFFFF
 
+/* PLL regs */
+#define SCLK_CNTL                                      0xd
+#define   FORCE_HDP                                    (1 << 17)
+#define CLK_PWRMGT_CNTL                                0x14
+#define   GLOBAL_PMAN_EN                               (1 << 10)
+#define   DISP_PM                                      (1 << 20)
+#define PLL_PWRMGT_CNTL                                0x15
+#define   MPLL_TURNOFF                                 (1 << 0)
+#define   SPLL_TURNOFF                                 (1 << 1)
+#define   PPLL_TURNOFF                                 (1 << 2)
+#define   P2PLL_TURNOFF                                (1 << 3)
+#define   TVPLL_TURNOFF                                (1 << 4)
+#define   MOBILE_SU                                    (1 << 16)
+#define   SU_SCLK_USE_BCLK                             (1 << 17)
+#define SCLK_CNTL2                                     0x1e
+#define   REDUCED_SPEED_SCLK_MODE                      (1 << 16)
+#define   REDUCED_SPEED_SCLK_SEL(x)                    ((x) << 17)
+#define MCLK_MISC                                      0x1f
+#define   EN_MCLK_TRISTATE_IN_SUSPEND                  (1 << 18)
+#define SCLK_MORE_CNTL                                 0x35
+#define   REDUCED_SPEED_SCLK_EN                        (1 << 16)
+#define   IO_CG_VOLTAGE_DROP                           (1 << 17)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 20)
+#define   VOLTAGE_DROP_SYNC                            (1 << 19)
+
+/* mmreg */
+#define DISP_PWR_MAN                                   0xd08
+#define   DISP_D3_GRPH_RST                             (1 << 18)
+#define   DISP_D3_SUBPIC_RST                           (1 << 19)
+#define   DISP_D3_OV0_RST                              (1 << 20)
+#define   DISP_D1D2_GRPH_RST                           (1 << 21)
+#define   DISP_D1D2_SUBPIC_RST                         (1 << 22)
+#define   DISP_D1D2_OV0_RST                            (1 << 23)
+#define   DISP_DVO_ENABLE_RST                          (1 << 24)
+#define   TV_ENABLE_RST                                (1 << 25)
+#define   AUTO_PWRUP_EN                                (1 << 26)
 
 #endif
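
A note on the register macros added above: each field gets an S_ (set: shift a value into place), G_ (get: extract the field), and C_ (clear: AND-mask with the field zeroed) macro. A minimal read-modify-write sketch, assuming the driver's usual RREG32/WREG32 accessors:

	/* Sketch only: pulse SOFT_RESET_HDP while leaving the other
	 * RBBM_SOFT_RESET fields untouched. */
	u32 tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);
	tmp &= C_0000F0_SOFT_RESET_HDP;                /* clear the field */
	WREG32(R_0000F0_RBBM_SOFT_RESET, tmp | S_0000F0_SOFT_RESET_HDP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);              /* read back to post the write */
	WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);         /* release the reset bit */
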
index a5ff807..b2f9efe 100644 (file)
@@ -27,8 +27,9 @@
  */
 #include <linux/seq_file.h>
 #include <linux/slab.h>
-#include "drmP.h"
-#include "drm.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "radeon_asic.h"
@@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
        u32 tmp;
        int r;
 
+       WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+       WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+       WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+       WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev)
 {
        uint32_t gb_tile_config, tmp;
 
-       r100_hdp_reset(rdev);
        if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
            (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
                /* r300,r350 */
@@ -375,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev)
                 rdev->num_gb_pipes, rdev->num_z_pipes);
 }
 
-int r300_ga_reset(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev)
 {
-       uint32_t tmp;
-       bool reinit_cp;
-       int i;
+       u32 rbbm_status;
+       int r;
 
-       reinit_cp = rdev->cp.ready;
-       rdev->cp.ready = false;
-       for (i = 0; i < rdev->usec_timeout; i++) {
-               WREG32(RADEON_CP_CSQ_MODE, 0);
-               WREG32(RADEON_CP_CSQ_CNTL, 0);
-               WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
-               (void)RREG32(RADEON_RBBM_SOFT_RESET);
-               udelay(200);
-               WREG32(RADEON_RBBM_SOFT_RESET, 0);
-               /* Wait to prevent race in RBBM_STATUS */
-               mdelay(1);
-               tmp = RREG32(RADEON_RBBM_STATUS);
-               if (tmp & ((1 << 20) | (1 << 26))) {
-                       DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
-                       /* GA still busy soft reset it */
-                       WREG32(0x429C, 0x200);
-                       WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
-                       WREG32(R300_RE_SCISSORS_TL, 0);
-                       WREG32(R300_RE_SCISSORS_BR, 0);
-                       WREG32(0x24AC, 0);
-               }
-               /* Wait to prevent race in RBBM_STATUS */
-               mdelay(1);
-               tmp = RREG32(RADEON_RBBM_STATUS);
-               if (!(tmp & ((1 << 20) | (1 << 26)))) {
-                       break;
-               }
+       rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+       if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+               r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+               return false;
        }
-       for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = RREG32(RADEON_RBBM_STATUS);
-               if (!(tmp & ((1 << 20) | (1 << 26)))) {
-                       DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
-                                tmp);
-                       if (reinit_cp) {
-                               return r100_cp_init(rdev, rdev->cp.ring_size);
-                       }
-                       return 0;
-               }
-               DRM_UDELAY(1);
+       /* force CP activities */
+       r = radeon_ring_lock(rdev, 2);
+       if (!r) {
+               /* PACKET2 NOP */
+               radeon_ring_write(rdev, 0x80000000);
+               radeon_ring_write(rdev, 0x80000000);
+               radeon_ring_unlock_commit(rdev);
        }
-       tmp = RREG32(RADEON_RBBM_STATUS);
-       DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
-       return -1;
+       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
 }
 
-int r300_gpu_reset(struct radeon_device *rdev)
+int r300_asic_reset(struct radeon_device *rdev)
 {
-       uint32_t status;
-
-       /* reset order likely matter */
-       status = RREG32(RADEON_RBBM_STATUS);
-       /* reset HDP */
-       r100_hdp_reset(rdev);
-       /* reset rb2d */
-       if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
-               r100_rb2d_reset(rdev);
-       }
-       /* reset GA */
-       if (status & ((1 << 20) | (1 << 26))) {
-               r300_ga_reset(rdev);
-       }
-       /* reset CP */
-       status = RREG32(RADEON_RBBM_STATUS);
-       if (status & (1 << 16)) {
-               r100_cp_reset(rdev);
+       struct r100_mc_save save;
+       u32 status, tmp;
+
+       r100_mc_stop(rdev, &save);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       if (!G_000E40_GUI_ACTIVE(status)) {
+               return 0;
        }
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+       /* stop CP */
+       WREG32(RADEON_CP_CSQ_CNTL, 0);
+       tmp = RREG32(RADEON_CP_RB_CNTL);
+       WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+       WREG32(RADEON_CP_RB_RPTR_WR, 0);
+       WREG32(RADEON_CP_RB_WPTR, 0);
+       WREG32(RADEON_CP_RB_CNTL, tmp);
+       /* save PCI state */
+       pci_save_state(rdev->pdev);
+       /* disable bus mastering */
+       r100_bm_disable(rdev);
+       WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+                                       S_0000F0_SOFT_RESET_GA(1));
+       RREG32(R_0000F0_RBBM_SOFT_RESET);
+       mdelay(500);
+       WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+       mdelay(1);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+       /* resetting the CP seems to be problematic; sometimes it ends up
+        * hard locking the computer, but it's necessary for a successful
+        * reset. More testing & playing is needed on R3XX/R4XX to find a
+        * reliable solution (if any).
+        */
+       WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+       RREG32(R_0000F0_RBBM_SOFT_RESET);
+       mdelay(500);
+       WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+       mdelay(1);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+       /* restore PCI & busmastering */
+       pci_restore_state(rdev->pdev);
+       r100_enable_bm(rdev);
        /* Check if GPU is idle */
-       status = RREG32(RADEON_RBBM_STATUS);
-       if (status & RADEON_RBBM_ACTIVE) {
-               DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+       if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+               dev_err(rdev->dev, "failed to reset GPU\n");
+               rdev->gpu_lockup = true;
                return -1;
        }
-       DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+       r100_mc_resume(rdev, &save);
+       dev_info(rdev->dev, "GPU reset succeed\n");
        return 0;
 }
 
-
 /*
  * r300,r350,rv350,rv380 VRAM info
  */
@@ -1316,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev)
        /* Resume clock before doing reset */
        r300_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
@@ -1344,7 +1344,6 @@ int r300_suspend(struct radeon_device *rdev)
 
 void r300_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -1387,7 +1386,7 @@ int r300_init(struct radeon_device *rdev)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
@@ -1400,8 +1399,6 @@ int r300_init(struct radeon_device *rdev)
        r300_errata(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
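
r300_gpu_is_lockup above forces CP activity with two PACKET2 NOPs and then defers to the shared r100 helpers, which report a lockup when the CP read pointer stops advancing. Those helpers are not part of this diff; the following is only a sketch of the idea, with assumed field names and timeout (uses jiffies from <linux/jiffies.h>):

	/* Illustrative sketch, not the r100.c implementation: a lockup is
	 * inferred when rptr makes no progress for longer than a timeout. */
	struct lockup_tracker {
		u32 last_rptr;
		unsigned long last_jiffies;
	};

	static bool cp_is_lockup(struct lockup_tracker *t, u32 rptr)
	{
		if (rptr != t->last_rptr) {
			/* still making progress, restart the clock */
			t->last_rptr = rptr;
			t->last_jiffies = jiffies;
			return false;
		}
		/* no progress for ten seconds: assume the GPU is locked up */
		return time_after(jiffies, t->last_jiffies + 10 * HZ);
	}
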
index 4c73114..968a333 100644 (file)
 #define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
 #define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
 #define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
-
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
 
 #define R_00000D_SCLK_CNTL                           0x00000D
 #define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
index c2bda4a..4415a5e 100644 (file)
 #include "r420d.h"
 #include "r420_reg_safe.h"
 
+void r420_pm_init_profile(struct radeon_device *rdev)
+{
+       /* default */
+       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+       /* low sh */
+       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+       /* high sh */
+       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+       /* low mh */
+       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+       /* high mh */
+       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
 static void r420_set_reg_safe(struct radeon_device *rdev)
 {
        rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
@@ -241,7 +270,7 @@ int r420_resume(struct radeon_device *rdev)
        /* Resume clock before doing reset */
        r420_clock_resume(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
@@ -274,7 +303,6 @@ int r420_suspend(struct radeon_device *rdev)
 
 void r420_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -322,7 +350,7 @@ int r420_init(struct radeon_device *rdev)
                }
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
@@ -334,8 +362,6 @@ int r420_init(struct radeon_device *rdev)
 
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
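
r420_pm_init_profile above only fills the profile table; each entry pairs a power-state index with a clock-mode index for the dpms-on and dpms-off cases. A hedged sketch of a consumer (field names are taken from the code above, the helper itself is an assumption, not a function in this commit):

	/* Sketch: resolve the requested state/mode from a profile entry. */
	static void pick_profile_state(struct radeon_device *rdev,
				       int profile, bool dpms_on)
	{
		rdev->pm.requested_power_state_index = dpms_on ?
			rdev->pm.profiles[profile].dpms_on_ps_idx :
			rdev->pm.profiles[profile].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index = dpms_on ?
			rdev->pm.profiles[profile].dpms_on_cm_idx :
			rdev->pm.profiles[profile].dpms_off_cm_idx;
	}
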
index 0cf2ad2..93c9a2b 100644 (file)
 
 #define AVIVO_D1CRTC_CONTROL                                    0x6080
 #       define AVIVO_CRTC_EN                                    (1 << 0)
+#       define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE             (1 << 24)
 #define AVIVO_D1CRTC_BLANK_CONTROL                              0x6084
 #define AVIVO_D1CRTC_INTERLACE_CONTROL                          0x6088
 #define AVIVO_D1CRTC_INTERLACE_STATUS                           0x608c
+#define AVIVO_D1CRTC_STATUS_POSITION                            0x60a0
 #define AVIVO_D1CRTC_FRAME_COUNT                                0x60a4
 #define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
 
 #define AVIVO_D2CRTC_BLANK_CONTROL                              0x6884
 #define AVIVO_D2CRTC_INTERLACE_CONTROL                          0x6888
 #define AVIVO_D2CRTC_INTERLACE_STATUS                           0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION                            0x68a0
 #define AVIVO_D2CRTC_FRAME_COUNT                                0x68a4
 #define AVIVO_D2CRTC_STEREO_CONTROL                             0x68c4
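
The new AVIVO_CRTC_DISP_READ_REQUEST_DISABLE bit and the D1/D2 STATUS_POSITION registers support stopping a CRTC's memory requests and sampling the current scanout position. A hedged sketch (the STATUS_POSITION bit layout here is an assumption for illustration only):

	/* Sketch: stop display reads on CRTC1, then sample its position. */
	u32 tmp = RREG32(AVIVO_D1CRTC_CONTROL);
	u32 pos;
	WREG32(AVIVO_D1CRTC_CONTROL, tmp | AVIVO_CRTC_DISP_READ_REQUEST_DISABLE);
	pos = RREG32(AVIVO_D1CRTC_STATUS_POSITION); /* assumed: vertical position in low bits */
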
 
index 3c44b8d..34330df 100644 (file)
@@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
 {
        unsigned pipe_select_current, gb_pipe_select, tmp;
 
-       r100_hdp_reset(rdev);
        rv515_vga_render_disable(rdev);
        /*
         * DST_PIPE_CONFIG              0x170C
@@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev)
        /* Resume clock before doing reset */
        rv515_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
@@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev)
                return -EINVAL;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
@@ -262,8 +261,6 @@ int r520_init(struct radeon_device *rdev)
        }
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
index 8f3454e..44e96a2 100644 (file)
@@ -44,6 +44,9 @@
 #define R700_PFP_UCODE_SIZE 848
 #define R700_PM4_UCODE_SIZE 1360
 #define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
 MODULE_FIRMWARE("radeon/RV710_me.bin");
 MODULE_FIRMWARE("radeon/R600_rlc.bin");
 MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
@@ -75,6 +90,401 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+       int i;
+
+       rdev->pm.dynpm_can_upclock = true;
+       rdev->pm.dynpm_can_downclock = true;
+
+       /* power state array is low to high, default is first */
+       if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+               int min_power_state_index = 0;
+
+               if (rdev->pm.num_power_states > 2)
+                       min_power_state_index = 1;
+
+               switch (rdev->pm.dynpm_planned_action) {
+               case DYNPM_ACTION_MINIMUM:
+                       rdev->pm.requested_power_state_index = min_power_state_index;
+                       rdev->pm.requested_clock_mode_index = 0;
+                       rdev->pm.dynpm_can_downclock = false;
+                       break;
+               case DYNPM_ACTION_DOWNCLOCK:
+                       if (rdev->pm.current_power_state_index == min_power_state_index) {
+                               rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+                               rdev->pm.dynpm_can_downclock = false;
+                       } else {
+                               if (rdev->pm.active_crtc_count > 1) {
+                                       for (i = 0; i < rdev->pm.num_power_states; i++) {
+                                               if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+                                                       continue;
+                                               else if (i >= rdev->pm.current_power_state_index) {
+                                                       rdev->pm.requested_power_state_index =
+                                                               rdev->pm.current_power_state_index;
+                                                       break;
+                                               } else {
+                                                       rdev->pm.requested_power_state_index = i;
+                                                       break;
+                                               }
+                                       }
+                               } else
+                                       rdev->pm.requested_power_state_index =
+                                               rdev->pm.current_power_state_index - 1;
+                       }
+                       rdev->pm.requested_clock_mode_index = 0;
+                       /* don't use the power state if crtcs are active and the state has the no-display flag set */
+                       if ((rdev->pm.active_crtc_count > 0) &&
+                           (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                            clock_info[rdev->pm.requested_clock_mode_index].flags &
+                            RADEON_PM_MODE_NO_DISPLAY)) {
+                               rdev->pm.requested_power_state_index++;
+                       }
+                       break;
+               case DYNPM_ACTION_UPCLOCK:
+                       if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+                               rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+                               rdev->pm.dynpm_can_upclock = false;
+                       } else {
+                               if (rdev->pm.active_crtc_count > 1) {
+                                       for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+                                               if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+                                                       continue;
+                                               else if (i <= rdev->pm.current_power_state_index) {
+                                                       rdev->pm.requested_power_state_index =
+                                                               rdev->pm.current_power_state_index;
+                                                       break;
+                                               } else {
+                                                       rdev->pm.requested_power_state_index = i;
+                                                       break;
+                                               }
+                                       }
+                               } else
+                                       rdev->pm.requested_power_state_index =
+                                               rdev->pm.current_power_state_index + 1;
+                       }
+                       rdev->pm.requested_clock_mode_index = 0;
+                       break;
+               case DYNPM_ACTION_DEFAULT:
+                       rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+                       rdev->pm.requested_clock_mode_index = 0;
+                       rdev->pm.dynpm_can_upclock = false;
+                       break;
+               case DYNPM_ACTION_NONE:
+               default:
+                       DRM_ERROR("Requested mode for not defined action\n");
+                       return;
+               }
+       } else {
+               /* XXX select a power state based on AC/DC, single/dualhead, etc. */
+               /* for now just select the first power state and switch between clock modes */
+               /* power state array is low to high, default is first (0) */
+               if (rdev->pm.active_crtc_count > 1) {
+                       rdev->pm.requested_power_state_index = -1;
+                       /* start at 1 as we don't want the default mode */
+                       for (i = 1; i < rdev->pm.num_power_states; i++) {
+                               if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+                                       continue;
+                               else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+                                        (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+                                       rdev->pm.requested_power_state_index = i;
+                                       break;
+                               }
+                       }
+                       /* if nothing selected, grab the default state. */
+                       if (rdev->pm.requested_power_state_index == -1)
+                               rdev->pm.requested_power_state_index = 0;
+               } else
+                       rdev->pm.requested_power_state_index = 1;
+
+               switch (rdev->pm.dynpm_planned_action) {
+               case DYNPM_ACTION_MINIMUM:
+                       rdev->pm.requested_clock_mode_index = 0;
+                       rdev->pm.dynpm_can_downclock = false;
+                       break;
+               case DYNPM_ACTION_DOWNCLOCK:
+                       if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+                               if (rdev->pm.current_clock_mode_index == 0) {
+                                       rdev->pm.requested_clock_mode_index = 0;
+                                       rdev->pm.dynpm_can_downclock = false;
+                               } else
+                                       rdev->pm.requested_clock_mode_index =
+                                               rdev->pm.current_clock_mode_index - 1;
+                       } else {
+                               rdev->pm.requested_clock_mode_index = 0;
+                               rdev->pm.dynpm_can_downclock = false;
+                       }
+                       /* don't use the power state if crtcs are active and the state has the no-display flag set */
+                       if ((rdev->pm.active_crtc_count > 0) &&
+                           (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                            clock_info[rdev->pm.requested_clock_mode_index].flags &
+                            RADEON_PM_MODE_NO_DISPLAY)) {
+                               rdev->pm.requested_clock_mode_index++;
+                       }
+                       break;
+               case DYNPM_ACTION_UPCLOCK:
+                       if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+                               if (rdev->pm.current_clock_mode_index ==
+                                   (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+                                       rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+                                       rdev->pm.dynpm_can_upclock = false;
+                               } else
+                                       rdev->pm.requested_clock_mode_index =
+                                               rdev->pm.current_clock_mode_index + 1;
+                       } else {
+                               rdev->pm.requested_clock_mode_index =
+                                       rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+                               rdev->pm.dynpm_can_upclock = false;
+                       }
+                       break;
+               case DYNPM_ACTION_DEFAULT:
+                       rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+                       rdev->pm.requested_clock_mode_index = 0;
+                       rdev->pm.dynpm_can_upclock = false;
+                       break;
+               case DYNPM_ACTION_NONE:
+               default:
+                       DRM_ERROR("Requested mode for not defined action\n");
+                       return;
+               }
+       }
+
+       DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+                 rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                 clock_info[rdev->pm.requested_clock_mode_index].sclk,
+                 rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                 clock_info[rdev->pm.requested_clock_mode_index].mclk,
+                 rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                 pcie_lanes);
+}
+
+static int r600_pm_get_type_index(struct radeon_device *rdev,
+                                 enum radeon_pm_state_type ps_type,
+                                 int instance)
+{
+       int i;
+       int found_instance = -1;
+
+       for (i = 0; i < rdev->pm.num_power_states; i++) {
+               if (rdev->pm.power_state[i].type == ps_type) {
+                       found_instance++;
+                       if (found_instance == instance)
+                               return i;
+               }
+       }
+       /* return default if no match */
+       return rdev->pm.default_power_state_index;
+}
+
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+       if (rdev->pm.num_power_states == 2) {
+               /* default */
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+               /* low sh */
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+               /* high sh */
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+               /* low mh */
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+               /* high mh */
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+       } else if (rdev->pm.num_power_states == 3) {
+               /* default */
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+               /* low sh */
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+               /* high sh */
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+               /* low mh */
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+               /* high mh */
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+       } else {
+               /* default */
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+               /* low sh */
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+               /* high sh */
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+               /* low mh */
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+               /* high mh */
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+       }
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+       if (rdev->family == CHIP_R600) {
+               /* XXX */
+               /* default */
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+               /* low sh */
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+               /* high sh */
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+               /* low mh */
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+               /* high mh */
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+               rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+       } else {
+               if (rdev->pm.num_power_states < 4) {
+                       /* default */
+                       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+                       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+                       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+                       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+                       /* low sh */
+                       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+                       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+                       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+                       rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+                       /* high sh */
+                       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+                       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+                       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+                       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+                       /* low mh */
+                       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+                       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+                       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+                       rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+                       /* high mh */
+                       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+                       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+                       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+                       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+               } else {
+                       /* default */
+                       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+                       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+                       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+                       rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+                       /* low sh */
+                       if (rdev->flags & RADEON_IS_MOBILITY) {
+                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+                                       r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+                                       r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+                       } else {
+                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+                                       r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+                                       r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+                               rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+                       }
+                       /* high sh */
+                       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
+                               r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+                       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
+                               r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+                       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+                       rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+                       /* low mh */
+                       if (rdev->flags & RADEON_IS_MOBILITY) {
+                               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+                                       r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+                               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+                                       r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+                               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+                               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
+                       } else {
+                               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+                                       r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+                               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+                                       r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+                               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+                               rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+                       }
+                       /* high mh */
+                       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
+                               r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+                       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
+                               r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+                       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+                       rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+               }
+       }
+}
+
+void r600_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+       if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
+               return false;
+       else
+               return true;
+}
 
 /* hpd for digital panel detect/disconnect */
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
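
r600_pm_get_type_index above returns the instance-th power state of the given type, falling back to the default state index when no match exists. The profile setup in this diff uses it like so (this mirrors the calls already shown above):

	/* e.g. the second (instance 1) performance state for "high mh": */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
		r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
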
@@ -714,11 +1124,6 @@ int r600_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
        r600_vram_gtt_location(rdev, &rdev->mc);
 
        if (rdev->flags & RADEON_IS_IGP)
@@ -750,7 +1155,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
                        S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
                        S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
                        S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
-       u32 srbm_reset = 0;
        u32 tmp;
 
        dev_info(rdev->dev, "GPU softreset \n");
@@ -765,7 +1169,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
                dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
        }
        /* Disable CP parsing/prefetching */
-       WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
+       WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
        /* Check if any of the rendering block is busy and reset it */
        if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
            (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -784,72 +1188,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
                        S_008020_SOFT_RESET_VGT(1);
                dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(R_008020_GRBM_SOFT_RESET, tmp);
-               (void)RREG32(R_008020_GRBM_SOFT_RESET);
-               udelay(50);
+               RREG32(R_008020_GRBM_SOFT_RESET);
+               mdelay(15);
                WREG32(R_008020_GRBM_SOFT_RESET, 0);
-               (void)RREG32(R_008020_GRBM_SOFT_RESET);
        }
        /* Reset CP (we always reset CP) */
        tmp = S_008020_SOFT_RESET_CP(1);
        dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(R_008020_GRBM_SOFT_RESET, tmp);
-       (void)RREG32(R_008020_GRBM_SOFT_RESET);
-       udelay(50);
+       RREG32(R_008020_GRBM_SOFT_RESET);
+       mdelay(15);
        WREG32(R_008020_GRBM_SOFT_RESET, 0);
-       (void)RREG32(R_008020_GRBM_SOFT_RESET);
-       /* Reset others GPU block if necessary */
-       if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
-       if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
-       if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_IH(1);
-       if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
-       if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-       if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-       if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-       if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-       if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-       if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
-       if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
-       if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-               srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
-       dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
-       WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
-       (void)RREG32(R_000E60_SRBM_SOFT_RESET);
-       udelay(50);
-       WREG32(R_000E60_SRBM_SOFT_RESET, 0);
-       (void)RREG32(R_000E60_SRBM_SOFT_RESET);
-       WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
-       (void)RREG32(R_000E60_SRBM_SOFT_RESET);
-       udelay(50);
-       WREG32(R_000E60_SRBM_SOFT_RESET, 0);
-       (void)RREG32(R_000E60_SRBM_SOFT_RESET);
        /* Wait a little for things to settle down */
-       udelay(50);
+       mdelay(1);
        dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
                RREG32(R_008010_GRBM_STATUS));
        dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
                RREG32(R_008014_GRBM_STATUS2));
        dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
                RREG32(R_000E50_SRBM_STATUS));
-       /* After reset we need to reinit the asic as GPU often endup in an
-        * incoherent state.
-        */
-       atom_asic_init(rdev->mode_info.atom_context);
        rv515_mc_resume(rdev, &save);
        return 0;
 }
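
The hunk above replaces the open-coded udelay(50) toggles with a single read-back plus mdelay(15) per reset pulse. A minimal sketch of the pulse pattern, using the register names from this file (the helper itself is hypothetical):

    /* Sketch of the GRBM soft-reset pulse used above: assert the reset
     * bits, read back to flush the posted write, hold the block in
     * reset for a while, then release it. */
    static void grbm_reset_pulse(struct radeon_device *rdev, u32 reset_bits)
    {
            WREG32(R_008020_GRBM_SOFT_RESET, reset_bits);
            RREG32(R_008020_GRBM_SOFT_RESET);     /* flush posted write */
            mdelay(15);                           /* hold in reset */
            WREG32(R_008020_GRBM_SOFT_RESET, 0);  /* release */
    }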
 
-int r600_gpu_reset(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev)
+{
+       u32 srbm_status;
+       u32 grbm_status;
+       u32 grbm_status2;
+       int r;
+
+       srbm_status = RREG32(R_000E50_SRBM_STATUS);
+       grbm_status = RREG32(R_008010_GRBM_STATUS);
+       grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+       if (!G_008010_GUI_ACTIVE(grbm_status)) {
+               r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+               return false;
+       }
+       /* force CP activities */
+       r = radeon_ring_lock(rdev, 2);
+       if (!r) {
+               /* PACKET2 NOP */
+               radeon_ring_write(rdev, 0x80000000);
+               radeon_ring_write(rdev, 0x80000000);
+               radeon_ring_unlock_commit(rdev);
+       }
+       rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
 {
        return r600_gpu_soft_reset(rdev);
 }
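
r600_gpu_is_lockup() above reuses the r100 CP lockup tracker: it forces a trivial two-dword NOP submission, samples the CP read pointer, and lets the shared helper decide. A hedged sketch of what that rptr comparison amounts to, assuming the struct r100_gpu_lockup fields declared later in this diff (the real helper's timeout bookkeeping may differ, and the 10 s threshold is illustrative):

    /* Sketch: declare a lockup only when the CP read pointer has not
     * advanced for a long time. */
    static bool cp_is_lockup_sketch(struct r100_gpu_lockup *lockup,
                                    struct radeon_cp *cp)
    {
            if (cp->rptr != lockup->last_cp_rptr) {
                    lockup->last_cp_rptr = cp->rptr;  /* progress seen */
                    lockup->last_jiffies = jiffies;
                    return false;
            }
            return time_after(jiffies, lockup->last_jiffies + 10 * HZ);
    }
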
@@ -1467,10 +1855,31 @@ int r600_init_microcode(struct radeon_device *rdev)
                chip_name = "RV710";
                rlc_chip_name = "R700";
                break;
+       case CHIP_CEDAR:
+               chip_name = "CEDAR";
+               rlc_chip_name = "CEDAR";
+               break;
+       case CHIP_REDWOOD:
+               chip_name = "REDWOOD";
+               rlc_chip_name = "REDWOOD";
+               break;
+       case CHIP_JUNIPER:
+               chip_name = "JUNIPER";
+               rlc_chip_name = "JUNIPER";
+               break;
+       case CHIP_CYPRESS:
+       case CHIP_HEMLOCK:
+               chip_name = "CYPRESS";
+               rlc_chip_name = "CYPRESS";
+               break;
        default: BUG();
        }
 
-       if (rdev->family >= CHIP_RV770) {
+       if (rdev->family >= CHIP_CEDAR) {
+               pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+               me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+               rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+       } else if (rdev->family >= CHIP_RV770) {
                pfp_req_size = R700_PFP_UCODE_SIZE * 4;
                me_req_size = R700_PM4_UCODE_SIZE * 4;
                rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1584,12 +1993,15 @@ int r600_cp_start(struct radeon_device *rdev)
        }
        radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
        radeon_ring_write(rdev, 0x1);
-       if (rdev->family < CHIP_RV770) {
-               radeon_ring_write(rdev, 0x3);
-               radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
-       } else {
+       if (rdev->family >= CHIP_CEDAR) {
+               radeon_ring_write(rdev, 0x0);
+               radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+       } else if (rdev->family >= CHIP_RV770) {
                radeon_ring_write(rdev, 0x0);
                radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+       } else {
+               radeon_ring_write(rdev, 0x3);
+               radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
        }
        radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(rdev, 0);
@@ -2051,8 +2463,6 @@ int r600_init(struct radeon_device *rdev)
        r = radeon_clocks_init(rdev);
        if (r)
                return r;
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@ -2117,7 +2527,6 @@ int r600_init(struct radeon_device *rdev)
 
 void r600_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r600_cp_fini(rdev);
@@ -2290,10 +2699,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
        }
 }
 
-static void r600_rlc_stop(struct radeon_device *rdev)
+void r600_rlc_stop(struct radeon_device *rdev)
 {
 
-       if (rdev->family >= CHIP_RV770) {
+       if ((rdev->family >= CHIP_RV770) &&
+           (rdev->family <= CHIP_RV740)) {
                /* r7xx asics need to soft reset RLC before halting */
                WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
                RREG32(SRBM_SOFT_RESET);
@@ -2330,7 +2740,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
        WREG32(RLC_UCODE_CNTL, 0);
 
        fw_data = (const __be32 *)rdev->rlc_fw->data;
-       if (rdev->family >= CHIP_RV770) {
+       if (rdev->family >= CHIP_CEDAR) {
+               for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+                       WREG32(RLC_UCODE_ADDR, i);
+                       WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+               }
+       } else if (rdev->family >= CHIP_RV770) {
                for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
                        WREG32(RLC_UCODE_ADDR, i);
                        WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2360,7 +2775,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
        rdev->ih.enabled = true;
 }
 
-static void r600_disable_interrupts(struct radeon_device *rdev)
+void r600_disable_interrupts(struct radeon_device *rdev)
 {
        u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
        u32 ih_cntl = RREG32(IH_CNTL);
@@ -2475,7 +2890,10 @@ int r600_irq_init(struct radeon_device *rdev)
        WREG32(IH_CNTL, ih_cntl);
 
        /* force the active interrupt state to all disabled */
-       r600_disable_interrupt_state(rdev);
+       if (rdev->family >= CHIP_CEDAR)
+               evergreen_disable_interrupt_state(rdev);
+       else
+               r600_disable_interrupt_state(rdev);
 
        /* enable irqs */
        r600_enable_interrupts(rdev);
@@ -2485,7 +2903,7 @@ int r600_irq_init(struct radeon_device *rdev)
 
 void r600_irq_suspend(struct radeon_device *rdev)
 {
-       r600_disable_interrupts(rdev);
+       r600_irq_disable(rdev);
        r600_rlc_stop(rdev);
 }
 
@@ -2500,6 +2918,8 @@ int r600_irq_set(struct radeon_device *rdev)
        u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
        u32 mode_int = 0;
        u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+       u32 grbm_int_cntl = 0;
+       u32 hdmi1, hdmi2;
 
        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@@ -2513,7 +2933,9 @@ int r600_irq_set(struct radeon_device *rdev)
                return 0;
        }
 
+       hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
        if (ASIC_IS_DCE3(rdev)) {
+               hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
                hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
                hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
                hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2523,6 +2945,7 @@ int r600_irq_set(struct radeon_device *rdev)
                        hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
                }
        } else {
+               hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
                hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
                hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
                hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2564,10 +2987,25 @@ int r600_irq_set(struct radeon_device *rdev)
                DRM_DEBUG("r600_irq_set: hpd 6\n");
                hpd6 |= DC_HPDx_INT_EN;
        }
+       if (rdev->irq.hdmi[0]) {
+               DRM_DEBUG("r600_irq_set: hdmi 1\n");
+               hdmi1 |= R600_HDMI_INT_EN;
+       }
+       if (rdev->irq.hdmi[1]) {
+               DRM_DEBUG("r600_irq_set: hdmi 2\n");
+               hdmi2 |= R600_HDMI_INT_EN;
+       }
+       if (rdev->irq.gui_idle) {
+               DRM_DEBUG("gui idle\n");
+               grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+       }
 
        WREG32(CP_INT_CNTL, cp_int_cntl);
        WREG32(DxMODE_INT_MASK, mode_int);
+       WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+       WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
        if (ASIC_IS_DCE3(rdev)) {
+               WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
                WREG32(DC_HPD1_INT_CONTROL, hpd1);
                WREG32(DC_HPD2_INT_CONTROL, hpd2);
                WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2577,6 +3015,7 @@ int r600_irq_set(struct radeon_device *rdev)
                        WREG32(DC_HPD6_INT_CONTROL, hpd6);
                }
        } else {
+               WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
                WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
                WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
                WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
@@ -2660,6 +3099,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
                        WREG32(DC_HPD6_INT_CONTROL, tmp);
                }
        }
+       if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+               WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+       }
+       if (ASIC_IS_DCE3(rdev)) {
+               if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+                       WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+               }
+       } else {
+               if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+                       WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+               }
+       }
 }
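
The HDMI acknowledgements above rely on WREG32_P(), the driver's masked read-modify-write helper: WREG32_P(reg, val, mask) keeps the bits selected by mask and replaces the rest with val, so passing ~R600_HDMI_INT_ACK as the mask touches only the ACK bit. A functional sketch (the radeon macro itself is a do/while block):

    /* Sketch of WREG32_P(reg, val, mask): read, keep the masked bits,
     * merge in the new value, write back. */
    static void wreg32_p_sketch(struct radeon_device *rdev,
                                u32 reg, u32 val, u32 mask)
    {
            u32 tmp = RREG32(reg);

            tmp &= mask;           /* preserve these bits */
            tmp |= val & ~mask;    /* update the others */
            WREG32(reg, tmp);
    }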
 
 void r600_irq_disable(struct radeon_device *rdev)
@@ -2713,6 +3164,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
  *     19         1  FP Hot plug detection B
  *     19         2  DAC A auto-detection
  *     19         3  DAC B auto-detection
+ *     21         4  HDMI block A
+ *     21         5  HDMI block B
  *    176         -  CP_INT RB
  *    177         -  CP_INT IB1
  *    178         -  CP_INT IB2
@@ -2852,6 +3305,10 @@ restart_ih:
                                break;
                        }
                        break;
+               case 21: /* HDMI */
+                       DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
+                       r600_audio_schedule_polling(rdev);
+                       break;
                case 176: /* CP_INT in ring buffer */
                case 177: /* CP_INT in IB1 */
                case 178: /* CP_INT in IB2 */
@@ -2861,6 +3318,11 @@ restart_ih:
                case 181: /* CP EOP event */
                        DRM_DEBUG("IH: CP EOP\n");
                        break;
+               case 233: /* GUI IDLE */
+                       DRM_DEBUG("IH: GUI idle\n");
+                       rdev->pm.gui_idle = true;
+                       wake_up(&rdev->irq.idle_queue);
+                       break;
                default:
                        DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                        break;
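
The new source-233 handler pairs with the gui_idle/idle_queue fields added to struct radeon_irq below: a caller arms the interrupt, sleeps on the queue, and the handler wakes it once the GUI goes idle. A hedged sketch of that consumer side (the 100 ms timeout is illustrative):

    /* Sketch: wait for the GUI-idle interrupt raised by the handler above. */
    rdev->pm.gui_idle = false;
    rdev->irq.gui_idle = true;
    radeon_irq_set(rdev);                 /* enables GUI_IDLE_INT_ENABLE */
    wait_event_timeout(rdev->irq.idle_queue, rdev->pm.gui_idle,
                       msecs_to_jiffies(100));
    rdev->irq.gui_idle = false;
    radeon_irq_set(rdev);                 /* disarm again */
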
index 1d89805..2b26553 100644
@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
 /*
  * current number of channels
  */
-static int r600_audio_channels(struct radeon_device *rdev)
+int r600_audio_channels(struct radeon_device *rdev)
 {
        return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
 }
@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev)
 /*
  * current bits per sample
  */
-static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+int r600_audio_bits_per_sample(struct radeon_device *rdev)
 {
        uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
        switch (value) {
@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev)
 /*
  * current sampling rate in HZ
  */
-static int r600_audio_rate(struct radeon_device *rdev)
+int r600_audio_rate(struct radeon_device *rdev)
 {
        uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
        uint32_t result;
@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev)
 /*
  * iec 60958 status bits
  */
-static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+uint8_t r600_audio_status_bits(struct radeon_device *rdev)
 {
        return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
 }
@@ -98,11 +98,20 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
 /*
  * iec 60958 category code
  */
-static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+uint8_t r600_audio_category_code(struct radeon_device *rdev)
 {
        return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
 }
 
+/*
+ * schedule next audio update event
+ */
+void r600_audio_schedule_polling(struct radeon_device *rdev)
+{
+       mod_timer(&rdev->audio_timer,
+               jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+}
+
 /*
  * update all hdmi interfaces with current audio parameters
  */
@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param)
        uint8_t category_code = r600_audio_category_code(rdev);
 
        struct drm_encoder *encoder;
-       int changes = 0;
+       int changes = 0, still_going = 0;
 
        changes |= channels != rdev->audio_channels;
        changes |= rate != rdev->audio_rate;
@@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param)
        }
 
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+               still_going |= radeon_encoder->audio_polling_active;
                if (changes || r600_hdmi_buffer_status_changed(encoder))
-                       r600_hdmi_update_audio_settings(
-                               encoder, channels,
-                               rate, bps, status_bits,
-                               category_code);
+                       r600_hdmi_update_audio_settings(encoder);
        }
 
-       mod_timer(&rdev->audio_timer,
-               jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+       if (still_going)
+               r600_audio_schedule_polling(rdev);
 }
 
 /*
@@ -176,9 +183,34 @@ int r600_audio_init(struct radeon_device *rdev)
                r600_audio_update_hdmi,
                (unsigned long)rdev);
 
+       return 0;
+}
+
+/*
+ * enable the polling timer, to check for status changes
+ */
+void r600_audio_enable_polling(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+       DRM_DEBUG("r600_audio_enable_polling: %d\n", radeon_encoder->audio_polling_active);
+       if (radeon_encoder->audio_polling_active)
+               return;
+
+       radeon_encoder->audio_polling_active = 1;
        mod_timer(&rdev->audio_timer, jiffies + 1);
+}
 
-       return 0;
+/*
+ * disable the polling timer, so we get no more status updates
+ */
+void r600_audio_disable_polling(struct drm_encoder *encoder)
+{
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       DRM_DEBUG("r600_audio_disable_polling: %d\n", radeon_encoder->audio_polling_active);
+       radeon_encoder->audio_polling_active = 0;
 }
 
 /*
index f6c6c77..d13622a 100644
@@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev)
        u32 packet2s[16];
        int num_packet2s = 0;
 
+       /* don't reinitialize blit */
+       if (rdev->r600_blit.shader_obj)
+               return 0;
        mutex_init(&rdev->r600_blit.mutex);
        rdev->r600_blit.state_offset = 0;
 
index 2616b82..26b4bc9 100644
@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
        if (!offset)
                return;
 
-       if (r600_hdmi_is_audio_buffer_filled(encoder)) {
-               /* disable audio workaround and start delivering of audio frames */
-               WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+       if (!radeon_encoder->hdmi_audio_workaround ||
+               r600_hdmi_is_audio_buffer_filled(encoder)) {
 
-       } else if (radeon_encoder->hdmi_audio_workaround) {
-               /* enable audio workaround and start delivering of audio frames */
-               WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+               /* disable audio workaround */
+               WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
 
        } else {
-               /* disable audio workaround and stop delivering of audio frames */
-               WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+               /* enable audio workaround */
+               WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
        }
 }
 
@@ -345,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
 
        /* audio packets per line, does anyone know how to calculate this? */
        WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
-
-       /* update? reset? don't realy know */
-       WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
 }
 
 /*
  * update settings with current parameters from audio engine
  */
-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
-                                    int channels,
-                                    int rate,
-                                    int bps,
-                                    uint8_t status_bits,
-                                    uint8_t category_code)
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
+       int channels = r600_audio_channels(rdev);
+       int rate = r600_audio_rate(rdev);
+       int bps = r600_audio_bits_per_sample(rdev);
+       uint8_t status_bits = r600_audio_status_bits(rdev);
+       uint8_t category_code = r600_audio_category_code(rdev);
+
        uint32_t iec;
 
        if (!offset)
@@ -415,9 +411,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
        r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
 
        r600_hdmi_audio_workaround(encoder);
-
-       /* update? reset? don't realy know */
-       WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
 }
 
 static int r600_hdmi_find_free_block(struct drm_device *dev)
@@ -486,6 +479,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t offset;
 
        if (ASIC_IS_DCE4(rdev))
                return;
@@ -499,10 +493,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
                }
        }
 
+       offset = radeon_encoder->hdmi_offset;
        if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
                WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
        } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
-               int offset = radeon_encoder->hdmi_offset;
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
                        WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
@@ -518,6 +512,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
                }
        }
 
+       if (rdev->irq.installed
+           && rdev->family != CHIP_RS600
+           && rdev->family != CHIP_RS690
+           && rdev->family != CHIP_RS740) {
+
+               /* if irq is available, use it */
+               rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+               radeon_irq_set(rdev);
+
+               r600_audio_disable_polling(encoder);
+       } else {
+               /* if not, fall back to polling */
+               r600_audio_enable_polling(encoder);
+       }
+
        DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
                radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
 }
@@ -530,22 +539,30 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       uint32_t offset;
 
        if (ASIC_IS_DCE4(rdev))
                return;
 
-       if (!radeon_encoder->hdmi_offset) {
+       offset = radeon_encoder->hdmi_offset;
+       if (!offset) {
                dev_err(rdev->dev, "Disabling HDMI that is not enabled\n");
                return;
        }
 
        DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
-               radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+               offset, radeon_encoder->encoder_id);
+
+       /* disable irq */
+       rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+       radeon_irq_set(rdev);
+
+       /* disable polling */
+       r600_audio_disable_polling(encoder);
 
        if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
                WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
        } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
-               int offset = radeon_encoder->hdmi_offset;
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
                        WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
index 7b1d223..d84612a 100644
 #define R600_HDMI_BLOCK3                  0x7800
 
 /* HDMI registers */
-#define R600_HDMI_ENABLE           0x00
-#define R600_HDMI_STATUS           0x04
-#define R600_HDMI_CNTL             0x08
-#define R600_HDMI_UNKNOWN_0        0x0C
-#define R600_HDMI_AUDIOCNTL        0x10
-#define R600_HDMI_VIDEOCNTL        0x14
-#define R600_HDMI_VERSION          0x18
-#define R600_HDMI_UNKNOWN_1        0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS        0xac
-#define R600_HDMI_32kHz_N          0xb0
-#define R600_HDMI_44_1kHz_CTS      0xb4
-#define R600_HDMI_44_1kHz_N        0xb8
-#define R600_HDMI_48kHz_CTS        0xbc
-#define R600_HDMI_48kHz_N          0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1       0xd4
-#define R600_HDMI_IEC60958_2       0xd8
-#define R600_HDMI_UNKNOWN_2        0xdc
-#define R600_HDMI_AUDIO_DEBUG_0    0xe0
-#define R600_HDMI_AUDIO_DEBUG_1    0xe4
-#define R600_HDMI_AUDIO_DEBUG_2    0xe8
-#define R600_HDMI_AUDIO_DEBUG_3    0xec
+#define R600_HDMI_ENABLE                0x00
+#define R600_HDMI_STATUS                0x04
+#       define R600_HDMI_INT_PENDING    (1 << 29)
+#define R600_HDMI_CNTL                  0x08
+#       define R600_HDMI_INT_EN         (1 << 28)
+#       define R600_HDMI_INT_ACK        (1 << 29)
+#define R600_HDMI_UNKNOWN_0             0x0C
+#define R600_HDMI_AUDIOCNTL             0x10
+#define R600_HDMI_VIDEOCNTL             0x14
+#define R600_HDMI_VERSION               0x18
+#define R600_HDMI_UNKNOWN_1             0x28
+#define R600_HDMI_VIDEOINFOFRAME_0      0x54
+#define R600_HDMI_VIDEOINFOFRAME_1      0x58
+#define R600_HDMI_VIDEOINFOFRAME_2      0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3      0x60
+#define R600_HDMI_32kHz_CTS             0xac
+#define R600_HDMI_32kHz_N               0xb0
+#define R600_HDMI_44_1kHz_CTS           0xb4
+#define R600_HDMI_44_1kHz_N             0xb8
+#define R600_HDMI_48kHz_CTS             0xbc
+#define R600_HDMI_48kHz_N               0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0      0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1      0xd0
+#define R600_HDMI_IEC60958_1            0xd4
+#define R600_HDMI_IEC60958_2            0xd8
+#define R600_HDMI_UNKNOWN_2             0xdc
+#define R600_HDMI_AUDIO_DEBUG_0         0xe0
+#define R600_HDMI_AUDIO_DEBUG_1         0xe4
+#define R600_HDMI_AUDIO_DEBUG_2         0xe8
+#define R600_HDMI_AUDIO_DEBUG_3         0xec
 
 /* HDMI additional config base register addresses */
 #define R600_HDMI_CONFIG1                 0x7600
index 034218c..66a37fb 100644
@@ -89,7 +89,6 @@ extern int radeon_testing;
 extern int radeon_connector_table;
 extern int radeon_tv;
 extern int radeon_new_pll;
-extern int radeon_dynpm;
 extern int radeon_audio;
 extern int radeon_disp_priority;
 extern int radeon_hw_i2c;
@@ -99,6 +98,7 @@ extern int radeon_hw_i2c;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT                100000  /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT   (HZ / 2)
 /* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE            16
 #define RADEON_DEBUGFS_MAX_NUM_FILES   32
@@ -172,6 +172,8 @@ struct radeon_clock {
 int radeon_pm_init(struct radeon_device *rdev);
 void radeon_pm_fini(struct radeon_device *rdev);
 void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
 
@@ -182,7 +184,8 @@ struct radeon_fence_driver {
        uint32_t                        scratch_reg;
        atomic_t                        seq;
        uint32_t                        last_seq;
-       unsigned long                   count_timeout;
+       unsigned long                   last_jiffies;
+       unsigned long                   last_timeout;
        wait_queue_head_t               queue;
        rwlock_t                        lock;
        struct list_head                created;
@@ -197,7 +200,6 @@ struct radeon_fence {
        struct list_head                list;
        /* protected by radeon_fence.lock */
        uint32_t                        seq;
-       unsigned long                   timeout;
        bool                            emited;
        bool                            signaled;
 };
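
The fence rework above drops the absolute per-fence timeout in favour of last_jiffies/last_timeout bookkeeping and the bounded RADEON_FENCE_JIFFIES_TIMEOUT slice, so the wait path can notice a dead GPU instead of blocking forever. A rough sketch of the resulting wait shape, assuming the lockup hooks introduced elsewhere in this diff (the real radeon_fence_wait() also handles signals and recomputes the remaining time):

    /* Sketch: wait in bounded slices; if the fence still has not
     * signaled and the GPU looks locked up, attempt a reset. */
    static int fence_wait_sketch(struct radeon_device *rdev,
                                 struct radeon_fence *fence)
    {
            while (!fence->signaled) {
                    wait_event_timeout(rdev->fence_drv.queue, fence->signaled,
                                       RADEON_FENCE_JIFFIES_TIMEOUT);
                    if (!fence->signaled && radeon_gpu_is_lockup(rdev)) {
                            int r = radeon_gpu_reset(rdev);
                            if (r)
                                    return r;
                    }
            }
            return 0;
    }
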
@@ -259,6 +261,7 @@ struct radeon_bo_list {
        unsigned                rdomain;
        unsigned                wdomain;
        u32                     tiling_flags;
+       bool                    reserved;
 };
 
 /*
@@ -371,10 +374,15 @@ struct radeon_irq {
        bool            installed;
        bool            sw_int;
        /* FIXME: use a define max crtc rather than hardcode it */
-       bool            crtc_vblank_int[2];
+       bool            crtc_vblank_int[6];
        wait_queue_head_t       vblank_queue;
        /* FIXME: use defines for max hpd/dacs */
        bool            hpd[6];
+       bool            gui_idle;
+       bool            gui_idle_acked;
+       wait_queue_head_t       idle_queue;
+       /* FIXME: use defines for max HDMI blocks */
+       bool            hdmi[2];
        spinlock_t sw_lock;
        int sw_refcount;
 };
@@ -462,7 +470,9 @@ int radeon_ib_test(struct radeon_device *rdev);
 extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
 /* Ring access between begin & end cannot sleep */
 void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev);
 void radeon_ring_unlock_commit(struct radeon_device *rdev);
 void radeon_ring_unlock_undo(struct radeon_device *rdev);
 int radeon_ring_test(struct radeon_device *rdev);
@@ -597,17 +607,24 @@ struct radeon_wb {
  * Equation between gpu/memory clock and available bandwidth is hw dependent
  * (type of memory, bus size, efficiency, ...)
  */
-enum radeon_pm_state {
-       PM_STATE_DISABLED,
-       PM_STATE_MINIMUM,
-       PM_STATE_PAUSED,
-       PM_STATE_ACTIVE
+
+enum radeon_pm_method {
+       PM_METHOD_PROFILE,
+       PM_METHOD_DYNPM,
+};
+
+enum radeon_dynpm_state {
+       DYNPM_STATE_DISABLED,
+       DYNPM_STATE_MINIMUM,
+       DYNPM_STATE_PAUSED,
+       DYNPM_STATE_ACTIVE
 };
-enum radeon_pm_action {
-       PM_ACTION_NONE,
-       PM_ACTION_MINIMUM,
-       PM_ACTION_DOWNCLOCK,
-       PM_ACTION_UPCLOCK
+enum radeon_dynpm_action {
+       DYNPM_ACTION_NONE,
+       DYNPM_ACTION_MINIMUM,
+       DYNPM_ACTION_DOWNCLOCK,
+       DYNPM_ACTION_UPCLOCK,
+       DYNPM_ACTION_DEFAULT
 };
 
 enum radeon_voltage_type {
@@ -625,11 +642,25 @@ enum radeon_pm_state_type {
        POWER_STATE_TYPE_PERFORMANCE,
 };
 
-enum radeon_pm_clock_mode_type {
-       POWER_MODE_TYPE_DEFAULT,
-       POWER_MODE_TYPE_LOW,
-       POWER_MODE_TYPE_MID,
-       POWER_MODE_TYPE_HIGH,
+enum radeon_pm_profile_type {
+       PM_PROFILE_DEFAULT,
+       PM_PROFILE_AUTO,
+       PM_PROFILE_LOW,
+       PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX  1
+#define PM_PROFILE_HIGH_SH_IDX 2
+#define PM_PROFILE_LOW_MH_IDX  3
+#define PM_PROFILE_HIGH_MH_IDX 4
+#define PM_PROFILE_MAX         5
+
+struct radeon_pm_profile {
+       int dpms_off_ps_idx;
+       int dpms_on_ps_idx;
+       int dpms_off_cm_idx;
+       int dpms_on_cm_idx;
 };
 
 struct radeon_voltage {
@@ -646,12 +677,8 @@ struct radeon_voltage {
        u32 voltage;
 };
 
-struct radeon_pm_non_clock_info {
-       /* pcie lanes */
-       int pcie_lanes;
-       /* standardized non-clock flags */
-       u32 flags;
-};
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)
 
 struct radeon_pm_clock_info {
        /* memory clock */
@@ -660,10 +687,13 @@ struct radeon_pm_clock_info {
        u32 sclk;
        /* voltage info */
        struct radeon_voltage voltage;
-       /* standardized clock flags - not sure we'll need these */
+       /* standardized clock flags */
        u32 flags;
 };
 
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
 struct radeon_power_state {
        enum radeon_pm_state_type type;
        /* XXX: use a define for num clock modes */
@@ -671,9 +701,11 @@ struct radeon_power_state {
        /* number of valid clock modes in this power state */
        int num_clock_modes;
        struct radeon_pm_clock_info *default_clock_mode;
-       /* non clock info about this state */
-       struct radeon_pm_non_clock_info non_clock_info;
-       bool voltage_drop_active;
+       /* standardized state flags */
+       u32 flags;
+       u32 misc; /* vbios specific flags */
+       u32 misc2; /* vbios specific flags */
+       int pcie_lanes; /* pcie lanes */
 };
 
 /*
@@ -683,14 +715,11 @@ struct radeon_power_state {
 
 struct radeon_pm {
        struct mutex            mutex;
-       struct delayed_work     idle_work;
-       enum radeon_pm_state    state;
-       enum radeon_pm_action   planned_action;
-       unsigned long           action_timeout;
-       bool                    downclocked;
-       int                     active_crtcs;
+       u32                     active_crtcs;
+       int                     active_crtc_count;
        int                     req_vblank;
        bool                    vblank_sync;
+       bool                    gui_idle;
        fixed20_12              max_bandwidth;
        fixed20_12              igp_sideport_mclk;
        fixed20_12              igp_system_mclk;
@@ -707,12 +736,27 @@ struct radeon_pm {
        struct radeon_power_state power_state[8];
        /* number of valid power states */
        int                     num_power_states;
-       struct radeon_power_state *current_power_state;
-       struct radeon_pm_clock_info *current_clock_mode;
-       struct radeon_power_state *requested_power_state;
-       struct radeon_pm_clock_info *requested_clock_mode;
-       struct radeon_power_state *default_power_state;
+       int                     current_power_state_index;
+       int                     current_clock_mode_index;
+       int                     requested_power_state_index;
+       int                     requested_clock_mode_index;
+       int                     default_power_state_index;
+       u32                     current_sclk;
+       u32                     current_mclk;
        struct radeon_i2c_chan *i2c_bus;
+       /* selected pm method */
+       enum radeon_pm_method     pm_method;
+       /* dynpm power management */
+       struct delayed_work     dynpm_idle_work;
+       enum radeon_dynpm_state dynpm_state;
+       enum radeon_dynpm_action        dynpm_planned_action;
+       unsigned long           dynpm_action_timeout;
+       bool                    dynpm_can_upclock;
+       bool                    dynpm_can_downclock;
+       /* profile-based power management */
+       enum radeon_pm_profile_type profile;
+       int                     profile_index;
+       struct radeon_pm_profile profiles[PM_PROFILE_MAX];
 };
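
With the profile table and the index fields above, picking a power state in profile mode reduces to a lookup keyed on the active profile and the DPMS state. A minimal sketch using only fields defined in this struct:

    /* Sketch: profile-based power state selection. */
    static void pm_profile_sketch(struct radeon_device *rdev, bool dpms_on)
    {
            struct radeon_pm_profile *p =
                    &rdev->pm.profiles[rdev->pm.profile_index];

            rdev->pm.requested_power_state_index =
                    dpms_on ? p->dpms_on_ps_idx : p->dpms_off_ps_idx;
            rdev->pm.requested_clock_mode_index =
                    dpms_on ? p->dpms_on_cm_idx : p->dpms_off_cm_idx;
    }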
 
 
@@ -746,7 +790,8 @@ struct radeon_asic {
        int (*resume)(struct radeon_device *rdev);
        int (*suspend)(struct radeon_device *rdev);
        void (*vga_set_state)(struct radeon_device *rdev, bool state);
-       int (*gpu_reset)(struct radeon_device *rdev);
+       bool (*gpu_is_lockup)(struct radeon_device *rdev);
+       int (*asic_reset)(struct radeon_device *rdev);
        void (*gart_tlb_flush)(struct radeon_device *rdev);
        int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
        int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -799,44 +844,84 @@ struct radeon_asic {
         * through ring.
         */
        void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+       bool (*gui_idle)(struct radeon_device *rdev);
+       /* power management */
+       void (*pm_misc)(struct radeon_device *rdev);
+       void (*pm_prepare)(struct radeon_device *rdev);
+       void (*pm_finish)(struct radeon_device *rdev);
+       void (*pm_init_profile)(struct radeon_device *rdev);
+       void (*pm_get_dynpm_state)(struct radeon_device *rdev);
 };
 
 /*
  * Asic structures
  */
+struct r100_gpu_lockup {
+       unsigned long   last_jiffies;
+       u32             last_cp_rptr;
+};
+
 struct r100_asic {
-       const unsigned  *reg_safe_bm;
-       unsigned        reg_safe_bm_size;
-       u32             hdp_cntl;
+       const unsigned          *reg_safe_bm;
+       unsigned                reg_safe_bm_size;
+       u32                     hdp_cntl;
+       struct r100_gpu_lockup  lockup;
 };
 
 struct r300_asic {
-       const unsigned  *reg_safe_bm;
-       unsigned        reg_safe_bm_size;
-       u32             resync_scratch;
-       u32             hdp_cntl;
+       const unsigned          *reg_safe_bm;
+       unsigned                reg_safe_bm_size;
+       u32                     resync_scratch;
+       u32                     hdp_cntl;
+       struct r100_gpu_lockup  lockup;
 };
 
 struct r600_asic {
-       unsigned max_pipes;
-       unsigned max_tile_pipes;
-       unsigned max_simds;
-       unsigned max_backends;
-       unsigned max_gprs;
-       unsigned max_threads;
-       unsigned max_stack_entries;
-       unsigned max_hw_contexts;
-       unsigned max_gs_threads;
-       unsigned sx_max_export_size;
-       unsigned sx_max_export_pos_size;
-       unsigned sx_max_export_smx_size;
-       unsigned sq_num_cf_insts;
-       unsigned tiling_nbanks;
-       unsigned tiling_npipes;
-       unsigned tiling_group_size;
+       unsigned                max_pipes;
+       unsigned                max_tile_pipes;
+       unsigned                max_simds;
+       unsigned                max_backends;
+       unsigned                max_gprs;
+       unsigned                max_threads;
+       unsigned                max_stack_entries;
+       unsigned                max_hw_contexts;
+       unsigned                max_gs_threads;
+       unsigned                sx_max_export_size;
+       unsigned                sx_max_export_pos_size;
+       unsigned                sx_max_export_smx_size;
+       unsigned                sq_num_cf_insts;
+       unsigned                tiling_nbanks;
+       unsigned                tiling_npipes;
+       unsigned                tiling_group_size;
+       struct r100_gpu_lockup  lockup;
 };
 
 struct rv770_asic {
+       unsigned                max_pipes;
+       unsigned                max_tile_pipes;
+       unsigned                max_simds;
+       unsigned                max_backends;
+       unsigned                max_gprs;
+       unsigned                max_threads;
+       unsigned                max_stack_entries;
+       unsigned                max_hw_contexts;
+       unsigned                max_gs_threads;
+       unsigned                sx_max_export_size;
+       unsigned                sx_max_export_pos_size;
+       unsigned                sx_max_export_smx_size;
+       unsigned                sq_num_cf_insts;
+       unsigned                sx_num_of_sets;
+       unsigned                sc_prim_fifo_size;
+       unsigned                sc_hiz_tile_fifo_size;
+       unsigned                sc_earlyz_tile_fifo_fize;
+       unsigned                tiling_nbanks;
+       unsigned                tiling_npipes;
+       unsigned                tiling_group_size;
+       struct r100_gpu_lockup  lockup;
+};
+
+struct evergreen_asic {
+       unsigned num_ses;
        unsigned max_pipes;
        unsigned max_tile_pipes;
        unsigned max_simds;
@@ -853,7 +938,7 @@ struct rv770_asic {
        unsigned sx_num_of_sets;
        unsigned sc_prim_fifo_size;
        unsigned sc_hiz_tile_fifo_size;
-       unsigned sc_earlyz_tile_fifo_fize;
+       unsigned sc_earlyz_tile_fifo_size;
        unsigned tiling_nbanks;
        unsigned tiling_npipes;
        unsigned tiling_group_size;
@@ -864,6 +949,7 @@ union radeon_asic_config {
        struct r100_asic        r100;
        struct r600_asic        r600;
        struct rv770_asic       rv770;
+       struct evergreen_asic   evergreen;
 };
 
 /*
@@ -927,9 +1013,6 @@ struct radeon_device {
        bool                            is_atom_bios;
        uint16_t                        bios_header_start;
        struct radeon_bo                *stollen_vga_memory;
-       struct fb_info                  *fbdev_info;
-       struct radeon_bo                *fbdev_rbo;
-       struct radeon_framebuffer       *fbdev_rfb;
        /* Register mmio */
        resource_size_t                 rmmio_base;
        resource_size_t                 rmmio_size;
@@ -974,6 +1057,7 @@ struct radeon_device {
        struct work_struct hotplug_work;
        int num_crtc; /* number of crtcs */
        struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+       struct mutex vram_mutex;
 
        /* audio stuff */
        struct timer_list       audio_timer;
@@ -984,6 +1068,7 @@ struct radeon_device {
        uint8_t                 audio_category_code;
 
        bool powered_down;
+       struct notifier_block acpi_nb;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1145,7 +1230,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
 #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
 #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
@@ -1173,9 +1259,16 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
 #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
 #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
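
Like the existing wrappers above, the new macros dispatch through the per-family function-pointer table, so common code never names an ASIC family. For example, a recovery path might read (a sketch, not a call site from this patch):

    /* r600 boards land in r600_asic_reset(), evergreen boards in
     * evergreen_asic_reset(), and so on. */
    if (radeon_gpu_is_lockup(rdev)) {
            int r = radeon_asic_reset(rdev);
            if (r)
                    dev_err(rdev->dev, "ASIC reset failed (%d)\n", r);
    }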
 
 /* Common functions */
 /* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
 extern void radeon_agp_disable(struct radeon_device *rdev);
 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
 extern void radeon_gart_restore(struct radeon_device *rdev);
@@ -1200,6 +1293,8 @@ extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
+extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
+extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
 
 /* rv200,rv250,rv280 */
 extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1260,6 +1355,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
 extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern bool r600_card_posted(struct radeon_device *rdev);
 extern void r600_cp_stop(struct radeon_device *rdev);
+extern int r600_cp_start(struct radeon_device *rdev);
 extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_cp_resume(struct radeon_device *rdev);
 extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1276,29 +1372,39 @@ extern void r600_scratch_init(struct radeon_device *rdev);
 extern int r600_blit_init(struct radeon_device *rdev);
 extern void r600_blit_fini(struct radeon_device *rdev);
 extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_gpu_reset(struct radeon_device *rdev);
+extern int r600_asic_reset(struct radeon_device *rdev);
 /* r600 irq */
 extern int r600_irq_init(struct radeon_device *rdev);
 extern void r600_irq_fini(struct radeon_device *rdev);
 extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_irq_set(struct radeon_device *rdev);
 extern void r600_irq_suspend(struct radeon_device *rdev);
+extern void r600_disable_interrupts(struct radeon_device *rdev);
+extern void r600_rlc_stop(struct radeon_device *rdev);
 /* r600 audio */
 extern int r600_audio_init(struct radeon_device *rdev);
 extern int r600_audio_tmds_index(struct drm_encoder *encoder);
 extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern int r600_audio_channels(struct radeon_device *rdev);
+extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
+extern int r600_audio_rate(struct radeon_device *rdev);
+extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
+extern void r600_audio_schedule_polling(struct radeon_device *rdev);
+extern void r600_audio_enable_polling(struct drm_encoder *encoder);
+extern void r600_audio_disable_polling(struct drm_encoder *encoder);
 extern void r600_audio_fini(struct radeon_device *rdev);
 extern void r600_hdmi_init(struct drm_encoder *encoder);
 extern void r600_hdmi_enable(struct drm_encoder *encoder);
 extern void r600_hdmi_disable(struct drm_encoder *encoder);
 extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
-                                           int channels,
-                                           int rate,
-                                           int bps,
-                                           uint8_t status_bits,
-                                           uint8_t category_code);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+
+extern void r700_cp_stop(struct radeon_device *rdev);
+extern void r700_cp_fini(struct radeon_device *rdev);
+extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+extern int evergreen_irq_set(struct radeon_device *rdev);
 
 /* evergreen */
 struct evergreen_mc_save {
index a4b4bc9..e57df08 100644
@@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = {
        .suspend = &r100_suspend,
        .resume = &r100_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &r100_gpu_reset,
+       .gpu_is_lockup = &r100_gpu_is_lockup,
+       .asic_reset = &r100_asic_reset,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -164,6 +165,12 @@ static struct radeon_asic r100_asic = {
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &r100_pm_misc,
+       .pm_prepare = &r100_pm_prepare,
+       .pm_finish = &r100_pm_finish,
+       .pm_init_profile = &r100_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r200_asic = {
@@ -172,7 +179,8 @@ static struct radeon_asic r200_asic = {
        .suspend = &r100_suspend,
        .resume = &r100_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &r100_gpu_reset,
+       .gpu_is_lockup = &r100_gpu_is_lockup,
+       .asic_reset = &r100_asic_reset,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -201,6 +209,12 @@ static struct radeon_asic r200_asic = {
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &r100_pm_misc,
+       .pm_prepare = &r100_pm_prepare,
+       .pm_finish = &r100_pm_finish,
+       .pm_init_profile = &r100_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r300_asic = {
@@ -209,7 +223,8 @@ static struct radeon_asic r300_asic = {
        .suspend = &r300_suspend,
        .resume = &r300_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &r300_gpu_reset,
+       .gpu_is_lockup = &r300_gpu_is_lockup,
+       .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -239,6 +254,12 @@ static struct radeon_asic r300_asic = {
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &r100_pm_misc,
+       .pm_prepare = &r100_pm_prepare,
+       .pm_finish = &r100_pm_finish,
+       .pm_init_profile = &r100_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r300_asic_pcie = {
@@ -247,7 +268,8 @@ static struct radeon_asic r300_asic_pcie = {
        .suspend = &r300_suspend,
        .resume = &r300_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &r300_gpu_reset,
+       .gpu_is_lockup = &r300_gpu_is_lockup,
+       .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -276,6 +298,12 @@ static struct radeon_asic r300_asic_pcie = {
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &r100_pm_misc,
+       .pm_prepare = &r100_pm_prepare,
+       .pm_finish = &r100_pm_finish,
+       .pm_init_profile = &r100_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r420_asic = {
@@ -284,7 +312,8 @@ static struct radeon_asic r420_asic = {
        .suspend = &r420_suspend,
        .resume = &r420_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &r300_gpu_reset,
+       .gpu_is_lockup = &r300_gpu_is_lockup,
+       .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -314,6 +343,12 @@ static struct radeon_asic r420_asic = {
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &r100_pm_misc,
+       .pm_prepare = &r100_pm_prepare,
+       .pm_finish = &r100_pm_finish,
+       .pm_init_profile = &r420_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rs400_asic = {
@@ -322,7 +357,8 @@ static struct radeon_asic rs400_asic = {
        .suspend = &rs400_suspend,
        .resume = &rs400_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &r300_gpu_reset,
+       .gpu_is_lockup = &r300_gpu_is_lockup,
+       .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -352,6 +388,12 @@ static struct radeon_asic rs400_asic = {
        .hpd_sense = &r100_hpd_sense,
        .hpd_set_polarity = &r100_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &r100_pm_misc,
+       .pm_prepare = &r100_pm_prepare,
+       .pm_finish = &r100_pm_finish,
+       .pm_init_profile = &r100_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rs600_asic = {
@@ -360,7 +402,8 @@ static struct radeon_asic rs600_asic = {
        .suspend = &rs600_suspend,
        .resume = &rs600_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &r300_gpu_reset,
+       .gpu_is_lockup = &r300_gpu_is_lockup,
+       .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rs600_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -390,6 +433,12 @@ static struct radeon_asic rs600_asic = {
        .hpd_sense = &rs600_hpd_sense,
        .hpd_set_polarity = &rs600_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &rs600_pm_misc,
+       .pm_prepare = &rs600_pm_prepare,
+       .pm_finish = &rs600_pm_finish,
+       .pm_init_profile = &r420_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rs690_asic = {
@@ -398,7 +447,8 @@ static struct radeon_asic rs690_asic = {
        .suspend = &rs690_suspend,
        .resume = &rs690_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &r300_gpu_reset,
+       .gpu_is_lockup = &r300_gpu_is_lockup,
+       .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -428,6 +478,12 @@ static struct radeon_asic rs690_asic = {
        .hpd_sense = &rs600_hpd_sense,
        .hpd_set_polarity = &rs600_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &rs600_pm_misc,
+       .pm_prepare = &rs600_pm_prepare,
+       .pm_finish = &rs600_pm_finish,
+       .pm_init_profile = &r420_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rv515_asic = {
@@ -436,7 +492,8 @@ static struct radeon_asic rv515_asic = {
        .suspend = &rv515_suspend,
        .resume = &rv515_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &rv515_gpu_reset,
+       .gpu_is_lockup = &r300_gpu_is_lockup,
+       .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -466,6 +523,12 @@ static struct radeon_asic rv515_asic = {
        .hpd_sense = &rs600_hpd_sense,
        .hpd_set_polarity = &rs600_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &rs600_pm_misc,
+       .pm_prepare = &rs600_pm_prepare,
+       .pm_finish = &rs600_pm_finish,
+       .pm_init_profile = &r420_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r520_asic = {
@@ -474,7 +537,8 @@ static struct radeon_asic r520_asic = {
        .suspend = &rv515_suspend,
        .resume = &r520_resume,
        .vga_set_state = &r100_vga_set_state,
-       .gpu_reset = &rv515_gpu_reset,
+       .gpu_is_lockup = &r300_gpu_is_lockup,
+       .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
        .cp_commit = &r100_cp_commit,
@@ -504,6 +568,12 @@ static struct radeon_asic r520_asic = {
        .hpd_sense = &rs600_hpd_sense,
        .hpd_set_polarity = &rs600_hpd_set_polarity,
        .ioctl_wait_idle = NULL,
+       .gui_idle = &r100_gui_idle,
+       .pm_misc = &rs600_pm_misc,
+       .pm_prepare = &rs600_pm_prepare,
+       .pm_finish = &rs600_pm_finish,
+       .pm_init_profile = &r420_pm_init_profile,
+       .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
 };
 
 static struct radeon_asic r600_asic = {
@@ -513,7 +583,8 @@ static struct radeon_asic r600_asic = {
        .resume = &r600_resume,
        .cp_commit = &r600_cp_commit,
        .vga_set_state = &r600_vga_set_state,
-       .gpu_reset = &r600_gpu_reset,
+       .gpu_is_lockup = &r600_gpu_is_lockup,
+       .asic_reset = &r600_asic_reset,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
@@ -541,6 +612,12 @@ static struct radeon_asic r600_asic = {
        .hpd_sense = &r600_hpd_sense,
        .hpd_set_polarity = &r600_hpd_set_polarity,
        .ioctl_wait_idle = r600_ioctl_wait_idle,
+       .gui_idle = &r600_gui_idle,
+       .pm_misc = &r600_pm_misc,
+       .pm_prepare = &rs600_pm_prepare,
+       .pm_finish = &rs600_pm_finish,
+       .pm_init_profile = &r600_pm_init_profile,
+       .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rs780_asic = {
@@ -549,8 +626,9 @@ static struct radeon_asic rs780_asic = {
        .suspend = &r600_suspend,
        .resume = &r600_resume,
        .cp_commit = &r600_cp_commit,
+       .gpu_is_lockup = &r600_gpu_is_lockup,
        .vga_set_state = &r600_vga_set_state,
-       .gpu_reset = &r600_gpu_reset,
+       .asic_reset = &r600_asic_reset,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
@@ -578,6 +656,12 @@ static struct radeon_asic rs780_asic = {
        .hpd_sense = &r600_hpd_sense,
        .hpd_set_polarity = &r600_hpd_set_polarity,
        .ioctl_wait_idle = r600_ioctl_wait_idle,
+       .gui_idle = &r600_gui_idle,
+       .pm_misc = &r600_pm_misc,
+       .pm_prepare = &rs600_pm_prepare,
+       .pm_finish = &rs600_pm_finish,
+       .pm_init_profile = &rs780_pm_init_profile,
+       .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
 };
 
 static struct radeon_asic rv770_asic = {
@@ -586,7 +670,8 @@ static struct radeon_asic rv770_asic = {
        .suspend = &rv770_suspend,
        .resume = &rv770_resume,
        .cp_commit = &r600_cp_commit,
-       .gpu_reset = &rv770_gpu_reset,
+       .asic_reset = &r600_asic_reset,
+       .gpu_is_lockup = &r600_gpu_is_lockup,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
@@ -615,6 +700,12 @@ static struct radeon_asic rv770_asic = {
        .hpd_sense = &r600_hpd_sense,
        .hpd_set_polarity = &r600_hpd_set_polarity,
        .ioctl_wait_idle = r600_ioctl_wait_idle,
+       .gui_idle = &r600_gui_idle,
+       .pm_misc = &rv770_pm_misc,
+       .pm_prepare = &rs600_pm_prepare,
+       .pm_finish = &rs600_pm_finish,
+       .pm_init_profile = &r600_pm_init_profile,
+       .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
 };
 
 static struct radeon_asic evergreen_asic = {
@@ -622,16 +713,17 @@ static struct radeon_asic evergreen_asic = {
        .fini = &evergreen_fini,
        .suspend = &evergreen_suspend,
        .resume = &evergreen_resume,
-       .cp_commit = NULL,
-       .gpu_reset = &evergreen_gpu_reset,
+       .cp_commit = &r600_cp_commit,
+       .gpu_is_lockup = &evergreen_gpu_is_lockup,
+       .asic_reset = &evergreen_asic_reset,
        .vga_set_state = &r600_vga_set_state,
-       .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+       .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
-       .ring_test = NULL,
-       .ring_ib_execute = NULL,
-       .irq_set = NULL,
-       .irq_process = NULL,
-       .get_vblank_counter = NULL,
+       .ring_test = &r600_ring_test,
+       .ring_ib_execute = &r600_ring_ib_execute,
+       .irq_set = &evergreen_irq_set,
+       .irq_process = &evergreen_irq_process,
+       .get_vblank_counter = &evergreen_get_vblank_counter,
        .fence_ring_emit = NULL,
        .cs_parse = NULL,
        .copy_blit = NULL,
@@ -650,6 +742,12 @@ static struct radeon_asic evergreen_asic = {
        .hpd_fini = &evergreen_hpd_fini,
        .hpd_sense = &evergreen_hpd_sense,
        .hpd_set_polarity = &evergreen_hpd_set_polarity,
+       .gui_idle = &r600_gui_idle,
+       .pm_misc = &evergreen_pm_misc,
+       .pm_prepare = &evergreen_pm_prepare,
+       .pm_finish = &evergreen_pm_finish,
+       .pm_init_profile = &r600_pm_init_profile,
+       .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
 };
 
 int radeon_asic_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a0b8280..5c40a3d 100644
@@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev);
 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
-int r100_gpu_reset(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev);
 void r100_wb_disable(struct radeon_device *rdev);
 void r100_wb_fini(struct radeon_device *rdev);
 int r100_wb_init(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
 int r100_cp_reset(struct radeon_device *rdev);
 void r100_vga_render_disable(struct radeon_device *rdev);
 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
@@ -126,6 +125,13 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
                         unsigned idx);
 void r100_enable_bm(struct radeon_device *rdev);
 void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
 
 /*
  * r200,rv250,rs300,rv280
@@ -134,7 +140,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
                        uint64_t src_offset,
                        uint64_t dst_offset,
                        unsigned num_pages,
-                       struct radeon_fence *fence);
+                        struct radeon_fence *fence);
 
 /*
  * r300,r350,rv350,rv380
@@ -143,7 +149,8 @@ extern int r300_init(struct radeon_device *rdev);
 extern void r300_fini(struct radeon_device *rdev);
 extern int r300_suspend(struct radeon_device *rdev);
 extern int r300_resume(struct radeon_device *rdev);
-extern int r300_gpu_reset(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
 extern void r300_ring_start(struct radeon_device *rdev);
 extern void r300_fence_ring_emit(struct radeon_device *rdev,
                                struct radeon_fence *fence);
@@ -162,6 +169,7 @@ extern int r420_init(struct radeon_device *rdev);
 extern void r420_fini(struct radeon_device *rdev);
 extern int r420_suspend(struct radeon_device *rdev);
 extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
 
 /*
  * rs400,rs480
@@ -178,6 +186,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 /*
  * rs600.
  */
+extern int rs600_asic_reset(struct radeon_device *rdev);
 extern int rs600_init(struct radeon_device *rdev);
 extern void rs600_fini(struct radeon_device *rdev);
 extern int rs600_suspend(struct radeon_device *rdev);
@@ -195,6 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev);
 bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void rs600_hpd_set_polarity(struct radeon_device *rdev,
                            enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
 
 /*
  * rs690,rs740
@@ -212,7 +224,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
  */
 int rv515_init(struct radeon_device *rdev);
 void rv515_fini(struct radeon_device *rdev);
-int rv515_gpu_reset(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rv515_ring_start(struct radeon_device *rdev);
@@ -252,7 +263,8 @@ int r600_copy_dma(struct radeon_device *rdev,
                  struct radeon_fence *fence);
 int r600_irq_process(struct radeon_device *rdev);
 int r600_irq_set(struct radeon_device *rdev);
-int r600_gpu_reset(struct radeon_device *rdev);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size);
@@ -268,6 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void r600_hpd_set_polarity(struct radeon_device *rdev,
                           enum radeon_hpd_id hpd);
 extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
 
 /*
  * rv770,rv730,rv710,rv740
@@ -276,20 +293,29 @@ int rv770_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
 int rv770_suspend(struct radeon_device *rdev);
 int rv770_resume(struct radeon_device *rdev);
-int rv770_gpu_reset(struct radeon_device *rdev);
+extern void rv770_pm_misc(struct radeon_device *rdev);
 
 /*
  * evergreen
  */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 int evergreen_suspend(struct radeon_device *rdev);
 int evergreen_resume(struct radeon_device *rdev);
-int evergreen_gpu_reset(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
                                enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 9916d82..6e733fd 100644
@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                        }
 
                        /* look up gpio for ddc, hpd */
+                       ddc_bus.valid = false;
+                       hpd.hpd = RADEON_HPD_NONE;
                        if ((le16_to_cpu(path->usDeviceTag) &
                             (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
                                for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
@@ -547,7 +549,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                ATOM_I2C_RECORD *i2c_record;
                                                ATOM_HPD_INT_RECORD *hpd_record;
                                                ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
-                                               hpd.hpd = RADEON_HPD_NONE;
 
                                                while (record->ucRecordType > 0
                                                       && record->
@@ -585,13 +586,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                break;
                                        }
                                }
-                       } else {
-                               hpd.hpd = RADEON_HPD_NONE;
-                               ddc_bus.valid = false;
                        }
 
                        /* needed for aux chan transactions */
-                       ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
+                       ddc_bus.hpd = hpd.hpd;
 
                        conn_id = le16_to_cpu(path->usConnObjectId);
 
@@ -1174,7 +1172,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
                        le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
                lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
-                       le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+                       le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
                lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
                        le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
                lvds->panel_pwr_delay =
@@ -1442,26 +1440,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
 
 static const char *thermal_controller_names[] = {
        "NONE",
-       "LM63",
-       "ADM1032",
-       "ADM1030",
-       "MUA6649",
-       "LM64",
-       "F75375",
-       "ASC7512",
+       "lm63",
+       "adm1032",
+       "adm1030",
+       "max6649",
+       "lm64",
+       "f75375",
+       "asc7xxx",
 };
 
 static const char *pp_lib_thermal_controller_names[] = {
        "NONE",
-       "LM63",
-       "ADM1032",
-       "ADM1030",
-       "MUA6649",
-       "LM64",
-       "F75375",
+       "lm63",
+       "adm1032",
+       "adm1030",
+       "max6649",
+       "lm64",
+       "f75375",
        "RV6xx",
        "RV770",
-       "ADT7473",
+       "adt7473",
+       "External GPIO",
+       "Evergreen",
+       "adt7473 with internal",
+
 };
 
 union power_info {
@@ -1485,7 +1487,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
        int state_index = 0, mode_index = 0;
        struct radeon_i2c_bus_rec i2c_bus;
 
-       rdev->pm.default_power_state = NULL;
+       rdev->pm.default_power_state_index = -1;
 
        if (atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset)) {
@@ -1498,10 +1500,19 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                         power_info->info.ucOverdriveControllerAddress >> 1);
                                i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
                                rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+                               if (rdev->pm.i2c_bus) {
+                                       struct i2c_board_info info = { };
+                                       const char *name = thermal_controller_names[power_info->info.
+                                                                                   ucOverdriveThermalController];
+                                       info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+                                       strlcpy(info.type, name, sizeof(info.type));
+                                       i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                               }
                        }
                        num_modes = power_info->info.ucNumOfPowerModeEntries;
                        if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
                                num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+                       /* last mode is usually default, array is low to high */
                        for (i = 0; i < num_modes; i++) {
                                rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
                                switch (frev) {
@@ -1515,13 +1526,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                        if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
                                            (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
                                                continue;
-                                       /* skip overclock modes for now */
-                                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-                                            rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-                                           (rdev->pm.power_state[state_index].clock_info[0].sclk >
-                                            rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-                                               continue;
-                                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+                                       rdev->pm.power_state[state_index].pcie_lanes =
                                                power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
                                        misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
                                        if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
@@ -1542,6 +1547,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                                rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
                                                        power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
                                        }
+                                       rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+                                       rdev->pm.power_state[state_index].misc = misc;
                                        /* order matters! */
                                        if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
                                                rdev->pm.power_state[state_index].type =
@@ -1555,15 +1562,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                        if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_BALANCED;
-                                       if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+                                       if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_PERFORMANCE;
+                                               rdev->pm.power_state[state_index].flags &=
+                                                       ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+                                       }
                                        if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_DEFAULT;
-                                               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+                                               rdev->pm.default_power_state_index = state_index;
                                                rdev->pm.power_state[state_index].default_clock_mode =
                                                        &rdev->pm.power_state[state_index].clock_info[0];
+                                               rdev->pm.power_state[state_index].flags &=
+                                                       ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+                                       } else if (state_index == 0) {
+                                               rdev->pm.power_state[state_index].clock_info[0].flags |=
+                                                       RADEON_PM_MODE_NO_DISPLAY;
                                        }
                                        state_index++;
                                        break;
@@ -1577,13 +1592,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                        if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
                                            (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
                                                continue;
-                                       /* skip overclock modes for now */
-                                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-                                            rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-                                           (rdev->pm.power_state[state_index].clock_info[0].sclk >
-                                            rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-                                               continue;
-                                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+                                       rdev->pm.power_state[state_index].pcie_lanes =
                                                power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
                                        misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
                                        misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1605,6 +1614,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                                rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
                                                        power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
                                        }
+                                       rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+                                       rdev->pm.power_state[state_index].misc = misc;
+                                       rdev->pm.power_state[state_index].misc2 = misc2;
                                        /* order matters! */
                                        if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
                                                rdev->pm.power_state[state_index].type =
@@ -1618,18 +1630,29 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                        if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_BALANCED;
-                                       if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+                                       if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_PERFORMANCE;
+                                               rdev->pm.power_state[state_index].flags &=
+                                                       ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+                                       }
                                        if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_BALANCED;
+                                       if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
+                                               rdev->pm.power_state[state_index].flags &=
+                                                       ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
                                        if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_DEFAULT;
-                                               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+                                               rdev->pm.default_power_state_index = state_index;
                                                rdev->pm.power_state[state_index].default_clock_mode =
                                                        &rdev->pm.power_state[state_index].clock_info[0];
+                                               rdev->pm.power_state[state_index].flags &=
+                                                       ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+                                       } else if (state_index == 0) {
+                                               rdev->pm.power_state[state_index].clock_info[0].flags |=
+                                                       RADEON_PM_MODE_NO_DISPLAY;
                                        }
                                        state_index++;
                                        break;
@@ -1643,13 +1666,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                        if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
                                            (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
                                                continue;
-                                       /* skip overclock modes for now */
-                                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-                                            rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-                                           (rdev->pm.power_state[state_index].clock_info[0].sclk >
-                                            rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-                                               continue;
-                                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+                                       rdev->pm.power_state[state_index].pcie_lanes =
                                                power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
                                        misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
                                        misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1677,6 +1694,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                                        power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
                                                }
                                        }
+                                       rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+                                       rdev->pm.power_state[state_index].misc = misc;
+                                       rdev->pm.power_state[state_index].misc2 = misc2;
                                        /* order matters! */
                                        if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
                                                rdev->pm.power_state[state_index].type =
@@ -1690,42 +1710,76 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                        if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_BALANCED;
-                                       if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+                                       if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_PERFORMANCE;
+                                               rdev->pm.power_state[state_index].flags &=
+                                                       ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+                                       }
                                        if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_BALANCED;
                                        if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_DEFAULT;
-                                               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+                                               rdev->pm.default_power_state_index = state_index;
                                                rdev->pm.power_state[state_index].default_clock_mode =
                                                        &rdev->pm.power_state[state_index].clock_info[0];
+                                       } else if (state_index == 0) {
+                                               rdev->pm.power_state[state_index].clock_info[0].flags |=
+                                                       RADEON_PM_MODE_NO_DISPLAY;
                                        }
                                        state_index++;
                                        break;
                                }
                        }
-               } else if (frev == 4) {
+                       /* last mode is usually default */
+                       if (rdev->pm.default_power_state_index == -1) {
+                               rdev->pm.power_state[state_index - 1].type =
+                                       POWER_STATE_TYPE_DEFAULT;
+                               rdev->pm.default_power_state_index = state_index - 1;
+                               rdev->pm.power_state[state_index - 1].default_clock_mode =
+                                       &rdev->pm.power_state[state_index - 1].clock_info[0];
+                                               rdev->pm.power_state[state_index - 1].flags &=
+                                                       ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+                                               rdev->pm.power_state[state_index - 1].misc = 0;
+                                               rdev->pm.power_state[state_index - 1].misc2 = 0;
+                       }
+               } else {
                        /* add the i2c bus for thermal/fan chip */
                        /* no support for internal controller yet */
-                       if (power_info->info_4.sThermalController.ucType > 0) {
-                               if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
-                                   (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
+                       ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+                       if (controller->ucType > 0) {
+                               if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
+                                   (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
+                                   (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
                                        DRM_INFO("Internal thermal controller %s fan control\n",
-                                                (power_info->info_4.sThermalController.ucFanParameters &
+                                                (controller->ucFanParameters &
                                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                               } else if ((controller->ucType ==
+                                           ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+                                          (controller->ucType ==
+                                           ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
+                                       DRM_INFO("Special thermal controller config\n");
                                } else {
                                        DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
-                                                pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
-                                                power_info->info_4.sThermalController.ucI2cAddress >> 1,
-                                                (power_info->info_4.sThermalController.ucFanParameters &
+                                                pp_lib_thermal_controller_names[controller->ucType],
+                                                controller->ucI2cAddress >> 1,
+                                                (controller->ucFanParameters &
                                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                                       i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
+                                       i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
                                        rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+                                       if (rdev->pm.i2c_bus) {
+                                               struct i2c_board_info info = { };
+                                               const char *name = pp_lib_thermal_controller_names[controller->ucType];
+                                               info.addr = controller->ucI2cAddress >> 1;
+                                               strlcpy(info.type, name, sizeof(info.type));
+                                               i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+                                       }
+
                                }
                        }
+                       /* first mode is usually default, followed by low to high */
                        for (i = 0; i < power_info->info_4.ucNumStates; i++) {
                                mode_index = 0;
                                power_state = (struct _ATOM_PPLIB_STATE *)
@@ -1754,14 +1808,34 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                                /* skip invalid modes */
                                                if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
                                                        continue;
-                                               /* skip overclock modes for now */
-                                               if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
-                                                   rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+                                                       VOLTAGE_SW;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+                                                       clock_info->usVDDC;
+                                               mode_index++;
+                                       } else if (ASIC_IS_DCE4(rdev)) {
+                                               struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
+                                                       (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
+                                                       (mode_info->atom_context->bios +
+                                                        data_offset +
+                                                        le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+                                                        (power_state->ucClockStateIndices[j] *
+                                                         power_info->info_4.ucClockInfoSize));
+                                               sclk = le16_to_cpu(clock_info->usEngineClockLow);
+                                               sclk |= clock_info->ucEngineClockHigh << 16;
+                                               mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+                                               mclk |= clock_info->ucMemoryClockHigh << 16;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+                                               /* skip invalid modes */
+                                               if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+                                                   (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
                                                        continue;
                                                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
                                                        VOLTAGE_SW;
                                                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
                                                        clock_info->usVDDC;
+                                               /* XXX usVDDCI */
                                                mode_index++;
                                        } else {
                                                struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
@@ -1781,12 +1855,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                                if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
                                                    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
                                                        continue;
-                                               /* skip overclock modes for now */
-                                               if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
-                                                    rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-                                                   (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
-                                                    rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-                                                       continue;
                                                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
                                                        VOLTAGE_SW;
                                                rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
@@ -1798,7 +1866,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                if (mode_index) {
                                        misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
                                        misc2 = le16_to_cpu(non_clock_info->usClassification);
-                                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+                                       rdev->pm.power_state[state_index].misc = misc;
+                                       rdev->pm.power_state[state_index].misc2 = misc2;
+                                       rdev->pm.power_state[state_index].pcie_lanes =
                                                ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
                                                ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
                                        switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
@@ -1815,22 +1885,36 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                                                        POWER_STATE_TYPE_PERFORMANCE;
                                                break;
                                        }
+                                       rdev->pm.power_state[state_index].flags = 0;
+                                       if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+                                               rdev->pm.power_state[state_index].flags |=
+                                                       RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
                                        if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
                                                rdev->pm.power_state[state_index].type =
                                                        POWER_STATE_TYPE_DEFAULT;
-                                               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+                                               rdev->pm.default_power_state_index = state_index;
                                                rdev->pm.power_state[state_index].default_clock_mode =
                                                        &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
                                        }
                                        state_index++;
                                }
                        }
+                       /* if multiple clock modes, mark the lowest as no display */
+                       for (i = 0; i < state_index; i++) {
+                               if (rdev->pm.power_state[i].num_clock_modes > 1)
+                                       rdev->pm.power_state[i].clock_info[0].flags |=
+                                               RADEON_PM_MODE_NO_DISPLAY;
+                       }
+                       /* first mode is usually default */
+                       if (rdev->pm.default_power_state_index == -1) {
+                               rdev->pm.power_state[0].type =
+                                       POWER_STATE_TYPE_DEFAULT;
+                               rdev->pm.default_power_state_index = 0;
+                               rdev->pm.power_state[0].default_clock_mode =
+                                       &rdev->pm.power_state[0].clock_info[0];
+                       }
                }
        } else {
-               /* XXX figure out some good default low power mode for cards w/out power tables */
-       }
-
-       if (rdev->pm.default_power_state == NULL) {
                /* add the default mode */
                rdev->pm.power_state[state_index].type =
                        POWER_STATE_TYPE_DEFAULT;
@@ -1840,18 +1924,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
                rdev->pm.power_state[state_index].default_clock_mode =
                        &rdev->pm.power_state[state_index].clock_info[0];
                rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-               if (rdev->asic->get_pcie_lanes)
-                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
-               else
-                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
-               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+               rdev->pm.power_state[state_index].pcie_lanes = 16;
+               rdev->pm.default_power_state_index = state_index;
+               rdev->pm.power_state[state_index].flags = 0;
                state_index++;
        }
+
        rdev->pm.num_power_states = state_index;
 
-       rdev->pm.current_power_state = rdev->pm.default_power_state;
-       rdev->pm.current_clock_mode =
-               rdev->pm.default_power_state->default_clock_mode;
+       rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+       rdev->pm.current_clock_mode_index = 0;
 }
 
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8ad71f7..fbba938 100644
@@ -85,12 +85,11 @@ static bool radeon_read_bios(struct radeon_device *rdev)
                pci_unmap_rom(rdev->pdev, bios);
                return false;
        }
-       rdev->bios = kmalloc(size, GFP_KERNEL);
+       rdev->bios = kmemdup(bios, size, GFP_KERNEL);
        if (rdev->bios == NULL) {
                pci_unmap_rom(rdev->pdev, bios);
                return false;
        }
-       memcpy(rdev->bios, bios, size);
        pci_unmap_rom(rdev->pdev, bios);
        return true;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 37db8ad..7b5e10d 100644
@@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
 {
        int edid_info;
        struct edid *edid;
+       unsigned char *raw;
        edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
        if (!edid_info)
                return false;
 
-       edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
-                      GFP_KERNEL);
+       raw = rdev->bios + edid_info;
+       edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
        if (edid == NULL)
                return false;
 
-       memcpy((unsigned char *)edid,
-              (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
+       memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
 
        if (!drm_edid_is_valid(edid)) {
                kfree(edid);
@@ -600,7 +600,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
        }
        i2c.mm_i2c = false;
        i2c.i2c_id = 0;
-       i2c.hpd_id = 0;
+       i2c.hpd = RADEON_HPD_NONE;
 
        if (ddc_line)
                i2c.valid = true;
@@ -1113,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
                                break;
 
                        if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
-                           (RBIOS16(tmp + 2) ==
-                            lvds->native_mode.vdisplay)) {
-                               lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
-                               lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
-                               lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
-                                                              RBIOS16(tmp + 21)) * 8;
-
-                               lvds->native_mode.vtotal = RBIOS16(tmp + 24);
-                               lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
-                               lvds->native_mode.vsync_end =
-                                       ((RBIOS16(tmp + 28) & 0xf800) >> 11) +
-                                       (RBIOS16(tmp + 28) & 0x7ff);
+                           (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+                               lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+                                       (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+                               lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+                                       (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+                               lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+                                       (RBIOS8(tmp + 23) * 8);
+
+                               lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+                                       (RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+                               lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+                                       ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+                               lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+                                       ((RBIOS16(tmp + 28) & 0xf800) >> 11);
 
                                lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
                                lvds->native_mode.flags = 0;
@@ -2196,7 +2198,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                                  ATOM_DEVICE_DFP1_SUPPORT);
 
                        ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
-                       hpd.hpd = RADEON_HPD_NONE;
+                       hpd.hpd = RADEON_HPD_1;
                        radeon_add_legacy_connector(dev,
                                                    0,
                                                    ATOM_DEVICE_CRT1_SUPPORT |
@@ -2366,7 +2368,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
        u8 rev, blocks, tmp;
        int state_index = 0;
 
-       rdev->pm.default_power_state = NULL;
+       rdev->pm.default_power_state_index = -1;
 
        if (rdev->flags & RADEON_IS_MOBILITY) {
                offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
@@ -2380,17 +2382,13 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
                        if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
                            (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
                                goto default_mode;
-                       /* skip overclock modes for now */
-                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
-                            rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
-                           (rdev->pm.power_state[state_index].clock_info[0].sclk >
-                            rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
-                               goto default_mode;
                        rdev->pm.power_state[state_index].type =
                                POWER_STATE_TYPE_BATTERY;
                        misc = RBIOS16(offset + 0x5 + 0x0);
                        if (rev > 4)
                                misc2 = RBIOS16(offset + 0x5 + 0xe);
+                       rdev->pm.power_state[state_index].misc = misc;
+                       rdev->pm.power_state[state_index].misc2 = misc2;
                        if (misc & 0x4) {
                                rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
                                if (misc & 0x8)
@@ -2437,8 +2435,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
                        } else
                                rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
                        if (rev > 6)
-                               rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+                               rdev->pm.power_state[state_index].pcie_lanes =
                                        RBIOS8(offset + 0x5 + 0x10);
+                       rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
                        state_index++;
                } else {
                        /* XXX figure out some good default low power mode for mobility cards w/out power tables */
@@ -2456,16 +2455,13 @@ default_mode:
        rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
        rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
        rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-       if (rdev->asic->get_pcie_lanes)
-               rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
-       else
-               rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
-       rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+       rdev->pm.power_state[state_index].pcie_lanes = 16;
+       rdev->pm.power_state[state_index].flags = 0;
+       rdev->pm.default_power_state_index = state_index;
        rdev->pm.num_power_states = state_index + 1;
 
-       rdev->pm.current_power_state = rdev->pm.default_power_state;
-       rdev->pm.current_clock_mode =
-               rdev->pm.default_power_state->default_clock_mode;
+       rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+       rdev->pm.current_clock_mode_index = 0;
 }
 
 void radeon_external_tmds_setup(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4559a53..0c7ccc6 100644
@@ -1041,7 +1041,6 @@ radeon_add_atom_connector(struct drm_device *dev,
        struct radeon_connector_atom_dig *radeon_dig_connector;
        uint32_t subpixel_order = SubPixelNone;
        bool shared_ddc = false;
-       int ret;
 
        /* fixme - tv/cv/din */
        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1076,9 +1075,7 @@ radeon_add_atom_connector(struct drm_device *dev,
        switch (connector_type) {
        case DRM_MODE_CONNECTOR_VGA:
                drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
                        if (!radeon_connector->ddc_bus)
@@ -1088,12 +1085,11 @@ radeon_add_atom_connector(struct drm_device *dev,
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.load_detect_property,
                                              1);
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT;
                break;
        case DRM_MODE_CONNECTOR_DVIA:
                drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
                        if (!radeon_connector->ddc_bus)
@@ -1113,9 +1109,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
                        if (!radeon_connector->ddc_bus)
@@ -1141,9 +1135,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
                        if (!radeon_connector->ddc_bus)
@@ -1163,9 +1155,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
                if (i2c_bus->valid) {
                        /* add DP i2c bus */
                        if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1191,9 +1181,7 @@ radeon_add_atom_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_9PinDIN:
                if (radeon_tv == 1) {
                        drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-                       ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-                       if (ret)
-                               goto failed;
+                       drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
                        radeon_connector->dac_load_detect = true;
                        drm_connector_attach_property(&radeon_connector->base,
                                                      rdev->mode_info.load_detect_property,
@@ -1211,9 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
                        if (!radeon_connector->ddc_bus)
@@ -1226,6 +1212,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                break;
        }
 
+       if (hpd->hpd == RADEON_HPD_NONE) {
+               if (i2c_bus->valid)
+                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       } else
+               connector->polled = DRM_CONNECTOR_POLL_HPD;
+
        connector->display_info.subpixel_order = subpixel_order;
        drm_sysfs_connector_add(connector);
        return;
@@ -1250,7 +1242,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
        uint32_t subpixel_order = SubPixelNone;
-       int ret;
 
        /* fixme - tv/cv/din */
        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1278,9 +1269,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
        switch (connector_type) {
        case DRM_MODE_CONNECTOR_VGA:
                drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
                        if (!radeon_connector->ddc_bus)
@@ -1290,12 +1279,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.load_detect_property,
                                              1);
+               connector->polled = DRM_CONNECTOR_POLL_CONNECT;
                break;
        case DRM_MODE_CONNECTOR_DVIA:
                drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
                        if (!radeon_connector->ddc_bus)
@@ -1309,9 +1297,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_DVID:
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
                        if (!radeon_connector->ddc_bus)
@@ -1330,9 +1316,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_9PinDIN:
                if (radeon_tv == 1) {
                        drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-                       ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-                       if (ret)
-                               goto failed;
+                       drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
                        radeon_connector->dac_load_detect = true;
                        /* RS400,RC410,RS480 chipsets seem to report a lot
                         * of false positives on load detect, we haven't yet
@@ -1351,9 +1335,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
-               if (ret)
-                       goto failed;
+               drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
                        if (!radeon_connector->ddc_bus)
@@ -1366,6 +1348,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
                break;
        }
 
+       if (hpd->hpd == RADEON_HPD_NONE) {
+               if (i2c_bus->valid)
+                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       } else
+               connector->polled = DRM_CONNECTOR_POLL_HPD;
        connector->display_info.subpixel_order = subpixel_order;
        drm_sysfs_connector_add(connector);
        return;
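
The two hunks above give every connector an explicit polling policy once it is
constructed: a connector wired to a hotplug-detect pin is serviced by the HPD
interrupt (DRM_CONNECTOR_POLL_HPD), while one without HPD falls back to the
periodic probe (DRM_CONNECTOR_POLL_CONNECT) only if it has a DDC bus to probe
with. A minimal standalone sketch of that decision (names here are local
stand-ins, not the DRM API):

    #include <stdbool.h>

    enum poll_mode { POLL_NONE, POLL_CONNECT, POLL_HPD };

    /* mirrors the policy set at the end of radeon_add_*_connector() */
    static enum poll_mode pick_poll_mode(bool has_hpd_pin, bool has_ddc_bus)
    {
            if (has_hpd_pin)
                    return POLL_HPD;        /* the HPD IRQ reports changes */
            /* no hotplug pin: poll periodically, but only if DDC gives us
             * something to detect a monitor with */
            return has_ddc_bus ? POLL_CONNECT : POLL_NONE;
    }
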
index f9b0fe0..ae0fb73 100644
@@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        int r;
 
        mutex_lock(&rdev->cs_mutex);
-       if (rdev->gpu_lockup) {
-               mutex_unlock(&rdev->cs_mutex);
-               return -EINVAL;
-       }
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
index 7b629e3..a20b612 100644
@@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev)
                sclk = radeon_get_engine_clock(rdev);
                mclk = rdev->clock.default_mclk;
 
-               a.full = rfixed_const(100);
-               rdev->pm.sclk.full = rfixed_const(sclk);
-               rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-               rdev->pm.mclk.full = rfixed_const(mclk);
-               rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+               a.full = dfixed_const(100);
+               rdev->pm.sclk.full = dfixed_const(sclk);
+               rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+               rdev->pm.mclk.full = dfixed_const(mclk);
+               rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 
-               a.full = rfixed_const(16);
+               a.full = dfixed_const(16);
                /* core_bandwidth = sclk(MHz) * 16 */
-               rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+               rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
        } else {
                sclk = radeon_get_engine_clock(rdev);
                mclk = radeon_get_memory_clock(rdev);
 
-               a.full = rfixed_const(100);
-               rdev->pm.sclk.full = rfixed_const(sclk);
-               rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-               rdev->pm.mclk.full = rfixed_const(mclk);
-               rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+               a.full = dfixed_const(100);
+               rdev->pm.sclk.full = dfixed_const(sclk);
+               rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+               rdev->pm.mclk.full = dfixed_const(mclk);
+               rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
        }
 }
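
The hunk above is a mechanical rename from the radeon-private rfixed_* helpers
to the shared dfixed_* ones (see the removal of radeon_fixed.h further down);
the arithmetic is unchanged. The divide by 100 implies the clock getters
return units of 10 kHz, so sclk/100 yields MHz in 20.12 fixed point. A quick
runnable check of that conversion, with the helpers re-stated locally as an
illustration:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fp_const(uint32_t a) { return a << 12; }   /* 20.12 */
    static uint32_t fp_div(uint32_t a, uint32_t b)
    {
            uint64_t t = (uint64_t)a << 13;  /* extra bit, round to nearest */
            t /= b;
            return (uint32_t)((t + 1) / 2);
    }

    int main(void)
    {
            /* assumed: engine clock reported as 40000 * 10 kHz = 400 MHz */
            uint32_t sclk = fp_div(fp_const(40000), fp_const(100));
            printf("sclk = %u.%03u MHz\n", sclk >> 12,
                   (sclk & 0xfff) * 1000 / 4096);  /* "sclk = 400.000 MHz" */
            return 0;
    }
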
 
@@ -599,9 +599,11 @@ int radeon_device_init(struct radeon_device *rdev,
                spin_lock_init(&rdev->ih.lock);
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
+       mutex_init(&rdev->vram_mutex);
        rwlock_init(&rdev->fence_drv.lock);
        INIT_LIST_HEAD(&rdev->gem.objects);
        init_waitqueue_head(&rdev->irq.vblank_queue);
+       init_waitqueue_head(&rdev->irq.idle_queue);
 
        /* setup workqueue */
        rdev->wq = create_workqueue("radeon");
@@ -671,7 +673,7 @@ int radeon_device_init(struct radeon_device *rdev,
                /* Acceleration not working on AGP card, try again
                 * with fallback to PCI or PCIE GART
                 */
-               radeon_gpu_reset(rdev);
+               radeon_asic_reset(rdev);
                radeon_fini(rdev);
                radeon_agp_disable(rdev);
                r = radeon_init(rdev);
@@ -691,6 +693,8 @@ void radeon_device_fini(struct radeon_device *rdev)
 {
        DRM_INFO("radeon: finishing device.\n");
        rdev->shutdown = true;
+       /* evict vram memory */
+       radeon_bo_evict_vram(rdev);
        radeon_fini(rdev);
        destroy_workqueue(rdev->wq);
        vga_switcheroo_unregister_client(rdev->pdev);
@@ -728,9 +732,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
                        continue;
                }
                robj = rfb->obj->driver_private;
-               if (robj != rdev->fbdev_rbo) {
+               /* don't unpin kernel fb objects */
+               if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
                        r = radeon_bo_reserve(robj, false);
-                       if (unlikely(r == 0)) {
+                       if (r == 0) {
                                radeon_bo_unpin(robj);
                                radeon_bo_unreserve(robj);
                        }
@@ -743,6 +748,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 
        radeon_save_bios_scratch_regs(rdev);
 
+       radeon_pm_suspend(rdev);
        radeon_suspend(rdev);
        radeon_hpd_fini(rdev);
        /* evict remaining vram memory */
@@ -755,7 +761,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }
        acquire_console_sem();
-       fb_set_suspend(rdev->fbdev_info, 1);
+       radeon_fbdev_set_suspend(rdev, 1);
        release_console_sem();
        return 0;
 }
@@ -778,8 +784,9 @@ int radeon_resume_kms(struct drm_device *dev)
        /* resume AGP if in use */
        radeon_agp_resume(rdev);
        radeon_resume(rdev);
+       radeon_pm_resume(rdev);
        radeon_restore_bios_scratch_regs(rdev);
-       fb_set_suspend(rdev->fbdev_info, 0);
+       radeon_fbdev_set_suspend(rdev, 0);
        release_console_sem();
 
        /* reset hpd state */
@@ -789,6 +796,26 @@ int radeon_resume_kms(struct drm_device *dev)
        return 0;
 }
 
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+       int r;
+
+       radeon_save_bios_scratch_regs(rdev);
+       radeon_suspend(rdev);
+
+       r = radeon_asic_reset(rdev);
+       if (!r) {
+               dev_info(rdev->dev, "GPU reset succeeded\n");
+               radeon_resume(rdev);
+               radeon_restore_bios_scratch_regs(rdev);
+               drm_helper_resume_force_mode(rdev->ddev);
+               return 0;
+       }
+       /* bad news, how do we tell userspace? */
+       dev_info(rdev->dev, "GPU reset failed\n");
+       return r;
+}
+
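
radeon_gpu_reset() packages the recovery sequence that used to live in the
fence timeout path: save the BIOS scratch registers, suspend the engines, try
an ASIC-level reset, and on success resume and force a modeset so the displays
come back. The gpu_lockup bail-out removed from radeon_cs_ioctl() above is
what this replaces - lockups are now detected and handled in the fence code
(below) instead of rejecting all command submission. A stub sketch of the
sequence (every function here is a placeholder, not the radeon API):

    #include <stdio.h>

    static void save_scratch(void)    { puts("save BIOS scratch regs"); }
    static void suspend_engines(void) { puts("suspend engines"); }
    static int  asic_reset(void)      { puts("ASIC reset"); return 0; }
    static void resume_engines(void)  { puts("re-init engines"); }
    static void restore_scratch(void) { puts("restore BIOS scratch regs"); }
    static void force_modeset(void)   { puts("force modeset"); }

    static int gpu_reset(void)
    {
            save_scratch();
            suspend_engines();
            if (asic_reset() == 0) {
                    resume_engines();
                    restore_scratch();
                    force_modeset();   /* bring the displays back up */
                    return 0;
            }
            return -1;  /* reset failed; nothing good to tell userspace */
    }

    int main(void) { return gpu_reset(); }
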
 
 /*
  * Debugfs
index bb1c122..1006549 100644
@@ -633,37 +633,37 @@ calc_fb_div(struct radeon_pll *pll,
 
        vco_freq = freq * post_div;
        /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
-       a.full = rfixed_const(pll->reference_freq);
-       feedback_divider.full = rfixed_const(vco_freq);
-       feedback_divider.full = rfixed_div(feedback_divider, a);
-       a.full = rfixed_const(ref_div);
-       feedback_divider.full = rfixed_mul(feedback_divider, a);
+       a.full = dfixed_const(pll->reference_freq);
+       feedback_divider.full = dfixed_const(vco_freq);
+       feedback_divider.full = dfixed_div(feedback_divider, a);
+       a.full = dfixed_const(ref_div);
+       feedback_divider.full = dfixed_mul(feedback_divider, a);
 
        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
                /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
-               a.full = rfixed_const(10);
-               feedback_divider.full = rfixed_mul(feedback_divider, a);
-               feedback_divider.full += rfixed_const_half(0);
-               feedback_divider.full = rfixed_floor(feedback_divider);
-               feedback_divider.full = rfixed_div(feedback_divider, a);
+               a.full = dfixed_const(10);
+               feedback_divider.full = dfixed_mul(feedback_divider, a);
+               feedback_divider.full += dfixed_const_half(0);
+               feedback_divider.full = dfixed_floor(feedback_divider);
+               feedback_divider.full = dfixed_div(feedback_divider, a);
 
                /* *fb_div = floor(feedback_divider); */
-               a.full = rfixed_floor(feedback_divider);
-               *fb_div = rfixed_trunc(a);
+               a.full = dfixed_floor(feedback_divider);
+               *fb_div = dfixed_trunc(a);
                /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
-               a.full = rfixed_const(10);
-               b.full = rfixed_mul(feedback_divider, a);
+               a.full = dfixed_const(10);
+               b.full = dfixed_mul(feedback_divider, a);
 
-               feedback_divider.full = rfixed_floor(feedback_divider);
-               feedback_divider.full = rfixed_mul(feedback_divider, a);
+               feedback_divider.full = dfixed_floor(feedback_divider);
+               feedback_divider.full = dfixed_mul(feedback_divider, a);
                feedback_divider.full = b.full - feedback_divider.full;
-               *fb_div_frac = rfixed_trunc(feedback_divider);
+               *fb_div_frac = dfixed_trunc(feedback_divider);
        } else {
                /* *fb_div = floor(feedback_divider + 0.5); */
-               feedback_divider.full += rfixed_const_half(0);
-               feedback_divider.full = rfixed_floor(feedback_divider);
+               feedback_divider.full += dfixed_const_half(0);
+               feedback_divider.full = dfixed_floor(feedback_divider);
 
-               *fb_div = rfixed_trunc(feedback_divider);
+               *fb_div = dfixed_trunc(feedback_divider);
                *fb_div_frac = 0;
        }
 
@@ -693,10 +693,10 @@ calc_fb_ref_div(struct radeon_pll *pll,
                pll_out_max = pll->pll_out_max;
        }
 
-       ffreq.full = rfixed_const(freq);
+       ffreq.full = dfixed_const(freq);
        /* max_error = ffreq * 0.0025; */
-       a.full = rfixed_const(400);
-       max_error.full = rfixed_div(ffreq, a);
+       a.full = dfixed_const(400);
+       max_error.full = dfixed_div(ffreq, a);
 
        for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
                if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
@@ -707,9 +707,9 @@ calc_fb_ref_div(struct radeon_pll *pll,
                                continue;
 
                        /* pll_out = vco / post_div; */
-                       a.full = rfixed_const(post_div);
-                       pll_out.full = rfixed_const(vco);
-                       pll_out.full = rfixed_div(pll_out, a);
+                       a.full = dfixed_const(post_div);
+                       pll_out.full = dfixed_const(vco);
+                       pll_out.full = dfixed_div(pll_out, a);
 
                        if (pll_out.full >= ffreq.full) {
                                error.full = pll_out.full - ffreq.full;
@@ -831,10 +831,6 @@ void radeon_compute_pll(struct radeon_pll *pll,
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
-       struct drm_device *dev = fb->dev;
-
-       if (fb->fbdev)
-               radeonfb_remove(dev, fb);
 
        if (radeon_fb->obj)
                drm_gem_object_unreference_unlocked(radeon_fb->obj);
@@ -856,21 +852,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
        .create_handle = radeon_user_framebuffer_create_handle,
 };
 
-struct drm_framebuffer *
-radeon_framebuffer_create(struct drm_device *dev,
-                         struct drm_mode_fb_cmd *mode_cmd,
-                         struct drm_gem_object *obj)
+void
+radeon_framebuffer_init(struct drm_device *dev,
+                       struct radeon_framebuffer *rfb,
+                       struct drm_mode_fb_cmd *mode_cmd,
+                       struct drm_gem_object *obj)
 {
-       struct radeon_framebuffer *radeon_fb;
-
-       radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
-       if (radeon_fb == NULL) {
-               return NULL;
-       }
-       drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
-       drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
-       radeon_fb->obj = obj;
-       return &radeon_fb->base;
+       rfb->obj = obj;
+       drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+       drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
 }
 
 static struct drm_framebuffer *
@@ -879,6 +869,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
                               struct drm_mode_fb_cmd *mode_cmd)
 {
        struct drm_gem_object *obj;
+       struct radeon_framebuffer *radeon_fb;
 
        obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
        if (obj ==  NULL) {
@@ -886,12 +877,26 @@ radeon_user_framebuffer_create(struct drm_device *dev,
                        "can't create framebuffer\n", mode_cmd->handle);
                return NULL;
        }
-       return radeon_framebuffer_create(dev, mode_cmd, obj);
+
+       radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+       if (radeon_fb == NULL) {
+               return NULL;
+       }
+
+       radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+
+       return &radeon_fb->base;
+}
+
+static void radeon_output_poll_changed(struct drm_device *dev)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       radeon_fb_output_poll_changed(rdev);
 }
 
 static const struct drm_mode_config_funcs radeon_mode_funcs = {
        .fb_create = radeon_user_framebuffer_create,
-       .fb_changed = radeonfb_probe,
+       .output_poll_changed = radeon_output_poll_changed
 };
 
 struct drm_prop_enum_list {
@@ -978,8 +983,11 @@ void radeon_update_display_priority(struct radeon_device *rdev)
                /* set display priority to high for r3xx, rv515 chips
                 * this avoids flickering due to underflow to the
                 * display controllers during heavy acceleration.
+                * Don't force high on rs4xx igp chips as it seems to
+                * affect the sound card.  See kernel bug 15982.
                 */
-               if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+               if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+                   !(rdev->flags & RADEON_IS_IGP))
                        rdev->disp_priority = 2;
                else
                        rdev->disp_priority = 0;
@@ -1031,15 +1039,24 @@ int radeon_modeset_init(struct radeon_device *rdev)
        }
        /* initialize hpd */
        radeon_hpd_init(rdev);
-       drm_helper_initial_config(rdev->ddev);
+
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
+       radeon_fbdev_init(rdev);
+       drm_kms_helper_poll_init(rdev->ddev);
+
        return 0;
 }
 
 void radeon_modeset_fini(struct radeon_device *rdev)
 {
+       radeon_fbdev_fini(rdev);
        kfree(rdev->mode_info.bios_hardcoded_edid);
+       radeon_pm_fini(rdev);
 
        if (rdev->mode_info.mode_config_initialized) {
+               drm_kms_helper_poll_fini(rdev->ddev);
                radeon_hpd_fini(rdev);
                drm_mode_config_cleanup(rdev->ddev);
                rdev->mode_info.mode_config_initialized = false;
@@ -1089,15 +1106,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
        }
        if (radeon_crtc->rmx_type != RMX_OFF) {
                fixed20_12 a, b;
-               a.full = rfixed_const(crtc->mode.vdisplay);
-               b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
-               radeon_crtc->vsc.full = rfixed_div(a, b);
-               a.full = rfixed_const(crtc->mode.hdisplay);
-               b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
-               radeon_crtc->hsc.full = rfixed_div(a, b);
+               a.full = dfixed_const(crtc->mode.vdisplay);
+               b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
+               radeon_crtc->vsc.full = dfixed_div(a, b);
+               a.full = dfixed_const(crtc->mode.hdisplay);
+               b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
+               radeon_crtc->hsc.full = dfixed_div(a, b);
        } else {
-               radeon_crtc->vsc.full = rfixed_const(1);
-               radeon_crtc->hsc.full = rfixed_const(1);
+               radeon_crtc->vsc.full = dfixed_const(1);
+               radeon_crtc->hsc.full = dfixed_const(1);
        }
        return true;
 }
index b3749d4..902d173 100644
  * - 2.1.0 - add square tiling interface
  * - 2.2.0 - add r6xx/r7xx const buffer support
  * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       3
+#define KMS_DRIVER_MINOR       4
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,7 +92,6 @@ int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
 int radeon_new_pll = -1;
-int radeon_dynpm = -1;
 int radeon_audio = 1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
@@ -132,9 +132,6 @@ module_param_named(tv, radeon_tv, int, 0444);
 MODULE_PARM_DESC(new_pll, "Select new PLL code");
 module_param_named(new_pll, radeon_new_pll, int, 0444);
 
-MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
-module_param_named(dynpm, radeon_dynpm, int, 0444);
-
 MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
 module_param_named(audio, radeon_audio, int, 0444);
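
Bumping KMS_DRIVER_MINOR to 4 is how userspace discovers the new crtc-id
query (see the radeon_info_ioctl hunk below); the standalone dynpm module
parameter disappears because power management is now always initialized from
the modeset path. A hedged userspace version check using libdrm's stock
drmGetVersion() call (the device path is illustrative):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <xf86drm.h>

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);  /* illustrative path */
            if (fd < 0)
                    return 1;
            drmVersionPtr v = drmGetVersion(fd);
            if (v) {
                    /* the crtc-id query needs radeon KMS >= 2.4 */
                    int ok = v->version_major > 2 ||
                             (v->version_major == 2 && v->version_minor >= 4);
                    printf("radeon KMS %d.%d.%d: crtc-id query %s\n",
                           v->version_major, v->version_minor,
                           v->version_patchlevel, ok ? "available" : "missing");
                    drmFreeVersion(v);
            }
            close(fd);
            return 0;
    }
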
 
index c5ddaf5..1ebb100 100644
@@ -309,9 +309,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
 
-       /* adjust pm to upcoming mode change */
-       radeon_pm_compute_clocks(rdev);
-
        /* set the active encoder to connector routing */
        radeon_encoder_set_active_device(encoder);
        drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -1111,8 +1108,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
        }
        radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-       /* adjust pm to dpms change */
-       radeon_pm_compute_clocks(rdev);
 }
 
 union crtc_source_param {
@@ -1546,10 +1541,49 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
 
 static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 {
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig;
        radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+       case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+       case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+               atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+               if (ASIC_IS_DCE4(rdev))
+                       /* disable the transmitter */
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+               else {
+                       /* disable the encoder and transmitter */
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+                       atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+               }
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DDI:
+               atombios_ddia_setup(encoder, ATOM_DISABLE);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+               atombios_external_tmds_setup(encoder, ATOM_DISABLE);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+               atombios_dac_setup(encoder, ATOM_DISABLE);
+               if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+                       atombios_tv_setup(encoder, ATOM_DISABLE);
+               break;
+       }
+
        if (radeon_encoder_is_digital(encoder)) {
                if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
                        r600_hdmi_disable(encoder);
index 9ac57a0..e192acf 100644
  * Authors:
  *     David Airlie
  */
-    /*
-     *  Modularization
-     */
-
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/fb.h>
 
 #include <linux/vga_switcheroo.h>
 
-struct radeon_fb_device {
+/* Object hierarchy:
+   this contains a helper plus a radeon fb;
+   the helper contains a pointer to the radeon framebuffer base class.
+*/
+struct radeon_fbdev {
        struct drm_fb_helper helper;
-       struct radeon_framebuffer       *rfb;
-       struct radeon_device            *rdev;
+       struct radeon_framebuffer rfb;
+       struct list_head fbdev_list;
+       struct radeon_device *rdev;
 };
 
 static struct fb_ops radeonfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
-       .fb_setcolreg = drm_fb_helper_setcolreg,
        .fb_fillrect = cfb_fillrect,
        .fb_copyarea = cfb_copyarea,
        .fb_imageblit = cfb_imageblit,
@@ -61,45 +61,6 @@ static struct fb_ops radeonfb_ops = {
        .fb_setcmap = drm_fb_helper_setcmap,
 };
 
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
-       struct fb_info *info;
-       struct drm_framebuffer *fb;
-       struct drm_display_mode *mode = crtc->desired_mode;
-
-       fb = crtc->fb;
-       if (fb == NULL) {
-               return 1;
-       }
-       info = fb->fbdev;
-       if (info == NULL) {
-               return 1;
-       }
-       if (mode == NULL) {
-               return 1;
-       }
-       info->var.xres = mode->hdisplay;
-       info->var.right_margin = mode->hsync_start - mode->hdisplay;
-       info->var.hsync_len = mode->hsync_end - mode->hsync_start;
-       info->var.left_margin = mode->htotal - mode->hsync_end;
-       info->var.yres = mode->vdisplay;
-       info->var.lower_margin = mode->vsync_start - mode->vdisplay;
-       info->var.vsync_len = mode->vsync_end - mode->vsync_start;
-       info->var.upper_margin = mode->vtotal - mode->vsync_end;
-       info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
-       /* avoid overflow */
-       info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
-       return 0;
-}
-EXPORT_SYMBOL(radeonfb_resize);
 
 static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
 {
@@ -125,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
        return aligned;
 }
 
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
-       .gamma_set = radeon_crtc_fb_gamma_set,
-       .gamma_get = radeon_crtc_fb_gamma_get,
-};
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+       struct radeon_bo *rbo = gobj->driver_private;
+       int ret;
+
+       ret = radeon_bo_reserve(rbo, false);
+       if (likely(ret == 0)) {
+               radeon_bo_kunmap(rbo);
+               radeon_bo_unreserve(rbo);
+       }
+       drm_gem_object_unreference_unlocked(gobj);
+}
 
-int radeonfb_create(struct drm_device *dev,
-                   uint32_t fb_width, uint32_t fb_height,
-                   uint32_t surface_width, uint32_t surface_height,
-                   uint32_t surface_depth, uint32_t surface_bpp,
-                   struct drm_framebuffer **fb_p)
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+                                        struct drm_mode_fb_cmd *mode_cmd,
+                                        struct drm_gem_object **gobj_p)
 {
-       struct radeon_device *rdev = dev->dev_private;
-       struct fb_info *info;
-       struct radeon_fb_device *rfbdev;
-       struct drm_framebuffer *fb = NULL;
-       struct radeon_framebuffer *rfb;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct radeon_device *rdev = rfbdev->rdev;
        struct drm_gem_object *gobj = NULL;
        struct radeon_bo *rbo = NULL;
-       struct device *device = &rdev->pdev->dev;
-       int size, aligned_size, ret;
-       u64 fb_gpuaddr;
-       void *fbptr = NULL;
-       unsigned long tmp;
        bool fb_tiled = false; /* useful for testing */
        u32 tiling_flags = 0;
+       int ret;
+       int aligned_size, size;
 
-       mode_cmd.width = surface_width;
-       mode_cmd.height = surface_height;
-
-       /* avivo can't scanout real 24bpp */
-       if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
-               surface_bpp = 32;
-
-       mode_cmd.bpp = surface_bpp;
        /* need to align pitch with crtc limits */
-       mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
-       mode_cmd.depth = surface_depth;
+       mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
-       size = mode_cmd.pitch * mode_cmd.height;
+       size = mode_cmd->pitch * mode_cmd->height;
        aligned_size = ALIGN(size, PAGE_SIZE);
-
        ret = radeon_gem_object_create(rdev, aligned_size, 0,
-                       RADEON_GEM_DOMAIN_VRAM,
-                       false, ttm_bo_type_kernel,
-                       &gobj);
+                                      RADEON_GEM_DOMAIN_VRAM,
+                                      false, ttm_bo_type_kernel,
+                                      &gobj);
        if (ret) {
-               printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
-                      surface_width, surface_height);
-               ret = -ENOMEM;
-               goto out;
+               printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
+                      aligned_size);
+               return -ENOMEM;
        }
        rbo = gobj->driver_private;
 
@@ -183,7 +131,7 @@ int radeonfb_create(struct drm_device *dev,
                tiling_flags = RADEON_TILING_MACRO;
 
 #ifdef __BIG_ENDIAN
-       switch (mode_cmd.bpp) {
+       switch (mode_cmd->bpp) {
        case 32:
                tiling_flags |= RADEON_TILING_SWAP_32BIT;
                break;
@@ -196,57 +144,81 @@ int radeonfb_create(struct drm_device *dev,
 
        if (tiling_flags) {
                ret = radeon_bo_set_tiling_flags(rbo,
-                                       tiling_flags | RADEON_TILING_SURFACE,
-                                       mode_cmd.pitch);
+                                                tiling_flags | RADEON_TILING_SURFACE,
+                                                mode_cmd->pitch);
                if (ret)
                        dev_err(rdev->dev, "FB failed to set tiling flags\n");
        }
-       mutex_lock(&rdev->ddev->struct_mutex);
-       fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
-       if (fb == NULL) {
-               DRM_ERROR("failed to allocate fb.\n");
-               ret = -ENOMEM;
-               goto out_unref;
-       }
+
+
        ret = radeon_bo_reserve(rbo, false);
        if (unlikely(ret != 0))
                goto out_unref;
-       ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+       ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
        if (ret) {
                radeon_bo_unreserve(rbo);
                goto out_unref;
        }
        if (fb_tiled)
                radeon_bo_check_tiling(rbo, 0, 0);
-       ret = radeon_bo_kmap(rbo, &fbptr);
+       ret = radeon_bo_kmap(rbo, NULL);
        radeon_bo_unreserve(rbo);
        if (ret) {
                goto out_unref;
        }
 
-       list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
+       *gobj_p = gobj;
+       return 0;
+out_unref:
+       radeonfb_destroy_pinned_object(gobj);
+       *gobj_p = NULL;
+       return ret;
+}
+
+static int radeonfb_create(struct radeon_fbdev *rfbdev,
+                          struct drm_fb_helper_surface_size *sizes)
+{
+       struct radeon_device *rdev = rfbdev->rdev;
+       struct fb_info *info;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_gem_object *gobj = NULL;
+       struct radeon_bo *rbo = NULL;
+       struct device *device = &rdev->pdev->dev;
+       int ret;
+       unsigned long tmp;
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+
+       /* avivo can't scanout real 24bpp */
+       if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+               sizes->surface_bpp = 32;
+
+       mode_cmd.bpp = sizes->surface_bpp;
+       mode_cmd.depth = sizes->surface_depth;
 
-       *fb_p = fb;
-       rfb = to_radeon_framebuffer(fb);
-       rdev->fbdev_rfb = rfb;
-       rdev->fbdev_rbo = rbo;
+       ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+       rbo = gobj->driver_private;
 
-       info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+       /* okay, we have an object; now allocate the framebuffer */
+       info = framebuffer_alloc(0, device);
        if (info == NULL) {
                ret = -ENOMEM;
                goto out_unref;
        }
 
-       rdev->fbdev_info = info;
-       rfbdev = info->par;
-       rfbdev->helper.funcs = &radeon_fb_helper_funcs;
-       rfbdev->helper.dev = dev;
-       ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
-                                           RADEONFB_CONN_LIMIT);
-       if (ret)
-               goto out_unref;
+       info->par = rfbdev;
+
+       radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 
-       memset_io(fbptr, 0x0, aligned_size);
+       fb = &rfbdev->rfb.base;
+
+       /* setup helper */
+       rfbdev->helper.fb = fb;
+       rfbdev->helper.fbdev = info;
+
+       memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
 
        strcpy(info->fix.id, "radeondrmfb");
 
@@ -255,17 +227,22 @@ int radeonfb_create(struct drm_device *dev,
        info->flags = FBINFO_DEFAULT;
        info->fbops = &radeonfb_ops;
 
-       tmp = fb_gpuaddr - rdev->mc.vram_start;
+       tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
        info->fix.smem_start = rdev->mc.aper_base + tmp;
-       info->fix.smem_len = size;
-       info->screen_base = fbptr;
-       info->screen_size = size;
+       info->fix.smem_len = radeon_bo_size(rbo);
+       info->screen_base = rbo->kptr;
+       info->screen_size = radeon_bo_size(rbo);
 
-       drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+       drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
        /* setup aperture base/size for vesafb takeover */
-       info->aperture_base = rdev->ddev->mode_config.fb_base;
-       info->aperture_size = rdev->mc.real_vram_size;
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+       info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+       info->apertures->ranges[0].size = rdev->mc.real_vram_size;
 
        info->fix.mmio_start = 0;
        info->fix.mmio_len = 0;
@@ -274,44 +251,55 @@ int radeonfb_create(struct drm_device *dev,
        info->pixmap.access_align = 32;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
        info->pixmap.scan_align = 1;
+
        if (info->screen_base == NULL) {
                ret = -ENOSPC;
                goto out_unref;
        }
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
        DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
        DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
-       DRM_INFO("size %lu\n", (unsigned long)size);
+       DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
        DRM_INFO("fb depth is %d\n", fb->depth);
        DRM_INFO("   pitch is %d\n", fb->pitch);
 
-       fb->fbdev = info;
-       rfbdev->rfb = rfb;
-       rfbdev->rdev = rdev;
-
-       mutex_unlock(&rdev->ddev->struct_mutex);
        vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
        return 0;
 
 out_unref:
        if (rbo) {
-               ret = radeon_bo_reserve(rbo, false);
-               if (likely(ret == 0)) {
-                       radeon_bo_kunmap(rbo);
-                       radeon_bo_unreserve(rbo);
-               }
+
        }
        if (fb && ret) {
-               list_del(&fb->filp_head);
                drm_gem_object_unreference(gobj);
                drm_framebuffer_cleanup(fb);
                kfree(fb);
        }
-       drm_gem_object_unreference(gobj);
-       mutex_unlock(&rdev->ddev->struct_mutex);
-out:
        return ret;
 }
 
+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
+                                          struct drm_fb_helper_surface_size *sizes)
+{
+       struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+       int new_fb = 0;
+       int ret;
+
+       if (!helper->fb) {
+               ret = radeonfb_create(rfbdev, sizes);
+               if (ret)
+                       return ret;
+               new_fb = 1;
+       }
+       return new_fb;
+}
+
 static char *mode_option;
 int radeon_parse_options(char *options)
 {
@@ -328,46 +316,102 @@ int radeon_parse_options(char *options)
        return 0;
 }
 
-int radeonfb_probe(struct drm_device *dev)
+void radeon_fb_output_poll_changed(struct radeon_device *rdev)
 {
-       struct radeon_device *rdev = dev->dev_private;
-       int bpp_sel = 32;
-
-       /* select 8 bpp console on RN50 or 16MB cards */
-       if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
-               bpp_sel = 8;
-
-       return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
+       drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
 }
 
-int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
 {
        struct fb_info *info;
-       struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+       struct radeon_framebuffer *rfb = &rfbdev->rfb;
        struct radeon_bo *rbo;
        int r;
 
-       if (!fb) {
-               return -EINVAL;
+       if (rfbdev->helper.fbdev) {
+               info = rfbdev->helper.fbdev;
+
+               unregister_framebuffer(info);
+               if (info->cmap.len)
+                       fb_dealloc_cmap(&info->cmap);
+               framebuffer_release(info);
        }
-       info = fb->fbdev;
-       if (info) {
-               struct radeon_fb_device *rfbdev = info->par;
+
+       if (rfb->obj) {
                rbo = rfb->obj->driver_private;
-               unregister_framebuffer(info);
                r = radeon_bo_reserve(rbo, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(rbo);
                        radeon_bo_unpin(rbo);
                        radeon_bo_unreserve(rbo);
                }
-               drm_fb_helper_free(&rfbdev->helper);
-               framebuffer_release(info);
+               drm_gem_object_unreference_unlocked(rfb->obj);
        }
+       drm_fb_helper_fini(&rfbdev->helper);
+       drm_framebuffer_cleanup(&rfb->base);
 
-       printk(KERN_INFO "unregistered panic notifier\n");
+       return 0;
+}
+
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+       .gamma_set = radeon_crtc_fb_gamma_set,
+       .gamma_get = radeon_crtc_fb_gamma_get,
+       .fb_probe = radeon_fb_find_or_create_single,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+       struct radeon_fbdev *rfbdev;
+       int bpp_sel = 32;
+
+       /* select 8 bpp console on RN50 or 16MB cards */
+       if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+               bpp_sel = 8;
+
+       rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+       if (!rfbdev)
+               return -ENOMEM;
+
+       rfbdev->rdev = rdev;
+       rdev->mode_info.rfbdev = rfbdev;
+       rfbdev->helper.funcs = &radeon_fb_helper_funcs;
 
+       drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+                          rdev->num_crtc,
+                          RADEONFB_CONN_LIMIT);
+       drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+       drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
        return 0;
 }
-EXPORT_SYMBOL(radeonfb_remove);
-MODULE_LICENSE("GPL");
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+       if (!rdev->mode_info.rfbdev)
+               return;
+
+       radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+       kfree(rdev->mode_info.rfbdev);
+       rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+       fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+       struct radeon_bo *robj;
+       int size = 0;
+
+       robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+       size += radeon_bo_size(robj);
+       return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+       if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+               return true;
+       return false;
+}
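
The radeon_fb.c rewrite above moves the fbdev console onto the generic
drm_fb_helper infrastructure: the driver allocates one radeon_fbdev, wires up
.fb_probe, and lets drm_fb_helper_initial_config() and the KMS poll helper
drive framebuffer creation instead of the old radeonfb_probe() and
radeonfb_remove() entry points. The .fb_probe contract visible in
radeon_fb_find_or_create_single() is: return 1 if a new framebuffer was
created, 0 if the existing one is reused, negative on error. A minimal
stand-alone sketch of that contract (types and the helper are stand-ins, not
the drm_fb_helper API):

    #include <stddef.h>

    struct helper { void *fb; };
    struct sizes  { int width, height, bpp; };

    /* stub: pretend allocation always succeeds */
    static int create_fb(struct helper *h, struct sizes *s)
    {
            h->fb = s;
            return 0;
    }

    static int find_or_create_single(struct helper *h, struct sizes *s)
    {
            if (h->fb)
                    return 0;          /* reuse the framebuffer we have */
            int ret = create_fb(h, s);
            if (ret)
                    return ret;        /* negative errno on failure */
            return 1;                  /* tell the helper a new fb exists */
    }
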
index d90f95b..b1f9a81 100644
@@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
                radeon_fence_ring_emit(rdev, fence);
 
        fence->emited = true;
-       fence->timeout = jiffies + ((2000 * HZ) / 1000);
        list_del(&fence->list);
        list_add_tail(&fence->list, &rdev->fence_drv.emited);
        write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
        struct list_head *i, *n;
        uint32_t seq;
        bool wake = false;
+       unsigned long cjiffies;
 
-       if (rdev == NULL) {
-               return true;
-       }
-       if (rdev->shutdown) {
-               return true;
-       }
        seq = RREG32(rdev->fence_drv.scratch_reg);
-       rdev->fence_drv.last_seq = seq;
+       if (seq != rdev->fence_drv.last_seq) {
+               rdev->fence_drv.last_seq = seq;
+               rdev->fence_drv.last_jiffies = jiffies;
+               rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+       } else {
+               cjiffies = jiffies;
+               if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+                       cjiffies -= rdev->fence_drv.last_jiffies;
+                       if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+                               /* update the timeout */
+                               rdev->fence_drv.last_timeout -= cjiffies;
+                       } else {
+                               /* the 500ms timeout has elapsed, so we
+                                * should test for GPU lockup
+                                */
+                               rdev->fence_drv.last_timeout = 1;
+                       }
+               } else {
+                       /* jiffies wrapped around; update last_jiffies and
+                        * just wait a little longer
+                        */
+                       rdev->fence_drv.last_jiffies = cjiffies;
+               }
+               return false;
+       }
        n = NULL;
        list_for_each(i, &rdev->fence_drv.emited) {
                fence = list_entry(i, struct radeon_fence, list);
@@ -171,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
        struct radeon_device *rdev;
-       unsigned long cur_jiffies;
-       unsigned long timeout;
-       bool expired = false;
+       unsigned long irq_flags, timeout;
+       u32 seq;
        int r;
 
        if (fence == NULL) {
@@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
        if (radeon_fence_signaled(fence)) {
                return 0;
        }
-
+       timeout = rdev->fence_drv.last_timeout;
 retry:
-       cur_jiffies = jiffies;
-       timeout = HZ / 100;
-       if (time_after(fence->timeout, cur_jiffies)) {
-               timeout = fence->timeout - cur_jiffies;
-       }
-
+       /* save current sequence used to check for GPU lockup */
+       seq = rdev->fence_drv.last_seq;
        if (intr) {
                radeon_irq_kms_sw_irq_get(rdev);
                r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
                                radeon_fence_signaled(fence), timeout);
                radeon_irq_kms_sw_irq_put(rdev);
-               if (unlikely(r < 0))
+               if (unlikely(r < 0)) {
                        return r;
+               }
        } else {
                radeon_irq_kms_sw_irq_get(rdev);
                r = wait_event_timeout(rdev->fence_drv.queue,
@@ -206,38 +220,36 @@ retry:
                radeon_irq_kms_sw_irq_put(rdev);
        }
        if (unlikely(!radeon_fence_signaled(fence))) {
-               if (unlikely(r == 0)) {
-                       expired = true;
+               /* we were interrupted for some reason and the fence
+                * isn't signaled yet, resume the wait
+                */
+               if (r) {
+                       timeout = r;
+                       goto retry;
                }
-               if (unlikely(expired)) {
-                       timeout = 1;
-                       if (time_after(cur_jiffies, fence->timeout)) {
-                               timeout = cur_jiffies - fence->timeout;
-                       }
-                       timeout = jiffies_to_msecs(timeout);
-                       if (timeout > 500) {
-                               DRM_ERROR("fence(%p:0x%08X) %lums timeout "
-                                         "going to reset GPU\n",
-                                         fence, fence->seq, timeout);
-                               radeon_gpu_reset(rdev);
-                               WREG32(rdev->fence_drv.scratch_reg, fence->seq);
-                       }
+               /* don't protect read access to rdev->fence_drv.last_seq:
+                * if we are experiencing a lockup the value doesn't change
+                */
+               if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+                       /* good news: we believe it's a lockup */
+                       WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+                       /* FIXME: what should we do ? marking everyone
+                        * as signaled for now
+                        */
+                       rdev->gpu_lockup = true;
+                       r = radeon_gpu_reset(rdev);
+                       if (r)
+                               return r;
+                       WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+                       rdev->gpu_lockup = false;
                }
+               timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+               write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+               rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+               rdev->fence_drv.last_jiffies = jiffies;
+               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
                goto retry;
        }
-       if (unlikely(expired)) {
-               rdev->fence_drv.count_timeout++;
-               cur_jiffies = jiffies;
-               timeout = 1;
-               if (time_after(cur_jiffies, fence->timeout)) {
-                       timeout = cur_jiffies - fence->timeout;
-               }
-               timeout = jiffies_to_msecs(timeout);
-               DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
-                         fence, fence->seq, timeout);
-               DRM_ERROR("last signaled fence(0x%08X)\n",
-                         rdev->fence_drv.last_seq);
-       }
        return 0;
 }
 
@@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
        INIT_LIST_HEAD(&rdev->fence_drv.created);
        INIT_LIST_HEAD(&rdev->fence_drv.emited);
        INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-       rdev->fence_drv.count_timeout = 0;
        init_waitqueue_head(&rdev->fence_drv.queue);
        rdev->fence_drv.initialized = true;
        write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
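
The fence rework above drops the fixed 2-second-per-fence deadline in favour
of per-device progress tracking: last_seq and last_jiffies record the last
time the scratch register advanced, last_timeout counts down one
RADEON_FENCE_JIFFIES_TIMEOUT window, and only when a whole window passes with
no progress does the wait path consult radeon_gpu_is_lockup() and, if it
agrees, call radeon_gpu_reset(). A compressed userspace model of that
countdown (simplified, not line-for-line; the window length is arbitrary):

    #include <stdio.h>
    #include <stdbool.h>

    #define WINDOW 50UL  /* stands in for RADEON_FENCE_JIFFIES_TIMEOUT */

    static unsigned long last_jiffies, last_timeout = WINDOW;
    static unsigned int last_seq;

    /* returns true once a full window elapses with no new sequence number */
    static bool no_progress(unsigned int seq, unsigned long now)
    {
            if (seq != last_seq) {          /* progress: restart the window */
                    last_seq = seq;
                    last_jiffies = now;
                    last_timeout = WINDOW;
                    return false;
            }
            unsigned long elapsed = now - last_jiffies;
            last_jiffies = now;
            if (last_timeout > elapsed) {
                    last_timeout -= elapsed;   /* shrink the window */
                    return false;
            }
            return true;                    /* time to suspect a lockup */
    }

    int main(void)
    {
            for (unsigned long t = 0; t <= 120; t += 10)
                    if (no_progress(42, t)) {  /* seq never advances */
                            printf("t=%lu: no progress, check lockup\n", t);
                            break;
                    }
            return 0;
    }
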
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
deleted file mode 100644
index 3d4d84e..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- */
-#ifndef RADEON_FIXED_H
-#define RADEON_FIXED_H
-
-typedef union rfixed {
-       u32 full;
-} fixed20_12;
-
-
-#define rfixed_const(A) (u32)(((A) << 12))/*  + ((B + 0.000122)*4096)) */
-#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
-#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
-#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
-#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
-#define fixed_init(A) { .full = rfixed_const((A)) }
-#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
-#define rfixed_trunc(A) ((A).full >> 12)
-
-static inline u32 rfixed_floor(fixed20_12 A)
-{
-       u32 non_frac = rfixed_trunc(A);
-
-       return rfixed_const(non_frac);
-}
-
-static inline u32 rfixed_ceil(fixed20_12 A)
-{
-       u32 non_frac = rfixed_trunc(A);
-
-       if (A.full > rfixed_const(non_frac))
-               return rfixed_const(non_frac + 1);
-       else
-               return rfixed_const(non_frac);
-}
-
-static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
-{
-       u64 tmp = ((u64)A.full << 13);
-
-       do_div(tmp, B.full);
-       tmp += 1;
-       tmp /= 2;
-       return lower_32_bits(tmp);
-}
-#endif
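
radeon_fixed.h goes away because the same 20.12 fixed-point helpers now live
in the shared <drm/drm_fixed.h> under the dfixed_* names, which is what all
the rfixed_* to dfixed_* renames in this diff are about. The deleted macros
spell out the semantics: 12 fractional bits, round-to-nearest multiply and
divide. A standalone demo of those semantics (helpers re-stated locally, not
the drm_fixed.h API):

    #include <stdio.h>
    #include <stdint.h>

    #define FP_CONST(a)      ((uint32_t)((a) << 12))
    #define FP_CONST_HALF(a) ((uint32_t)(((a) << 12) + 2048))
    #define FP_TRUNC(a)      ((a) >> 12)
    #define FP_MUL(a, b)     ((uint32_t)(((uint64_t)(a) * (b) + 2048) >> 12))

    static uint32_t fp_div(uint32_t a, uint32_t b)
    {
            uint64_t t = (uint64_t)a << 13;  /* extra bit, round to nearest */
            t /= b;
            return (uint32_t)((t + 1) / 2);
    }

    int main(void)
    {
            uint32_t b = FP_CONST_HALF(2);            /* 2.5 */
            uint32_t q = fp_div(FP_CONST(7), b);      /* 7.0 / 2.5 = 2.8 */
            printf("7.0 / 2.5 -> %u + %u/4096\n", FP_TRUNC(q), q & 0xfff);
            printf("2.5 * 2.5 -> %u + %u/4096\n",
                   FP_TRUNC(FP_MUL(b, b)), FP_MUL(b, b) & 0xfff);
            return 0;
    }
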
index 1770d3c..e65b903 100644
@@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        int i, j;
 
        if (!rdev->gart.ready) {
-               DRM_ERROR("trying to bind memory to unitialized GART !\n");
+               WARN(1, "trying to bind memory to uninitialized GART!\n");
                return -EINVAL;
        }
        t = offset / RADEON_GPU_PAGE_SIZE;
index ef92d14..a72a3ee 100644
@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
        if (robj) {
                radeon_bo_unref(&robj);
        }
+
+       drm_gem_object_release(gobj);
+       kfree(gobj);
 }
 
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
        args->vram_visible = rdev->mc.real_vram_size;
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
-       if (rdev->fbdev_rbo)
-               args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+       args->vram_visible -= radeon_fbdev_total_size(rdev);
        args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
                RADEON_IB_POOL_SIZE*64*1024;
        return 0;
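
The accounting change above replaces the direct fbdev_rbo lookup with
radeon_fbdev_total_size(), matching the fbdev rework: userspace sees visible
VRAM minus stolen VGA memory minus the kernel fbdev buffer(s), and GART minus
the ring, a scratch page and the IB pool. A toy illustration of that
arithmetic (all sizes made up; the IB pool figure assumes RADEON_IB_POOL_SIZE
is 16, as in this era):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t vram = 256ULL << 20, stolen = 8ULL << 20;
            uint64_t fbdev = 8ULL << 20;              /* kernel console fb */
            uint64_t gtt = 512ULL << 20, ring = 1ULL << 20;
            uint64_t ib_pool = 16 * 64 * 1024;        /* assumed pool size */

            printf("vram_visible = %llu MiB\n",
                   (unsigned long long)((vram - stolen - fbdev) >> 20));
            printf("gart usable  = %llu KiB\n",
                   (unsigned long long)((gtt - ring - 4096 - ib_pool) >> 10));
            return 0;
    }
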
index a212041..059bfa4 100644
@@ -26,6 +26,7 @@
  *          Jerome Glisse
  */
 #include "drmP.h"
+#include "drm_crtc_helper.h"
 #include "radeon_drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
@@ -55,7 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work)
                        radeon_connector_hotplug(connector);
        }
        /* Just fire off a uevent and let userspace tell us what to do */
-       drm_sysfs_hotplug_event(dev);
+       drm_helper_hpd_irq_event(dev);
 }
 
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
@@ -67,6 +68,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 
        /* Disable *all* interrupts */
        rdev->irq.sw_int = false;
+       rdev->irq.gui_idle = false;
        for (i = 0; i < rdev->num_crtc; i++)
                rdev->irq.crtc_vblank_int[i] = false;
        for (i = 0; i < 6; i++)
@@ -96,6 +98,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
        }
        /* Disable *all* interrupts */
        rdev->irq.sw_int = false;
+       rdev->irq.gui_idle = false;
        for (i = 0; i < rdev->num_crtc; i++)
                rdev->irq.crtc_vblank_int[i] = false;
        for (i = 0; i < 6; i++)
index c633319..0406835 100644
@@ -98,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_info *info;
+       struct radeon_mode_info *minfo = &rdev->mode_info;
        uint32_t *value_ptr;
        uint32_t value;
+       struct drm_crtc *crtc;
+       int i, found;
 
        info = data;
        value_ptr = (uint32_t *)((unsigned long)info->value);
+       value = *value_ptr;
        switch (info->request) {
        case RADEON_INFO_DEVICE_ID:
                value = dev->pci_device;
@@ -116,6 +120,20 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        case RADEON_INFO_ACCEL_WORKING:
                value = rdev->accel_working;
                break;
+       case RADEON_INFO_CRTC_FROM_ID:
+               for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+                       crtc = (struct drm_crtc *)minfo->crtcs[i];
+                       if (crtc && crtc->base.id == value) {
+                               value = i;
+                               found = 1;
+                               break;
+                       }
+               }
+               if (!found) {
+                       DRM_DEBUG("unknown crtc id %d\n", value);
+                       return -EINVAL;
+               }
+               break;
        default:
                DRM_DEBUG("Invalid request %d\n", info->request);
                return -EINVAL;
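
From userspace the new request looks like any other radeon info query: the kernel reads the crtc id through the value pointer and writes the driver's crtc index back through the same pointer (the copy-out sits in the unchanged tail of the function). A libdrm sketch, assuming the accompanying radeon_drm.h update exports RADEON_INFO_CRTC_FROM_ID and that the header is on libdrm's include path:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

static int crtc_index_from_id(int fd, uint32_t crtc_id, uint32_t *index)
{
	struct drm_radeon_info info;
	uint32_t value = crtc_id;	/* in: drm crtc id, out: crtc index */
	int r;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_CRTC_FROM_ID;
	info.value = (uint64_t)(uintptr_t)&value;

	r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
	if (r == 0)
		*index = value;
	return r;
}
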
index 88865e3..e1e5255 100644 (file)
@@ -26,7 +26,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
 #include "radeon.h"
 #include "atom.h"
 
@@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
+               radeon_crtc->enabled = true;
+               /* adjust pm to dpms changes BEFORE enabling crtcs */
+               radeon_pm_compute_clocks(rdev);
                if (radeon_crtc->crtc_id)
                        WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
                else {
@@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
                                                                                    RADEON_CRTC_DISP_REQ_EN_B));
                        WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
                }
+               radeon_crtc->enabled = false;
+               /* adjust pm to dpms changes AFTER disabling crtcs */
+               radeon_pm_compute_clocks(rdev);
                break;
        }
 }
@@ -966,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
 {
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
+       /* adjust pm to upcoming mode change */
+       radeon_pm_compute_clocks(rdev);
+
        if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
                return false;
        return true;
index 0274abe..5a13b3e 100644 (file)
@@ -116,8 +116,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
        else
                radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-       /* adjust pm to dpms change */
-       radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -217,11 +215,6 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
                                     struct drm_display_mode *adjusted_mode)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct radeon_device *rdev = dev->dev_private;
-
-       /* adjust pm to upcoming mode change */
-       radeon_pm_compute_clocks(rdev);
 
        /* set the active encoder to connector routing */
        radeon_encoder_set_active_device(encoder);
@@ -286,8 +279,6 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
        else
                radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-       /* adjust pm to dpms change */
-       radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -474,8 +465,6 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
        else
                radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-       /* adjust pm to dpms change */
-       radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -642,8 +631,6 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
        else
                radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-       /* adjust pm to dpms change */
-       radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -852,8 +839,6 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
        else
                radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
-       /* adjust pm to dpms change */
-       radeon_pm_compute_clocks(rdev);
 }
 
 static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
index 5413fcd..67358ba 100644 (file)
 #include <drm_mode.h>
 #include <drm_edid.h>
 #include <drm_dp_helper.h>
+#include <drm_fixed.h>
 #include <linux/i2c.h>
 #include <linux/i2c-id.h>
 #include <linux/i2c-algo-bit.h>
-#include "radeon_fixed.h"
 
+struct radeon_bo;
 struct radeon_device;
 
 #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
@@ -65,6 +66,16 @@ enum radeon_tv_std {
        TV_STD_PAL_N,
 };
 
+enum radeon_hpd_id {
+       RADEON_HPD_1 = 0,
+       RADEON_HPD_2,
+       RADEON_HPD_3,
+       RADEON_HPD_4,
+       RADEON_HPD_5,
+       RADEON_HPD_6,
+       RADEON_HPD_NONE = 0xff,
+};
+
 /* radeon gpio-based i2c
  * 1. "mask" reg and bits
  *    grabs the gpio pins for software use
@@ -84,7 +95,7 @@ struct radeon_i2c_bus_rec {
        /* id used by atom */
        uint8_t i2c_id;
        /* id used by atom */
-       uint8_t hpd_id;
+       enum radeon_hpd_id hpd;
        /* can be used with hw i2c engine */
        bool hw_capable;
        /* uses multi-media i2c engine */
@@ -202,6 +213,8 @@ enum radeon_dvo_chip {
        DVO_SIL1178,
 };
 
+struct radeon_fbdev;
+
 struct radeon_mode_info {
        struct atom_context *atom_context;
        struct card_info *atom_card_info;
@@ -218,6 +231,9 @@ struct radeon_mode_info {
        struct drm_property *tmds_pll_property;
        /* hardcoded DFP edid from BIOS */
        struct edid *bios_hardcoded_edid;
+
+       /* pointer to fbdev info structure */
+       struct radeon_fbdev *rfbdev;
 };
 
 #define MAX_H_CODE_TIMING_LEN 32
@@ -339,6 +355,7 @@ struct radeon_encoder {
        enum radeon_rmx_type rmx_type;
        struct drm_display_mode native_mode;
        void *enc_priv;
+       int audio_polling_active;
        int hdmi_offset;
        int hdmi_config_offset;
        int hdmi_audio_workaround;
@@ -363,16 +380,6 @@ struct radeon_gpio_rec {
        u32 mask;
 };
 
-enum radeon_hpd_id {
-       RADEON_HPD_NONE = 0,
-       RADEON_HPD_1,
-       RADEON_HPD_2,
-       RADEON_HPD_3,
-       RADEON_HPD_4,
-       RADEON_HPD_5,
-       RADEON_HPD_6,
-};
-
 struct radeon_hpd {
        enum radeon_hpd_id hpd;
        u8 plugged_state;
@@ -532,11 +539,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                     u16 blue, int regno);
 extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
                                     u16 *blue, int regno);
-struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
-                                                 struct drm_mode_fb_cmd *mode_cmd,
-                                                 struct drm_gem_object *obj);
-
-int radeonfb_probe(struct drm_device *dev);
+void radeon_framebuffer_init(struct drm_device *dev,
+                            struct radeon_framebuffer *rfb,
+                            struct drm_mode_fb_cmd *mode_cmd,
+                            struct drm_gem_object *obj);
 
 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
@@ -575,4 +581,13 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
 void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
                               struct drm_display_mode *mode,
                               struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev);
 #endif
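
Two structural notes on this header: radeon_hpd_id now has to be declared before radeon_i2c_bus_rec, since the struct gained a field of the enum type, and its values are recoded so HPD 1-6 run from 0 with RADEON_HPD_NONE pushed out to 0xff, presumably so a valid id can index per-pin tables directly. The sentinel-at-0xff shape in isolation:

#include <stdbool.h>
#include <stddef.h>

enum hpd_id { HPD_1 = 0, HPD_2, HPD_3, HPD_NONE = 0xff };

/* a valid id doubles as an array index: pins[id], no "- 1" fixup */
static bool hpd_valid(enum hpd_id id, size_t npins)
{
	return id != HPD_NONE && (size_t)id < npins;
}

The fbdev accessors declared at the bottom replace the old radeonfb_probe() entry point; radeon_fbdev_total_size() is what the radeon_gem.c hunk above now uses for its vram_visible accounting.
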
index 1227747..d5b9373 100644 (file)
@@ -112,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
+       mutex_lock(&rdev->vram_mutex);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, 0, 0, !kernel, NULL, size,
                        &radeon_ttm_bo_destroy);
+       mutex_unlock(&rdev->vram_mutex);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(rdev->dev,
@@ -166,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
 void radeon_bo_unref(struct radeon_bo **bo)
 {
        struct ttm_buffer_object *tbo;
+       struct radeon_device *rdev;
 
        if ((*bo) == NULL)
                return;
+       rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
+       mutex_lock(&rdev->vram_mutex);
        ttm_bo_unref(&tbo);
+       mutex_unlock(&rdev->vram_mutex);
        if (tbo == NULL)
                *bo = NULL;
 }
@@ -192,7 +198,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
@@ -216,7 +222,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
@@ -295,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head)
                r = radeon_bo_reserve(lobj->bo, false);
                if (unlikely(r != 0))
                        return r;
+               lobj->reserved = true;
        }
        return 0;
 }
@@ -305,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head)
 
        list_for_each_entry(lobj, head, list) {
                /* only unreserve object we successfully reserved */
-               if (radeon_bo_is_reserved(lobj->bo))
+               if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
                        radeon_bo_unreserve(lobj->bo);
        }
 }
@@ -316,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head)
        struct radeon_bo *bo;
        int r;
 
+       list_for_each_entry(lobj, head, list) {
+               lobj->reserved = false;
+       }
        r = radeon_bo_list_reserve(head);
        if (unlikely(r != 0)) {
                return r;
@@ -331,7 +341,7 @@ int radeon_bo_list_validate(struct list_head *head)
                                                                lobj->rdomain);
                        }
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
-                                               true, false);
+                                               true, false, false);
                        if (unlikely(r))
                                return r;
                }
@@ -499,11 +509,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
        radeon_bo_check_tiling(rbo, 0, 1);
 }
 
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+       struct radeon_device *rdev;
        struct radeon_bo *rbo;
+       unsigned long offset, size;
+       int r;
+
        if (!radeon_ttm_bo_is_radeon_bo(bo))
-               return;
+               return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
+       rdev = rbo->rdev;
+       if (bo->mem.mem_type == TTM_PL_VRAM) {
+               size = bo->mem.num_pages << PAGE_SHIFT;
+               offset = bo->mem.mm_node->start << PAGE_SHIFT;
+               if ((offset + size) > rdev->mc.visible_vram_size) {
+                       /* hurrah the memory is not visible ! */
+                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+                       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+                       r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+                       if (unlikely(r != 0))
+                               return r;
+                       offset = bo->mem.mm_node->start << PAGE_SHIFT;
+                       /* this should not happen */
+                       if ((offset + size) > rdev->mc.visible_vram_size)
+                               return -EINVAL;
+               }
+       }
+       return 0;
 }
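
The fault hook now returns an error code because it can do real work: when a faulting object sits above the CPU-visible slice of VRAM, it is revalidated with lpfn clamped to the visible window. The underlying test is plain interval arithmetic:

#include <stdbool.h>
#include <stdint.h>

/* an object at [offset, offset + size) is CPU-mappable only if it
 * ends at or below the visible aperture limit */
static bool bo_cpu_visible(uint64_t offset, uint64_t size,
			   uint64_t visible_vram_size)
{
	return offset + size <= visible_vram_size;
}
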
index 7ab43de..353998d 100644 (file)
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                                        struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 #endif
index a4b5749..a8d162c 100644 (file)
 #include "drmP.h"
 #include "radeon.h"
 #include "avivod.h"
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
 
 #define RADEON_IDLE_LOOP_MS 100
 #define RADEON_RECLOCK_DELAY_MS 200
 #define RADEON_WAIT_VBLANK_TIMEOUT 200
+#define RADEON_WAIT_IDLE_TIMEOUT 200
 
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_idle_work_handler(struct work_struct *work);
-static int radeon_debugfs_pm_init(struct radeon_device *rdev);
-
-static const char *pm_state_names[4] = {
-       "PM_STATE_DISABLED",
-       "PM_STATE_MINIMUM",
-       "PM_STATE_PAUSED",
-       "PM_STATE_ACTIVE"
-};
 
-static const char *pm_state_types[5] = {
-       "Default",
-       "Powersave",
-       "Battery",
-       "Balanced",
-       "Performance",
-};
+#define ACPI_AC_CLASS           "ac_adapter"
 
-static void radeon_print_power_mode_info(struct radeon_device *rdev)
+#ifdef CONFIG_ACPI
+static int radeon_acpi_event(struct notifier_block *nb,
+                            unsigned long val,
+                            void *data)
 {
-       int i, j;
-       bool is_default;
+       struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+       struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
 
-       DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
-       for (i = 0; i < rdev->pm.num_power_states; i++) {
-               if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
-                       is_default = true;
+       if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+               if (power_supply_is_system_supplied() > 0)
+                       DRM_DEBUG("pm: AC\n");
                else
-                       is_default = false;
-               DRM_INFO("State %d %s %s\n", i,
-                        pm_state_types[rdev->pm.power_state[i].type],
-                        is_default ? "(default)" : "");
-               if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
-                       DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
-               DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
-               for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
-                       if (rdev->flags & RADEON_IS_IGP)
-                               DRM_INFO("\t\t%d engine: %d\n",
-                                        j,
-                                        rdev->pm.power_state[i].clock_info[j].sclk * 10);
-                       else
-                               DRM_INFO("\t\t%d engine/memory: %d/%d\n",
-                                        j,
-                                        rdev->pm.power_state[i].clock_info[j].sclk * 10,
-                                        rdev->pm.power_state[i].clock_info[j].mclk * 10);
+                       DRM_DEBUG("pm: DC\n");
+
+               if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+                       if (rdev->pm.profile == PM_PROFILE_AUTO) {
+                               mutex_lock(&rdev->pm.mutex);
+                               radeon_pm_update_profile(rdev);
+                               radeon_pm_set_clocks(rdev);
+                               mutex_unlock(&rdev->pm.mutex);
+                       }
                }
        }
+
+       return NOTIFY_OK;
 }
+#endif
 
-static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
-                                                          enum radeon_pm_state_type type)
+static void radeon_pm_update_profile(struct radeon_device *rdev)
 {
-       int i, j;
-       enum radeon_pm_state_type wanted_types[2];
-       int wanted_count;
-
-       switch (type) {
-       case POWER_STATE_TYPE_DEFAULT:
-       default:
-               return rdev->pm.default_power_state;
-       case POWER_STATE_TYPE_POWERSAVE:
-               if (rdev->flags & RADEON_IS_MOBILITY) {
-                       wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
-                       wanted_types[1] = POWER_STATE_TYPE_BATTERY;
-                       wanted_count = 2;
-               } else {
-                       wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
-                       wanted_count = 1;
-               }
+       switch (rdev->pm.profile) {
+       case PM_PROFILE_DEFAULT:
+               rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
                break;
-       case POWER_STATE_TYPE_BATTERY:
-               if (rdev->flags & RADEON_IS_MOBILITY) {
-                       wanted_types[0] = POWER_STATE_TYPE_BATTERY;
-                       wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
-                       wanted_count = 2;
+       case PM_PROFILE_AUTO:
+               if (power_supply_is_system_supplied() > 0) {
+                       if (rdev->pm.active_crtc_count > 1)
+                               rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+                       else
+                               rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
                } else {
-                       wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
-                       wanted_count = 1;
+                       if (rdev->pm.active_crtc_count > 1)
+                               rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+                       else
+                               rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
                }
                break;
-       case POWER_STATE_TYPE_BALANCED:
-       case POWER_STATE_TYPE_PERFORMANCE:
-               wanted_types[0] = type;
-               wanted_count = 1;
+       case PM_PROFILE_LOW:
+               if (rdev->pm.active_crtc_count > 1)
+                       rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+               else
+                       rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
                break;
-       }
-
-       for (i = 0; i < wanted_count; i++) {
-               for (j = 0; j < rdev->pm.num_power_states; j++) {
-                       if (rdev->pm.power_state[j].type == wanted_types[i])
-                               return &rdev->pm.power_state[j];
-               }
-       }
-
-       return rdev->pm.default_power_state;
-}
-
-static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
-                                                           struct radeon_power_state *power_state,
-                                                           enum radeon_pm_clock_mode_type type)
-{
-       switch (type) {
-       case POWER_MODE_TYPE_DEFAULT:
-       default:
-               return power_state->default_clock_mode;
-       case POWER_MODE_TYPE_LOW:
-               return &power_state->clock_info[0];
-       case POWER_MODE_TYPE_MID:
-               if (power_state->num_clock_modes > 2)
-                       return &power_state->clock_info[1];
+       case PM_PROFILE_HIGH:
+               if (rdev->pm.active_crtc_count > 1)
+                       rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
                else
-                       return &power_state->clock_info[0];
+                       rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
                break;
-       case POWER_MODE_TYPE_HIGH:
-               return &power_state->clock_info[power_state->num_clock_modes - 1];
        }
 
+       if (rdev->pm.active_crtc_count == 0) {
+               rdev->pm.requested_power_state_index =
+                       rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+               rdev->pm.requested_clock_mode_index =
+                       rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+       } else {
+               rdev->pm.requested_power_state_index =
+                       rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+               rdev->pm.requested_clock_mode_index =
+                       rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+       }
 }
 
-static void radeon_get_power_state(struct radeon_device *rdev,
-                                  enum radeon_pm_action action)
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
 {
-       switch (action) {
-       case PM_ACTION_MINIMUM:
-               rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
-               rdev->pm.requested_clock_mode =
-                       radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
-               break;
-       case PM_ACTION_DOWNCLOCK:
-               rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
-               rdev->pm.requested_clock_mode =
-                       radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
-               break;
-       case PM_ACTION_UPCLOCK:
-               rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
-               rdev->pm.requested_clock_mode =
-                       radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
-               break;
-       case PM_ACTION_NONE:
-       default:
-               DRM_ERROR("Requested mode for not defined action\n");
+       struct radeon_bo *bo, *n;
+
+       if (list_empty(&rdev->gem.objects))
                return;
+
+       list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+               if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+                       ttm_bo_unmap_virtual(&bo->tbo);
        }
-       DRM_INFO("Requested: e: %d m: %d p: %d\n",
-                rdev->pm.requested_clock_mode->sclk,
-                rdev->pm.requested_clock_mode->mclk,
-                rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+
+       if (rdev->gart.table.vram.robj)
+               ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
+
+       if (rdev->stollen_vga_memory)
+               ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
+
+       if (rdev->r600_blit.shader_obj)
+               ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
 }
 
-static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
 {
        if (rdev->pm.active_crtcs) {
                rdev->pm.vblank_sync = false;
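
The AC-adapter hook above is built on the generic ACPI bus notifier chain plus the power-supply core; on an ac_adapter event it re-picks the profile when the driver is in auto mode. The registration pattern, reduced to a sketch (every name except the register/unregister and power-supply calls is a placeholder):

#include <linux/acpi.h>
#include <linux/notifier.h>
#include <linux/power_supply.h>
#include <linux/string.h>
#include <linux/types.h>

static int my_acpi_event(struct notifier_block *nb,
			 unsigned long val, void *data)
{
	struct acpi_bus_event *entry = data;

	if (strcmp(entry->device_class, "ac_adapter") == 0) {
		/* > 0 means running on AC power */
		bool on_ac = power_supply_is_system_supplied() > 0;
		/* re-evaluate the active profile here when on_ac changes */
		(void)on_ac;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_acpi_nb = {
	.notifier_call = my_acpi_event,
};

/* init:     register_acpi_notifier(&my_acpi_nb);
 * teardown: unregister_acpi_notifier(&my_acpi_nb); */
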
@@ -192,73 +150,332 @@ static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
 
 static void radeon_set_power_state(struct radeon_device *rdev)
 {
-       /* if *_clock_mode are the same, *_power_state are as well */
-       if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
+       u32 sclk, mclk;
+
+       if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+           (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
                return;
 
-       DRM_INFO("Setting: e: %d m: %d p: %d\n",
-                rdev->pm.requested_clock_mode->sclk,
-                rdev->pm.requested_clock_mode->mclk,
-                rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
-
-       /* set pcie lanes */
-       /* TODO */
-
-       /* set voltage */
-       /* TODO */
-
-       /* set engine clock */
-       radeon_sync_with_vblank(rdev);
-       radeon_pm_debug_check_in_vbl(rdev, false);
-       radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
-       radeon_pm_debug_check_in_vbl(rdev, true);
-
-#if 0
-       /* set memory clock */
-       if (rdev->asic->set_memory_clock) {
-               radeon_sync_with_vblank(rdev);
-               radeon_pm_debug_check_in_vbl(rdev, false);
-               radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
-               radeon_pm_debug_check_in_vbl(rdev, true);
+       if (radeon_gui_idle(rdev)) {
+               sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                       clock_info[rdev->pm.requested_clock_mode_index].sclk;
+               if (sclk > rdev->clock.default_sclk)
+                       sclk = rdev->clock.default_sclk;
+
+               mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+                       clock_info[rdev->pm.requested_clock_mode_index].mclk;
+               if (mclk > rdev->clock.default_mclk)
+                       mclk = rdev->clock.default_mclk;
+
+               /* voltage, pcie lanes, etc.*/
+               radeon_pm_misc(rdev);
+
+               if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+                       radeon_sync_with_vblank(rdev);
+
+                       if (!radeon_pm_in_vbl(rdev))
+                               return;
+
+                       radeon_pm_prepare(rdev);
+                       /* set engine clock */
+                       if (sclk != rdev->pm.current_sclk) {
+                               radeon_pm_debug_check_in_vbl(rdev, false);
+                               radeon_set_engine_clock(rdev, sclk);
+                               radeon_pm_debug_check_in_vbl(rdev, true);
+                               rdev->pm.current_sclk = sclk;
+                               DRM_DEBUG("Setting: e: %d\n", sclk);
+                       }
+
+                       /* set memory clock */
+                       if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+                               radeon_pm_debug_check_in_vbl(rdev, false);
+                               radeon_set_memory_clock(rdev, mclk);
+                               radeon_pm_debug_check_in_vbl(rdev, true);
+                               rdev->pm.current_mclk = mclk;
+                               DRM_DEBUG("Setting: m: %d\n", mclk);
+                       }
+                       radeon_pm_finish(rdev);
+               } else {
+                       /* set engine clock */
+                       if (sclk != rdev->pm.current_sclk) {
+                               radeon_sync_with_vblank(rdev);
+                               radeon_pm_prepare(rdev);
+                               radeon_set_engine_clock(rdev, sclk);
+                               radeon_pm_finish(rdev);
+                               rdev->pm.current_sclk = sclk;
+                               DRM_DEBUG("Setting: e: %d\n", sclk);
+                       }
+                       /* set memory clock */
+                       if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+                               radeon_sync_with_vblank(rdev);
+                               radeon_pm_prepare(rdev);
+                               radeon_set_memory_clock(rdev, mclk);
+                               radeon_pm_finish(rdev);
+                               rdev->pm.current_mclk = mclk;
+                               DRM_DEBUG("Setting: m: %d\n", mclk);
+                       }
+               }
+
+               rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+               rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+       } else
+               DRM_DEBUG("pm: GUI not idle!!!\n");
+}
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+       int i;
+
+       mutex_lock(&rdev->ddev->struct_mutex);
+       mutex_lock(&rdev->vram_mutex);
+       mutex_lock(&rdev->cp.mutex);
+
+       /* gui idle int has issues on older chips it seems */
+       if (rdev->family >= CHIP_R600) {
+               if (rdev->irq.installed) {
+                       /* wait for GPU idle */
+                       rdev->pm.gui_idle = false;
+                       rdev->irq.gui_idle = true;
+                       radeon_irq_set(rdev);
+                       wait_event_interruptible_timeout(
+                               rdev->irq.idle_queue, rdev->pm.gui_idle,
+                               msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
+                       rdev->irq.gui_idle = false;
+                       radeon_irq_set(rdev);
+               }
+       } else {
+               if (rdev->cp.ready) {
+                       struct radeon_fence *fence;
+                       radeon_ring_alloc(rdev, 64);
+                       radeon_fence_create(rdev, &fence);
+                       radeon_fence_emit(rdev, fence);
+                       radeon_ring_commit(rdev);
+                       radeon_fence_wait(fence, false);
+                       radeon_fence_unref(&fence);
+               }
        }
-#endif
+       radeon_unmap_vram_bos(rdev);
+
+       if (rdev->irq.installed) {
+               for (i = 0; i < rdev->num_crtc; i++) {
+                       if (rdev->pm.active_crtcs & (1 << i)) {
+                               rdev->pm.req_vblank |= (1 << i);
+                               drm_vblank_get(rdev->ddev, i);
+                       }
+               }
+       }
+
+       radeon_set_power_state(rdev);
+
+       if (rdev->irq.installed) {
+               for (i = 0; i < rdev->num_crtc; i++) {
+                       if (rdev->pm.req_vblank & (1 << i)) {
+                               rdev->pm.req_vblank &= ~(1 << i);
+                               drm_vblank_put(rdev->ddev, i);
+                       }
+               }
+       }
+
+       /* update display watermarks based on new power state */
+       radeon_update_bandwidth_info(rdev);
+       if (rdev->pm.active_crtc_count)
+               radeon_bandwidth_update(rdev);
+
+       rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+       mutex_unlock(&rdev->cp.mutex);
+       mutex_unlock(&rdev->vram_mutex);
+       mutex_unlock(&rdev->ddev->struct_mutex);
+}
+
+static ssize_t radeon_get_pm_profile(struct device *dev,
+                                    struct device_attribute *attr,
+                                    char *buf)
+{
+       struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+       struct radeon_device *rdev = ddev->dev_private;
+       int cp = rdev->pm.profile;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       (cp == PM_PROFILE_AUTO) ? "auto" :
+                       (cp == PM_PROFILE_LOW) ? "low" :
+                       (cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
+static ssize_t radeon_set_pm_profile(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf,
+                                    size_t count)
+{
+       struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+       struct radeon_device *rdev = ddev->dev_private;
+
+       mutex_lock(&rdev->pm.mutex);
+       if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+               if (strncmp("default", buf, strlen("default")) == 0)
+                       rdev->pm.profile = PM_PROFILE_DEFAULT;
+               else if (strncmp("auto", buf, strlen("auto")) == 0)
+                       rdev->pm.profile = PM_PROFILE_AUTO;
+               else if (strncmp("low", buf, strlen("low")) == 0)
+                       rdev->pm.profile = PM_PROFILE_LOW;
+               else if (strncmp("high", buf, strlen("high")) == 0)
+                       rdev->pm.profile = PM_PROFILE_HIGH;
+               else {
+                       DRM_ERROR("invalid power profile!\n");
+                       goto fail;
+               }
+               radeon_pm_update_profile(rdev);
+               radeon_pm_set_clocks(rdev);
+       }
+fail:
+       mutex_unlock(&rdev->pm.mutex);
+
+       return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+       struct radeon_device *rdev = ddev->dev_private;
+       int pm = rdev->pm.pm_method;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+}
+
+static ssize_t radeon_set_pm_method(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf,
+                                   size_t count)
+{
+       struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+       struct radeon_device *rdev = ddev->dev_private;
+
+
+       if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+               mutex_lock(&rdev->pm.mutex);
+               rdev->pm.pm_method = PM_METHOD_DYNPM;
+               rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+               rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+               mutex_unlock(&rdev->pm.mutex);
+       } else if (strncmp("profile", buf, strlen("profile")) == 0) {
+               mutex_lock(&rdev->pm.mutex);
+               rdev->pm.pm_method = PM_METHOD_PROFILE;
+               /* disable dynpm */
+               rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+               rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+               cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+               mutex_unlock(&rdev->pm.mutex);
+       } else {
+               DRM_ERROR("invalid power method!\n");
+               goto fail;
+       }
+       radeon_pm_compute_clocks(rdev);
+fail:
+       return count;
+}
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
 
-       rdev->pm.current_power_state = rdev->pm.requested_power_state;
-       rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+       mutex_lock(&rdev->pm.mutex);
+       cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+       rdev->pm.current_power_state_index = -1;
+       rdev->pm.current_clock_mode_index = -1;
+       rdev->pm.current_sclk = 0;
+       rdev->pm.current_mclk = 0;
+       mutex_unlock(&rdev->pm.mutex);
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+       radeon_pm_compute_clocks(rdev);
 }
 
 int radeon_pm_init(struct radeon_device *rdev)
 {
-       rdev->pm.state = PM_STATE_DISABLED;
-       rdev->pm.planned_action = PM_ACTION_NONE;
-       rdev->pm.downclocked = false;
+       int ret;
+       /* default to profile method */
+       rdev->pm.pm_method = PM_METHOD_PROFILE;
+       rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+       rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+       rdev->pm.dynpm_can_upclock = true;
+       rdev->pm.dynpm_can_downclock = true;
+       rdev->pm.current_sclk = 0;
+       rdev->pm.current_mclk = 0;
 
        if (rdev->bios) {
                if (rdev->is_atom_bios)
                        radeon_atombios_get_power_modes(rdev);
                else
                        radeon_combios_get_power_modes(rdev);
-               radeon_print_power_mode_info(rdev);
+               radeon_pm_init_profile(rdev);
+               rdev->pm.current_power_state_index = -1;
+               rdev->pm.current_clock_mode_index = -1;
        }
 
-       if (radeon_debugfs_pm_init(rdev)) {
-               DRM_ERROR("Failed to register debugfs file for PM!\n");
-       }
+       if (rdev->pm.num_power_states > 1) {
+               if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+                       mutex_lock(&rdev->pm.mutex);
+                       rdev->pm.profile = PM_PROFILE_DEFAULT;
+                       radeon_pm_update_profile(rdev);
+                       radeon_pm_set_clocks(rdev);
+                       mutex_unlock(&rdev->pm.mutex);
+               }
+
+               /* where's the best place to put these? */
+               ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+               if (ret)
+                       DRM_ERROR("failed to create device file for power profile\n");
+               ret = device_create_file(rdev->dev, &dev_attr_power_method);
+               if (ret)
+                       DRM_ERROR("failed to create device file for power method\n");
+
+#ifdef CONFIG_ACPI
+               rdev->acpi_nb.notifier_call = radeon_acpi_event;
+               register_acpi_notifier(&rdev->acpi_nb);
+#endif
+               INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
 
-       INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
+               if (radeon_debugfs_pm_init(rdev)) {
+                       DRM_ERROR("Failed to register debugfs file for PM!\n");
+               }
 
-       if (radeon_dynpm != -1 && radeon_dynpm) {
-               rdev->pm.state = PM_STATE_PAUSED;
-               DRM_INFO("radeon: dynamic power management enabled\n");
+               DRM_INFO("radeon: power management initialized\n");
        }
 
-       DRM_INFO("radeon: power management initialized\n");
-
        return 0;
 }
 
 void radeon_pm_fini(struct radeon_device *rdev)
 {
+       if (rdev->pm.num_power_states > 1) {
+               mutex_lock(&rdev->pm.mutex);
+               if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+                       rdev->pm.profile = PM_PROFILE_DEFAULT;
+                       radeon_pm_update_profile(rdev);
+                       radeon_pm_set_clocks(rdev);
+               } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+                       /* cancel work */
+                       cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+                       /* reset default clocks */
+                       rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+                       rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+                       radeon_pm_set_clocks(rdev);
+               }
+               mutex_unlock(&rdev->pm.mutex);
+
+               device_remove_file(rdev->dev, &dev_attr_power_profile);
+               device_remove_file(rdev->dev, &dev_attr_power_method);
+#ifdef CONFIG_ACPI
+               unregister_acpi_notifier(&rdev->acpi_nb);
+#endif
+       }
+
        if (rdev->pm.i2c_bus)
                radeon_i2c_destroy(rdev->pm.i2c_bus);
 }
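
power_profile and power_method are ordinary device attributes, so the whole interface is driven from userspace, e.g. writing "profile" to power_method and then "low" to power_profile under the DRM device's sysfs directory (typically /sys/class/drm/card0/device/, though the exact path follows the PCI binding). The sysfs pattern itself, as a minimal sketch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "current setting\n");
}

static ssize_t foo_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	/* parse buf under the driver's own lock, apply the change,
	 * and return count so userspace sees the full write consumed */
	return count;
}

static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR, foo_show, foo_store);

/* expose with device_create_file(dev, &dev_attr_foo),
 * remove with device_remove_file(dev, &dev_attr_foo) */
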
@@ -266,146 +483,167 @@ void radeon_pm_fini(struct radeon_device *rdev)
 void radeon_pm_compute_clocks(struct radeon_device *rdev)
 {
        struct drm_device *ddev = rdev->ddev;
-       struct drm_connector *connector;
+       struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
-       int count = 0;
 
-       if (rdev->pm.state == PM_STATE_DISABLED)
+       if (rdev->pm.num_power_states < 2)
                return;
 
        mutex_lock(&rdev->pm.mutex);
 
        rdev->pm.active_crtcs = 0;
-       list_for_each_entry(connector,
-               &ddev->mode_config.connector_list, head) {
-               if (connector->encoder &&
-                   connector->encoder->crtc &&
-                   connector->dpms != DRM_MODE_DPMS_OFF) {
-                       radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
+       rdev->pm.active_crtc_count = 0;
+       list_for_each_entry(crtc,
+               &ddev->mode_config.crtc_list, head) {
+               radeon_crtc = to_radeon_crtc(crtc);
+               if (radeon_crtc->enabled) {
                        rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
-                       ++count;
+                       rdev->pm.active_crtc_count++;
                }
        }
 
-       if (count > 1) {
-               if (rdev->pm.state == PM_STATE_ACTIVE) {
-                       cancel_delayed_work(&rdev->pm.idle_work);
-
-                       rdev->pm.state = PM_STATE_PAUSED;
-                       rdev->pm.planned_action = PM_ACTION_UPCLOCK;
-                       if (rdev->pm.downclocked)
-                               radeon_pm_set_clocks(rdev);
-
-                       DRM_DEBUG("radeon: dynamic power management deactivated\n");
-               }
-       } else if (count == 1) {
-               /* TODO: Increase clocks if needed for current mode */
-
-               if (rdev->pm.state == PM_STATE_MINIMUM) {
-                       rdev->pm.state = PM_STATE_ACTIVE;
-                       rdev->pm.planned_action = PM_ACTION_UPCLOCK;
-                       radeon_pm_set_clocks(rdev);
-
-                       queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
-                               msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
-               }
-               else if (rdev->pm.state == PM_STATE_PAUSED) {
-                       rdev->pm.state = PM_STATE_ACTIVE;
-                       queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
-                               msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
-                       DRM_DEBUG("radeon: dynamic power management activated\n");
-               }
-       }
-       else { /* count == 0 */
-               if (rdev->pm.state != PM_STATE_MINIMUM) {
-                       cancel_delayed_work(&rdev->pm.idle_work);
-
-                       rdev->pm.state = PM_STATE_MINIMUM;
-                       rdev->pm.planned_action = PM_ACTION_MINIMUM;
-                       radeon_pm_set_clocks(rdev);
+       if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+               radeon_pm_update_profile(rdev);
+               radeon_pm_set_clocks(rdev);
+       } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+               if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+                       if (rdev->pm.active_crtc_count > 1) {
+                               if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+                                       cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+                                       rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+                                       rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+                                       radeon_pm_get_dynpm_state(rdev);
+                                       radeon_pm_set_clocks(rdev);
+
+                                       DRM_DEBUG("radeon: dynamic power management deactivated\n");
+                               }
+                       } else if (rdev->pm.active_crtc_count == 1) {
+                               /* TODO: Increase clocks if needed for current mode */
+
+                               if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+                                       rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+                                       rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+                                       radeon_pm_get_dynpm_state(rdev);
+                                       radeon_pm_set_clocks(rdev);
+
+                                       queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+                                                          msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+                               } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+                                       rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+                                       queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+                                                          msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+                                       DRM_DEBUG("radeon: dynamic power management activated\n");
+                               }
+                       } else { /* count == 0 */
+                               if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+                                       cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+                                       rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+                                       rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+                                       radeon_pm_get_dynpm_state(rdev);
+                                       radeon_pm_set_clocks(rdev);
+                               }
+                       }
                }
        }
 
        mutex_unlock(&rdev->pm.mutex);
 }
 
-static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
 {
-       u32 stat_crtc1 = 0, stat_crtc2 = 0;
+       u32 stat_crtc = 0, vbl = 0, position = 0;
        bool in_vbl = true;
 
-       if (ASIC_IS_AVIVO(rdev)) {
+       if (ASIC_IS_DCE4(rdev)) {
+               if (rdev->pm.active_crtcs & (1 << 0)) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+               }
+               if (rdev->pm.active_crtcs & (1 << 1)) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+               }
+               if (rdev->pm.active_crtcs & (1 << 2)) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+               }
+               if (rdev->pm.active_crtcs & (1 << 3)) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+               }
+               if (rdev->pm.active_crtcs & (1 << 4)) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+               }
+               if (rdev->pm.active_crtcs & (1 << 5)) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+               }
+       } else if (ASIC_IS_AVIVO(rdev)) {
+               if (rdev->pm.active_crtcs & (1 << 0)) {
+                       vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
+                       position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
+               }
+               if (rdev->pm.active_crtcs & (1 << 1)) {
+                       vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
+                       position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
+               }
+               if (position < vbl && position > 1)
+                       in_vbl = false;
+       } else {
                if (rdev->pm.active_crtcs & (1 << 0)) {
-                       stat_crtc1 = RREG32(D1CRTC_STATUS);
-                       if (!(stat_crtc1 & 1))
+                       stat_crtc = RREG32(RADEON_CRTC_STATUS);
+                       if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
-                       stat_crtc2 = RREG32(D2CRTC_STATUS);
-                       if (!(stat_crtc2 & 1))
+                       stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+                       if (!(stat_crtc & 1))
                                in_vbl = false;
                }
        }
-       if (in_vbl == false)
-               DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
-                        stat_crtc2, finish ? "exit" : "entry");
-       return in_vbl;
-}
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
-{
-       /*radeon_fence_wait_last(rdev);*/
-       switch (rdev->pm.planned_action) {
-       case PM_ACTION_UPCLOCK:
-               rdev->pm.downclocked = false;
-               break;
-       case PM_ACTION_DOWNCLOCK:
-               rdev->pm.downclocked = true;
-               break;
-       case PM_ACTION_MINIMUM:
-               break;
-       case PM_ACTION_NONE:
-               DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
-               break;
-       }
 
-       radeon_set_power_state(rdev);
-       rdev->pm.planned_action = PM_ACTION_NONE;
+       if (position < vbl && position > 1)
+               in_vbl = false;
+
+       return in_vbl;
 }
 
-static void radeon_pm_set_clocks(struct radeon_device *rdev)
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
 {
-       radeon_get_power_state(rdev, rdev->pm.planned_action);
-       mutex_lock(&rdev->cp.mutex);
+       u32 stat_crtc = 0;
+       bool in_vbl = radeon_pm_in_vbl(rdev);
 
-       if (rdev->pm.active_crtcs & (1 << 0)) {
-               rdev->pm.req_vblank |= (1 << 0);
-               drm_vblank_get(rdev->ddev, 0);
-       }
-       if (rdev->pm.active_crtcs & (1 << 1)) {
-               rdev->pm.req_vblank |= (1 << 1);
-               drm_vblank_get(rdev->ddev, 1);
-       }
-       radeon_pm_set_clocks_locked(rdev);
-       if (rdev->pm.req_vblank & (1 << 0)) {
-               rdev->pm.req_vblank &= ~(1 << 0);
-               drm_vblank_put(rdev->ddev, 0);
-       }
-       if (rdev->pm.req_vblank & (1 << 1)) {
-               rdev->pm.req_vblank &= ~(1 << 1);
-               drm_vblank_put(rdev->ddev, 1);
-       }
-
-       mutex_unlock(&rdev->cp.mutex);
+       if (in_vbl == false)
+               DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc,
+                        finish ? "exit" : "entry");
+       return in_vbl;
 }
 
-static void radeon_pm_idle_work_handler(struct work_struct *work)
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
 {
        struct radeon_device *rdev;
+       int resched;
        rdev = container_of(work, struct radeon_device,
-                               pm.idle_work.work);
+                               pm.dynpm_idle_work.work);
 
+       resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
        mutex_lock(&rdev->pm.mutex);
-       if (rdev->pm.state == PM_STATE_ACTIVE) {
+       if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
                unsigned long irq_flags;
                int not_processed = 0;
 
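
radeon_pm_in_vbl() above compares each active crtc's current scanline against its vblank start; positions 0 and 1 are treated as already blanked, since the counter wraps through them at frame start. The predicate, stated on its own:

#include <stdbool.h>
#include <stdint.h>

/* safe to reclock when the beam is at/after vblank start, or in the
 * 0/1 wraparound window just after the frame restarts */
static bool in_vblank(uint32_t vbl_start, uint32_t position)
{
	return !(position < vbl_start && position > 1);
}
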
@@ -421,35 +659,40 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
                read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 
                if (not_processed >= 3) { /* should upclock */
-                       if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
-                               rdev->pm.planned_action = PM_ACTION_NONE;
-                       } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
-                               rdev->pm.downclocked) {
-                               rdev->pm.planned_action =
-                                       PM_ACTION_UPCLOCK;
-                               rdev->pm.action_timeout = jiffies +
+                       if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+                               rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+                       } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+                                  rdev->pm.dynpm_can_upclock) {
+                               rdev->pm.dynpm_planned_action =
+                                       DYNPM_ACTION_UPCLOCK;
+                               rdev->pm.dynpm_action_timeout = jiffies +
                                msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
                        }
                } else if (not_processed == 0) { /* should downclock */
-                       if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
-                               rdev->pm.planned_action = PM_ACTION_NONE;
-                       } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
-                               !rdev->pm.downclocked) {
-                               rdev->pm.planned_action =
-                                       PM_ACTION_DOWNCLOCK;
-                               rdev->pm.action_timeout = jiffies +
+                       if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+                               rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+                       } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+                                  rdev->pm.dynpm_can_downclock) {
+                               rdev->pm.dynpm_planned_action =
+                                       DYNPM_ACTION_DOWNCLOCK;
+                               rdev->pm.dynpm_action_timeout = jiffies +
                                msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
                        }
                }
 
-               if (rdev->pm.planned_action != PM_ACTION_NONE &&
-                   jiffies > rdev->pm.action_timeout) {
+               /* Note, radeon_pm_set_clocks is called with static_switch set
+                * to false since we want to wait for vbl to avoid flicker.
+                */
+               if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+                   jiffies > rdev->pm.dynpm_action_timeout) {
+                       radeon_pm_get_dynpm_state(rdev);
                        radeon_pm_set_clocks(rdev);
                }
        }
        mutex_unlock(&rdev->pm.mutex);
+       ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 
-       queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+       queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
                                        msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 }
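
The heuristic above uses simple hysteresis: three or more fences still pending at the sample point argue for an upclock, zero pending argue for a downclock, and a planned move in the opposite direction is cancelled before a new one is scheduled. A minimal standalone C sketch of just that decision step (names are illustrative, not the driver's):

#include <stdbool.h>

enum dynpm_action { ACTION_NONE, ACTION_UPCLOCK, ACTION_DOWNCLOCK };

struct dynpm_state {
	enum dynpm_action planned;
	bool can_upclock;
	bool can_downclock;
};

/* Mirror of the idle-work decision: >= 3 pending fences plan an
 * upclock, 0 pending plan a downclock, and a plan pointing the
 * other way is cancelled first. */
static void dynpm_decide(struct dynpm_state *s, int not_processed)
{
	if (not_processed >= 3) {
		if (s->planned == ACTION_DOWNCLOCK)
			s->planned = ACTION_NONE;
		else if (s->planned == ACTION_NONE && s->can_upclock)
			s->planned = ACTION_UPCLOCK;
	} else if (not_processed == 0) {
		if (s->planned == ACTION_UPCLOCK)
			s->planned = ACTION_NONE;
		else if (s->planned == ACTION_NONE && s->can_downclock)
			s->planned = ACTION_DOWNCLOCK;
	}
}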
 
@@ -464,7 +707,6 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
 
-       seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
        seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
        seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
        seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
index eabbc9c..c332f46 100644 (file)
 #       define RADEON_CRTC_CRNT_VLINE_MASK  (0x7ff << 16)
 #define RADEON_CRTC2_CRNT_FRAME             0x0314
 #define RADEON_CRTC2_GUI_TRIG_VLINE         0x0318
-#define RADEON_CRTC2_STATUS                 0x03fc
 #define RADEON_CRTC2_VLINE_CRNT_VLINE       0x0310
 #define RADEON_CRTC8_DATA                   0x03d5 /* VGA, 0x3b5 */
 #define RADEON_CRTC8_IDX                    0x03d4 /* VGA, 0x3b4 */
 #      define RADEON_FP_DETECT_MASK            (1 << 4)
 #      define RADEON_CRTC2_VBLANK_MASK         (1 << 9)
 #      define RADEON_FP2_DETECT_MASK           (1 << 10)
+#      define RADEON_GUI_IDLE_MASK             (1 << 19)
 #      define RADEON_SW_INT_ENABLE             (1 << 25)
 #define RADEON_GEN_INT_STATUS               0x0044
 #      define AVIVO_DISPLAY_INT_STATUS         (1 << 0)
 #      define RADEON_CRTC2_VBLANK_STAT_ACK     (1 << 9)
 #      define RADEON_FP2_DETECT_STAT           (1 << 10)
 #      define RADEON_FP2_DETECT_STAT_ACK       (1 << 10)
+#      define RADEON_GUI_IDLE_STAT             (1 << 19)
+#      define RADEON_GUI_IDLE_STAT_ACK         (1 << 19)
 #      define RADEON_SW_INT_FIRE               (1 << 26)
 #      define RADEON_SW_INT_TEST               (1 << 25)
 #      define RADEON_SW_INT_TEST_ACK           (1 << 25)
index f6e1e8d..261e98a 100644 (file)
@@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
        int r;
+       struct radeon_bo *robj;
 
        if (!rdev->ib_pool.ready) {
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        radeon_ib_bogus_cleanup(rdev);
+       robj = rdev->ib_pool.robj;
+       rdev->ib_pool.robj = NULL;
+       mutex_unlock(&rdev->ib_pool.mutex);
 
-       if (rdev->ib_pool.robj) {
-               r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+       if (robj) {
+               r = radeon_bo_reserve(robj, false);
                if (likely(r == 0)) {
-                       radeon_bo_kunmap(rdev->ib_pool.robj);
-                       radeon_bo_unpin(rdev->ib_pool.robj);
-                       radeon_bo_unreserve(rdev->ib_pool.robj);
+                       radeon_bo_kunmap(robj);
+                       radeon_bo_unpin(robj);
+                       radeon_bo_unreserve(robj);
                }
-               radeon_bo_unref(&rdev->ib_pool.robj);
-               rdev->ib_pool.robj = NULL;
+               radeon_bo_unref(&robj);
        }
-       mutex_unlock(&rdev->ib_pool.mutex);
 }
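
The fini rework above follows a detach-then-release pattern: the pool's buffer pointer is snapshotted and cleared while the mutex is held, and the potentially sleeping kunmap/unpin/unref calls run only after the mutex is dropped. A hedged userspace sketch of the same shape, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t lock;
	void *obj;		/* stands in for the pinned BO */
};

/* Detach under the lock, release outside it: the expensive
 * teardown never runs with the pool lock held. */
static void pool_fini(struct pool *p)
{
	void *obj;

	pthread_mutex_lock(&p->lock);
	obj = p->obj;
	p->obj = NULL;
	pthread_mutex_unlock(&p->lock);

	free(obj);	/* the driver does kunmap/unpin/unref here */
}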
 
 
@@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
        }
 }
 
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
 {
        int r;
 
        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-       mutex_lock(&rdev->cp.mutex);
        while (ndw > (rdev->cp.ring_free_dw - 1)) {
                radeon_ring_free_size(rdev);
                if (ndw < rdev->cp.ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next(rdev);
-               if (r) {
-                       mutex_unlock(&rdev->cp.mutex);
+               if (r)
                        return r;
-               }
        }
        rdev->cp.count_dw = ndw;
        rdev->cp.wptr_old = rdev->cp.wptr;
        return 0;
 }
 
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+       int r;
+
+       mutex_lock(&rdev->cp.mutex);
+       r = radeon_ring_alloc(rdev, ndw);
+       if (r) {
+               mutex_unlock(&rdev->cp.mutex);
+               return r;
+       }
+       return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
 {
        unsigned count_dw_pad;
        unsigned i;
@@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
        }
        DRM_MEMORYBARRIER();
        radeon_cp_commit(rdev);
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+       radeon_ring_commit(rdev);
        mutex_unlock(&rdev->cp.mutex);
 }
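
radeon_ring_lock() is now a thin wrapper that takes cp.mutex and calls the new lock-free radeon_ring_alloc(), and radeon_ring_unlock_commit() is radeon_ring_commit() plus the unlock, so callers that already hold the ring mutex can use the alloc/commit pair directly. For everyone else the familiar pattern is unchanged; a usage sketch (reg and val are placeholders, radeon_ring_write()/PACKET0() are the driver's existing helpers):

	int r;

	r = radeon_ring_lock(rdev, 2);
	if (r)
		return r;
	radeon_ring_write(rdev, PACKET0(reg, 0));
	radeon_ring_write(rdev, val);
	radeon_ring_unlock_commit(rdev);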
 
@@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 void radeon_ring_fini(struct radeon_device *rdev)
 {
        int r;
+       struct radeon_bo *ring_obj;
 
        mutex_lock(&rdev->cp.mutex);
-       if (rdev->cp.ring_obj) {
-               r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+       ring_obj = rdev->cp.ring_obj;
+       rdev->cp.ring = NULL;
+       rdev->cp.ring_obj = NULL;
+       mutex_unlock(&rdev->cp.mutex);
+
+       if (ring_obj) {
+               r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
-                       radeon_bo_kunmap(rdev->cp.ring_obj);
-                       radeon_bo_unpin(rdev->cp.ring_obj);
-                       radeon_bo_unreserve(rdev->cp.ring_obj);
+                       radeon_bo_kunmap(ring_obj);
+                       radeon_bo_unpin(ring_obj);
+                       radeon_bo_unreserve(ring_obj);
                }
-               radeon_bo_unref(&rdev->cp.ring_obj);
-               rdev->cp.ring = NULL;
-               rdev->cp.ring_obj = NULL;
+               radeon_bo_unref(&ring_obj);
        }
-       mutex_unlock(&rdev->cp.mutex);
 }
 
 
index d031b68..e9918d8 100644 (file)
@@ -33,6 +33,7 @@
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
@@ -162,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                          (unsigned)type);
                                return -EINVAL;
                        }
-                       man->io_offset = rdev->mc.agp_base;
-                       man->io_size = rdev->mc.gtt_size;
-                       man->io_addr = NULL;
                        if (!rdev->ddev->agp->cant_use_aperture)
-                               man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
-                                            TTM_MEMTYPE_FLAG_MAPPABLE;
+                               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
-               } else
-#endif
-               {
-                       man->io_offset = 0;
-                       man->io_size = 0;
-                       man->io_addr = NULL;
                }
+#endif
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                            TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
-               man->io_addr = NULL;
-               man->io_offset = rdev->mc.aper_base;
-               man->io_size = rdev->mc.aper_size;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -244,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-                           bool evict, int no_wait,
-                           struct ttm_mem_reg *new_mem,
-                           struct ttm_mem_reg *old_mem)
+                       bool evict, int no_wait_reserve, bool no_wait_gpu,
+                       struct ttm_mem_reg *new_mem,
+                       struct ttm_mem_reg *old_mem)
 {
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
@@ -290,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
        r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-                                     evict, no_wait, new_mem);
+                                     evict, no_wait_reserve, no_wait_gpu, new_mem);
        radeon_fence_unref(&fence);
        return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-                               bool evict, bool interruptible, bool no_wait,
+                               bool evict, bool interruptible,
+                               bool no_wait_reserve, bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -317,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-                            interruptible, no_wait);
+                            interruptible, no_wait_reserve, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
@@ -331,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
        if (tmp_mem.mm_node) {
                struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -349,7 +338,8 @@ out_cleanup:
 }
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-                               bool evict, bool interruptible, bool no_wait,
+                               bool evict, bool interruptible,
+                               bool no_wait_reserve, bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -369,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -394,8 +384,9 @@ out_cleanup:
 }
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
-                         bool evict, bool interruptible, bool no_wait,
-                         struct ttm_mem_reg *new_mem)
+                       bool evict, bool interruptible,
+                       bool no_wait_reserve, bool no_wait_gpu,
+                       struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
@@ -422,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, interruptible,
-                                           no_wait, new_mem);
+                                       no_wait_reserve, no_wait_gpu, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, interruptible,
-                                           no_wait, new_mem);
+                                           no_wait_reserve, no_wait_gpu, new_mem);
        } else {
-               r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+               r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
        }
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+               r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
        }
-
        return r;
 }
 
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+       mem->bus.addr = NULL;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* system memory */
+               return 0;
+       case TTM_PL_TT:
+#if __OS_HAS_AGP
+               if (rdev->flags & RADEON_IS_AGP) {
+                       /* RADEON_IS_AGP is set only if AGP is active */
+                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.base = rdev->mc.agp_base;
+                       mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+               }
+#endif
+               break;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               /* check if it's visible */
+               if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+                       return -EINVAL;
+               mem->bus.base = rdev->mc.aper_base;
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
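
With the new io_mem_reserve hook the driver reports, per mapping request, where a buffer actually sits on the bus, instead of publishing static io_offset/io_size fields in the memory-type manager (the fields removed above). A standalone sketch of the VRAM branch's aperture-visibility check, with illustrative names:

#include <errno.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Return the CPU-visible bus address of a VRAM placement, or
 * -EINVAL when it extends past the visible aperture. */
static int vram_bus_addr(uint64_t start_page, uint64_t num_pages,
			 uint64_t aper_base, uint64_t visible_size,
			 uint64_t *bus_addr)
{
	uint64_t offset = start_page << PAGE_SHIFT;
	uint64_t size = num_pages << PAGE_SHIFT;

	if (offset + size > visible_size)
		return -EINVAL;
	*bus_addr = aper_base + offset;
	return 0;
}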
+
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                bool lazy, bool interruptible)
 {
@@ -479,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
        .sync_obj_ref = &radeon_sync_obj_ref,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+       .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+       .io_mem_free = &radeon_ttm_io_mem_free,
 };
 
 int radeon_ttm_init(struct radeon_device *rdev)
@@ -571,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL;
 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct ttm_buffer_object *bo;
+       struct radeon_device *rdev;
        int r;
 
        bo = (struct ttm_buffer_object *)vma->vm_private_data;
        if (bo == NULL) {
                return VM_FAULT_NOPAGE;
        }
+       rdev = radeon_get_rdev(bo->bdev);
+       mutex_lock(&rdev->vram_mutex);
        r = ttm_vm_ops->fault(vma, vmf);
+       mutex_unlock(&rdev->vram_mutex);
        return r;
 }
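
The fault path keeps TTM's original vm_operations_struct but interposes on ->fault, so every CPU fault on a mapped buffer is serialized against VRAM moves via rdev->vram_mutex. The general shape of such a wrapper, as a hedged sketch with illustrative types:

#include <pthread.h>

struct vm_ops {
	int (*fault)(void *vma, void *vmf);
};

static const struct vm_ops *orig_ops;	/* the saved original table */
static pthread_mutex_t vram_lock = PTHREAD_MUTEX_INITIALIZER;

/* Installed in place of the original handler: take the lock,
 * delegate to the saved handler, drop the lock. */
static int locked_fault(void *vma, void *vmf)
{
	int r;

	pthread_mutex_lock(&vram_lock);
	r = orig_ops->fault(vma, vmf);
	pthread_mutex_unlock(&vram_lock);
	return r;
}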
 
@@ -745,8 +785,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-       static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
-       static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
+       static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+       static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
        unsigned i;
 
        for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +803,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
 
        }
-       return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
+       /* Add ttm page pool to debugfs */
+       sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+       radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+       radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+       radeon_mem_types_list[i].driver_features = 0;
+       radeon_mem_types_list[i].data = NULL;
+       return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
 
 #endif
        return 0;
index 1a41cb2..9e4240b 100644 (file)
@@ -243,8 +243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
 
 void rs400_gpu_init(struct radeon_device *rdev)
 {
-       /* FIXME: HDP same place on rs400 ? */
-       r100_hdp_reset(rdev);
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
        if (rs400_mc_wait_for_idle(rdev)) {
@@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev)
        /* setup MC before calling post tables */
        rs400_mc_program(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
@@ -458,7 +456,6 @@ int rs400_suspend(struct radeon_device *rdev)
 
 void rs400_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -497,7 +494,7 @@ int rs400_init(struct radeon_device *rdev)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
@@ -509,8 +506,6 @@ int rs400_init(struct radeon_device *rdev)
 
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* initialize memory controller */
        rs400_mc_init(rdev);
        /* Fence driver */
index a81bc7a..79887ca 100644 (file)
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+       int requested_index = rdev->pm.requested_power_state_index;
+       struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+       struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+       u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+       u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+       if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+               if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+                       tmp = RREG32(voltage->gpio.reg);
+                       if (voltage->active_high)
+                               tmp |= voltage->gpio.mask;
+                       else
+                               tmp &= ~(voltage->gpio.mask);
+                       WREG32(voltage->gpio.reg, tmp);
+                       if (voltage->delay)
+                               udelay(voltage->delay);
+               } else {
+                       tmp = RREG32(voltage->gpio.reg);
+                       if (voltage->active_high)
+                               tmp &= ~voltage->gpio.mask;
+                       else
+                               tmp |= voltage->gpio.mask;
+                       WREG32(voltage->gpio.reg, tmp);
+                       if (voltage->delay)
+                               udelay(voltage->delay);
+               }
+       }
+
+       dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+       dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+       dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+       if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+               if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+                       dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+                       dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+               } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+                       dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+                       dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+               }
+       } else {
+               dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+               dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+       }
+       WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+       dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+       if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+               dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+               if (voltage->delay) {
+                       dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+                       dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+               } else
+                       dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+       } else
+               dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+       WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+       hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+       if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+               hdp_dyn_cntl &= ~HDP_FORCEON;
+       else
+               hdp_dyn_cntl |= HDP_FORCEON;
+       WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+       /* mc_host_dyn seems to cause hangs from time to time */
+       mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+       if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+               mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+       else
+               mc_host_dyn_cntl |= MC_HOST_FORCEON;
+       WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+       dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+       if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+               dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+       else
+               dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+       WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+       /* set pcie lanes */
+       if ((rdev->flags & RADEON_IS_PCIE) &&
+           !(rdev->flags & RADEON_IS_IGP) &&
+           rdev->asic->set_pcie_lanes &&
+           (ps->pcie_lanes !=
+            rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+               radeon_set_pcie_lanes(rdev,
+                                     ps->pcie_lanes);
+               DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+       }
+}
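
The GPIO voltage-drop handling above asserts or deasserts the same line under either polarity: with VOLTAGE_DROP_SUPPORT set, the mask bit is set when the GPIO is active-high and cleared otherwise, and the opposite on the deassert path. That collapses to a one-line rule, shown as a tiny standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* Compute the new GPIO register value: the bit is set exactly when
 * the requested state matches the line's polarity. */
static uint32_t gpio_apply(uint32_t reg, uint32_t mask,
			   bool active_high, bool assert)
{
	if (assert == active_high)
		return reg | mask;
	return reg & ~mask;
}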
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+       struct drm_device *ddev = rdev->ddev;
+       struct drm_crtc *crtc;
+       struct radeon_crtc *radeon_crtc;
+       u32 tmp;
+
+       /* disable any active CRTCs */
+       list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+               radeon_crtc = to_radeon_crtc(crtc);
+               if (radeon_crtc->enabled) {
+                       tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+                       tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+                       WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+               }
+       }
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+       struct drm_device *ddev = rdev->ddev;
+       struct drm_crtc *crtc;
+       struct radeon_crtc *radeon_crtc;
+       u32 tmp;
+
+       /* enable any active CRTCs */
+       list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+               radeon_crtc = to_radeon_crtc(crtc);
+               if (radeon_crtc->enabled) {
+                       tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+                       tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+                       WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+               }
+       }
+}
+
 /* hpd for digital panel detect/disconnect */
 bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
@@ -147,6 +276,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
        }
 }
 
+void rs600_bm_disable(struct radeon_device *rdev)
+{
+       u32 tmp;
+
+       /* disable bus mastering: clear the Bus Master Enable bit (bit 2)
+        * of the PCI command register at config offset 0x4 */
+       pci_read_config_word(rdev->pdev, 0x4, (u16 *)&tmp);
+       pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+       mdelay(1);
+}
+
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+       u32 status, tmp;
+       struct rv515_mc_save save;
+
+       /* stop all MC clients */
+       rv515_mc_stop(rdev, &save);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       if (!G_000E40_GUI_ACTIVE(status)) {
+               return 0;
+       }
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+       /* stop CP */
+       WREG32(RADEON_CP_CSQ_CNTL, 0);
+       tmp = RREG32(RADEON_CP_RB_CNTL);
+       WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+       WREG32(RADEON_CP_RB_RPTR_WR, 0);
+       WREG32(RADEON_CP_RB_WPTR, 0);
+       WREG32(RADEON_CP_RB_CNTL, tmp);
+       pci_save_state(rdev->pdev);
+       /* disable bus mastering */
+       rs600_bm_disable(rdev);
+       /* reset GA+VAP */
+       WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+                                       S_0000F0_SOFT_RESET_GA(1));
+       RREG32(R_0000F0_RBBM_SOFT_RESET);
+       mdelay(500);
+       WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+       mdelay(1);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+       /* reset CP */
+       WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+       RREG32(R_0000F0_RBBM_SOFT_RESET);
+       mdelay(500);
+       WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+       mdelay(1);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+       /* reset MC */
+       WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+       RREG32(R_0000F0_RBBM_SOFT_RESET);
+       mdelay(500);
+       WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+       mdelay(1);
+       status = RREG32(R_000E40_RBBM_STATUS);
+       dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+       /* restore PCI & busmastering */
+       pci_restore_state(rdev->pdev);
+       /* Check if GPU is idle */
+       if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+               dev_err(rdev->dev, "failed to reset GPU\n");
+               rdev->gpu_lockup = true;
+               return -1;
+       }
+       rv515_mc_resume(rdev, &save);
+       dev_info(rdev->dev, "GPU reset succeed\n");
+       return 0;
+}
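
Each per-block reset above repeats the same MMIO idiom: write the SOFT_RESET bits, read the register back so the posted write lands, hold the reset, then clear it and let the block settle before sampling RBBM_STATUS. A generic sketch of that pulse, with placeholder register accessors:

#include <stdint.h>

extern void mmio_write(uint32_t reg, uint32_t val);	/* WREG32 stand-in */
extern uint32_t mmio_read(uint32_t reg);		/* RREG32 stand-in */
extern void delay_ms(unsigned int ms);			/* mdelay stand-in */

/* Pulse a set of soft-reset bits: write, post via readback,
 * hold, release, settle. */
static void soft_reset_pulse(uint32_t reset_reg, uint32_t bits)
{
	mmio_write(reset_reg, bits);
	(void)mmio_read(reset_reg);	/* flush the posted write */
	delay_ms(500);			/* hold, as the rs600 path does */
	mmio_write(reset_reg, 0);
	delay_ms(1);			/* settle before reading status */
}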
+
 /*
  * GART.
  */
@@ -310,6 +511,9 @@ int rs600_irq_set(struct radeon_device *rdev)
        if (rdev->irq.sw_int) {
                tmp |= S_000040_SW_INT_EN(1);
        }
+       if (rdev->irq.gui_idle) {
+               tmp |= S_000040_GUI_IDLE(1);
+       }
        if (rdev->irq.crtc_vblank_int[0]) {
                mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
        }
@@ -332,9 +536,15 @@ int rs600_irq_set(struct radeon_device *rdev)
 static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
 {
        uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
-       uint32_t irq_mask = ~C_000044_SW_INT;
+       uint32_t irq_mask = S_000044_SW_INT(1);
        u32 tmp;
 
+       /* the interrupt works, but the status bit is permanently asserted */
+       if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+               if (!rdev->irq.gui_idle_acked)
+                       irq_mask |= S_000044_GUI_IDLE_STAT(1);
+       }
+
        if (G_000044_DISPLAY_INT_STAT(irqs)) {
                *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
                if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
@@ -382,6 +592,9 @@ int rs600_irq_process(struct radeon_device *rdev)
        uint32_t r500_disp_int;
        bool queue_hotplug = false;
 
+       /* reset gui idle ack.  the status bit is broken */
+       rdev->irq.gui_idle_acked = false;
+
        status = rs600_irq_ack(rdev, &r500_disp_int);
        if (!status && !r500_disp_int) {
                return IRQ_NONE;
@@ -390,6 +603,12 @@ int rs600_irq_process(struct radeon_device *rdev)
                /* SW interrupt */
                if (G_000044_SW_INT(status))
                        radeon_fence_process(rdev);
+               /* GUI idle */
+               if (G_000040_GUI_IDLE(status)) {
+                       rdev->irq.gui_idle_acked = true;
+                       rdev->pm.gui_idle = true;
+                       wake_up(&rdev->irq.idle_queue);
+               }
                /* Vertical blank interrupts */
                if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
                        drm_handle_vblank(rdev->ddev, 0);
@@ -411,6 +630,8 @@ int rs600_irq_process(struct radeon_device *rdev)
                }
                status = rs600_irq_ack(rdev, &r500_disp_int);
        }
+       /* reset gui idle ack.  the status bit is broken */
+       rdev->irq.gui_idle_acked = false;
        if (queue_hotplug)
                queue_work(rdev->wq, &rdev->hotplug_work);
        if (rdev->msi_enabled) {
@@ -454,7 +675,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
 
 void rs600_gpu_init(struct radeon_device *rdev)
 {
-       r100_hdp_reset(rdev);
        r420_pipes_init(rdev);
        /* Wait for mc idle */
        if (rs600_mc_wait_for_idle(rdev))
@@ -601,7 +821,7 @@ int rs600_resume(struct radeon_device *rdev)
        /* Resume clock before doing reset */
        rv515_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
@@ -626,7 +846,6 @@ int rs600_suspend(struct radeon_device *rdev)
 
 void rs600_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -664,7 +883,7 @@ int rs600_init(struct radeon_device *rdev)
                return -EINVAL;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
@@ -676,8 +895,6 @@ int rs600_init(struct radeon_device *rdev)
 
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* initialize memory controller */
        rs600_mc_init(rdev);
        rs600_debugfs(rdev);
index e52d269..a27c13a 100644 (file)
 #define   S_000074_MC_IND_DATA(x)                      (((x) & 0xFFFFFFFF) << 0)
 #define   G_000074_MC_IND_DATA(x)                      (((x) >> 0) & 0xFFFFFFFF)
 #define   C_000074_MC_IND_DATA                         0x00000000
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
 #define R_000134_HDP_FB_LOCATION                     0x000134
 #define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
 #define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
 #define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
 #define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
 
+/* PLL regs */
+#define GENERAL_PWRMGT                                 0x8
+#define   GLOBAL_PWRMGT_EN                             (1 << 0)
+#define   MOBILE_SU                                    (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH                         0xc
+#define   NORMAL_POWER_SCLK_HILEN(x)                   ((x) << 0)
+#define   NORMAL_POWER_SCLK_LOLEN(x)                   ((x) << 4)
+#define   REDUCED_POWER_SCLK_HILEN(x)                  ((x) << 8)
+#define   REDUCED_POWER_SCLK_LOLEN(x)                  ((x) << 12)
+#define   POWER_D1_SCLK_HILEN(x)                       ((x) << 16)
+#define   POWER_D1_SCLK_LOLEN(x)                       ((x) << 20)
+#define   STATIC_SCREEN_HILEN(x)                       ((x) << 24)
+#define   STATIC_SCREEN_LOLEN(x)                       ((x) << 28)
+#define DYN_SCLK_VOL_CNTL                              0xe
+#define   IO_CG_VOLTAGE_DROP                           (1 << 0)
+#define   VOLTAGE_DROP_SYNC                            (1 << 2)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 3)
+#define HDP_DYN_CNTL                                   0x10
+#define   HDP_FORCEON                                  (1 << 0)
+#define MC_HOST_DYN_CNTL                               0x1e
+#define   MC_HOST_FORCEON                              (1 << 0)
+#define DYN_BACKBIAS_CNTL                              0x29
+#define   IO_CG_BACKBIAS_EN                            (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL                     0x7ee0
+#define   PWRDN_WAIT_BUSY_OFF                          (1 << 0)
+#define   PWRDN_WAIT_PWRSEQ_OFF                        (1 << 4)
+#define   PWRDN_WAIT_PPLL_OFF                          (1 << 8)
+#define   PWRUP_WAIT_PPLL_ON                           (1 << 12)
+#define   PWRUP_WAIT_MEM_INIT_DONE                     (1 << 16)
+#define   PM_ASSERT_RESET                              (1 << 20)
+#define   PM_PWRDN_PPLL                                (1 << 24)
+
 #endif
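
The header follows radeon's generated register convention: for each field, S_<reg>_<field>(x) shifts a value into position, G_ extracts it back out, and C_ is the AND-mask that clears it. A small self-checking example built from one of the fields defined above:

#include <assert.h>
#include <stdint.h>

#define S_0000F0_SOFT_RESET_GA(x)	(((x) & 0x1) << 13)
#define G_0000F0_SOFT_RESET_GA(x)	(((x) >> 13) & 0x1)
#define C_0000F0_SOFT_RESET_GA		0xFFFFDFFF

int main(void)
{
	uint32_t v = 0;

	v |= S_0000F0_SOFT_RESET_GA(1);		/* set the field */
	assert(G_0000F0_SOFT_RESET_GA(v) == 1);	/* read it back */
	v &= C_0000F0_SOFT_RESET_GA;		/* clear it */
	assert(v == 0);
	return 0;
}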
index bbf3da7..bcc3319 100644 (file)
@@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
 
 static void rs690_gpu_init(struct radeon_device *rdev)
 {
-       /* FIXME: HDP same place on rs690 ? */
-       r100_hdp_reset(rdev);
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
        if (rs690_mc_wait_for_idle(rdev)) {
@@ -78,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev)
                /* Get various system information from the BIOS */
                switch (crev) {
                case 1:
-                       tmp.full = rfixed_const(100);
-                       rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
-                       rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-                       rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
-                       rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
-                       rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
+                       tmp.full = dfixed_const(100);
+                       rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+                       rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+                       rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+                       rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+                       rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
                        break;
                case 2:
-                       tmp.full = rfixed_const(100);
-                       rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
-                       rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-                       rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
-                       rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
-                       rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
-                       rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
-                       rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+                       tmp.full = dfixed_const(100);
+                       rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+                       rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+                       rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+                       rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+                       rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+                       rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+                       rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
                        break;
                default:
-                       tmp.full = rfixed_const(100);
+                       tmp.full = dfixed_const(100);
                        /* We assume the slowest possible clock, i.e. worst case */
                        /* DDR 333 MHz */
-                       rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+                       rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
                        /* FIXME: system clock ? */
-                       rdev->pm.igp_system_mclk.full = rfixed_const(100);
-                       rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
-                       rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
-                       rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+                       rdev->pm.igp_system_mclk.full = dfixed_const(100);
+                       rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+                       rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+                       rdev->pm.igp_ht_link_width.full = dfixed_const(8);
                        DRM_ERROR("No integrated system info for your GPU, using safe default\n");
                        break;
                }
        } else {
-               tmp.full = rfixed_const(100);
+               tmp.full = dfixed_const(100);
                /* We assume the slowest possible clock, i.e. worst case */
                /* DDR 333 MHz */
-               rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+               rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
                /* FIXME: system clock ? */
-               rdev->pm.igp_system_mclk.full = rfixed_const(100);
-               rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
-               rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
-               rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+               rdev->pm.igp_system_mclk.full = dfixed_const(100);
+               rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+               rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+               rdev->pm.igp_ht_link_width.full = dfixed_const(8);
                DRM_ERROR("No integrated system info for your GPU, using safe default\n");
        }
        /* Compute various bandwidths */
        /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4  */
-       tmp.full = rfixed_const(4);
-       rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
+       tmp.full = dfixed_const(4);
+       rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
        /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
         *              = ht_clk * ht_width / 5
         */
-       tmp.full = rfixed_const(5);
-       rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
+       tmp.full = dfixed_const(5);
+       rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
                                                rdev->pm.igp_ht_link_width);
-       rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
+       rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
        if (tmp.full < rdev->pm.max_bandwidth.full) {
                /* HT link is a limiting factor */
                rdev->pm.max_bandwidth.full = tmp.full;
@@ -138,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev)
        /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
         *                    = (sideport_clk * 14) / 10
         */
-       tmp.full = rfixed_const(14);
-       rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
-       tmp.full = rfixed_const(10);
-       rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
+       tmp.full = dfixed_const(14);
+       rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+       tmp.full = dfixed_const(10);
+       rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
 }
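
The rfixed_* calls in this file were renamed to the dfixed_* helpers now shared through drm. Assuming the usual 20.12 layout (20 integer bits over 12 fractional bits), const/mul/div/trunc behave like this simplified standalone model, which is a sketch and not the kernel implementation:

#include <assert.h>
#include <stdint.h>

typedef uint32_t fixed20_12;	/* 20 integer bits, 12 fractional */

static fixed20_12 dfx_const(uint32_t a)   { return a << 12; }
static uint32_t   dfx_trunc(fixed20_12 a) { return a >> 12; }

static fixed20_12 dfx_mul(fixed20_12 a, fixed20_12 b)
{
	return (fixed20_12)(((uint64_t)a * b) >> 12);
}

static fixed20_12 dfx_div(fixed20_12 a, fixed20_12 b)
{
	return (fixed20_12)(((uint64_t)a << 12) / b);
}

int main(void)
{
	/* 1/2 in 20.12 is 0x800; 0.5 * 8 truncates back to 4 */
	fixed20_12 half = dfx_div(dfx_const(1), dfx_const(2));
	assert(half == 0x800);
	assert(dfx_trunc(dfx_mul(half, dfx_const(8))) == 4);
	return 0;
}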
 
 void rs690_mc_init(struct radeon_device *rdev)
@@ -241,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
                return;
        }
 
-       if (crtc->vsc.full > rfixed_const(2))
-               wm->num_line_pair.full = rfixed_const(2);
+       if (crtc->vsc.full > dfixed_const(2))
+               wm->num_line_pair.full = dfixed_const(2);
        else
-               wm->num_line_pair.full = rfixed_const(1);
-
-       b.full = rfixed_const(mode->crtc_hdisplay);
-       c.full = rfixed_const(256);
-       a.full = rfixed_div(b, c);
-       request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
-       request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
-       if (a.full < rfixed_const(4)) {
+               wm->num_line_pair.full = dfixed_const(1);
+
+       b.full = dfixed_const(mode->crtc_hdisplay);
+       c.full = dfixed_const(256);
+       a.full = dfixed_div(b, c);
+       request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+       request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+       if (a.full < dfixed_const(4)) {
                wm->lb_request_fifo_depth = 4;
        } else {
-               wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+               wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
        }
 
        /* Determine consumption rate
@@ -263,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
         *  vsc = vertical scaling ratio, defined as source/destination
         *  hsc = horizontal scaling ratio, defined as source/destination
         */
-       a.full = rfixed_const(mode->clock);
-       b.full = rfixed_const(1000);
-       a.full = rfixed_div(a, b);
-       pclk.full = rfixed_div(b, a);
+       a.full = dfixed_const(mode->clock);
+       b.full = dfixed_const(1000);
+       a.full = dfixed_div(a, b);
+       pclk.full = dfixed_div(b, a);
        if (crtc->rmx_type != RMX_OFF) {
-               b.full = rfixed_const(2);
+               b.full = dfixed_const(2);
                if (crtc->vsc.full > b.full)
                        b.full = crtc->vsc.full;
-               b.full = rfixed_mul(b, crtc->hsc);
-               c.full = rfixed_const(2);
-               b.full = rfixed_div(b, c);
-               consumption_time.full = rfixed_div(pclk, b);
+               b.full = dfixed_mul(b, crtc->hsc);
+               c.full = dfixed_const(2);
+               b.full = dfixed_div(b, c);
+               consumption_time.full = dfixed_div(pclk, b);
        } else {
                consumption_time.full = pclk.full;
        }
-       a.full = rfixed_const(1);
-       wm->consumption_rate.full = rfixed_div(a, consumption_time);
+       a.full = dfixed_const(1);
+       wm->consumption_rate.full = dfixed_div(a, consumption_time);
 
 
        /* Determine line time
@@ -287,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
         *  LineTime = total number of horizontal pixels
         *  pclk = pixel clock period(ns)
         */
-       a.full = rfixed_const(crtc->base.mode.crtc_htotal);
-       line_time.full = rfixed_mul(a, pclk);
+       a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+       line_time.full = dfixed_mul(a, pclk);
 
        /* Determine active time
         *  ActiveTime = time of active region of display within one line,
         *  hactive = total number of horizontal active pixels
         *  htotal = total number of horizontal pixels
         */
-       a.full = rfixed_const(crtc->base.mode.crtc_htotal);
-       b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
-       wm->active_time.full = rfixed_mul(line_time, b);
-       wm->active_time.full = rfixed_div(wm->active_time, a);
+       a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+       b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+       wm->active_time.full = dfixed_mul(line_time, b);
+       wm->active_time.full = dfixed_div(wm->active_time, a);
 
        /* Maximum bandwidth is the minimum bandwidth of all components */
        rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
@@ -306,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
                if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
                        rdev->pm.sideport_bandwidth.full)
                        rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
-               read_delay_latency.full = rfixed_const(370 * 800 * 1000);
-               read_delay_latency.full = rfixed_div(read_delay_latency,
+               read_delay_latency.full = dfixed_const(370 * 800 * 1000);
+               read_delay_latency.full = dfixed_div(read_delay_latency,
                        rdev->pm.igp_sideport_mclk);
        } else {
                if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
@@ -316,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
                if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
                        rdev->pm.ht_bandwidth.full)
                        rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
-               read_delay_latency.full = rfixed_const(5000);
+               read_delay_latency.full = dfixed_const(5000);
        }
 
        /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
-       a.full = rfixed_const(16);
-       rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
-       a.full = rfixed_const(1000);
-       rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
+       a.full = dfixed_const(16);
+       rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
+       a.full = dfixed_const(1000);
+       rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
        /* Determine chunk time
         * ChunkTime = the time it takes the DCP to send one chunk of data
         * to the LB which consists of pipeline delay and inter chunk gap
         * sclk = system clock(ns)
         */
-       a.full = rfixed_const(256 * 13);
-       chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
-       a.full = rfixed_const(10);
-       chunk_time.full = rfixed_div(chunk_time, a);
+       a.full = dfixed_const(256 * 13);
+       chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
+       a.full = dfixed_const(10);
+       chunk_time.full = dfixed_div(chunk_time, a);
 
        /* Determine the worst case latency
         * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -342,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
         * ChunkTime = time it takes the DCP to send one chunk of data to the LB
         *             which consists of pipeline delay and inter chunk gap
         */
-       if (rfixed_trunc(wm->num_line_pair) > 1) {
-               a.full = rfixed_const(3);
-               wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+       if (dfixed_trunc(wm->num_line_pair) > 1) {
+               a.full = dfixed_const(3);
+               wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
                wm->worst_case_latency.full += read_delay_latency.full;
        } else {
-               a.full = rfixed_const(2);
-               wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+               a.full = dfixed_const(2);
+               wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
                wm->worst_case_latency.full += read_delay_latency.full;
        }
 
@@ -362,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
         *              of data to the LB which consists of
         *  pipeline delay and inter chunk gap
         */
-       if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+       if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
                tolerable_latency.full = line_time.full;
        } else {
-               tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+               tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
                tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
-               tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+               tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
                tolerable_latency.full = line_time.full - tolerable_latency.full;
        }
        /* We assume worst case 32bits (4 bytes) */
-       wm->dbpp.full = rfixed_const(4 * 8);
+       wm->dbpp.full = dfixed_const(4 * 8);
 
        /* Determine the maximum priority mark
         *  width = viewport width in pixels
         */
-       a.full = rfixed_const(16);
-       wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
-       wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
-       wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+       a.full = dfixed_const(16);
+       wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+       wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+       wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
 
        /* Determine estimated width */
        estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
-       estimated_width.full = rfixed_div(estimated_width, consumption_time);
-       if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
-               wm->priority_mark.full = rfixed_const(10);
+       estimated_width.full = dfixed_div(estimated_width, consumption_time);
+       if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+               wm->priority_mark.full = dfixed_const(10);
        } else {
-               a.full = rfixed_const(16);
-               wm->priority_mark.full = rfixed_div(estimated_width, a);
-               wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+               a.full = dfixed_const(16);
+               wm->priority_mark.full = dfixed_div(estimated_width, a);
+               wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
                wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
        }
 }
@@ -441,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
        WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
 
        if (mode0 && mode1) {
-               if (rfixed_trunc(wm0.dbpp) > 64)
-                       a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+               if (dfixed_trunc(wm0.dbpp) > 64)
+                       a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
-               if (rfixed_trunc(wm1.dbpp) > 64)
-                       b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+               if (dfixed_trunc(wm1.dbpp) > 64)
+                       b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
                else
                        b.full = wm1.num_line_pair.full;
                a.full += b.full;
-               fill_rate.full = rfixed_div(wm0.sclk, a);
+               fill_rate.full = dfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
-                       b.full = rfixed_mul(b, wm0.active_time);
-                       a.full = rfixed_mul(wm0.worst_case_latency,
+                       b.full = dfixed_mul(b, wm0.active_time);
+                       a.full = dfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        a.full = a.full + b.full;
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark02.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark02.full = dfixed_div(a, b);
                } else {
-                       a.full = rfixed_mul(wm0.worst_case_latency,
+                       a.full = dfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark02.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark02.full = dfixed_div(a, b);
                }
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
-                       b.full = rfixed_mul(b, wm1.active_time);
-                       a.full = rfixed_mul(wm1.worst_case_latency,
+                       b.full = dfixed_mul(b, wm1.active_time);
+                       a.full = dfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        a.full = a.full + b.full;
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark12.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark12.full = dfixed_div(a, b);
                } else {
-                       a.full = rfixed_mul(wm1.worst_case_latency,
+                       a.full = dfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark12.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark12.full = dfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
-               if (rfixed_trunc(priority_mark02) < 0)
+               if (dfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
-               if (rfixed_trunc(priority_mark12) < 0)
+               if (dfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
-               d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
-               d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+               d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+               d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
                if (rdev->disp_priority == 2) {
                        d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
                        d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
@@ -502,32 +500,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
                WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
                WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
        } else if (mode0) {
-               if (rfixed_trunc(wm0.dbpp) > 64)
-                       a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+               if (dfixed_trunc(wm0.dbpp) > 64)
+                       a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
-               fill_rate.full = rfixed_div(wm0.sclk, a);
+               fill_rate.full = dfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
-                       b.full = rfixed_mul(b, wm0.active_time);
-                       a.full = rfixed_mul(wm0.worst_case_latency,
+                       b.full = dfixed_mul(b, wm0.active_time);
+                       a.full = dfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        a.full = a.full + b.full;
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark02.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark02.full = dfixed_div(a, b);
                } else {
-                       a.full = rfixed_mul(wm0.worst_case_latency,
+                       a.full = dfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark02.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark02.full = dfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
-               if (rfixed_trunc(priority_mark02) < 0)
+               if (dfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
-               d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+               d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
                if (rdev->disp_priority == 2)
                        d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
                WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -537,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
                WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
                        S_006D4C_D2MODE_PRIORITY_B_OFF(1));
        } else {
-               if (rfixed_trunc(wm1.dbpp) > 64)
-                       a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+               if (dfixed_trunc(wm1.dbpp) > 64)
+                       a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
                else
                        a.full = wm1.num_line_pair.full;
-               fill_rate.full = rfixed_div(wm1.sclk, a);
+               fill_rate.full = dfixed_div(wm1.sclk, a);
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
-                       b.full = rfixed_mul(b, wm1.active_time);
-                       a.full = rfixed_mul(wm1.worst_case_latency,
+                       b.full = dfixed_mul(b, wm1.active_time);
+                       a.full = dfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        a.full = a.full + b.full;
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark12.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark12.full = dfixed_div(a, b);
                } else {
-                       a.full = rfixed_mul(wm1.worst_case_latency,
+                       a.full = dfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark12.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark12.full = dfixed_div(a, b);
                }
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
-               if (rfixed_trunc(priority_mark12) < 0)
+               if (dfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
-               d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+               d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
                if (rdev->disp_priority == 2)
                        d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
                WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
@@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev)
        /* Resume clock before doing reset */
        rv515_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
@@ -678,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
 
 void rs690_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -717,7 +714,7 @@ int rs690_init(struct radeon_device *rdev)
                return -EINVAL;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
@@ -729,8 +726,6 @@ int rs690_init(struct radeon_device *rdev)
 
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* initialize memory controller */
        rs690_mc_init(rdev);
        rv515_debugfs(rdev);
index 9035121..7d9a7b0 100644
@@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev)
 {
        unsigned pipe_select_current, gb_pipe_select, tmp;
 
-       r100_hdp_reset(rdev);
-       r100_rb2d_reset(rdev);
-
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "reseting GPU. Bad things might happen.\n");
        }
-
        rv515_vga_render_disable(rdev);
-
        r420_pipes_init(rdev);
        gb_pipe_select = RREG32(0x402C);
        tmp = RREG32(0x170C);
@@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev)
        }
 }
 
-int rv515_ga_reset(struct radeon_device *rdev)
-{
-       uint32_t tmp;
-       bool reinit_cp;
-       int i;
-
-       reinit_cp = rdev->cp.ready;
-       rdev->cp.ready = false;
-       for (i = 0; i < rdev->usec_timeout; i++) {
-               WREG32(CP_CSQ_MODE, 0);
-               WREG32(CP_CSQ_CNTL, 0);
-               WREG32(RBBM_SOFT_RESET, 0x32005);
-               (void)RREG32(RBBM_SOFT_RESET);
-               udelay(200);
-               WREG32(RBBM_SOFT_RESET, 0);
-               /* Wait to prevent race in RBBM_STATUS */
-               mdelay(1);
-               tmp = RREG32(RBBM_STATUS);
-               if (tmp & ((1 << 20) | (1 << 26))) {
-                       DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
-                       /* GA still busy soft reset it */
-                       WREG32(0x429C, 0x200);
-                       WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
-                       WREG32(0x43E0, 0);
-                       WREG32(0x43E4, 0);
-                       WREG32(0x24AC, 0);
-               }
-               /* Wait to prevent race in RBBM_STATUS */
-               mdelay(1);
-               tmp = RREG32(RBBM_STATUS);
-               if (!(tmp & ((1 << 20) | (1 << 26)))) {
-                       break;
-               }
-       }
-       for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = RREG32(RBBM_STATUS);
-               if (!(tmp & ((1 << 20) | (1 << 26)))) {
-                       DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
-                                tmp);
-                       DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
-                       DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
-                       DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
-                       if (reinit_cp) {
-                               return r100_cp_init(rdev, rdev->cp.ring_size);
-                       }
-                       return 0;
-               }
-               DRM_UDELAY(1);
-       }
-       tmp = RREG32(RBBM_STATUS);
-       DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
-       return -1;
-}
-
-int rv515_gpu_reset(struct radeon_device *rdev)
-{
-       uint32_t status;
-
-       /* reset order likely matter */
-       status = RREG32(RBBM_STATUS);
-       /* reset HDP */
-       r100_hdp_reset(rdev);
-       /* reset rb2d */
-       if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
-               r100_rb2d_reset(rdev);
-       }
-       /* reset GA */
-       if (status & ((1 << 20) | (1 << 26))) {
-               rv515_ga_reset(rdev);
-       }
-       /* reset CP */
-       status = RREG32(RBBM_STATUS);
-       if (status & (1 << 16)) {
-               r100_cp_reset(rdev);
-       }
-       /* Check if GPU is idle */
-       status = RREG32(RBBM_STATUS);
-       if (status & (1 << 31)) {
-               DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
-               return -1;
-       }
-       DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
-       return 0;
-}
-
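
The removed rv515_ga_reset/rv515_gpu_reset above are superseded by the common reset path reached through radeon_asic_reset(). A sketch of that dispatch, assuming the per-ASIC function-table pattern this series uses (the struct layout and field name are an assumption, not copied from the tree):

/* Per-ASIC dispatch sketch; exact field names are an assumption. */
struct radeon_device;

struct radeon_asic {
	int (*asic_reset)(struct radeon_device *rdev);
	/* ...dozens of other per-chip callbacks... */
};

struct radeon_device {
	struct radeon_asic *asic;
	/* ... */
};

static inline int radeon_asic_reset(struct radeon_device *rdev)
{
	return rdev->asic->asic_reset(rdev);   /* e.g. an rv515-family reset */
}
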
 static void rv515_vram_get_type(struct radeon_device *rdev)
 {
        uint32_t tmp;
@@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
 
        tmp = RREG32(0x2140);
        seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
-       radeon_gpu_reset(rdev);
+       radeon_asic_reset(rdev);
        tmp = RREG32(0x425C);
        seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
        return 0;
@@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev)
        /* Resume clock before doing reset */
        rv515_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
@@ -535,7 +445,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
 
 void rv515_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
@@ -573,7 +482,7 @@ int rv515_init(struct radeon_device *rdev)
                return -EINVAL;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (radeon_gpu_reset(rdev)) {
+       if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
@@ -584,8 +493,6 @@ int rv515_init(struct radeon_device *rdev)
                return -EINVAL;
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
@@ -885,20 +792,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
                return;
        }
 
-       if (crtc->vsc.full > rfixed_const(2))
-               wm->num_line_pair.full = rfixed_const(2);
+       if (crtc->vsc.full > dfixed_const(2))
+               wm->num_line_pair.full = dfixed_const(2);
        else
-               wm->num_line_pair.full = rfixed_const(1);
-
-       b.full = rfixed_const(mode->crtc_hdisplay);
-       c.full = rfixed_const(256);
-       a.full = rfixed_div(b, c);
-       request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
-       request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
-       if (a.full < rfixed_const(4)) {
+               wm->num_line_pair.full = dfixed_const(1);
+
+       b.full = dfixed_const(mode->crtc_hdisplay);
+       c.full = dfixed_const(256);
+       a.full = dfixed_div(b, c);
+       request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+       request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+       if (a.full < dfixed_const(4)) {
                wm->lb_request_fifo_depth = 4;
        } else {
-               wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+               wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
        }
 
        /* Determine consumption rate
@@ -907,23 +814,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
         *  vsc = vertical scaling ratio, defined as source/destination
         *  hsc = horizontal scaling ratio, defined as source/destination
         */
-       a.full = rfixed_const(mode->clock);
-       b.full = rfixed_const(1000);
-       a.full = rfixed_div(a, b);
-       pclk.full = rfixed_div(b, a);
+       a.full = dfixed_const(mode->clock);
+       b.full = dfixed_const(1000);
+       a.full = dfixed_div(a, b);
+       pclk.full = dfixed_div(b, a);
        if (crtc->rmx_type != RMX_OFF) {
-               b.full = rfixed_const(2);
+               b.full = dfixed_const(2);
                if (crtc->vsc.full > b.full)
                        b.full = crtc->vsc.full;
-               b.full = rfixed_mul(b, crtc->hsc);
-               c.full = rfixed_const(2);
-               b.full = rfixed_div(b, c);
-               consumption_time.full = rfixed_div(pclk, b);
+               b.full = dfixed_mul(b, crtc->hsc);
+               c.full = dfixed_const(2);
+               b.full = dfixed_div(b, c);
+               consumption_time.full = dfixed_div(pclk, b);
        } else {
                consumption_time.full = pclk.full;
        }
-       a.full = rfixed_const(1);
-       wm->consumption_rate.full = rfixed_div(a, consumption_time);
+       a.full = dfixed_const(1);
+       wm->consumption_rate.full = dfixed_div(a, consumption_time);
 
 
        /* Determine line time
@@ -931,27 +838,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
         *  LineTime = total number of horizontal pixels
         *  pclk = pixel clock period(ns)
         */
-       a.full = rfixed_const(crtc->base.mode.crtc_htotal);
-       line_time.full = rfixed_mul(a, pclk);
+       a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+       line_time.full = dfixed_mul(a, pclk);
 
        /* Determine active time
         *  ActiveTime = time of active region of display within one line,
         *  hactive = total number of horizontal active pixels
         *  htotal = total number of horizontal pixels
         */
-       a.full = rfixed_const(crtc->base.mode.crtc_htotal);
-       b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
-       wm->active_time.full = rfixed_mul(line_time, b);
-       wm->active_time.full = rfixed_div(wm->active_time, a);
+       a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+       b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+       wm->active_time.full = dfixed_mul(line_time, b);
+       wm->active_time.full = dfixed_div(wm->active_time, a);
 
        /* Determine chunk time
         * ChunkTime = the time it takes the DCP to send one chunk of data
         * to the LB which consists of pipeline delay and inter chunk gap
         * sclk = system clock (MHz)
         */
-       a.full = rfixed_const(600 * 1000);
-       chunk_time.full = rfixed_div(a, rdev->pm.sclk);
-       read_delay_latency.full = rfixed_const(1000);
+       a.full = dfixed_const(600 * 1000);
+       chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+       read_delay_latency.full = dfixed_const(1000);
 
        /* Determine the worst case latency
         * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -961,9 +868,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
         * ChunkTime = time it takes the DCP to send one chunk of data to the LB
         *             which consists of pipeline delay and inter chunk gap
         */
-       if (rfixed_trunc(wm->num_line_pair) > 1) {
-               a.full = rfixed_const(3);
-               wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+       if (dfixed_trunc(wm->num_line_pair) > 1) {
+               a.full = dfixed_const(3);
+               wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
                wm->worst_case_latency.full += read_delay_latency.full;
        } else {
                wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
@@ -979,34 +886,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
         *              of data to the LB which consists of
         *  pipeline delay and inter chunk gap
         */
-       if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+       if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
                tolerable_latency.full = line_time.full;
        } else {
-               tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+               tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
                tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
-               tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+               tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
                tolerable_latency.full = line_time.full - tolerable_latency.full;
        }
        /* We assume worst case 32bits (4 bytes) */
-       wm->dbpp.full = rfixed_const(2 * 16);
+       wm->dbpp.full = dfixed_const(2 * 16);
 
        /* Determine the maximum priority mark
         *  width = viewport width in pixels
         */
-       a.full = rfixed_const(16);
-       wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
-       wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
-       wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+       a.full = dfixed_const(16);
+       wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+       wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+       wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
 
        /* Determine estimated width */
        estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
-       estimated_width.full = rfixed_div(estimated_width, consumption_time);
-       if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+       estimated_width.full = dfixed_div(estimated_width, consumption_time);
+       if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
                wm->priority_mark.full = wm->priority_mark_max.full;
        } else {
-               a.full = rfixed_const(16);
-               wm->priority_mark.full = rfixed_div(estimated_width, a);
-               wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+               a.full = dfixed_const(16);
+               wm->priority_mark.full = dfixed_div(estimated_width, a);
+               wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
                wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
        }
 }
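
To make the estimated-width and priority-mark math above concrete, here is a worked example with hypothetical display numbers in plain arithmetic (the driver does the same in 20.12 fixed point):

#include <stdio.h>

int main(void)
{
	int hdisplay = 1280;                   /* viewport width, pixels */
	double tolerable_latency = 9000.0;     /* ns, from line/chunk time */
	double worst_case_latency = 4000.0;    /* ns */
	double consumption_time = 5.0;         /* ns per pixel */

	/* priority_mark_max = ceil(width / 16) */
	int mark_max = (hdisplay + 15) / 16;   /* 80 */

	/* pixels the pipe can fetch inside the remaining latency budget */
	int est_width = (int)((tolerable_latency - worst_case_latency) /
			      consumption_time);   /* 1000 */

	int mark;
	if (est_width > hdisplay)
		mark = mark_max;               /* rv515 fallback above */
	else
		mark = mark_max - (est_width + 15) / 16;   /* 80 - 63 = 17 */

	printf("priority mark %d (max %d)\n", mark, mark_max);
	return 0;
}
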
@@ -1035,58 +942,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
        WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
 
        if (mode0 && mode1) {
-               if (rfixed_trunc(wm0.dbpp) > 64)
-                       a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+               if (dfixed_trunc(wm0.dbpp) > 64)
+                       a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
-               if (rfixed_trunc(wm1.dbpp) > 64)
-                       b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+               if (dfixed_trunc(wm1.dbpp) > 64)
+                       b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
                else
                        b.full = wm1.num_line_pair.full;
                a.full += b.full;
-               fill_rate.full = rfixed_div(wm0.sclk, a);
+               fill_rate.full = dfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
-                       b.full = rfixed_mul(b, wm0.active_time);
-                       a.full = rfixed_const(16);
-                       b.full = rfixed_div(b, a);
-                       a.full = rfixed_mul(wm0.worst_case_latency,
+                       b.full = dfixed_mul(b, wm0.active_time);
+                       a.full = dfixed_const(16);
+                       b.full = dfixed_div(b, a);
+                       a.full = dfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        priority_mark02.full = a.full + b.full;
                } else {
-                       a.full = rfixed_mul(wm0.worst_case_latency,
+                       a.full = dfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark02.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark02.full = dfixed_div(a, b);
                }
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
-                       b.full = rfixed_mul(b, wm1.active_time);
-                       a.full = rfixed_const(16);
-                       b.full = rfixed_div(b, a);
-                       a.full = rfixed_mul(wm1.worst_case_latency,
+                       b.full = dfixed_mul(b, wm1.active_time);
+                       a.full = dfixed_const(16);
+                       b.full = dfixed_div(b, a);
+                       a.full = dfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        priority_mark12.full = a.full + b.full;
                } else {
-                       a.full = rfixed_mul(wm1.worst_case_latency,
+                       a.full = dfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark12.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark12.full = dfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
-               if (rfixed_trunc(priority_mark02) < 0)
+               if (dfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
-               if (rfixed_trunc(priority_mark12) < 0)
+               if (dfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
-               d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
-               d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+               d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+               d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
                if (rdev->disp_priority == 2) {
                        d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
                        d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
@@ -1096,32 +1003,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
                WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
                WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
        } else if (mode0) {
-               if (rfixed_trunc(wm0.dbpp) > 64)
-                       a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+               if (dfixed_trunc(wm0.dbpp) > 64)
+                       a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
-               fill_rate.full = rfixed_div(wm0.sclk, a);
+               fill_rate.full = dfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
-                       b.full = rfixed_mul(b, wm0.active_time);
-                       a.full = rfixed_const(16);
-                       b.full = rfixed_div(b, a);
-                       a.full = rfixed_mul(wm0.worst_case_latency,
+                       b.full = dfixed_mul(b, wm0.active_time);
+                       a.full = dfixed_const(16);
+                       b.full = dfixed_div(b, a);
+                       a.full = dfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        priority_mark02.full = a.full + b.full;
                } else {
-                       a.full = rfixed_mul(wm0.worst_case_latency,
+                       a.full = dfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
-                       b.full = rfixed_const(16);
-                       priority_mark02.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16);
+                       priority_mark02.full = dfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
-               if (rfixed_trunc(priority_mark02) < 0)
+               if (dfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
-               d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+               d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
                if (rdev->disp_priority == 2)
                        d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
                WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -1129,32 +1036,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
                WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
                WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
        } else {
-               if (rfixed_trunc(wm1.dbpp) > 64)
-                       a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+               if (dfixed_trunc(wm1.dbpp) > 64)
+                       a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
                else
                        a.full = wm1.num_line_pair.full;
-               fill_rate.full = rfixed_div(wm1.sclk, a);
+               fill_rate.full = dfixed_div(wm1.sclk, a);
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
-                       b.full = rfixed_mul(b, wm1.active_time);
-                       a.full = rfixed_const(16);
-                       b.full = rfixed_div(b, a);
-                       a.full = rfixed_mul(wm1.worst_case_latency,
+                       b.full = dfixed_mul(b, wm1.active_time);
+                       a.full = dfixed_const(16);
+                       b.full = dfixed_div(b, a);
+                       a.full = dfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        priority_mark12.full = a.full + b.full;
                } else {
-                       a.full = rfixed_mul(wm1.worst_case_latency,
+                       a.full = dfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
-                       b.full = rfixed_const(16 * 1000);
-                       priority_mark12.full = rfixed_div(a, b);
+                       b.full = dfixed_const(16 * 1000);
+                       priority_mark12.full = dfixed_div(a, b);
                }
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
-               if (rfixed_trunc(priority_mark12) < 0)
+               if (dfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
-               d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+               d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
                if (rdev->disp_priority == 2)
                        d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
                WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
index fc216e4..590309a 100644
 #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
 
 /* Registers */
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
 #define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
 #define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
 #define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
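
The S_/G_/C_ triplets above follow the file's existing field-macro convention: S_* shifts a value into the field, G_* extracts it from a register word, and C_* is the AND mask that clears it. A typical read-modify-write with the new soft-reset fields would look like this (illustrative fragment; WREG32/RREG32 are the driver's MMIO accessors):

/* Illustrative read-modify-write: assert only the CP soft-reset bit. */
static void example_soft_reset_cp(struct radeon_device *rdev)
{
	u32 tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);

	tmp &= C_0000F0_SOFT_RESET_CP;      /* clear the field */
	tmp |= S_0000F0_SOFT_RESET_CP(1);   /* set it to 1 */
	WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);

	if (G_0000F0_SOFT_RESET_CP(RREG32(R_0000F0_RBBM_SOFT_RESET)))
		;   /* the CP reset line is now asserted */
}
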
index 97958a6..253f24a 100644
 static void rv770_gpu_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
 
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+
+}
 
 /*
  * GART
@@ -237,7 +241,6 @@ void r700_cp_stop(struct radeon_device *rdev)
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 }
 
-
 static int rv770_cp_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
@@ -272,6 +275,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
        return 0;
 }
 
+void r700_cp_fini(struct radeon_device *rdev)
+{
+       r700_cp_stop(rdev);
+       radeon_ring_fini(rdev);
+}
 
 /*
  * Core functions
@@ -906,23 +914,12 @@ int rv770_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
-       /* FIXME remove this once we support unmappable VRAM */
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-               rdev->mc.mc_vram_size = rdev->mc.aper_size;
-               rdev->mc.real_vram_size = rdev->mc.aper_size;
-       }
        r600_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 
        return 0;
 }
 
-int rv770_gpu_reset(struct radeon_device *rdev)
-{
-       /* FIXME: implement any rv770 specific bits */
-       return r600_gpu_reset(rdev);
-}
-
 static int rv770_startup(struct radeon_device *rdev)
 {
        int r;
@@ -1094,8 +1091,6 @@ int rv770_init(struct radeon_device *rdev)
        r = radeon_clocks_init(rdev);
        if (r)
                return r;
-       /* Initialize power management */
-       radeon_pm_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@ -1132,7 +1127,7 @@ int rv770_init(struct radeon_device *rdev)
        r = rv770_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
-               r600_cp_fini(rdev);
+               r700_cp_fini(rdev);
                r600_wb_fini(rdev);
                r600_irq_fini(rdev);
                radeon_irq_kms_fini(rdev);
@@ -1164,9 +1159,8 @@ int rv770_init(struct radeon_device *rdev)
 
 void rv770_fini(struct radeon_device *rdev)
 {
-       radeon_pm_fini(rdev);
        r600_blit_fini(rdev);
-       r600_cp_fini(rdev);
+       r700_cp_fini(rdev);
        r600_wb_fini(rdev);
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
index bff6fc2..2d0c9ca 100644
@@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
 {
        drm_savage_private_t *dev_priv;
 
-       dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
+       dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;
 
-       memset(dev_priv, 0, sizeof(drm_savage_private_t));
        dev->dev_private = (void *)dev_priv;
 
        dev_priv->chipset = (enum savage_family)chipset;
index 1e138f5..4256e20 100644
@@ -4,6 +4,6 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
        ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
-       ttm_object.o ttm_lock.o ttm_execbuf_util.o
+       ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
index 0e3754a..555ebb1 100644
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
        printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
        printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
        printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
-       printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
-       printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
        printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
        printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
                man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
-                                 bool evict, bool interruptible, bool no_wait)
+                                 bool evict, bool interruptible,
+                                 bool no_wait_reserve, bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-               ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+               ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
-                                        no_wait, mem);
+                                        no_wait_reserve, no_wait_gpu, mem);
        else
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
        if (ret)
                goto out_err;
@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+       return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+       if (resched)
+               schedule_delayed_work(&bdev->wq,
+                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
+
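+
The two helpers just added are meant to bracket work that must not race with TTM's delayed-destroy work; the return value of the lock side says whether work was pending and should be rescheduled afterwards. A caller-side sketch (hypothetical function, not from this patch):

/* Hypothetical caller: quiesce delayed buffer destruction around a
 * critical section such as a GPU reset, then restore it. */
static void example_critical_section(struct ttm_bo_device *bdev)
{
	int resched = ttm_bo_lock_delayed_workqueue(bdev);

	/* ...work that must not race with delayed buffer destruction... */

	ttm_bo_unlock_delayed_workqueue(bdev, resched);
}
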
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-                       bool no_wait)
+                       bool no_wait_reserve, bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        int ret = 0;
 
        spin_lock(&bo->lock);
-       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+       ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);
 
        if (unlikely(ret != 0)) {
@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
+       evict_mem.bus.io_reserved = false;
 
        placement.fpfn = 0;
        placement.lpfn = 0;
@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-                               no_wait);
+                               no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        }
 
        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-                                    no_wait);
+                                    no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +684,8 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
-                               bool interruptible, bool no_wait)
+                               bool interruptible, bool no_wait_reserve,
+                               bool no_wait_gpu)
 {
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +702,11 @@ retry:
        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);
 
-       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+       ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
        if (unlikely(ret == -EBUSY)) {
                spin_unlock(&glob->lru_lock);
-               if (likely(!no_wait))
+               if (likely(!no_wait_gpu))
                        ret = ttm_bo_wait_unreserved(bo, interruptible);
 
                kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +728,7 @@ retry:
        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-       ret = ttm_bo_evict(bo, interruptible, no_wait);
+       ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
        ttm_bo_unreserve(bo);
 
        kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
-                                       bool interruptible, bool no_wait)
+                                       bool interruptible,
+                                       bool no_wait_reserve,
+                                       bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                }
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-                                               no_wait);
+                                               no_wait_reserve, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
-                       bool interruptible, bool no_wait)
+                       bool interruptible, bool no_wait_reserve,
+                       bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                }
 
                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-                                               interruptible, no_wait);
+                                               interruptible, no_wait_reserve, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        mem->mm_node->private = bo;
@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait)
+                       bool interruptible, bool no_wait_reserve,
+                       bool no_wait_gpu)
 {
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
         * instead of doing it here.
         */
        spin_lock(&bo->lock);
-       ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+       ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bo->lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
+       mem.bus.io_reserved = false;
        /*
         * Determine where to move the buffer.
         */
-       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
        if (ret)
                goto out_unlock;
-       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait)
+                       bool interruptible, bool no_wait_reserve,
+                       bool no_wait_gpu)
 {
        int ret;
 
@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
-               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
                if (ret)
                        return ret;
        } else {
@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
+       bo->mem.bus.io_reserved = false;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
                        goto out_err;
        }
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
        if (ret)
                goto out_err;
 
@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
-               ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+               ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
        return true;
 }
 
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-                     struct ttm_mem_reg *mem,
-                     unsigned long *bus_base,
-                     unsigned long *bus_offset, unsigned long *bus_size)
-{
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-       *bus_size = 0;
-       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-               return -EINVAL;
-
-       if (ttm_mem_reg_is_pci(bdev, mem)) {
-               *bus_offset = mem->mm_node->start << PAGE_SHIFT;
-               *bus_size = mem->num_pages << PAGE_SHIFT;
-               *bus_base = man->io_offset;
-       }
-
-       return 0;
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
        if (!bdev->dev_mapping)
                return;
-
        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+       ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
@@ -1811,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                evict_mem.mem_type = TTM_PL_SYSTEM;
 
                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-                                            false, false);
+                                            false, false, false);
                if (unlikely(ret != 0))
                        goto out;
        }
index d764e82..13012a1 100644
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                   bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+                   bool evict, bool no_wait_reserve,
+                   bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       int ret;
+
+       if (!mem->bus.io_reserved) {
+               mem->bus.io_reserved = true;
+               ret = bdev->driver->io_mem_reserve(bdev, mem);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+       return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       if (bdev->driver->io_mem_reserve) {
+               if (mem->bus.io_reserved) {
+                       mem->bus.io_reserved = false;
+                       bdev->driver->io_mem_free(bdev, mem);
+               }
+       }
+}
+
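+
ttm_mem_io_reserve()/ttm_mem_io_free() above delegate to new driver callbacks that describe where a placement lives on the bus. A sketch of what such a callback fills in, using only the mem->bus fields visible in this patch (hypothetical driver; the aperture-base helper is made up):

/* Hypothetical driver callback: describe the bus location of a
 * placement so TTM can ioremap it on demand. */
static int example_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	mem->bus.addr = NULL;       /* no pre-existing kernel mapping */
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:         /* system RAM: nothing to map */
		return 0;
	case TTM_PL_VRAM:           /* VRAM: offset inside the PCI aperture */
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.base = example_vram_aperture_base(bdev); /* made up */
		mem->bus.is_iomem = true;
		return 0;
	default:
		return -EINVAL;
	}
}
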
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
 {
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-       unsigned long bus_offset;
-       unsigned long bus_size;
-       unsigned long bus_base;
        int ret;
        void *addr;
 
        *virtual = NULL;
-       ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-       if (ret || bus_size == 0)
+       ret = ttm_mem_io_reserve(bdev, mem);
+       if (ret || !mem->bus.is_iomem)
                return ret;
 
-       if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-               addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-       else {
+       if (mem->bus.addr) {
+               addr = mem->bus.addr;
+       } else {
                if (mem->placement & TTM_PL_FLAG_WC)
-                       addr = ioremap_wc(bus_base + bus_offset, bus_size);
+                       addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
-                       addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-               if (!addr)
+                       addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+               if (!addr) {
+                       ttm_mem_io_free(bdev, mem);
                        return -ENOMEM;
+               }
        }
        *virtual = addr;
        return 0;
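
ttm_mem_reg_ioremap() now calls ttm_mem_io_reserve() to have the driver's io_mem_reserve hook fill mem->bus before mapping, and ttm_mem_reg_iounmap() below drops that reservation again. A minimal sketch of the expected pairing, assuming a mappable region and a hypothetical helper name:

    static int example_peek_region(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
    {
            void *virtual;
            int ret;

            ret = ttm_mem_reg_ioremap(bdev, mem, &virtual);
            if (ret || !virtual)
                    return ret;     /* reserve failed or not iomem */
            /* ... access the region through 'virtual' ... */
            ttm_mem_reg_iounmap(bdev, mem, virtual);
            return 0;
    }
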
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
        man = &bdev->man[mem->mem_type];
 
-       if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+       if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
+       ttm_mem_io_free(bdev, mem);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                      bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+                      bool evict, bool no_wait_reserve, bool no_wait_gpu,
+                      struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-                         unsigned long bus_base,
-                         unsigned long bus_offset,
-                         unsigned long bus_size,
+                         unsigned long offset,
+                         unsigned long size,
                          struct ttm_bo_kmap_obj *map)
 {
-       struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *mem = &bo->mem;
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
-       if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+       if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
-               map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+               map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
-                       map->virtual = ioremap_wc(bus_base + bus_offset,
-                                                 bus_size);
+                       map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                 size);
                else
-                       map->virtual = ioremap_nocache(bus_base + bus_offset,
-                                                      bus_size);
+                       map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                      size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
 {
+       unsigned long offset, size;
        int ret;
-       unsigned long bus_base;
-       unsigned long bus_offset;
-       unsigned long bus_size;
 
        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
+       map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
 #endif
-       ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-                               &bus_offset, &bus_size);
+       ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        if (ret)
                return ret;
-       if (bus_size == 0) {
+       if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
-               bus_offset += start_page << PAGE_SHIFT;
-               bus_size = num_pages << PAGE_SHIFT;
-               return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+               offset = start_page << PAGE_SHIFT;
+               size = num_pages << PAGE_SHIFT;
+               return ttm_bo_ioremap(bo, offset, size, map);
        }
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
+               ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
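
ttm_bo_kmap() now records map->bo so that ttm_bo_kunmap() can release the io reservation it took at map time. A short usage sketch with a hypothetical function name; a caller touching iomem-backed memory would use memset_io() rather than memset():

    static int example_clear_first_page(struct ttm_buffer_object *bo)
    {
            struct ttm_bo_kmap_obj map;
            bool is_iomem;
            void *virtual;
            int ret;

            ret = ttm_bo_kmap(bo, 0, 1, &map);
            if (ret)
                    return ret;
            virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
            if (!is_iomem)
                    memset(virtual, 0, PAGE_SIZE);
            ttm_bo_kunmap(&map);
            return 0;
    }
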
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-                   unsigned long dst_offset,
-                   unsigned long *pfn, pgprot_t *prot)
-{
-       struct ttm_mem_reg *mem = &bo->mem;
-       struct ttm_bo_device *bdev = bo->bdev;
-       unsigned long bus_offset;
-       unsigned long bus_size;
-       unsigned long bus_base;
-       int ret;
-       ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-                       &bus_size);
-       if (ret)
-               return -EINVAL;
-       if (bus_size != 0)
-               *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-       else
-               if (!bo->ttm)
-                       return -EINVAL;
-               else
-                       *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-                                                          dst_offset >>
-                                                          PAGE_SHIFT));
-       *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-               PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-       return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              void *sync_obj_arg,
-                             bool evict, bool no_wait,
+                             bool evict, bool no_wait_reserve,
+                             bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
index 668dbe8..fe6cb77 100644 (file)
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
-       unsigned long bus_base;
-       unsigned long bus_offset;
-       unsigned long bus_size;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct page *page;
        int ret;
        int i;
-       bool is_iomem;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;
 
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_NOPAGE;
        }
 
-       if (bdev->driver->fault_reserve_notify)
-               bdev->driver->fault_reserve_notify(bo);
+       if (bdev->driver->fault_reserve_notify) {
+               ret = bdev->driver->fault_reserve_notify(bo);
+               switch (ret) {
+               case 0:
+                       break;
+               case -EBUSY:
+                       set_need_resched();
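+                       /* fall through */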
+               case -ERESTARTSYS:
+                       retval = VM_FAULT_NOPAGE;
+                       goto out_unlock;
+               default:
+                       retval = VM_FAULT_SIGBUS;
+                       goto out_unlock;
+               }
+       }
 
        /*
         * Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                spin_unlock(&bo->lock);
 
 
-       ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
-                               &bus_size);
-       if (unlikely(ret != 0)) {
+       ret = ttm_mem_io_reserve(bdev, &bo->mem);
+       if (ret) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }
 
-       is_iomem = (bus_size != 0);
-
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
        page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
-
-       if (is_iomem) {
+       if (bo->mem.bus.is_iomem) {
                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                vma->vm_page_prot);
        } else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         */
 
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
-               if (is_iomem)
-                       pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
-                           page_offset;
+               if (bo->mem.bus.is_iomem)
+                       pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                else {
                        page = ttm_tt_get_page(ttm, page_offset);
                        if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_unlock;
-
                }
 
                address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
-       struct ttm_buffer_object *bo =
-           (struct ttm_buffer_object *)vma->vm_private_data;
+       struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
 
        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
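
With this change the fault handler interprets fault_reserve_notify's return value: 0 proceeds, -EBUSY and -ERESTARTSYS retry the fault via VM_FAULT_NOPAGE (with -EBUSY also requesting a reschedule), and anything else raises VM_FAULT_SIGBUS. A hedged sketch of a driver hook honoring that contract, using a hypothetical helper:

    static int example_fault_reserve_notify(struct ttm_buffer_object *bo)
    {
            /* retry the fault while the BO is still being moved */
            if (example_bo_move_in_flight(bo))      /* hypothetical */
                    return -EBUSY;
            return 0;
    }
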
index 801b702..e70ddd8 100644 (file)
@@ -27,6 +27,7 @@
 
 #include "ttm/ttm_memory.h"
 #include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
                       "Zone %7s: Available graphics memory: %llu kiB.\n",
                       zone->name, (unsigned long long) zone->max_mem >> 10);
        }
+       ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        return 0;
 out_no_zone:
        ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
        unsigned int i;
        struct ttm_mem_zone *zone;
 
+       /* let the page allocator first stop the shrink work. */
+       ttm_page_alloc_fini();
+
        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
        glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
                zone = glob->zones[i];
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
        }
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
 }
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644 (file)
index 0000000..0d9a42c
--- /dev/null
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Jerome Glisse <jglisse@redhat.com>
+ *          Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* Simple list-based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - Doesn't track pages currently in use
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+#include <asm/agp.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+
+
+#define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION               16
+#define FREE_ALL_PAGES                 (~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL             1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+       spinlock_t              lock;
+       bool                    fill_lock;
+       struct list_head        list;
+       int                     gfp_flags;
+       unsigned                npages;
+       char                    *name;
+       unsigned long           nfrees;
+       unsigned long           nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have an immediate
+ * effect anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+       unsigned        alloc_size;
+       unsigned        max_size;
+       unsigned        small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * The manager is a read-only object for the pool code, so it doesn't need
+ * locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. Work is only run when there
+ * are some pages to free.
+ * @small_allocation: Limit, in pages, below which an allocation counts as
+ * small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+       struct kobject          kobj;
+       struct shrinker         mm_shrink;
+       atomic_t                page_alloc_inited;
+       struct ttm_pool_opts    options;
+
+       union {
+               struct ttm_page_pool    pools[NUM_POOLS];
+               struct {
+                       struct ttm_page_pool    wc_pool;
+                       struct ttm_page_pool    uc_pool;
+                       struct ttm_page_pool    wc_pool_dma32;
+                       struct ttm_page_pool    uc_pool_dma32;
+               };
+       };
+};
+
+static struct attribute ttm_page_pool_max = {
+       .name = "pool_max_size",
+       .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+       .name = "pool_small_allocation",
+       .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+       .name = "pool_allocation_size",
+       .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+       &ttm_page_pool_max,
+       &ttm_page_pool_small,
+       &ttm_page_pool_alloc_size,
+       NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+       struct ttm_pool_manager *m =
+               container_of(kobj, struct ttm_pool_manager, kobj);
+       (void)m;
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+               struct attribute *attr, const char *buffer, size_t size)
+{
+       struct ttm_pool_manager *m =
+               container_of(kobj, struct ttm_pool_manager, kobj);
+       int chars;
+       unsigned val;
+       chars = sscanf(buffer, "%u", &val);
+       if (chars == 0)
+               return size;
+
+       /* Convert kb to number of pages */
+       val = val / (PAGE_SIZE >> 10);
+
+       if (attr == &ttm_page_pool_max)
+               m->options.max_size = val;
+       else if (attr == &ttm_page_pool_small)
+               m->options.small = val;
+       else if (attr == &ttm_page_pool_alloc_size) {
+               if (val > NUM_PAGES_TO_ALLOC*8) {
+                       printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+                                       "is not allowed. Recommended size is "
+                                       "%lu\n",
+                                       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+                                       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+                       return size;
+               } else if (val > NUM_PAGES_TO_ALLOC) {
+                       printk(KERN_WARNING "[ttm] Setting allocation size to "
+                                       "larger than %lu is not recommended.\n",
+                                       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+               }
+               m->options.alloc_size = val;
+       }
+
+       return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+               struct attribute *attr, char *buffer)
+{
+       struct ttm_pool_manager *m =
+               container_of(kobj, struct ttm_pool_manager, kobj);
+       unsigned val = 0;
+
+       if (attr == &ttm_page_pool_max)
+               val = m->options.max_size;
+       else if (attr == &ttm_page_pool_small)
+               val = m->options.small;
+       else if (attr == &ttm_page_pool_alloc_size)
+               val = m->options.alloc_size;
+
+       val = val * (PAGE_SIZE >> 10);
+
+       return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+       .show = &ttm_pool_show,
+       .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+       .release = &ttm_pool_kobj_release,
+       .sysfs_ops = &ttm_pool_sysfs_ops,
+       .default_attrs = ttm_pool_attrs,
+};
+
+static struct ttm_pool_manager _manager = {
+       .page_alloc_inited      = ATOMIC_INIT(0)
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               unmap_page_from_agp(pages[i]);
+#endif
+       return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               map_page_into_agp(pages[i]);
+#endif
+       return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               map_page_into_agp(pages[i]);
+#endif
+       return 0;
+}
+#endif
+
+/**
+ * Select the right pool for the requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+               enum ttm_caching_state cstate)
+{
+       int pool_index;
+
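+       /* index bit 0 selects uc over wc, bit 1 selects the dma32 variant */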
+       if (cstate == tt_cached)
+               return NULL;
+
+       if (cstate == tt_wc)
+               pool_index = 0x0;
+       else
+               pool_index = 0x1;
+
+       if (flags & TTM_PAGE_FLAG_DMA32)
+               pool_index |= 0x2;
+
+       return &_manager.pools[pool_index];
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+       unsigned i;
+       if (set_pages_array_wb(pages, npages))
+               printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+                               npages);
+       for (i = 0; i < npages; ++i)
+               __free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+               unsigned freed_pages)
+{
+       pool->npages -= freed_pages;
+       pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: pool to free the pages from
+ * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool.
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+       unsigned long irq_flags;
+       struct page *p;
+       struct page **pages_to_free;
+       unsigned freed_pages = 0,
+                npages_to_free = nr_free;
+
+       if (NUM_PAGES_TO_ALLOC < nr_free)
+               npages_to_free = NUM_PAGES_TO_ALLOC;
+
+       pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+                       GFP_KERNEL);
+       if (!pages_to_free) {
+               printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+               return 0;
+       }
+
+restart:
+       spin_lock_irqsave(&pool->lock, irq_flags);
+
+       list_for_each_entry_reverse(p, &pool->list, lru) {
+               if (freed_pages >= npages_to_free)
+                       break;
+
+               pages_to_free[freed_pages++] = p;
+               /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+               if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+                       /* remove range of pages from the pool */
+                       __list_del(p->lru.prev, &pool->list);
+
+                       ttm_pool_update_free_locked(pool, freed_pages);
+                       /**
+                        * Because changing page caching is costly
+                        * we unlock the pool to prevent stalling.
+                        */
+                       spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+                       ttm_pages_put(pages_to_free, freed_pages);
+                       if (likely(nr_free != FREE_ALL_PAGES))
+                               nr_free -= freed_pages;
+
+                       if (NUM_PAGES_TO_ALLOC >= nr_free)
+                               npages_to_free = nr_free;
+                       else
+                               npages_to_free = NUM_PAGES_TO_ALLOC;
+
+                       freed_pages = 0;
+
+                       /* free all so restart the processing */
+                       if (nr_free)
+                               goto restart;
+
+                       /* Not allowed to fall through or break because the
+                        * following code runs inside the spinlock while we
+                        * are outside it here.
+                        */
+                       goto out;
+
+               }
+       }
+
+       /* remove range of pages from the pool */
+       if (freed_pages) {
+               __list_del(&p->lru, &pool->list);
+
+               ttm_pool_update_free_locked(pool, freed_pages);
+               nr_free -= freed_pages;
+       }
+
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+       if (freed_pages)
+               ttm_pages_put(pages_to_free, freed_pages);
+out:
+       kfree(pages_to_free);
+       return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+       unsigned i;
+       int total = 0;
+       for (i = 0; i < NUM_POOLS; ++i)
+               total += _manager.pools[i].npages;
+
+       return total;
+}
+
+/**
+ * Callback for mm to request that the pools reduce the number of pages held.
+ */
+static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+{
+       static atomic_t start_pool = ATOMIC_INIT(0);
+       unsigned i;
+       unsigned pool_offset = atomic_add_return(1, &start_pool);
+       struct ttm_page_pool *pool;
+
+       pool_offset = pool_offset % NUM_POOLS;
+       /* select start pool in round robin fashion */
+       for (i = 0; i < NUM_POOLS; ++i) {
+               unsigned nr_free = shrink_pages;
+               if (shrink_pages == 0)
+                       break;
+               pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+               shrink_pages = ttm_page_pool_free(pool, nr_free);
+       }
+       /* return estimated number of unused pages in pool */
+       return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+       manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+       manager->mm_shrink.seeks = 1;
+       register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+       unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+               enum ttm_caching_state cstate, unsigned cpages)
+{
+       int r = 0;
+       /* Set page caching */
+       switch (cstate) {
+       case tt_uncached:
+               r = set_pages_array_uc(pages, cpages);
+               if (r)
+                       printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
+                                       cpages);
+               break;
+       case tt_wc:
+               r = set_pages_array_wc(pages, cpages);
+               if (r)
+                       printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
+                                       cpages);
+               break;
+       default:
+               break;
+       }
+       return r;
+}
+
+/**
+ * Free the pages that failed to change caching state. Any pages that have
+ * already changed their caching state are put back in the pool.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+               int ttm_flags, enum ttm_caching_state cstate,
+               struct page **failed_pages, unsigned cpages)
+{
+       unsigned i;
+       /* Failed pages have to be freed */
+       for (i = 0; i < cpages; ++i) {
+               list_del(&failed_pages[i]->lru);
+               __free_page(failed_pages[i]);
+       }
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if caller updates count depending on number of
+ * pages returned in pages array.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+               int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+       struct page **caching_array;
+       struct page *p;
+       int r = 0;
+       unsigned i, cpages;
+       unsigned max_cpages = min(count,
+                       (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+       /* allocate array for page caching change */
+       caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+       if (!caching_array) {
+               printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+               return -ENOMEM;
+       }
+
+       for (i = 0, cpages = 0; i < count; ++i) {
+               p = alloc_page(gfp_flags);
+
+               if (!p) {
+                       printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+
+                       /* store already allocated pages in the pool after
+                        * setting the caching state */
+                       if (cpages) {
+                               r = ttm_set_pages_caching(caching_array, cstate, cpages);
+                               if (r)
+                                       ttm_handle_caching_state_failure(pages,
+                                               ttm_flags, cstate,
+                                               caching_array, cpages);
+                       }
+                       r = -ENOMEM;
+                       goto out;
+               }
+
+#ifdef CONFIG_HIGHMEM
+               /* gfp flags of highmem pages should never be dma32, so we
+                * should be fine in that case
+                */
+               if (!PageHighMem(p))
+#endif
+               {
+                       caching_array[cpages++] = p;
+                       if (cpages == max_cpages) {
+
+                               r = ttm_set_pages_caching(caching_array,
+                                               cstate, cpages);
+                               if (r) {
+                                       ttm_handle_caching_state_failure(pages,
+                                               ttm_flags, cstate,
+                                               caching_array, cpages);
+                                       goto out;
+                               }
+                               cpages = 0;
+                       }
+               }
+
+               list_add(&p->lru, pages);
+       }
+
+       if (cpages) {
+               r = ttm_set_pages_caching(caching_array, cstate, cpages);
+               if (r)
+                       ttm_handle_caching_state_failure(pages,
+                                       ttm_flags, cstate,
+                                       caching_array, cpages);
+       }
+out:
+       kfree(caching_array);
+
+       return r;
+}
+
+/**
+ * Fill the given pool if it doesn't have enough pages and the requested
+ * number of pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+               int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+               unsigned long *irq_flags)
+{
+       struct page *p;
+       int r;
+       unsigned cpages = 0;
+       /**
+        * Only allow one pool fill operation at a time.
+        * If the pool doesn't have enough pages for the allocation, new pages
+        * are allocated outside of the pool.
+        */
+       if (pool->fill_lock)
+               return;
+
+       pool->fill_lock = true;
+
+       /* If the allocation request is small and there are not enough
+        * pages in the pool, we fill the pool first */
+       if (count < _manager.options.small
+               && count > pool->npages) {
+               struct list_head new_pages;
+               unsigned alloc_size = _manager.options.alloc_size;
+
+               /**
+                * Can't change page caching if in irqsave context. We have to
+                * drop the pool->lock.
+                */
+               spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+               INIT_LIST_HEAD(&new_pages);
+               r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+                               cstate, alloc_size);
+               spin_lock_irqsave(&pool->lock, *irq_flags);
+
+               if (!r) {
+                       list_splice(&new_pages, &pool->list);
+                       ++pool->nrefills;
+                       pool->npages += alloc_size;
+               } else {
+                       printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+                       /* If we have any pages left put them to the pool. */
+                       list_for_each_entry(p, &pool->list, lru) {
+                               ++cpages;
+                       }
+                       list_splice(&new_pages, &pool->list);
+                       pool->npages += cpages;
+               }
+
+       }
+       pool->fill_lock = false;
+}
+
+/**
+ * Cut 'count' pages from the pool and put them on the return list.
+ *
+ * @return count of pages still to allocate to fill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+               struct list_head *pages, int ttm_flags,
+               enum ttm_caching_state cstate, unsigned count)
+{
+       unsigned long irq_flags;
+       struct list_head *p;
+       unsigned i;
+
+       spin_lock_irqsave(&pool->lock, irq_flags);
+       ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+       if (count >= pool->npages) {
+               /* take all pages from the pool */
+               list_splice_init(&pool->list, pages);
+               count -= pool->npages;
+               pool->npages = 0;
+               goto out;
+       }
+       /* Find the last page to include for the requested number of pages.
+        * Walk from whichever end of the list is nearer to halve the search. */
+       if (count <= pool->npages/2) {
+               i = 0;
+               list_for_each(p, &pool->list) {
+                       if (++i == count)
+                               break;
+               }
+       } else {
+               i = pool->npages + 1;
+               list_for_each_prev(p, &pool->list) {
+                       if (--i == count)
+                               break;
+               }
+       }
+       /* Cut count number of pages from pool */
+       list_cut_position(pages, &pool->list, p);
+       pool->npages -= count;
+       count = 0;
+out:
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+       return count;
+}
+
+/*
+ * On success pages list will hold count number of correctly
+ * cached pages.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+               enum ttm_caching_state cstate, unsigned count)
+{
+       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+       struct page *p = NULL;
+       int gfp_flags = 0;
+       int r;
+
+       /* set zero flag for page allocation if required */
+       if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+               gfp_flags |= __GFP_ZERO;
+
+       /* No pool for cached pages */
+       if (pool == NULL) {
+               if (flags & TTM_PAGE_FLAG_DMA32)
+                       gfp_flags |= GFP_DMA32;
+               else
+                       gfp_flags |= __GFP_HIGHMEM;
+
+               for (r = 0; r < count; ++r) {
+                       p = alloc_page(gfp_flags);
+                       if (!p) {
+
+                               printk(KERN_ERR "[ttm] unable to allocate page.");
+                               return -ENOMEM;
+                       }
+
+                       list_add(&p->lru, pages);
+               }
+               return 0;
+       }
+
+
+       /* combine zero flag to pool flags */
+       gfp_flags |= pool->gfp_flags;
+
+       /* First we take pages from the pool */
+       count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+
+       /* clear the pages coming from the pool if requested */
+       if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+               list_for_each_entry(p, pages, lru) {
+                       clear_page(page_address(p));
+               }
+       }
+
+       /* If the pool didn't have enough pages, allocate new ones. */
+       if (count > 0) {
+               /* ttm_alloc_new_pages doesn't reference the pool, so we can
+                * run multiple requests in parallel.
+                */
+               r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+               if (r) {
+                       /* If there are any pages in the list, put them back
+                        * in the pool. */
+                       printk(KERN_ERR "[ttm] Failed to allocate extra pages "
+                                       "for large request.");
+                       ttm_put_pages(pages, 0, flags, cstate);
+                       return r;
+               }
+       }
+
+
+       return 0;
+}
+
+/* Put all pages from the list back into the correct pool to await reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+               enum ttm_caching_state cstate)
+{
+       unsigned long irq_flags;
+       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+       struct page *p, *tmp;
+
+       if (pool == NULL) {
+               /* No pool for this memory type so free the pages */
+
+               list_for_each_entry_safe(p, tmp, pages, lru) {
+                       __free_page(p);
+               }
+               /* Make the pages list empty */
+               INIT_LIST_HEAD(pages);
+               return;
+       }
+       if (page_count == 0) {
+               list_for_each_entry_safe(p, tmp, pages, lru) {
+                       ++page_count;
+               }
+       }
+
+       spin_lock_irqsave(&pool->lock, irq_flags);
+       list_splice_init(pages, &pool->list);
+       pool->npages += page_count;
+       /* Check that we don't go over the pool limit */
+       page_count = 0;
+       if (pool->npages > _manager.options.max_size) {
+               page_count = pool->npages - _manager.options.max_size;
+               /* free at least NUM_PAGES_TO_ALLOC pages
+                * to reduce calls to set_memory_wb */
+               if (page_count < NUM_PAGES_TO_ALLOC)
+                       page_count = NUM_PAGES_TO_ALLOC;
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+       if (page_count)
+               ttm_page_pool_free(pool, page_count);
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+               char *name)
+{
+       spin_lock_init(&pool->lock);
+       pool->fill_lock = false;
+       INIT_LIST_HEAD(&pool->list);
+       pool->npages = pool->nfrees = 0;
+       pool->gfp_flags = flags;
+       pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+       int ret;
+       if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
+               return 0;
+
+       printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+
+       ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+
+       ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+
+       ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
+                       "wc dma");
+
+       ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
+                       "uc dma");
+
+       _manager.options.max_size = max_pages;
+       _manager.options.small = SMALL_ALLOCATION;
+       _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+       kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
+       ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+       if (unlikely(ret != 0)) {
+               kobject_put(&_manager.kobj);
+               return ret;
+       }
+
+       ttm_pool_mm_shrink_init(&_manager);
+
+       return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+       int i;
+
+       if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
+               return;
+
+       printk(KERN_INFO "[ttm] Finalizing pool allocator.\n");
+       ttm_pool_mm_shrink_fini(&_manager);
+
+       for (i = 0; i < NUM_POOLS; ++i)
+               ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+
+       kobject_put(&_manager.kobj);
+}
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+       struct ttm_page_pool *p;
+       unsigned i;
+       char *h[] = {"pool", "refills", "pages freed", "size"};
+       if (atomic_read(&_manager.page_alloc_inited) == 0) {
+               seq_printf(m, "No pool allocator running.\n");
+               return 0;
+       }
+       seq_printf(m, "%6s %12s %13s %8s\n",
+                       h[0], h[1], h[2], h[3]);
+       for (i = 0; i < NUM_POOLS; ++i) {
+               p = &_manager.pools[i];
+
+               seq_printf(m, "%6s %12ld %13ld %8d\n",
+                               p->name, p->nrefills,
+                               p->nfrees, p->npages);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
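
Taken together, ttm_get_pages() and ttm_put_pages() give callers a symmetric borrow/return interface keyed by page flags and caching state. A minimal round-trip sketch under the flags used above, with a hypothetical function name:

    static int example_pool_roundtrip(void)
    {
            struct list_head pages;
            int ret;

            INIT_LIST_HEAD(&pages);
            /* four zeroed pages, served from the uc pool when possible */
            ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC,
                                tt_uncached, 4);
            if (ret)
                    return ret;
            /* ... use the pages ... */
            ttm_put_pages(&pages, 4, TTM_PAGE_FLAG_ZERO_ALLOC, tt_uncached);
            return 0;
    }
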
index d5fd5b8..a7bab87 100644 (file)
@@ -39,6 +39,7 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static int ttm_tt_swapin(struct ttm_tt *ttm);
 
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
        ttm->pages = NULL;
 }
 
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
-       gfp_t gfp_flags = GFP_USER;
-
-       if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-               gfp_flags |= __GFP_ZERO;
-
-       if (page_flags & TTM_PAGE_FLAG_DMA32)
-               gfp_flags |= __GFP_DMA32;
-       else
-               gfp_flags |= __GFP_HIGHMEM;
-
-       return alloc_page(gfp_flags);
-}
-
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 {
        int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
        struct page *p;
+       struct list_head h;
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        int ret;
 
        while (NULL == (p = ttm->pages[index])) {
-               p = ttm_tt_alloc_page(ttm->page_flags);
 
-               if (!p)
+               INIT_LIST_HEAD(&h);
+
+               ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+               if (ret != 0)
                        return NULL;
 
+               p = list_first_entry(&h, struct page, lru);
+
                ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
                if (unlikely(ret != 0))
                        goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
        if (ttm->caching_state == c_state)
                return 0;
 
-       if (c_state != tt_cached) {
-               ret = ttm_tt_populate(ttm);
-               if (unlikely(ret != 0))
-                       return ret;
+       if (ttm->state == tt_unpopulated) {
+               /* Change caching but don't populate */
+               ttm->caching_state = c_state;
+               return 0;
        }
 
        if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
        int i;
+       unsigned count = 0;
+       struct list_head h;
        struct page *cur_page;
        struct ttm_backend *be = ttm->be;
 
+       INIT_LIST_HEAD(&h);
+
        if (be)
                be->func->clear(be);
-       (void)ttm_tt_set_caching(ttm, tt_cached);
        for (i = 0; i < ttm->num_pages; ++i) {
+
                cur_page = ttm->pages[i];
                ttm->pages[i] = NULL;
                if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
                                       "Leaking pages.\n");
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 cur_page);
-                       __free_page(cur_page);
+                       list_add(&cur_page->lru, &h);
+                       count++;
                }
        }
+       ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
index 825ebe3..c4f5114 100644 (file)
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
 {
-       struct vmw_private *dev_priv =
-           container_of(bdev, struct vmw_private, bdev);
-
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->gpu_offset = 0;
-               man->io_offset = dev_priv->vram_start;
-               man->io_size = dev_priv->vram_size;
-               man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                   TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->io_addr = NULL;
+               man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
        vmw_dmabuf_gmr_unbind(bo);
 }
 
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+       mem->bus.addr = NULL;
+       mem->bus.is_iomem = false;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+               return 0;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               mem->bus.base = dev_priv->vram_start;
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+       return 0;
+}
+
 /**
  * FIXME: We're using the old vmware polling method to sync.
  * Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
        .move_notify = vmw_move_notify,
-       .swap_notify = vmw_swap_notify
+       .swap_notify = vmw_swap_notify,
+       .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+       .io_mem_reserve = &vmw_ttm_io_mem_reserve,
+       .io_mem_free = &vmw_ttm_io_mem_free,
 };
index 0897359..dbd36b8 100644 (file)
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * Put BO in VRAM, only if there is space.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
        if (unlikely(ret == -ERESTARTSYS))
                return ret;
 
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * previous contents.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
        return ret;
 }
 
index a933670..7421aaa 100644 (file)
@@ -559,8 +559,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
        info->pixmap.scan_align = 1;
 #endif
 
-       info->aperture_base = vmw_priv->vram_start;
-       info->aperture_size = vmw_priv->vram_size;
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures) {
+               ret = -ENOMEM;
+               goto err_aper;
+       }
+       info->apertures->ranges[0].base = vmw_priv->vram_start;
+       info->apertures->ranges[0].size = vmw_priv->vram_size;
 
        /*
         * Dirty & Deferred IO
@@ -580,6 +585,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 
 err_defio:
        fb_deferred_io_cleanup(info);
+err_aper:
        ttm_bo_kunmap(&par->map);
 err_unref:
        ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
@@ -628,7 +634,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+       ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
        ttm_bo_unreserve(bo);
 
        return ret;
@@ -652,7 +658,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
        if (unlikely(ret != 0))
                goto err_unlock;
 
-       ret = ttm_bo_validate(bo, &ne_placement, false, false);
+       ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
        ttm_bo_unreserve(bo);
 err_unlock:
        ttm_write_unlock(&vmw_priv->active_master->lock);
index 31f9afe..bbc7c4c 100644 (file)
@@ -752,14 +752,8 @@ err_not_scanout:
        return NULL;
 }
 
-static int vmw_kms_fb_changed(struct drm_device *dev)
-{
-       return 0;
-}
-
 static struct drm_mode_config_funcs vmw_kms_funcs = {
        .fb_create = vmw_kms_fb_create,
-       .fb_changed = vmw_kms_fb_changed,
 };
 
 int vmw_kms_init(struct vmw_private *dev_priv)
index 5b6eabe..ad566c8 100644 (file)
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
        if (pin)
                overlay_placement = &vmw_vram_ne_placement;
 
-       ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+       ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
 
        ttm_bo_unreserve(bo);
 
index 61ab4da..8d0e31a 100644 (file)
@@ -18,12 +18,12 @@ config VGA_ARB_MAX_GPUS
          multiple GPUS.  The overhead for each GPU is very small.
 
 config VGA_SWITCHEROO
-       bool "Laptop Hybrid Grapics - GPU switching support"
+       bool "Laptop Hybrid Graphics - GPU switching support"
        depends on X86
        depends on ACPI
        help
-         Many laptops released in 2008/9/10 have two gpus with a multiplxer
+         Many laptops released in 2008/9/10 have two GPUs with a multiplexer
          to switch between them. This adds support for dynamic switching when
           X isn't running and delayed switching until the next logoff. This
-         features is called hybrid graphics, ATI PowerXpress, and Nvidia
+         feature is called hybrid graphics, ATI PowerXpress, and Nvidia
          HybridPower.
index 79119f5..bd6da7a 100644 (file)
@@ -155,6 +155,7 @@ static int macio_adb_reset_bus(void)
        while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) {
                if (--timeout == 0) {
                        out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) & ~ADB_RST);
+                       spin_unlock_irqrestore(&macio_lock, flags);
                        return -1;
                }
        }
index 888448c..c9da5c4 100644 (file)
@@ -1183,8 +1183,10 @@ static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
                return -EOVERFLOW;
        spin_lock_irqsave(&pp->lock, flags);
        if (pp->cmd.status == 1) {
-               if (file->f_flags & O_NONBLOCK)
+               if (file->f_flags & O_NONBLOCK) {
+                       spin_unlock_irqrestore(&pp->lock, flags);
                        return -EAGAIN;
+               }
                add_wait_queue(&pp->wait, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
index c42eeb4..16d82f1 100644 (file)
@@ -182,6 +182,7 @@ remove_thermostat(struct i2c_client *client)
 
        thermostat = NULL;
 
+       i2c_set_clientdata(client, NULL);
        kfree(th);
 
        return 0;
@@ -399,6 +400,7 @@ static int probe_thermostat(struct i2c_client *client,
        rc = read_reg(th, CONFIG_REG);
        if (rc < 0) {
                dev_err(&client->dev, "Thermostat failed to read config!\n");
+               i2c_set_clientdata(client, NULL);
                kfree(th);
                return -ENODEV;
        }
index 129cda7..749d174 100644 (file)
@@ -757,10 +757,8 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
                wf_put_control(cpufreq_clamp);
 
        /* Destroy control loops state structures */
-       if (wf_smu_sys_fans)
-               kfree(wf_smu_sys_fans);
-       if (wf_smu_cpu_fans)
-               kfree(wf_smu_cpu_fans);
+       kfree(wf_smu_sys_fans);
+       kfree(wf_smu_cpu_fans);
 
        return 0;
 }
index bea9916..3442732 100644 (file)
@@ -687,12 +687,9 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
                wf_put_control(cpufreq_clamp);
 
        /* Destroy control loops state structures */
-       if (wf_smu_slots_fans)
-               kfree(wf_smu_cpu_fans);
-       if (wf_smu_drive_fans)
-               kfree(wf_smu_cpu_fans);
-       if (wf_smu_cpu_fans)
-               kfree(wf_smu_cpu_fans);
+       kfree(wf_smu_slots_fans);
+       kfree(wf_smu_drive_fans);
+       kfree(wf_smu_cpu_fans);
 
        return 0;
 }
index 7b6f7ee..f12dc3e 100644 (file)
@@ -3,7 +3,6 @@
 #
 
 obj-$(CONFIG_IBM_ASM)          += ibmasm/
-obj-$(CONFIG_HDPU_FEATURES)    += hdpuftrs/
 obj-$(CONFIG_AD525X_DPOT)      += ad525x_dpot.o
 obj-$(CONFIG_ATMEL_PWM)                += atmel_pwm.o
 obj-$(CONFIG_ATMEL_SSC)                += atmel-ssc.o
diff --git a/drivers/misc/hdpuftrs/Makefile b/drivers/misc/hdpuftrs/Makefile
deleted file mode 100644 (file)
index ac74ae6..0000000
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_HDPU_FEATURES) := hdpu_cpustate.o hdpu_nexus.o
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
deleted file mode 100644 (file)
index 176fe4e..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- *     Sky CPU State Driver
- *
- *     Copyright (C) 2002 Brian Waite
- *
- *     This driver allows use of the CPU state bits
- *     It exports the /dev/sky_cpustate and also
- *     /proc/sky_cpustate pseudo-file for status information.
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/smp_lock.h>
-#include <linux/miscdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <asm/uaccess.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-#define SKY_CPUSTATE_VERSION           "1.1"
-
-static int hdpu_cpustate_probe(struct platform_device *pdev);
-static int hdpu_cpustate_remove(struct platform_device *pdev);
-
-static unsigned char cpustate_get_state(void);
-static int cpustate_proc_open(struct inode *inode, struct file *file);
-static int cpustate_proc_read(struct seq_file *seq, void *offset);
-
-static struct cpustate_t cpustate;
-
-static const struct file_operations proc_cpustate = {
-       .open = cpustate_proc_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int cpustate_proc_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, cpustate_proc_read, NULL);
-}
-
-static int cpustate_proc_read(struct seq_file *seq, void *offset)
-{
-       seq_printf(seq, "CPU State: %04x\n", cpustate_get_state());
-       return 0;
-}
-
-static int cpustate_get_ref(int excl)
-{
-
-       int retval = -EBUSY;
-
-       spin_lock(&cpustate.lock);
-
-       if (cpustate.excl)
-               goto out_busy;
-
-       if (excl) {
-               if (cpustate.open_count)
-                       goto out_busy;
-               cpustate.excl = 1;
-       }
-
-       cpustate.open_count++;
-       retval = 0;
-
-      out_busy:
-       spin_unlock(&cpustate.lock);
-       return retval;
-}
-
-static int cpustate_free_ref(void)
-{
-
-       spin_lock(&cpustate.lock);
-
-       cpustate.excl = 0;
-       cpustate.open_count--;
-
-       spin_unlock(&cpustate.lock);
-       return 0;
-}
-
-static unsigned char cpustate_get_state(void)
-{
-
-       return cpustate.cached_val;
-}
-
-static void cpustate_set_state(unsigned char new_state)
-{
-       unsigned int state = (new_state << 21);
-
-#ifdef DEBUG_CPUSTATE
-       printk("CPUSTATE -> 0x%x\n", new_state);
-#endif
-       spin_lock(&cpustate.lock);
-       cpustate.cached_val = new_state;
-       writel((0xff << 21), cpustate.clr_addr);
-       writel(state, cpustate.set_addr);
-       spin_unlock(&cpustate.lock);
-}
-
-/*
- *     Now all the various file operations that we export.
- */
-
-static ssize_t cpustate_read(struct file *file, char *buf,
-                            size_t count, loff_t * ppos)
-{
-       unsigned char data;
-
-       if (count < 0)
-               return -EFAULT;
-       if (count == 0)
-               return 0;
-
-       data = cpustate_get_state();
-       if (copy_to_user(buf, &data, sizeof(unsigned char)))
-               return -EFAULT;
-       return sizeof(unsigned char);
-}
-
-static ssize_t cpustate_write(struct file *file, const char *buf,
-                             size_t count, loff_t * ppos)
-{
-       unsigned char data;
-
-       if (count < 0)
-               return -EFAULT;
-
-       if (count == 0)
-               return 0;
-
-       if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char)))
-               return -EFAULT;
-
-       cpustate_set_state(data);
-       return sizeof(unsigned char);
-}
-
-static int cpustate_open(struct inode *inode, struct file *file)
-{
-       int ret;
-
-       lock_kernel();
-       ret = cpustate_get_ref((file->f_flags & O_EXCL));
-       unlock_kernel();
-
-       return ret;
-}
-
-static int cpustate_release(struct inode *inode, struct file *file)
-{
-       return cpustate_free_ref();
-}
-
-static struct platform_driver hdpu_cpustate_driver = {
-       .probe = hdpu_cpustate_probe,
-       .remove = hdpu_cpustate_remove,
-       .driver = {
-               .name = HDPU_CPUSTATE_NAME,
-               .owner = THIS_MODULE,
-       },
-};
-
-/*
- *     The various file operations we support.
- */
-static const struct file_operations cpustate_fops = {
-      .owner   = THIS_MODULE,
-      .open    = cpustate_open,
-      .release = cpustate_release,
-      .read    = cpustate_read,
-      .write   = cpustate_write,
-      .llseek  = no_llseek,
-};
-
-static struct miscdevice cpustate_dev = {
-       .minor  = MISC_DYNAMIC_MINOR,
-       .name   = "sky_cpustate",
-       .fops   = &cpustate_fops,
-};
-
-static int hdpu_cpustate_probe(struct platform_device *pdev)
-{
-       struct resource *res;
-       struct proc_dir_entry *proc_de;
-       int ret;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               printk(KERN_ERR "sky_cpustate: "
-                      "Invalid memory resource.\n");
-               return -EINVAL;
-       }
-       cpustate.set_addr = (unsigned long *)res->start;
-       cpustate.clr_addr = (unsigned long *)res->end - 1;
-
-       ret = misc_register(&cpustate_dev);
-       if (ret) {
-               printk(KERN_WARNING "sky_cpustate: "
-                      "Unable to register misc device.\n");
-               cpustate.set_addr = NULL;
-               cpustate.clr_addr = NULL;
-               return ret;
-       }
-
-       proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate);
-       if (!proc_de) {
-               printk(KERN_WARNING "sky_cpustate: "
-                      "Unable to create proc entry\n");
-       }
-
-       printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
-       return 0;
-}
-
-static int hdpu_cpustate_remove(struct platform_device *pdev)
-{
-       cpustate.set_addr = NULL;
-       cpustate.clr_addr = NULL;
-
-       remove_proc_entry("sky_cpustate", NULL);
-       misc_deregister(&cpustate_dev);
-
-       return 0;
-}
-
-static int __init cpustate_init(void)
-{
-       return platform_driver_register(&hdpu_cpustate_driver);
-}
-
-static void __exit cpustate_exit(void)
-{
-       platform_driver_unregister(&hdpu_cpustate_driver);
-}
-
-module_init(cpustate_init);
-module_exit(cpustate_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME);
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
deleted file mode 100644 (file)
index ce39fa5..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- *     Sky Nexus Register Driver
- *
- *     Copyright (C) 2002 Brian Waite
- *
- *     This driver allows reading the Nexus register
- *     It exports the /proc/sky_chassis_id and also
- *     /proc/sky_slot_id pseudo-file for status information.
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-static int hdpu_nexus_probe(struct platform_device *pdev);
-static int hdpu_nexus_remove(struct platform_device *pdev);
-static int hdpu_slot_id_open(struct inode *inode, struct file *file);
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset);
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file);
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset);
-
-static struct proc_dir_entry *hdpu_slot_id;
-static struct proc_dir_entry *hdpu_chassis_id;
-static int slot_id = -1;
-static int chassis_id = -1;
-
-static const struct file_operations proc_slot_id = {
-       .open = hdpu_slot_id_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static const struct file_operations proc_chassis_id = {
-       .open = hdpu_chassis_id_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static struct platform_driver hdpu_nexus_driver = {
-       .probe = hdpu_nexus_probe,
-       .remove = hdpu_nexus_remove,
-       .driver = {
-               .name = HDPU_NEXUS_NAME,
-               .owner = THIS_MODULE,
-       },
-};
-
-static int hdpu_slot_id_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, hdpu_slot_id_read, NULL);
-}
-
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset)
-{
-       seq_printf(seq, "%d\n", slot_id);
-       return 0;
-}
-
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, hdpu_chassis_id_read, NULL);
-}
-
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset)
-{
-       seq_printf(seq, "%d\n", chassis_id);
-       return 0;
-}
-
-static int hdpu_nexus_probe(struct platform_device *pdev)
-{
-       struct resource *res;
-       int *nexus_id_addr;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               printk(KERN_ERR "sky_nexus: "
-                      "Invalid memory resource.\n");
-               return -EINVAL;
-       }
-       nexus_id_addr = ioremap(res->start,
-                               (unsigned long)(res->end - res->start));
-       if (nexus_id_addr) {
-               slot_id = (*nexus_id_addr >> 8) & 0x1f;
-               chassis_id = *nexus_id_addr & 0xff;
-               iounmap(nexus_id_addr);
-       } else {
-               printk(KERN_ERR "sky_nexus: Could not map slot id\n");
-       }
-
-       hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id);
-       if (!hdpu_slot_id) {
-               printk(KERN_WARNING "sky_nexus: "
-                      "Unable to create proc dir entry: sky_slot_id\n");
-       }
-
-       hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL,
-                                     &proc_chassis_id);
-       if (!hdpu_chassis_id)
-               printk(KERN_WARNING "sky_nexus: "
-                      "Unable to create proc dir entry: sky_chassis_id\n");
-
-       return 0;
-}
-
-static int hdpu_nexus_remove(struct platform_device *pdev)
-{
-       slot_id = -1;
-       chassis_id = -1;
-
-       remove_proc_entry("sky_slot_id", NULL);
-       remove_proc_entry("sky_chassis_id", NULL);
-
-       hdpu_slot_id = 0;
-       hdpu_chassis_id = 0;
-
-       return 0;
-}
-
-static int __init nexus_init(void)
-{
-       return platform_driver_register(&hdpu_nexus_driver);
-}
-
-static void __exit nexus_exit(void)
-{
-       platform_driver_unregister(&hdpu_nexus_driver);
-}
-
-module_init(nexus_init);
-module_exit(nexus_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_NEXUS_NAME);
index 04ae884..61f1d27 100644 (file)
@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2007 Google Inc,
  *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *  Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,7 @@
 #include <linux/log2.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
 #include <linux/platform_device.h>
@@ -47,6 +49,8 @@
 
 #define DRIVER_NAME "msm-sdcc"
 
+#define BUSCLK_PWRSAVE 1
+#define BUSCLK_TIMEOUT (HZ)
 static unsigned int msmsdcc_fmin = 144000;
 static unsigned int msmsdcc_fmax = 50000000;
 static unsigned int msmsdcc_4bit = 1;
@@ -57,6 +61,67 @@ static unsigned int msmsdcc_sdioirq;
 #define PIO_SPINMAX 30
 #define CMD_SPINMAX 20
 
+
+static inline void
+msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
+{
+       WARN_ON(!host->clks_on);
+
+       BUG_ON(host->curr.mrq);
+
+       if (deferr) {
+               mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
+       } else {
+               del_timer_sync(&host->busclk_timer);
+               /* Need to check clks_on again in case the busclk
+                * timer fired
+                */
+               if (host->clks_on) {
+                       clk_disable(host->clk);
+                       clk_disable(host->pclk);
+                       host->clks_on = 0;
+               }
+       }
+}
+
+static inline int
+msmsdcc_enable_clocks(struct msmsdcc_host *host)
+{
+       int rc;
+
+       del_timer_sync(&host->busclk_timer);
+
+       if (!host->clks_on) {
+               rc = clk_enable(host->pclk);
+               if (rc)
+                       return rc;
+               rc = clk_enable(host->clk);
+               if (rc) {
+                       clk_disable(host->pclk);
+                       return rc;
+               }
+               udelay(1 + ((3 * USEC_PER_SEC) /
+                      (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+               host->clks_on = 1;
+       }
+       return 0;
+}
+
+static inline unsigned int
+msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
+{
+       return readl(host->base + reg);
+}
+
+static inline void
+msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
+{
+       writel(data, host->base + reg);
+       /* 3 clk delay required! */
+       udelay(1 + ((3 * USEC_PER_SEC) /
+              (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+}
+
 static void
 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
                      u32 c);
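
The new helpers centralize two things: bus-clock gating and MMIO access. msmsdcc_disable_clocks(host, 1) does not gate immediately; it arms busclk_timer so the clocks only drop after BUSCLK_TIMEOUT of idleness, while a 0 argument kills the timer and gates synchronously, re-checking clks_on in case the timer already fired. The msmsdcc_writel() wrapper adds the udelay because, per the comment encoded here, register writes need roughly 3 MCLK cycles to latch. A hedged sketch of the intended call pattern (assuming a valid host and the driver's own locking):

    msmsdcc_enable_clocks(host);            /* cancel timer, clk_enable() */
    msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
    #if BUSCLK_PWRSAVE
    msmsdcc_disable_clocks(host, 1);        /* deferred: off after ~1s idle */
    #endif
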
@@ -64,8 +129,6 @@ msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
 static void
 msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
 {
-       writel(0, host->base + MMCICOMMAND);
-
        BUG_ON(host->curr.data);
 
        host->curr.mrq = NULL;
@@ -76,6 +139,9 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
        if (mrq->cmd->error == -ETIMEDOUT)
                mdelay(5);
 
+#if BUSCLK_PWRSAVE
+       msmsdcc_disable_clocks(host, 1);
+#endif
        /*
         * Need to drop the host lock here; mmc_request_done may call
         * back into the driver...
@@ -88,7 +154,6 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
 static void
 msmsdcc_stop_data(struct msmsdcc_host *host)
 {
-       writel(0, host->base + MMCIDATACTRL);
        host->curr.data = NULL;
        host->curr.got_dataend = host->curr.got_datablkend = 0;
 }
@@ -109,6 +174,31 @@ uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
        return 0;
 }
 
+static inline void
+msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c) {
+       msmsdcc_writel(host, arg, MMCIARGUMENT);
+       msmsdcc_writel(host, c, MMCICOMMAND);
+}
+
+static void
+msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
+{
+       struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
+
+       msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
+       msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,
+                      MMCIDATALENGTH);
+       msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1);
+       msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
+
+       if (host->cmd_cmd) {
+               msmsdcc_start_command_exec(host,
+                                          (u32) host->cmd_cmd->arg,
+                                          (u32) host->cmd_c);
+       }
+       host->dma.active = 1;
+}
+
 static void
 msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
                          unsigned int result,
@@ -121,8 +211,11 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
        struct mmc_request      *mrq;
 
        spin_lock_irqsave(&host->lock, flags);
+       host->dma.active = 0;
+
        mrq = host->curr.mrq;
        BUG_ON(!mrq);
+       WARN_ON(!mrq->data);
 
        if (!(result & DMOV_RSLT_VALID)) {
                pr_err("msmsdcc: Invalid DataMover result\n");
@@ -146,7 +239,6 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
                if (!mrq->data->error)
                        mrq->data->error = -EIO;
        }
-       host->dma.busy = 0;
        dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
                     host->dma.dir);
 
@@ -159,6 +251,7 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
        }
 
        host->dma.sg = NULL;
+       host->dma.busy = 0;
 
        if ((host->curr.got_dataend && host->curr.got_datablkend)
             || mrq->data->error) {
@@ -172,12 +265,14 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
                if (!mrq->data->error)
                        host->curr.data_xfered = host->curr.xfer_size;
                if (!mrq->data->stop || mrq->cmd->error) {
-                       writel(0, host->base + MMCICOMMAND);
                        host->curr.mrq = NULL;
                        host->curr.cmd = NULL;
                        mrq->data->bytes_xfered = host->curr.data_xfered;
 
                        spin_unlock_irqrestore(&host->lock, flags);
+#if BUSCLK_PWRSAVE
+                       msmsdcc_disable_clocks(host, 1);
+#endif
                        mmc_request_done(host->mmc, mrq);
                        return;
                } else
@@ -218,6 +313,8 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
        host->dma.sg = data->sg;
        host->dma.num_ents = data->sg_len;
 
+       BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
+
        nc = host->dma.nc;
 
        switch (host->pdev_id) {
@@ -246,22 +343,15 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
 
        host->curr.user_pages = 0;
 
-       n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
-                      host->dma.num_ents, host->dma.dir);
-
-       if (n != host->dma.num_ents) {
-               pr_err("%s: Unable to map in all sg elements\n",
-                      mmc_hostname(host->mmc));
-               host->dma.sg = NULL;
-               host->dma.num_ents = 0;
-               return -ENOMEM;
-       }
-
        box = &nc->cmd[0];
        for (i = 0; i < host->dma.num_ents; i++) {
                box->cmd = CMD_MODE_BOX;
 
-               if (i == (host->dma.num_ents - 1))
+               /* Initialize sg dma address */
+               sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
+                                 + sg->offset;
+
+               if (i == (host->dma.num_ents - 1))
                        box->cmd |= CMD_LC;
                rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
                        (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -300,15 +390,70 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
                               DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
        host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
 
+       n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+                       host->dma.num_ents, host->dma.dir);
+/* dsb inside dma_map_sg will write nc out to mem as well */
+
+       if (n != host->dma.num_ents) {
+               printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+                       mmc_hostname(host->mmc));
+               host->dma.sg = NULL;
+               host->dma.num_ents = 0;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int
+snoop_cccr_abort(struct mmc_command *cmd)
+{
+       if ((cmd->opcode == 52) &&
+           (cmd->arg & 0x80000000) &&
+           (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
+               return 1;
        return 0;
 }
 
 static void
-msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
+msmsdcc_start_command_deferred(struct msmsdcc_host *host,
+                               struct mmc_command *cmd, u32 *c)
+{
+       *c |= (cmd->opcode | MCI_CPSM_ENABLE);
+
+       if (cmd->flags & MMC_RSP_PRESENT) {
+               if (cmd->flags & MMC_RSP_136)
+                       *c |= MCI_CPSM_LONGRSP;
+               *c |= MCI_CPSM_RESPONSE;
+       }
+
+       if (/*interrupt*/0)
+               *c |= MCI_CPSM_INTERRUPT;
+
+       if ((((cmd->opcode == 17) || (cmd->opcode == 18))  ||
+            ((cmd->opcode == 24) || (cmd->opcode == 25))) ||
+             (cmd->opcode == 53))
+               *c |= MCI_CSPM_DATCMD;
+
+       if (cmd == cmd->mrq->stop)
+               *c |= MCI_CSPM_MCIABORT;
+
+       if (snoop_cccr_abort(cmd))
+               *c |= MCI_CSPM_MCIABORT;
+
+       if (host->curr.cmd != NULL) {
+               printk(KERN_ERR "%s: Overlapping command requests\n",
+                       mmc_hostname(host->mmc));
+       }
+       host->curr.cmd = cmd;
+}
+
+static void
+msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
+                       struct mmc_command *cmd, u32 c)
 {
        unsigned int datactrl, timeout;
        unsigned long long clks;
-       void __iomem *base = host->base;
        unsigned int pio_irqmask = 0;
 
        host->curr.data = data;
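
A note on snoop_cccr_abort() above: per the SDIO spec, the CMD52 (IO_RW_DIRECT) argument carries the R/W flag in bit 31 and the register address in bits 25:9, so the helper matches a write to the CCCR abort register and the caller promotes it to MCI_CSPM_MCIABORT. A comment-style breakdown of the test:

    /* cmd->arg layout for CMD52: [31] R/W, [30:28] function, [25:9] addr */
    int is_abort = (cmd->opcode == 52) &&              /* IO_RW_DIRECT    */
                   (cmd->arg & 0x80000000) &&          /* write, not read */
                   (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT);
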
@@ -320,13 +465,6 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
 
        memset(&host->pio, 0, sizeof(host->pio));
 
-       clks = (unsigned long long)data->timeout_ns * host->clk_rate;
-       do_div(clks, NSEC_PER_SEC);
-       timeout = data->timeout_clks + (unsigned int)clks;
-       writel(timeout, base + MMCIDATATIMER);
-
-       writel(host->curr.xfer_size, base + MMCIDATALENGTH);
-
        datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
 
        if (!msmsdcc_config_dma(host, data))
@@ -347,47 +485,51 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
        if (data->flags & MMC_DATA_READ)
                datactrl |= MCI_DPSM_DIRECTION;
 
-       writel(pio_irqmask, base + MMCIMASK1);
-       writel(datactrl, base + MMCIDATACTRL);
+       clks = (unsigned long long)data->timeout_ns * host->clk_rate;
+       do_div(clks, NSEC_PER_SEC);
+       timeout = data->timeout_clks + (unsigned int)clks * 2;
 
        if (datactrl & MCI_DPSM_DMAENABLE) {
+               /* Save parameters for the exec function */
+               host->cmd_timeout = timeout;
+               host->cmd_pio_irqmask = pio_irqmask;
+               host->cmd_datactrl = datactrl;
+               host->cmd_cmd = cmd;
+
+               host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
+               host->dma.hdr.data = (void *)host;
                host->dma.busy = 1;
+
+               if (cmd) {
+                       msmsdcc_start_command_deferred(host, cmd, &c);
+                       host->cmd_c = c;
+               }
                msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
+       } else {
+               msmsdcc_writel(host, timeout, MMCIDATATIMER);
+
+               msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
+
+               msmsdcc_writel(host, pio_irqmask, MMCIMASK1);
+               msmsdcc_writel(host, datactrl, MMCIDATACTRL);
+
+               if (cmd) {
+                       /* Daisy-chain the command if requested */
+                       msmsdcc_start_command(host, cmd, c);
+               }
        }
 }
 
 static void
 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
 {
-       void __iomem *base = host->base;
-
-       if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
-               writel(0, base + MMCICOMMAND);
-               udelay(2 + ((5 * 1000000) / host->clk_rate));
-       }
-
-       c |= cmd->opcode | MCI_CPSM_ENABLE;
-
-       if (cmd->flags & MMC_RSP_PRESENT) {
-               if (cmd->flags & MMC_RSP_136)
-                       c |= MCI_CPSM_LONGRSP;
-               c |= MCI_CPSM_RESPONSE;
-       }
-
-       if (cmd->opcode == 17 || cmd->opcode == 18 ||
-           cmd->opcode == 24 || cmd->opcode == 25 ||
-           cmd->opcode == 53)
-               c |= MCI_CSPM_DATCMD;
-
        if (cmd == cmd->mrq->stop)
                c |= MCI_CSPM_MCIABORT;
 
-       host->curr.cmd = cmd;
-
        host->stats.cmds++;
 
-       writel(cmd->arg, base + MMCIARGUMENT);
-       writel(c, base + MMCICOMMAND);
+       msmsdcc_start_command_deferred(host, cmd, &c);
+       msmsdcc_start_command_exec(host, cmd->arg, c);
 }
 
 static void
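
The refactor splits command programming in two: msmsdcc_start_command_deferred() builds the command word and records it, and msmsdcc_start_command_exec() writes MMCIARGUMENT/MMCICOMMAND. On the PIO path the two run back to back; on the DMA path the timeout, irq mask, datactrl and command are stashed in host->cmd_* and written from msmsdcc_dma_exec_func() once the DataMover is actually ready, so the controller never starts a transfer ahead of its DMA descriptors. Condensed, the control flow is:

    if (datactrl & MCI_DPSM_DMAENABLE) {
            /* save cmd_timeout/cmd_pio_irqmask/cmd_datactrl/cmd_cmd;
             * msmsdcc_dma_exec_func() programs them from the DM callback */
            msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
    } else {
            /* PIO: program the registers now, daisy-chain the command */
            msmsdcc_start_command(host, cmd, c);
    }
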
@@ -421,13 +563,11 @@ msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
 static int
 msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
 {
-       void __iomem    *base = host->base;
        uint32_t        *ptr = (uint32_t *) buffer;
        int             count = 0;
 
-       while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
-
-               *ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
+       while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
+               *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
                ptr++;
                count += sizeof(uint32_t);
 
@@ -459,7 +599,7 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
                if (remain == 0)
                        break;
 
-               status = readl(base + MMCISTATUS);
+               status = msmsdcc_readl(host, MMCISTATUS);
        } while (status & MCI_TXFIFOHALFEMPTY);
 
        return ptr - buffer;
@@ -469,7 +609,7 @@ static int
 msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
 {
        while (maxspin) {
-               if ((readl(host->base + MMCISTATUS) & mask))
+               if ((msmsdcc_readl(host, MMCISTATUS) & mask))
                        return 0;
                udelay(1);
                --maxspin;
@@ -477,14 +617,13 @@ msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
        return -ETIMEDOUT;
 }
 
-static int
+static irqreturn_t
 msmsdcc_pio_irq(int irq, void *dev_id)
 {
        struct msmsdcc_host     *host = dev_id;
-       void __iomem            *base = host->base;
        uint32_t                status;
 
-       status = readl(base + MMCISTATUS);
+       status = msmsdcc_readl(host, MMCISTATUS);
 
        do {
                unsigned long flags;
@@ -539,14 +678,14 @@ msmsdcc_pio_irq(int irq, void *dev_id)
                        host->pio.sg_off = 0;
                }
 
-               status = readl(base + MMCISTATUS);
+               status = msmsdcc_readl(host, MMCISTATUS);
        } while (1);
 
        if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
-               writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+               msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1);
 
        if (!host->curr.xfer_remain)
-               writel(0, base + MMCIMASK1);
+               msmsdcc_writel(host, 0, MMCIMASK1);
 
        return IRQ_HANDLED;
 }
@@ -554,15 +693,13 @@ msmsdcc_pio_irq(int irq, void *dev_id)
 static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
 {
        struct mmc_command *cmd = host->curr.cmd;
-       void __iomem       *base = host->base;
 
        host->curr.cmd = NULL;
-       cmd->resp[0] = readl(base + MMCIRESPONSE0);
-       cmd->resp[1] = readl(base + MMCIRESPONSE1);
-       cmd->resp[2] = readl(base + MMCIRESPONSE2);
-       cmd->resp[3] = readl(base + MMCIRESPONSE3);
+       cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
+       cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
+       cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
+       cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
 
-       del_timer(&host->command_timer);
        if (status & MCI_CMDTIMEOUT) {
                cmd->error = -ETIMEDOUT;
        } else if (status & MCI_CMDCRCFAIL &&
@@ -580,8 +717,10 @@ static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
                        msmsdcc_request_end(host, cmd->mrq);
                } else /* host->data == NULL */
                        msmsdcc_request_end(host, cmd->mrq);
-       } else if (!(cmd->data->flags & MMC_DATA_READ))
-               msmsdcc_start_data(host, cmd->data);
+       } else if (cmd->data)
+               if (!(cmd->data->flags & MMC_DATA_READ))
+                       msmsdcc_start_data(host, cmd->data,
+                                               NULL, 0);
 }
 
 static void
@@ -590,6 +729,11 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
 {
        struct mmc_data *data = host->curr.data;
 
+       if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
+                     MCI_CMDTIMEOUT) && host->curr.cmd) {
+               msmsdcc_do_cmdirq(host, status);
+       }
+
        if (!data)
                return;
 
@@ -602,7 +746,8 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
                        msm_dmov_stop_cmd(host->dma.channel,
                                          &host->dma.hdr, 0);
                else {
-                       msmsdcc_stop_data(host);
+                       if (host->curr.data)
+                               msmsdcc_stop_data(host);
                        if (!data->stop)
                                msmsdcc_request_end(host, data->mrq);
                        else
@@ -657,17 +802,18 @@ msmsdcc_irq(int irq, void *dev_id)
        spin_lock(&host->lock);
 
        do {
-               status = readl(base + MMCISTATUS);
+               status = msmsdcc_readl(host, MMCISTATUS);
+               status &= (msmsdcc_readl(host, MMCIMASK0) |
+                                             MCI_DATABLOCKENDMASK);
+               msmsdcc_writel(host, status, MMCICLEAR);
 
-               status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
-               writel(status, base + MMCICLEAR);
+               if (status & MCI_SDIOINTR)
+                       status &= ~MCI_SDIOINTR;
 
-               msmsdcc_handle_irq_data(host, status, base);
+               if (!status)
+                       break;
 
-               if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
-                             MCI_CMDTIMEOUT) && host->curr.cmd) {
-                       msmsdcc_do_cmdirq(host, status);
-               }
+               msmsdcc_handle_irq_data(host, status, base);
 
                if (status & MCI_SDIOINTOPER) {
                        cardint = 1;
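
The reworked interrupt loop reads the raw status, masks it against MMCIMASK0 plus the data-block-end bit, and acknowledges via MMCICLEAR before dispatching, exiting as soon as no enabled bits remain rather than handling data first and commands second; command completion is now routed through msmsdcc_handle_irq_data(), which calls msmsdcc_do_cmdirq() itself, and the raw SDIO interrupt bit is filtered out in favor of MCI_SDIOINTOPER.
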
@@ -714,24 +860,27 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
                return;
        }
 
+       msmsdcc_enable_clocks(host);
+
        host->curr.mrq = mrq;
 
        if (mrq->data && mrq->data->flags & MMC_DATA_READ)
-               msmsdcc_start_data(host, mrq->data);
-
-       msmsdcc_start_command(host, mrq->cmd, 0);
+               /* Queue/read data, daisy-chain command when data starts */
+               msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
+       else
+               msmsdcc_start_command(host, mrq->cmd, 0);
 
        if (host->cmdpoll && !msmsdcc_spin_on_status(host,
                                MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
                                CMD_SPINMAX)) {
-               uint32_t status = readl(host->base + MMCISTATUS);
+               uint32_t status = msmsdcc_readl(host, MMCISTATUS);
                msmsdcc_do_cmdirq(host, status);
-               writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
-                      host->base + MMCICLEAR);
+               msmsdcc_writel(host,
+                              MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
+                              MMCICLEAR);
                host->stats.cmdpoll_hits++;
        } else {
                host->stats.cmdpoll_misses++;
-               mod_timer(&host->command_timer, jiffies + HZ);
        }
        spin_unlock_irqrestore(&host->lock, flags);
 }
@@ -742,14 +891,13 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        struct msmsdcc_host *host = mmc_priv(mmc);
        u32 clk = 0, pwr = 0;
        int rc;
+       unsigned long flags;
 
-       if (ios->clock) {
+       spin_lock_irqsave(&host->lock, flags);
 
-               if (!host->clks_on) {
-                       clk_enable(host->pclk);
-                       clk_enable(host->clk);
-                       host->clks_on = 1;
-               }
+       msmsdcc_enable_clocks(host);
+
+       if (ios->clock) {
                if (ios->clock != host->clk_rate) {
                        rc = clk_set_rate(host->clk, ios->clock);
                        if (rc < 0)
@@ -787,18 +935,16 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
                pwr |= MCI_OD;
 
-       writel(clk, host->base + MMCICLOCK);
+       msmsdcc_writel(host, clk, MMCICLOCK);
 
        if (host->pwr != pwr) {
                host->pwr = pwr;
-               writel(pwr, host->base + MMCIPOWER);
-       }
-
-       if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
-               clk_disable(host->clk);
-               clk_disable(host->pclk);
-               host->clks_on = 0;
+               msmsdcc_writel(host, pwr, MMCIPOWER);
        }
+#if BUSCLK_PWRSAVE
+       msmsdcc_disable_clocks(host, 1);
+#endif
+       spin_unlock_irqrestore(&host->lock, flags);
 }
 
 static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -809,13 +955,13 @@ static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 
        spin_lock_irqsave(&host->lock, flags);
        if (msmsdcc_sdioirq == 1) {
-               status = readl(host->base + MMCIMASK0);
+               status = msmsdcc_readl(host, MMCIMASK0);
                if (enable)
                        status |= MCI_SDIOINTOPERMASK;
                else
                        status &= ~MCI_SDIOINTOPERMASK;
                host->saved_irq0mask = status;
-               writel(status, host->base + MMCIMASK0);
+               msmsdcc_writel(host, status, MMCIMASK0);
        }
        spin_unlock_irqrestore(&host->lock, flags);
 }
@@ -875,42 +1021,13 @@ msmsdcc_status_notify_cb(int card_present, void *dev_id)
        msmsdcc_check_status((unsigned long) host);
 }
 
-/*
- * called when a command expires.
- * Dump some debugging, and then error
- * out the transaction.
- */
 static void
-msmsdcc_command_expired(unsigned long _data)
+msmsdcc_busclk_expired(unsigned long _data)
 {
        struct msmsdcc_host     *host = (struct msmsdcc_host *) _data;
-       struct mmc_request      *mrq;
-       unsigned long           flags;
-
-       spin_lock_irqsave(&host->lock, flags);
-       mrq = host->curr.mrq;
-
-       if (!mrq) {
-               pr_info("%s: Command expiry misfire\n",
-                       mmc_hostname(host->mmc));
-               spin_unlock_irqrestore(&host->lock, flags);
-               return;
-       }
-
-       pr_err("%s: Command timeout (%p %p %p %p)\n",
-              mmc_hostname(host->mmc), mrq, mrq->cmd,
-              mrq->data, host->dma.sg);
-
-       mrq->cmd->error = -ETIMEDOUT;
-       msmsdcc_stop_data(host);
 
-       writel(0, host->base + MMCICOMMAND);
-
-       host->curr.mrq = NULL;
-       host->curr.cmd = NULL;
-
-       spin_unlock_irqrestore(&host->lock, flags);
-       mmc_request_done(host->mmc, mrq);
+       if (host->clks_on)
+               msmsdcc_disable_clocks(host, 0);
 }
 
 static int
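
With interrupts now trusted to complete every command, the one-second command watchdog is gone; its timer slot is repurposed as busclk_timer, whose expiry handler simply gates the bus clocks synchronously (deferr = 0) if they are still on.
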
@@ -1012,6 +1129,7 @@ msmsdcc_probe(struct platform_device *pdev)
        host->pdev_id = pdev->id;
        host->plat = plat;
        host->mmc = mmc;
+       host->curr.cmd = NULL;
 
        host->cmdpoll = 1;
 
@@ -1027,36 +1145,35 @@ msmsdcc_probe(struct platform_device *pdev)
        host->dmares = dmares;
        spin_lock_init(&host->lock);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       if (plat->embedded_sdio)
+               mmc_set_embedded_sdio_data(mmc,
+                                          &plat->embedded_sdio->cis,
+                                          &plat->embedded_sdio->cccr,
+                                          plat->embedded_sdio->funcs,
+                                          plat->embedded_sdio->num_funcs);
+#endif
+
        /*
         * Setup DMA
         */
        msmsdcc_init_dma(host);
 
-       /*
-        * Setup main peripheral bus clock
-        */
+       /* Get our clocks */
        host->pclk = clk_get(&pdev->dev, "sdc_pclk");
        if (IS_ERR(host->pclk)) {
                ret = PTR_ERR(host->pclk);
                goto host_free;
        }
 
-       ret = clk_enable(host->pclk);
-       if (ret)
-               goto pclk_put;
-
-       host->pclk_rate = clk_get_rate(host->pclk);
-
-       /*
-        * Setup SDC MMC clock
-        */
        host->clk = clk_get(&pdev->dev, "sdc_clk");
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
-               goto pclk_disable;
+               goto pclk_put;
        }
 
-       ret = clk_enable(host->clk);
+       /* Enable clocks */
+       ret = msmsdcc_enable_clocks(host);
        if (ret)
                goto clk_put;
 
@@ -1066,10 +1183,9 @@ msmsdcc_probe(struct platform_device *pdev)
                goto clk_disable;
        }
 
+       host->pclk_rate = clk_get_rate(host->pclk);
        host->clk_rate = clk_get_rate(host->clk);
 
-       host->clks_on = 1;
-
        /*
         * Setup MMC host structure
         */
@@ -1092,10 +1208,10 @@ msmsdcc_probe(struct platform_device *pdev)
        mmc->max_req_size = 33554432;   /* MCI_DATA_LENGTH is 25 bits */
        mmc->max_seg_size = mmc->max_req_size;
 
-       writel(0, host->base + MMCIMASK0);
-       writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */
+       msmsdcc_writel(host, 0, MMCIMASK0);
+       msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
 
-       writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+       msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
        host->saved_irq0mask = MCI_IRQENABLE;
 
        /*
@@ -1137,13 +1253,9 @@ msmsdcc_probe(struct platform_device *pdev)
                host->eject = !host->oldstat;
        }
 
-       /*
-        * Setup a command timer. We currently need this due to
-        * some 'strange' timeout / error handling situations.
-        */
-       init_timer(&host->command_timer);
-       host->command_timer.data = (unsigned long) host;
-       host->command_timer.function = msmsdcc_command_expired;
+       init_timer(&host->busclk_timer);
+       host->busclk_timer.data = (unsigned long) host;
+       host->busclk_timer.function = msmsdcc_busclk_expired;
 
        ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
                          DRIVER_NAME " (cmd)", host);
@@ -1181,6 +1293,9 @@ msmsdcc_probe(struct platform_device *pdev)
        if (host->timer.function)
                pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
 
+#if BUSCLK_PWRSAVE
+       msmsdcc_disable_clocks(host, 1);
+#endif
        return 0;
  cmd_irq_free:
        free_irq(cmd_irqres->start, host);
@@ -1188,11 +1303,9 @@ msmsdcc_probe(struct platform_device *pdev)
        if (host->stat_irq)
                free_irq(host->stat_irq, host);
  clk_disable:
-       clk_disable(host->clk);
+       msmsdcc_disable_clocks(host, 0);
  clk_put:
        clk_put(host->clk);
- pclk_disable:
-       clk_disable(host->pclk);
  pclk_put:
        clk_put(host->pclk);
  host_free:
@@ -1215,15 +1328,10 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
 
                if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
                        rc = mmc_suspend_host(mmc, state);
-               if (!rc) {
-                       writel(0, host->base + MMCIMASK0);
-
-                       if (host->clks_on) {
-                               clk_disable(host->clk);
-                               clk_disable(host->pclk);
-                               host->clks_on = 0;
-                       }
-               }
+               if (!rc)
+                       msmsdcc_writel(host, 0, MMCIMASK0);
+               if (host->clks_on)
+                       msmsdcc_disable_clocks(host, 0);
        }
        return rc;
 }
@@ -1232,27 +1340,21 @@ static int
 msmsdcc_resume(struct platform_device *dev)
 {
        struct mmc_host *mmc = mmc_get_drvdata(dev);
-       unsigned long flags;
 
        if (mmc) {
                struct msmsdcc_host *host = mmc_priv(mmc);
 
-               spin_lock_irqsave(&host->lock, flags);
+               msmsdcc_enable_clocks(host);
 
-               if (!host->clks_on) {
-                       clk_enable(host->pclk);
-                       clk_enable(host->clk);
-                       host->clks_on = 1;
-               }
-
-               writel(host->saved_irq0mask, host->base + MMCIMASK0);
-
-               spin_unlock_irqrestore(&host->lock, flags);
+               msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
 
                if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
                        mmc_resume_host(mmc);
                if (host->stat_irq)
                        enable_irq(host->stat_irq);
+#if BUSCLK_PWRSAVE
+               msmsdcc_disable_clocks(host, 1);
+#endif
        }
        return 0;
 }
index 8c84484..da0039c 100644 (file)
@@ -171,6 +171,7 @@ struct msmsdcc_dma_data {
        int                             channel;
        struct msmsdcc_host             *host;
        int                             busy; /* Set if DM is busy */
+       int                             active;
 };
 
 struct msmsdcc_pio_data {
@@ -213,7 +214,7 @@ struct msmsdcc_host {
        struct clk              *clk;           /* main MMC bus clock */
        struct clk              *pclk;          /* SDCC peripheral bus clock */
        unsigned int            clks_on;        /* set if clocks are enabled */
-       struct timer_list       command_timer;
+       struct timer_list       busclk_timer;
 
        unsigned int            eject;          /* eject state */
 
@@ -233,6 +234,18 @@ struct msmsdcc_host {
        struct msmsdcc_pio_data pio;
        int                     cmdpoll;
        struct msmsdcc_stats    stats;
+
+#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
+       struct work_struct      resume_task;
+#endif
+
+       /* Command parameters */
+       unsigned int            cmd_timeout;
+       unsigned int            cmd_pio_irqmask;
+       unsigned int            cmd_datactrl;
+       struct mmc_command      *cmd_cmd;
+       u32                     cmd_c;
+
 };
 
 #endif
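
The header grows the state the deferred-command path needs: cmd_timeout, cmd_pio_irqmask, cmd_datactrl, cmd_cmd and cmd_c are the register values msmsdcc_dma_exec_func() writes later, dma.active marks a transfer the DataMover has actually started (as opposed to busy, which is set at enqueue time), and busclk_timer replaces the old command_timer.
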
index 2b1ea3d..891e1dd 100644 (file)
@@ -1891,8 +1891,8 @@ static int serial8250_get_poll_char(struct uart_port *port)
        struct uart_8250_port *up = (struct uart_8250_port *)port;
        unsigned char lsr = serial_inp(up, UART_LSR);
 
-       while (!(lsr & UART_LSR_DR))
-               lsr = serial_inp(up, UART_LSR);
+       if (!(lsr & UART_LSR_DR))
+               return NO_POLL_CHAR;
 
        return serial_inp(up, UART_RX);
 }
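
This hunk and the pl010, sh-sci and sunzilog changes below move the polled-console drivers to a common convention: poll_get_char() returns immediately with NO_POLL_CHAR when no byte is waiting, rather than busy-looping on the line-status register, and the kgdb/kdb core performs the retry so it can interleave other poll sources such as the keyboard. The expected driver shape, as a hedged sketch with hypothetical helpers:

    static int demo_poll_get_char(struct uart_port *port)
    {
            unsigned int status = demo_read_status(port);   /* hypothetical */

            if (!(status & DEMO_RX_READY))
                    return NO_POLL_CHAR;    /* caller will poll again */
            return demo_read_rx(port);
    }
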
index 743ebf5..eb4cb48 100644 (file)
@@ -342,9 +342,9 @@ static int pl010_get_poll_char(struct uart_port *port)
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int status;
 
-       do {
-               status = readw(uap->port.membase + UART01x_FR);
-       } while (status & UART01x_FR_RXFE);
+       status = readw(uap->port.membase + UART01x_FR);
+       if (status & UART01x_FR_RXFE)
+               return NO_POLL_CHAR;
 
        return readw(uap->port.membase + UART01x_DR);
 }
index eadc1ab..a9a94ae 100644 (file)
@@ -14,7 +14,9 @@
 #include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <linux/kgdb.h>
+#include <linux/kdb.h>
 #include <linux/tty.h>
+#include <linux/console.h>
 
 #define MAX_CONFIG_LEN         40
 
@@ -32,6 +34,40 @@ static struct kparam_string kps = {
 static struct tty_driver       *kgdb_tty_driver;
 static int                     kgdb_tty_line;
 
+#ifdef CONFIG_KDB_KEYBOARD
+static int kgdboc_register_kbd(char **cptr)
+{
+       if (strncmp(*cptr, "kbd", 3) == 0) {
+               if (kdb_poll_idx < KDB_POLL_FUNC_MAX) {
+                       kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char;
+                       kdb_poll_idx++;
+                       if (cptr[0][3] == ',')
+                               *cptr += 4;
+                       else
+                               return 1;
+               }
+       }
+       return 0;
+}
+
+static void kgdboc_unregister_kbd(void)
+{
+       int i;
+
+       for (i = 0; i < kdb_poll_idx; i++) {
+               if (kdb_poll_funcs[i] == kdb_get_kbd_char) {
+                       kdb_poll_idx--;
+                       kdb_poll_funcs[i] = kdb_poll_funcs[kdb_poll_idx];
+                       kdb_poll_funcs[kdb_poll_idx] = NULL;
+                       i--;
+               }
+       }
+}
+#else /* ! CONFIG_KDB_KEYBOARD */
+#define kgdboc_register_kbd(x) 0
+#define kgdboc_unregister_kbd()
+#endif /* ! CONFIG_KDB_KEYBOARD */
+
 static int kgdboc_option_setup(char *opt)
 {
        if (strlen(opt) > MAX_CONFIG_LEN) {
@@ -45,25 +81,51 @@ static int kgdboc_option_setup(char *opt)
 
 __setup("kgdboc=", kgdboc_option_setup);
 
+static void cleanup_kgdboc(void)
+{
+       kgdboc_unregister_kbd();
+       if (configured == 1)
+               kgdb_unregister_io_module(&kgdboc_io_ops);
+}
+
 static int configure_kgdboc(void)
 {
        struct tty_driver *p;
        int tty_line = 0;
        int err;
+       char *cptr = config;
+       struct console *cons;
 
        err = kgdboc_option_setup(config);
        if (err || !strlen(config) || isspace(config[0]))
                goto noconfig;
 
        err = -ENODEV;
+       kgdboc_io_ops.is_console = 0;
+       kgdb_tty_driver = NULL;
 
-       p = tty_find_polling_driver(config, &tty_line);
+       if (kgdboc_register_kbd(&cptr))
+               goto do_register;
+
+       p = tty_find_polling_driver(cptr, &tty_line);
        if (!p)
                goto noconfig;
 
+       cons = console_drivers;
+       while (cons) {
+               int idx;
+               if (cons->device && cons->device(cons, &idx) == p &&
+                   idx == tty_line) {
+                       kgdboc_io_ops.is_console = 1;
+                       break;
+               }
+               cons = cons->next;
+       }
+
        kgdb_tty_driver = p;
        kgdb_tty_line = tty_line;
 
+do_register:
        err = kgdb_register_io_module(&kgdboc_io_ops);
        if (err)
                goto noconfig;
@@ -75,6 +137,7 @@ static int configure_kgdboc(void)
 noconfig:
        config[0] = 0;
        configured = 0;
+       cleanup_kgdboc();
 
        return err;
 }
@@ -88,20 +151,18 @@ static int __init init_kgdboc(void)
        return configure_kgdboc();
 }
 
-static void cleanup_kgdboc(void)
-{
-       if (configured == 1)
-               kgdb_unregister_io_module(&kgdboc_io_ops);
-}
-
 static int kgdboc_get_char(void)
 {
+       if (!kgdb_tty_driver)
+               return -1;
        return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
                                                kgdb_tty_line);
 }
 
 static void kgdboc_put_char(u8 chr)
 {
+       if (!kgdb_tty_driver)
+               return;
        kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
                                        kgdb_tty_line, chr);
 }
@@ -162,6 +223,25 @@ static struct kgdb_io kgdboc_io_ops = {
        .post_exception         = kgdboc_post_exp_handler,
 };
 
+#ifdef CONFIG_KGDB_SERIAL_CONSOLE
+/* This is only available if kgdboc is a built in for early debugging */
+int __init kgdboc_early_init(char *opt)
+{
+       /* save the first character of the config string because the
+        * init routine can destroy it.
+        */
+       char save_ch;
+
+       kgdboc_option_setup(opt);
+       save_ch = config[0];
+       init_kgdboc();
+       config[0] = save_ch;
+       return 0;
+}
+
+early_param("ekgdboc", kgdboc_early_init);
+#endif /* CONFIG_KGDB_SERIAL_CONSOLE */
+
 module_init(init_kgdboc);
 module_exit(cleanup_kgdboc);
 module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
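
Taken together, the kgdboc changes let the config string start with "kbd" to mux the kdb keyboard handler into kdb_poll_funcs, detect when the chosen tty is also a registered console (setting is_console so kgdb can share it cleanly), guard the I/O callbacks against a missing driver, and, when kgdboc is built in, accept an "ekgdboc=" early parameter so debugging works before the tty layer is up. One plausible command line combining these (illustrative only):

    console=ttyS0,115200 ekgdboc=kbd kgdboc=kbd,ttyS0,115200 kgdbwait
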
index 55e113a..6a9c660 100644 (file)
@@ -2071,6 +2071,7 @@ static int mpsc_drv_probe(struct platform_device *dev)
 
                if (!(rc = mpsc_drv_map_regs(pi, dev))) {
                        mpsc_drv_get_platform_data(pi, dev, dev->id);
+                       pi->port.dev = &dev->dev;
 
                        if (!(rc = mpsc_make_ready(pi))) {
                                spin_lock_init(&pi->tx_lock);
index 8d993c4..f250a61 100644 (file)
@@ -151,7 +151,11 @@ static int sci_poll_get_char(struct uart_port *port)
                        handle_error(port);
                        continue;
                }
-       } while (!(status & SCxSR_RDxF(port)));
+               break;
+       } while (1);
+
+       if (!(status & SCxSR_RDxF(port)))
+               return NO_POLL_CHAR;
 
        c = sci_in(port, SCxRDR);
 
index 2c7a66a..978b3ce 100644 (file)
@@ -102,6 +102,8 @@ struct uart_sunzilog_port {
 #endif
 };
 
+static void sunzilog_putchar(struct uart_port *port, int ch);
+
 #define ZILOG_CHANNEL_FROM_PORT(PORT)  ((struct zilog_channel __iomem *)((PORT)->membase))
 #define UART_ZILOG(PORT)               ((struct uart_sunzilog_port *)(PORT))
 
@@ -996,6 +998,50 @@ static int sunzilog_verify_port(struct uart_port *port, struct serial_struct *se
        return -EINVAL;
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+static int sunzilog_get_poll_char(struct uart_port *port)
+{
+       unsigned char ch, r1;
+       struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+       struct zilog_channel __iomem *channel
+               = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+
+       if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+               writeb(ERR_RES, &channel->control);
+               ZSDELAY();
+               ZS_WSYNC(channel);
+       }
+
+       ch = readb(&channel->control);
+       ZSDELAY();
+
+       /* This funny hack depends upon BRK_ABRT not interfering
+        * with the other bits we care about in R1.
+        */
+       if (ch & BRK_ABRT)
+               r1 |= BRK_ABRT;
+
+       if (!(ch & Rx_CH_AV))
+               return NO_POLL_CHAR;
+
+       ch = readb(&channel->data);
+       ZSDELAY();
+
+       ch &= up->parity_mask;
+       return ch;
+}
+
+static void sunzilog_put_poll_char(struct uart_port *port,
+                       unsigned char ch)
+{
+       struct uart_sunzilog_port *up = (struct uart_sunzilog_port *)port;
+
+       sunzilog_putchar(&up->port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
 static struct uart_ops sunzilog_pops = {
        .tx_empty       =       sunzilog_tx_empty,
        .set_mctrl      =       sunzilog_set_mctrl,
@@ -1013,6 +1059,10 @@ static struct uart_ops sunzilog_pops = {
        .request_port   =       sunzilog_request_port,
        .config_port    =       sunzilog_config_port,
        .verify_port    =       sunzilog_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+       .poll_get_char  =       sunzilog_get_poll_char,
+       .poll_put_char  =       sunzilog_put_poll_char,
+#endif
 };
 
 static int uart_chip_count;
index 6e98a36..94ecdbc 100644 (file)
@@ -19,6 +19,9 @@
 #include <linux/usb/ch9.h>
 #include <linux/usb/ehci_def.h>
 #include <linux/delay.h>
+#include <linux/serial_core.h>
+#include <linux/kgdb.h>
+#include <linux/kthread.h>
 #include <asm/io.h>
 #include <asm/pci-direct.h>
 #include <asm/fixmap.h>
@@ -55,6 +58,7 @@ static struct ehci_regs __iomem *ehci_regs;
 static struct ehci_dbg_port __iomem *ehci_debug;
 static int dbgp_not_safe; /* Cannot use debug device during ehci reset */
 static unsigned int dbgp_endpoint_out;
+static unsigned int dbgp_endpoint_in;
 
 struct ehci_dev {
        u32 bus;
@@ -91,6 +95,13 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
        return (x & ~0x0f) | (len & 0x0f);
 }
 
+#ifdef CONFIG_KGDB
+static struct kgdb_io kgdbdbgp_io_ops;
+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
+#else
+#define dbgp_kgdb_mode (0)
+#endif
+
 /*
  * USB Packet IDs (PIDs)
  */
@@ -182,11 +193,10 @@ static void dbgp_breath(void)
        /* Sleep to give the debug port a chance to breathe */
 }
 
-static int dbgp_wait_until_done(unsigned ctrl)
+static int dbgp_wait_until_done(unsigned ctrl, int loop)
 {
        u32 pids, lpid;
        int ret;
-       int loop = DBGP_LOOPS;
 
 retry:
        writel(ctrl | DBGP_GO, &ehci_debug->control);
@@ -276,13 +286,13 @@ static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
        dbgp_set_data(bytes, size);
        writel(addr, &ehci_debug->address);
        writel(pids, &ehci_debug->pids);
-       ret = dbgp_wait_until_done(ctrl);
+       ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
 
        return ret;
 }
 
 static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
-                                int size)
+                         int size, int loops)
 {
        u32 pids, addr, ctrl;
        int ret;
@@ -302,7 +312,7 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
 
        writel(addr, &ehci_debug->address);
        writel(pids, &ehci_debug->pids);
-       ret = dbgp_wait_until_done(ctrl);
+       ret = dbgp_wait_until_done(ctrl, loops);
        if (ret < 0)
                return ret;
 
@@ -343,12 +353,12 @@ static int dbgp_control_msg(unsigned devnum, int requesttype,
        dbgp_set_data(&req, sizeof(req));
        writel(addr, &ehci_debug->address);
        writel(pids, &ehci_debug->pids);
-       ret = dbgp_wait_until_done(ctrl);
+       ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
        if (ret < 0)
                return ret;
 
        /* Read the result */
-       return dbgp_bulk_read(devnum, 0, data, size);
+       return dbgp_bulk_read(devnum, 0, data, size, DBGP_LOOPS);
 }
 
 /* Find a PCI capability */
@@ -559,6 +569,7 @@ try_again:
                goto err;
        }
        dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
+       dbgp_endpoint_in = dbgp_desc.bDebugInEndpoint;
 
        /* Move the device to 127 if it isn't already there */
        if (devnum != USB_DEBUG_DEVNUM) {
@@ -968,8 +979,9 @@ int dbgp_reset_prep(void)
        if (!ehci_debug)
                return 0;
 
-       if (early_dbgp_console.index != -1 &&
-               !(early_dbgp_console.flags & CON_BOOT))
+       if ((early_dbgp_console.index != -1 &&
+            !(early_dbgp_console.flags & CON_BOOT)) ||
+           dbgp_kgdb_mode)
                return 1;
        /* This means the console is not initialized, or should get
         * shutdown so as to allow for reuse of the usb device, which
@@ -982,3 +994,93 @@ int dbgp_reset_prep(void)
        return 0;
 }
 EXPORT_SYMBOL_GPL(dbgp_reset_prep);
+
+#ifdef CONFIG_KGDB
+
+static char kgdbdbgp_buf[DBGP_MAX_PACKET];
+static int kgdbdbgp_buf_sz;
+static int kgdbdbgp_buf_idx;
+static int kgdbdbgp_loop_cnt = DBGP_LOOPS;
+
+static int kgdbdbgp_read_char(void)
+{
+       int ret;
+
+       if (kgdbdbgp_buf_idx < kgdbdbgp_buf_sz) {
+               char ch = kgdbdbgp_buf[kgdbdbgp_buf_idx++];
+               return ch;
+       }
+
+       ret = dbgp_bulk_read(USB_DEBUG_DEVNUM, dbgp_endpoint_in,
+                            &kgdbdbgp_buf, DBGP_MAX_PACKET,
+                            kgdbdbgp_loop_cnt);
+       if (ret <= 0)
+               return NO_POLL_CHAR;
+       kgdbdbgp_buf_sz = ret;
+       kgdbdbgp_buf_idx = 1;
+       return kgdbdbgp_buf[0];
+}
+
+static void kgdbdbgp_write_char(u8 chr)
+{
+       early_dbgp_write(NULL, &chr, 1);
+}
+
+static struct kgdb_io kgdbdbgp_io_ops = {
+       .name = "kgdbdbgp",
+       .read_char = kgdbdbgp_read_char,
+       .write_char = kgdbdbgp_write_char,
+};
+
+static int kgdbdbgp_wait_time;
+
+static int __init kgdbdbgp_parse_config(char *str)
+{
+       char *ptr;
+
+       if (!ehci_debug) {
+               if (early_dbgp_init(str))
+                       return -1;
+       }
+       ptr = strchr(str, ',');
+       if (ptr) {
+               ptr++;
+               kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
+       }
+       kgdb_register_io_module(&kgdbdbgp_io_ops);
+       kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+
+       return 0;
+}
+early_param("kgdbdbgp", kgdbdbgp_parse_config);
+
+static int kgdbdbgp_reader_thread(void *ptr)
+{
+       int ret;
+
+       while (readl(&ehci_debug->control) & DBGP_ENABLED) {
+               kgdbdbgp_loop_cnt = 1;
+               ret = kgdbdbgp_read_char();
+               kgdbdbgp_loop_cnt = DBGP_LOOPS;
+               if (ret != NO_POLL_CHAR) {
+                       if (ret == 0x3 || ret == '$') {
+                               if (ret == '$')
+                                       kgdbdbgp_buf_idx--;
+                               kgdb_breakpoint();
+                       }
+                       continue;
+               }
+               schedule_timeout_interruptible(kgdbdbgp_wait_time * HZ);
+       }
+       return 0;
+}
+
+static int __init kgdbdbgp_start_thread(void)
+{
+       if (dbgp_kgdb_mode && kgdbdbgp_wait_time)
+               kthread_run(kgdbdbgp_reader_thread, NULL, "%s", "dbgp");
+
+       return 0;
+}
+module_init(kgdbdbgp_start_thread);
+#endif /* CONFIG_KGDB */
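
The new "kgdbdbgp=" early parameter takes the same controller string as early_dbgp_init() plus an optional ",<seconds>" poll interval; when an interval is given, kgdbdbgp_start_thread() spawns a kernel thread that polls the IN endpoint with a single-loop read and calls kgdb_breakpoint() on a ^C (0x3) or a '$' packet start, rewinding the buffer index so the '$' is re-read by the real I/O loop. An illustrative setting (values hypothetical), meaning debug controller 0 polled every two seconds:

    kgdbdbgp=0,2
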
index fd55c27..1e6fec4 100644 (file)
@@ -2202,7 +2202,6 @@ config FB_MSM
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
-       default y
 
 config FB_MX3
        tristate "MX3 Framebuffer support"
index ecf4055..4a56f46 100644 (file)
@@ -168,7 +168,7 @@ static void efifb_destroy(struct fb_info *info)
 {
        if (info->screen_base)
                iounmap(info->screen_base);
-       release_mem_region(info->aperture_base, info->aperture_size);
+       release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
        framebuffer_release(info);
 }
 
@@ -292,8 +292,13 @@ static int __devinit efifb_probe(struct platform_device *dev)
        info->pseudo_palette = info->par;
        info->par = NULL;
 
-       info->aperture_base = efifb_fix.smem_start;
-       info->aperture_size = size_remap;
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures) {
+               err = -ENOMEM;
+               goto err_release_fb;
+       }
+       info->apertures->ranges[0].base = efifb_fix.smem_start;
+       info->apertures->ranges[0].size = size_remap;
 
        info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
        if (!info->screen_base) {
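
efifb is converted from the old single aperture_base/aperture_size pair to the new apertures list: it allocates a one-entry apertures_struct, failing the probe with -ENOMEM if that allocation fails, and releases the same range in efifb_destroy(), which keeps the firmware framebuffer eligible for eviction by the generic conflict check below.
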
index a15b44e..e08b7b5 100644 (file)
@@ -1468,16 +1468,67 @@ static int fb_check_foreignness(struct fb_info *fi)
        return 0;
 }
 
-static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw)
+static bool apertures_overlap(struct aperture *gen, struct aperture *hw)
 {
        /* is the generic aperture base the same as the HW one */
-       if (gen->aperture_base == hw->aperture_base)
+       if (gen->base == hw->base)
                return true;
        /* is the generic aperture base inside the hw base->hw base+size */
-       if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size)
+       if (gen->base > hw->base && gen->base <= hw->base + hw->size)
                return true;
        return false;
 }
+
+static bool fb_do_apertures_overlap(struct apertures_struct *gena,
+                                   struct apertures_struct *hwa)
+{
+       int i, j;
+       if (!hwa || !gena)
+               return false;
+
+       for (i = 0; i < hwa->count; ++i) {
+               struct aperture *h = &hwa->ranges[i];
+               for (j = 0; j < gena->count; ++j) {
+                       struct aperture *g = &gena->ranges[j];
+                       printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n",
+                               g->base, g->size, h->base, h->size);
+                       if (apertures_overlap(g, h))
+                               return true;
+               }
+       }
+
+       return false;
+}
+
+#define VGA_FB_PHYS 0xA0000
+void remove_conflicting_framebuffers(struct apertures_struct *a,
+                                    const char *name, bool primary)
+{
+       int i;
+
+       /* check all firmware fbs and kick off if the base addr overlaps */
+       for (i = 0 ; i < FB_MAX; i++) {
+               struct apertures_struct *gen_aper;
+               if (!registered_fb[i])
+                       continue;
+
+               if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
+                       continue;
+
+               gen_aper = registered_fb[i]->apertures;
+               if (fb_do_apertures_overlap(gen_aper, a) ||
+                       (primary && gen_aper && gen_aper->count &&
+                        gen_aper->ranges[0].base == VGA_FB_PHYS)) {
+
+                       printk(KERN_ERR "fb: conflicting fb hw usage "
+                              "%s vs %s - removing generic driver\n",
+                              name, registered_fb[i]->fix.id);
+                       unregister_framebuffer(registered_fb[i]);
+               }
+       }
+}
+EXPORT_SYMBOL(remove_conflicting_framebuffers);
+
 /**
  *     register_framebuffer - registers a frame buffer device
  *     @fb_info: frame buffer info structure
@@ -1501,21 +1552,8 @@ register_framebuffer(struct fb_info *fb_info)
        if (fb_check_foreignness(fb_info))
                return -ENOSYS;
 
-       /* check all firmware fbs and kick off if the base addr overlaps */
-       for (i = 0 ; i < FB_MAX; i++) {
-               if (!registered_fb[i])
-                       continue;
-
-               if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) {
-                       if (fb_do_apertures_overlap(registered_fb[i], fb_info)) {
-                               printk(KERN_ERR "fb: conflicting fb hw usage "
-                                      "%s vs %s - removing generic driver\n",
-                                      fb_info->fix.id,
-                                      registered_fb[i]->fix.id);
-                               unregister_framebuffer(registered_fb[i]);
-                       }
-               }
-       }
+       remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
+                                        fb_is_primary_device(fb_info));
 
        num_registered_fb++;
        for (i = 0 ; i < FB_MAX; i++)
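remove_conflicting_framebuffers() becomes an entry point a hardware driver (typically a KMS driver) can call before claiming scanout memory, instead of relying on the overlap check that previously ran only inside register_framebuffer(). A hedged sketch of a caller — every name here is hypothetical, and the function does not take ownership of the list, hence the kfree():

#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* hypothetical: evict generic/firmware fbs sitting on our VRAM BAR */
static int example_kick_firmware_fb(struct pci_dev *pdev, bool primary)
{
	struct apertures_struct *ap;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = pci_resource_start(pdev, 0);
	ap->ranges[0].size = pci_resource_len(pdev, 0);
	remove_conflicting_framebuffers(ap, "exampledrmfb", primary);
	kfree(ap);
	return 0;
}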
index 81aa312..0a08f13 100644
@@ -80,6 +80,7 @@ EXPORT_SYMBOL(framebuffer_alloc);
  */
 void framebuffer_release(struct fb_info *info)
 {
+       kfree(info->apertures);
        kfree(info);
 }
 EXPORT_SYMBOL(framebuffer_release);
index 61f8b8f..46dda7d 100644
@@ -285,7 +285,7 @@ static void offb_destroy(struct fb_info *info)
 {
        if (info->screen_base)
                iounmap(info->screen_base);
-       release_mem_region(info->aperture_base, info->aperture_size);
+       release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
        framebuffer_release(info);
 }
 
@@ -491,8 +491,11 @@ static void __init offb_init_fb(const char *name, const char *full_name,
        var->vmode = FB_VMODE_NONINTERLACED;
 
        /* set offb aperture size for generic probing */
-       info->aperture_base = address;
-       info->aperture_size = fix->smem_len;
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures)
+               goto out_aper;
+       info->apertures->ranges[0].base = address;
+       info->apertures->ranges[0].size = fix->smem_len;
 
        info->fbops = &offb_ops;
        info->screen_base = ioremap(address, fix->smem_len);
@@ -501,17 +504,20 @@ static void __init offb_init_fb(const char *name, const char *full_name,
 
        fb_alloc_cmap(&info->cmap, 256, 0);
 
-       if (register_framebuffer(info) < 0) {
-               iounmap(par->cmap_adr);
-               par->cmap_adr = NULL;
-               iounmap(info->screen_base);
-               framebuffer_release(info);
-               release_mem_region(res_start, res_size);
-               return;
-       }
+       if (register_framebuffer(info) < 0)
+               goto out_err;
 
        printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n",
               info->node, full_name);
+       return;
+
+out_err:
+       iounmap(info->screen_base);
+out_aper:
+       iounmap(par->cmap_adr);
+       par->cmap_adr = NULL;
+       framebuffer_release(info);
+       release_mem_region(res_start, res_size);
 }
 
 
index dfb57ee..881c9f7 100644
@@ -10,6 +10,7 @@ config PANEL_GENERIC
 config PANEL_SHARP_LS037V7DW01
         tristate "Sharp LS037V7DW01 LCD Panel"
         depends on OMAP2_DSS
+        select BACKLIGHT_CLASS_DEVICE
         help
           LCD Panel used in TI's SDP3430 and EVM boards
 
@@ -33,8 +34,14 @@ config PANEL_TOPPOLY_TDO35S
 
 config PANEL_TPO_TD043MTEA1
         tristate "TPO TD043MTEA1 LCD Panel"
-        depends on OMAP2_DSS && I2C
+        depends on OMAP2_DSS && SPI
         help
           LCD Panel used in OMAP3 Pandora
 
+config PANEL_ACX565AKM
+       tristate "ACX565AKM Panel"
+       depends on OMAP2_DSS_SDI
+       select BACKLIGHT_CLASS_DEVICE
+       help
+         This is the LCD panel used on Nokia N900
 endmenu
index e2bb321..aa38609 100644
@@ -5,3 +5,4 @@ obj-$(CONFIG_PANEL_SHARP_LQ043T1DG01) += panel-sharp-lq043t1dg01.o
 obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
 obj-$(CONFIG_PANEL_TOPPOLY_TDO35S) += panel-toppoly-tdo35s.o
 obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
+obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
new file mode 100644
index 0000000..1f8eb70
--- /dev/null
@@ -0,0 +1,819 @@
+/*
+ * Support for ACX565AKM LCD Panel used on Nokia N900
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Original Driver Author: Imre Deak <imre.deak@nokia.com>
+ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/err.h>
+
+#include <plat/display.h>
+
+#define MIPID_CMD_READ_DISP_ID         0x04
+#define MIPID_CMD_READ_RED             0x06
+#define MIPID_CMD_READ_GREEN           0x07
+#define MIPID_CMD_READ_BLUE            0x08
+#define MIPID_CMD_READ_DISP_STATUS     0x09
+#define MIPID_CMD_RDDSDR               0x0F
+#define MIPID_CMD_SLEEP_IN             0x10
+#define MIPID_CMD_SLEEP_OUT            0x11
+#define MIPID_CMD_DISP_OFF             0x28
+#define MIPID_CMD_DISP_ON              0x29
+#define MIPID_CMD_WRITE_DISP_BRIGHTNESS        0x51
+#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
+#define MIPID_CMD_WRITE_CTRL_DISP      0x53
+
+#define CTRL_DISP_BRIGHTNESS_CTRL_ON   (1 << 5)
+#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON        (1 << 4)
+#define CTRL_DISP_BACKLIGHT_ON         (1 << 2)
+#define CTRL_DISP_AUTO_BRIGHTNESS_ON   (1 << 1)
+
+#define MIPID_CMD_READ_CTRL_DISP       0x54
+#define MIPID_CMD_WRITE_CABC           0x55
+#define MIPID_CMD_READ_CABC            0x56
+
+#define MIPID_VER_LPH8923              3
+#define MIPID_VER_LS041Y3              4
+#define MIPID_VER_L4F00311             8
+#define MIPID_VER_ACX565AKM            9
+
+struct acx565akm_device {
+       char            *name;
+       int             enabled;
+       int             model;
+       int             revision;
+       u8              display_id[3];
+       unsigned        has_bc:1;
+       unsigned        has_cabc:1;
+       unsigned        cabc_mode;
+       unsigned long   hw_guard_end;           /* next value of jiffies
+                                                  when we can issue the
+                                                  next sleep in/out command */
+       unsigned long   hw_guard_wait;          /* max guard time in jiffies */
+
+       struct spi_device       *spi;
+       struct mutex            mutex;
+
+       struct omap_dss_device  *dssdev;
+       struct backlight_device *bl_dev;
+};
+
+static struct acx565akm_device acx_dev;
+static int acx565akm_bl_update_status(struct backlight_device *dev);
+
+/*--------------------MIPID interface-----------------------------*/
+
+static void acx565akm_transfer(struct acx565akm_device *md, int cmd,
+                             const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+       struct spi_message      m;
+       struct spi_transfer     *x, xfer[5];
+       int                     r;
+
+       BUG_ON(md->spi == NULL);
+
+       spi_message_init(&m);
+
+       memset(xfer, 0, sizeof(xfer));
+       x = &xfer[0];
+
+       cmd &= 0xff;
+       x->tx_buf = &cmd;
+       x->bits_per_word = 9;
+       x->len = 2;
+
+       if (rlen > 1 && wlen == 0) {
+               /*
+                * Between the command and the response data there is a
+                * dummy clock cycle. Add an extra bit after the command
+                * word to account for this.
+                */
+               x->bits_per_word = 10;
+               cmd <<= 1;
+       }
+       spi_message_add_tail(x, &m);
+
+       if (wlen) {
+               x++;
+               x->tx_buf = wbuf;
+               x->len = wlen;
+               x->bits_per_word = 9;
+               spi_message_add_tail(x, &m);
+       }
+
+       if (rlen) {
+               x++;
+               x->rx_buf       = rbuf;
+               x->len          = rlen;
+               spi_message_add_tail(x, &m);
+       }
+
+       r = spi_sync(md->spi, &m);
+       if (r < 0)
+               dev_dbg(&md->spi->dev, "spi_sync %d\n", r);
+}
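The framing in acx565akm_transfer() follows the MIPID serial protocol: every word on the wire is 9 bits, and the (1 << 8) ORs later in this file show bit 8 set to mark parameter words. A read is additionally preceded by one dummy clock, which the function absorbs by widening the command word to 10 bits and shifting the command left one bit — cheaper than clocking a lone dummy cycle as its own transfer.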
+
+static inline void acx565akm_cmd(struct acx565akm_device *md, int cmd)
+{
+       acx565akm_transfer(md, cmd, NULL, 0, NULL, 0);
+}
+
+static inline void acx565akm_write(struct acx565akm_device *md,
+                              int reg, const u8 *buf, int len)
+{
+       acx565akm_transfer(md, reg, buf, len, NULL, 0);
+}
+
+static inline void acx565akm_read(struct acx565akm_device *md,
+                             int reg, u8 *buf, int len)
+{
+       acx565akm_transfer(md, reg, NULL, 0, buf, len);
+}
+
+static void hw_guard_start(struct acx565akm_device *md, int guard_msec)
+{
+       md->hw_guard_wait = msecs_to_jiffies(guard_msec);
+       md->hw_guard_end = jiffies + md->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct acx565akm_device *md)
+{
+       unsigned long wait = md->hw_guard_end - jiffies;
+
+       if ((long)wait > 0 && wait <= md->hw_guard_wait) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(wait);
+       }
+}
+
+/*----------------------MIPID wrappers----------------------------*/
+
+static void set_sleep_mode(struct acx565akm_device *md, int on)
+{
+       int cmd;
+
+       if (on)
+               cmd = MIPID_CMD_SLEEP_IN;
+       else
+               cmd = MIPID_CMD_SLEEP_OUT;
+       /*
+        * We have to keep 120msec between sleep in/out commands.
+        * (8.2.15, 8.2.16).
+        */
+       hw_guard_wait(md);
+       acx565akm_cmd(md, cmd);
+       hw_guard_start(md, 120);
+}
+
+static void set_display_state(struct acx565akm_device *md, int enabled)
+{
+       int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
+
+       acx565akm_cmd(md, cmd);
+}
+
+static int panel_enabled(struct acx565akm_device *md)
+{
+       u32 disp_status;
+       int enabled;
+
+       acx565akm_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4);
+       disp_status = __be32_to_cpu(disp_status);
+       enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
+       dev_dbg(&md->spi->dev,
+               "LCD panel %senabled by bootloader (status 0x%04x)\n",
+               enabled ? "" : "not ", disp_status);
+       return enabled;
+}
+
+static int panel_detect(struct acx565akm_device *md)
+{
+       acx565akm_read(md, MIPID_CMD_READ_DISP_ID, md->display_id, 3);
+       dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n",
+               md->display_id[0], md->display_id[1], md->display_id[2]);
+
+       switch (md->display_id[0]) {
+       case 0x10:
+               md->model = MIPID_VER_ACX565AKM;
+               md->name = "acx565akm";
+               md->has_bc = 1;
+               md->has_cabc = 1;
+               break;
+       case 0x29:
+               md->model = MIPID_VER_L4F00311;
+               md->name = "l4f00311";
+               break;
+       case 0x45:
+               md->model = MIPID_VER_LPH8923;
+               md->name = "lph8923";
+               break;
+       case 0x83:
+               md->model = MIPID_VER_LS041Y3;
+               md->name = "ls041y3";
+               break;
+       default:
+               md->name = "unknown";
+               dev_err(&md->spi->dev, "invalid display ID\n");
+               return -ENODEV;
+       }
+
+       md->revision = md->display_id[1];
+
+       dev_info(&md->spi->dev, "omapfb: %s rev %02x LCD detected\n",
+                       md->name, md->revision);
+
+       return 0;
+}
+
+/*----------------------Backlight Control-------------------------*/
+
+static void enable_backlight_ctrl(struct acx565akm_device *md, int enable)
+{
+       u16 ctrl;
+
+       acx565akm_read(md, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
+       if (enable) {
+               ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
+                       CTRL_DISP_BACKLIGHT_ON;
+       } else {
+               ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
+                         CTRL_DISP_BACKLIGHT_ON);
+       }
+
+       ctrl |= 1 << 8;
+       acx565akm_write(md, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
+}
+
+static void set_cabc_mode(struct acx565akm_device *md, unsigned mode)
+{
+       u16 cabc_ctrl;
+
+       md->cabc_mode = mode;
+       if (!md->enabled)
+               return;
+       cabc_ctrl = 0;
+       acx565akm_read(md, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
+       cabc_ctrl &= ~3;
+       cabc_ctrl |= (1 << 8) | (mode & 3);
+       acx565akm_write(md, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
+}
+
+static unsigned get_cabc_mode(struct acx565akm_device *md)
+{
+       return md->cabc_mode;
+}
+
+static unsigned get_hw_cabc_mode(struct acx565akm_device *md)
+{
+       u8 cabc_ctrl;
+
+       acx565akm_read(md, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
+       return cabc_ctrl & 3;
+}
+
+static void acx565akm_set_brightness(struct acx565akm_device *md, int level)
+{
+       int bv;
+
+       bv = level | (1 << 8);
+       acx565akm_write(md, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
+
+       if (level)
+               enable_backlight_ctrl(md, 1);
+       else
+               enable_backlight_ctrl(md, 0);
+}
+
+static int acx565akm_get_actual_brightness(struct acx565akm_device *md)
+{
+       u8 bv;
+
+       acx565akm_read(md, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
+
+       return bv;
+}
+
+
+static int acx565akm_bl_update_status(struct backlight_device *dev)
+{
+       struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
+       int r;
+       int level;
+
+       dev_dbg(&md->spi->dev, "%s\n", __func__);
+
+       mutex_lock(&md->mutex);
+
+       if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+                       dev->props.power == FB_BLANK_UNBLANK)
+               level = dev->props.brightness;
+       else
+               level = 0;
+
+       r = 0;
+       if (md->has_bc)
+               acx565akm_set_brightness(md, level);
+       else if (md->dssdev->set_backlight)
+               r = md->dssdev->set_backlight(md->dssdev, level);
+       else
+               r = -ENODEV;
+
+       mutex_unlock(&md->mutex);
+
+       return r;
+}
+
+static int acx565akm_bl_get_intensity(struct backlight_device *dev)
+{
+       struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
+
+       dev_dbg(&dev->dev, "%s\n", __func__);
+
+       if (!md->has_bc && md->dssdev->set_backlight == NULL)
+               return -ENODEV;
+
+       if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+                       dev->props.power == FB_BLANK_UNBLANK) {
+               if (md->has_bc)
+                       return acx565akm_get_actual_brightness(md);
+               else
+                       return dev->props.brightness;
+       }
+
+       return 0;
+}
+
+static const struct backlight_ops acx565akm_bl_ops = {
+       .get_brightness = acx565akm_bl_get_intensity,
+       .update_status  = acx565akm_bl_update_status,
+};
+
+/*--------------------Auto Brightness control via Sysfs---------------------*/
+
+static const char *cabc_modes[] = {
+       "off",          /* always used when CABC is not supported */
+       "ui",
+       "still-image",
+       "moving-image",
+};
+
+static ssize_t show_cabc_mode(struct device *dev,
+               struct device_attribute *attr,
+               char *buf)
+{
+       struct acx565akm_device *md = dev_get_drvdata(dev);
+       const char *mode_str;
+       int mode;
+       int len;
+
+       if (!md->has_cabc)
+               mode = 0;
+       else
+               mode = get_cabc_mode(md);
+       mode_str = "unknown";
+       if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
+               mode_str = cabc_modes[mode];
+       len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
+
+       return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
+}
+
+static ssize_t store_cabc_mode(struct device *dev,
+               struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct acx565akm_device *md = dev_get_drvdata(dev);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
+               const char *mode_str = cabc_modes[i];
+               int cmp_len = strlen(mode_str);
+
+               if (count > 0 && buf[count - 1] == '\n')
+                       count--;
+               if (count != cmp_len)
+                       continue;
+
+               if (strncmp(buf, mode_str, cmp_len) == 0)
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(cabc_modes))
+               return -EINVAL;
+
+       if (!md->has_cabc && i != 0)
+               return -EINVAL;
+
+       mutex_lock(&md->mutex);
+       set_cabc_mode(md, i);
+       mutex_unlock(&md->mutex);
+
+       return count;
+}
+
+static ssize_t show_cabc_available_modes(struct device *dev,
+               struct device_attribute *attr,
+               char *buf)
+{
+       struct acx565akm_device *md = dev_get_drvdata(dev);
+       int len;
+       int i;
+
+       if (!md->has_cabc)
+               return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
+
+       for (i = 0, len = 0;
+            len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
+               len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
+                       i ? " " : "", cabc_modes[i],
+                       i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
+
+       return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
+}
+
+static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
+               show_cabc_mode, store_cabc_mode);
+static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
+               show_cabc_available_modes, NULL);
+
+static struct attribute *bldev_attrs[] = {
+       &dev_attr_cabc_mode.attr,
+       &dev_attr_cabc_available_modes.attr,
+       NULL,
+};
+
+static struct attribute_group bldev_attr_group = {
+       .attrs = bldev_attrs,
+};
+
+
+/*---------------------------ACX Panel----------------------------*/
+
+static int acx_get_recommended_bpp(struct omap_dss_device *dssdev)
+{
+       return 16;
+}
+
+static struct omap_video_timings acx_panel_timings = {
+       .x_res          = 800,
+       .y_res          = 480,
+       .pixel_clock    = 24000,
+       .hfp            = 28,
+       .hsw            = 4,
+       .hbp            = 24,
+       .vfp            = 3,
+       .vsw            = 3,
+       .vbp            = 4,
+};
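For reference, with pixel_clock given in kHz (the omap_video_timings convention), these numbers imply (800 + 28 + 4 + 24) x (480 + 3 + 3 + 4) = 856 x 490 = 419440 clocks per frame, so 24000 kHz yields a refresh of roughly 57 Hz.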
+
+static int acx_panel_probe(struct omap_dss_device *dssdev)
+{
+       int r;
+       struct acx565akm_device *md = &acx_dev;
+       struct backlight_device *bldev;
+       int max_brightness, brightness;
+       struct backlight_properties props;
+
+       dev_dbg(&dssdev->dev, "%s\n", __func__);
+       dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+                                       OMAP_DSS_LCD_IHS;
+       /* FIXME AC bias ? */
+       dssdev->panel.timings = acx_panel_timings;
+
+       if (dssdev->platform_enable)
+               dssdev->platform_enable(dssdev);
+       /*
+        * After reset we have to wait 5 msec before the first
+        * command can be sent.
+        */
+       msleep(5);
+
+       md->enabled = panel_enabled(md);
+
+       r = panel_detect(md);
+       if (r) {
+               dev_err(&dssdev->dev, "%s panel detect error\n", __func__);
+               if (!md->enabled && dssdev->platform_disable)
+                       dssdev->platform_disable(dssdev);
+               return r;
+       }
+
+       mutex_lock(&acx_dev.mutex);
+       acx_dev.dssdev = dssdev;
+       mutex_unlock(&acx_dev.mutex);
+
+       if (!md->enabled) {
+               if (dssdev->platform_disable)
+                       dssdev->platform_disable(dssdev);
+       }
+
+       /*------- Backlight control --------*/
+
+       memset(&props, 0, sizeof(props));
+       props.fb_blank = FB_BLANK_UNBLANK;
+       props.power = FB_BLANK_UNBLANK;
+
+       bldev = backlight_device_register("acx565akm", &md->spi->dev,
+                       md, &acx565akm_bl_ops, &props);
+       if (IS_ERR(bldev))
+               return PTR_ERR(bldev);
+       md->bl_dev = bldev;
+       if (md->has_cabc) {
+               r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
+               if (r) {
+                       dev_err(&bldev->dev,
+                               "%s failed to create sysfs files\n", __func__);
+                       backlight_device_unregister(bldev);
+                       return r;
+               }
+               md->cabc_mode = get_hw_cabc_mode(md);
+       }
+
+       if (md->has_bc)
+               max_brightness = 255;
+       else
+               max_brightness = dssdev->max_backlight_level;
+
+       if (md->has_bc)
+               brightness = acx565akm_get_actual_brightness(md);
+       else if (dssdev->get_backlight)
+               brightness = dssdev->get_backlight(dssdev);
+       else
+               brightness = 0;
+
+       bldev->props.max_brightness = max_brightness;
+       bldev->props.brightness = brightness;
+
+       acx565akm_bl_update_status(bldev);
+       return 0;
+}
+
+static void acx_panel_remove(struct omap_dss_device *dssdev)
+{
+       struct acx565akm_device *md = &acx_dev;
+
+       dev_dbg(&dssdev->dev, "%s\n", __func__);
+       sysfs_remove_group(&md->bl_dev->dev.kobj, &bldev_attr_group);
+       backlight_device_unregister(md->bl_dev);
+       mutex_lock(&acx_dev.mutex);
+       acx_dev.dssdev = NULL;
+       mutex_unlock(&acx_dev.mutex);
+}
+
+static int acx_panel_power_on(struct omap_dss_device *dssdev)
+{
+       struct acx565akm_device *md = &acx_dev;
+       int r;
+
+       dev_dbg(&dssdev->dev, "%s\n", __func__);
+
+       mutex_lock(&md->mutex);
+
+       r = omapdss_sdi_display_enable(dssdev);
+       if (r) {
+               pr_err("%s sdi enable failed\n", __func__);
+               mutex_unlock(&md->mutex);
+               return r;
+       }
+
+       /*FIXME tweak me */
+       msleep(50);
+
+       if (dssdev->platform_enable) {
+               r = dssdev->platform_enable(dssdev);
+               if (r)
+                       goto fail;
+       }
+
+       if (md->enabled) {
+               dev_dbg(&md->spi->dev, "panel already enabled\n");
+               mutex_unlock(&md->mutex);
+               return 0;
+       }
+
+       /*
+        * We have to meet all the following delay requirements:
+        * 1. tRW: reset pulse width 10usec (7.12.1)
+        * 2. tRT: reset cancel time 5msec (7.12.1)
+        * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
+        *    case (7.6.2)
+        * 4. 120msec before the sleep out command (7.12.1)
+        */
+       msleep(120);
+
+       set_sleep_mode(md, 0);
+       md->enabled = 1;
+
+       /* 5msec between sleep out and the next command. (8.2.16) */
+       msleep(5);
+       set_display_state(md, 1);
+       set_cabc_mode(md, md->cabc_mode);
+
+       mutex_unlock(&md->mutex);
+
+       return acx565akm_bl_update_status(md->bl_dev);
+fail:
+       omapdss_sdi_display_disable(dssdev);
+       mutex_unlock(&md->mutex);
+       return r;
+}
+
+static void acx_panel_power_off(struct omap_dss_device *dssdev)
+{
+       struct acx565akm_device *md = &acx_dev;
+
+       dev_dbg(&dssdev->dev, "%s\n", __func__);
+
+       mutex_lock(&md->mutex);
+
+       if (!md->enabled) {
+               mutex_unlock(&md->mutex);
+               return;
+       }
+       set_display_state(md, 0);
+       set_sleep_mode(md, 1);
+       md->enabled = 0;
+       /*
+        * We have to provide PCLK,HS,VS signals for 2 frames (worst case
+        * ~50msec) after sending the sleep in command and asserting the
+        * reset signal. We probably could assert the reset w/o the delay
+        * but we still delay to avoid possible artifacts. (7.6.1)
+        */
+       msleep(50);
+
+       if (dssdev->platform_disable)
+               dssdev->platform_disable(dssdev);
+
+       /* FIXME need to tweak this delay */
+       msleep(100);
+
+       omapdss_sdi_display_disable(dssdev);
+
+       mutex_unlock(&md->mutex);
+}
+
+static int acx_panel_enable(struct omap_dss_device *dssdev)
+{
+       int r;
+
+       dev_dbg(&dssdev->dev, "%s\n", __func__);
+       r = acx_panel_power_on(dssdev);
+
+       if (r)
+               return r;
+
+       dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+       return 0;
+}
+
+static void acx_panel_disable(struct omap_dss_device *dssdev)
+{
+       dev_dbg(&dssdev->dev, "%s\n", __func__);
+       acx_panel_power_off(dssdev);
+       dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int acx_panel_suspend(struct omap_dss_device *dssdev)
+{
+       dev_dbg(&dssdev->dev, "%s\n", __func__);
+       acx_panel_power_off(dssdev);
+       dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+       return 0;
+}
+
+static int acx_panel_resume(struct omap_dss_device *dssdev)
+{
+       int r;
+
+       dev_dbg(&dssdev->dev, "%s\n", __func__);
+       r = acx_panel_power_on(dssdev);
+       if (r)
+               return r;
+
+       dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+       return 0;
+}
+
+static void acx_panel_set_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       int r;
+
+       if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+               omapdss_sdi_display_disable(dssdev);
+
+       dssdev->panel.timings = *timings;
+
+       if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+               r = omapdss_sdi_display_enable(dssdev);
+               if (r)
+                       dev_err(&dssdev->dev, "%s enable failed\n", __func__);
+       }
+}
+
+static void acx_panel_get_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       *timings = dssdev->panel.timings;
+}
+
+static int acx_panel_check_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings)
+{
+       return 0;
+}
+
+
+static struct omap_dss_driver acx_panel_driver = {
+       .probe          = acx_panel_probe,
+       .remove         = acx_panel_remove,
+
+       .enable         = acx_panel_enable,
+       .disable        = acx_panel_disable,
+       .suspend        = acx_panel_suspend,
+       .resume         = acx_panel_resume,
+
+       .set_timings    = acx_panel_set_timings,
+       .get_timings    = acx_panel_get_timings,
+       .check_timings  = acx_panel_check_timings,
+
+       .get_recommended_bpp = acx_get_recommended_bpp,
+
+       .driver         = {
+               .name   = "panel-acx565akm",
+               .owner  = THIS_MODULE,
+       },
+};
+
+/*--------------------SPI probe-------------------------*/
+
+static int acx565akm_spi_probe(struct spi_device *spi)
+{
+       struct acx565akm_device *md = &acx_dev;
+
+       dev_dbg(&spi->dev, "%s\n", __func__);
+
+       spi->mode = SPI_MODE_3;
+       md->spi = spi;
+       mutex_init(&md->mutex);
+       dev_set_drvdata(&spi->dev, md);
+
+       omap_dss_register_driver(&acx_panel_driver);
+
+       return 0;
+}
+
+static int acx565akm_spi_remove(struct spi_device *spi)
+{
+       struct acx565akm_device *md = dev_get_drvdata(&spi->dev);
+
+       dev_dbg(&md->spi->dev, "%s\n", __func__);
+       omap_dss_unregister_driver(&acx_panel_driver);
+
+       return 0;
+}
+
+static struct spi_driver acx565akm_spi_driver = {
+       .driver = {
+               .name   = "acx565akm",
+               .bus    = &spi_bus_type,
+               .owner  = THIS_MODULE,
+       },
+       .probe  = acx565akm_spi_probe,
+       .remove = __devexit_p(acx565akm_spi_remove),
+};
+
+static int __init acx565akm_init(void)
+{
+       return spi_register_driver(&acx565akm_spi_driver);
+}
+
+static void __exit acx565akm_exit(void)
+{
+       spi_unregister_driver(&acx565akm_spi_driver);
+}
+
+module_init(acx565akm_init);
+module_exit(acx565akm_exit);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("acx565akm LCD Driver");
+MODULE_LICENSE("GPL");
index 8d51a5e..7d9eb2b 100644
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 
 #include <plat/display.h>
 
+struct sharp_data {
+       struct backlight_device *bl;
+};
+
 static struct omap_video_timings sharp_ls_timings = {
        .x_res = 480,
        .y_res = 640,
@@ -39,18 +46,89 @@ static struct omap_video_timings sharp_ls_timings = {
        .vbp            = 1,
 };
 
+static int sharp_ls_bl_update_status(struct backlight_device *bl)
+{
+       struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
+       int level;
+
+       if (!dssdev->set_backlight)
+               return -EINVAL;
+
+       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+                       bl->props.power == FB_BLANK_UNBLANK)
+               level = bl->props.brightness;
+       else
+               level = 0;
+
+       return dssdev->set_backlight(dssdev, level);
+}
+
+static int sharp_ls_bl_get_brightness(struct backlight_device *bl)
+{
+       if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+                       bl->props.power == FB_BLANK_UNBLANK)
+               return bl->props.brightness;
+
+       return 0;
+}
+
+static const struct backlight_ops sharp_ls_bl_ops = {
+       .get_brightness = sharp_ls_bl_get_brightness,
+       .update_status  = sharp_ls_bl_update_status,
+};
+
+
+
 static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
 {
+       struct backlight_properties props;
+       struct backlight_device *bl;
+       struct sharp_data *sd;
+       int r;
+
        dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
                OMAP_DSS_LCD_IHS;
        dssdev->panel.acb = 0x28;
        dssdev->panel.timings = sharp_ls_timings;
 
+       sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+       if (!sd)
+               return -ENOMEM;
+
+       dev_set_drvdata(&dssdev->dev, sd);
+
+       memset(&props, 0, sizeof(struct backlight_properties));
+       props.max_brightness = dssdev->max_backlight_level;
+
+       bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
+                       &sharp_ls_bl_ops, &props);
+       if (IS_ERR(bl)) {
+               r = PTR_ERR(bl);
+               kfree(sd);
+               return r;
+       }
+       sd->bl = bl;
+
+       bl->props.fb_blank = FB_BLANK_UNBLANK;
+       bl->props.power = FB_BLANK_UNBLANK;
+       bl->props.brightness = dssdev->max_backlight_level;
+       r = sharp_ls_bl_update_status(bl);
+       if (r < 0)
+               dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
        return 0;
 }
 
 static void sharp_ls_panel_remove(struct omap_dss_device *dssdev)
 {
+       struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+       struct backlight_device *bl = sd->bl;
+
+       bl->props.power = FB_BLANK_POWERDOWN;
+       sharp_ls_bl_update_status(bl);
+       backlight_device_unregister(bl);
+
+       kfree(sd);
 }
 
 static int sharp_ls_power_on(struct omap_dss_device *dssdev)
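Note the memset() on props: backlight_device_register() copies the whole properties structure into the new device, so any field left unset would otherwise carry stack garbage into it. The probe also pushes an initial brightness immediately, so the backlight state matches what was just registered.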
index 4f3988a..aaf5d30 100644
@@ -31,6 +31,7 @@
 #include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
 
 #include <plat/display.h>
 
@@ -67,6 +68,8 @@
 static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
 
 struct taal_data {
+       struct mutex lock;
+
        struct backlight_device *bldev;
 
        unsigned long   hw_guard_end;   /* next value of jiffies when we can
@@ -510,6 +513,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
        }
        td->dssdev = dssdev;
 
+       mutex_init(&td->lock);
+
        td->esd_wq = create_singlethread_workqueue("taal_esd");
        if (td->esd_wq == NULL) {
                dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -697,10 +702,9 @@ static int taal_power_on(struct omap_dss_device *dssdev)
 
        return 0;
 err:
-       dsi_bus_unlock();
-
        omapdss_dsi_display_disable(dssdev);
 err0:
+       dsi_bus_unlock();
        if (dssdev->platform_disable)
                dssdev->platform_disable(dssdev);
 
@@ -733,54 +737,96 @@ static void taal_power_off(struct omap_dss_device *dssdev)
 
 static int taal_enable(struct omap_dss_device *dssdev)
 {
+       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
        int r;
+
        dev_dbg(&dssdev->dev, "enable\n");
 
-       if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
-               return -EINVAL;
+       mutex_lock(&td->lock);
+
+       if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+               r = -EINVAL;
+               goto err;
+       }
 
        r = taal_power_on(dssdev);
        if (r)
-               return r;
+               goto err;
 
        dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
+       mutex_unlock(&td->lock);
+
+       return 0;
+err:
+       dev_dbg(&dssdev->dev, "enable failed\n");
+       mutex_unlock(&td->lock);
        return r;
 }
 
 static void taal_disable(struct omap_dss_device *dssdev)
 {
+       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
        dev_dbg(&dssdev->dev, "disable\n");
 
+       mutex_lock(&td->lock);
+
        if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
                taal_power_off(dssdev);
 
        dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+       mutex_unlock(&td->lock);
 }
 
 static int taal_suspend(struct omap_dss_device *dssdev)
 {
+       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+       int r;
+
        dev_dbg(&dssdev->dev, "suspend\n");
 
-       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
-               return -EINVAL;
+       mutex_lock(&td->lock);
+
+       if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+               r = -EINVAL;
+               goto err;
+       }
 
        taal_power_off(dssdev);
        dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
 
+       mutex_unlock(&td->lock);
+
        return 0;
+err:
+       mutex_unlock(&td->lock);
+       return r;
 }
 
 static int taal_resume(struct omap_dss_device *dssdev)
 {
+       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
        int r;
+
        dev_dbg(&dssdev->dev, "resume\n");
 
-       if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
-               return -EINVAL;
+       mutex_lock(&td->lock);
+
+       if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+               r = -EINVAL;
+               goto err;
+       }
 
        r = taal_power_on(dssdev);
        dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+       mutex_unlock(&td->lock);
+
+       return r;
+err:
+       mutex_unlock(&td->lock);
        return r;
 }
 
@@ -799,6 +845,7 @@ static int taal_update(struct omap_dss_device *dssdev,
 
        dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
 
+       mutex_lock(&td->lock);
        dsi_bus_lock();
 
        if (!td->enabled) {
@@ -820,18 +867,24 @@ static int taal_update(struct omap_dss_device *dssdev,
                goto err;
 
        /* note: no bus_unlock here. unlock is in framedone_cb */
+       mutex_unlock(&td->lock);
        return 0;
 err:
        dsi_bus_unlock();
+       mutex_unlock(&td->lock);
        return r;
 }
 
 static int taal_sync(struct omap_dss_device *dssdev)
 {
+       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
        dev_dbg(&dssdev->dev, "sync\n");
 
+       mutex_lock(&td->lock);
        dsi_bus_lock();
        dsi_bus_unlock();
+       mutex_unlock(&td->lock);
 
        dev_dbg(&dssdev->dev, "sync done\n");
 
@@ -861,13 +914,16 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
 
 static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
 {
+       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
        int r;
 
+       mutex_lock(&td->lock);
        dsi_bus_lock();
 
        r = _taal_enable_te(dssdev, enable);
 
        dsi_bus_unlock();
+       mutex_unlock(&td->lock);
 
        return r;
 }
@@ -875,7 +931,13 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
 static int taal_get_te(struct omap_dss_device *dssdev)
 {
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       return td->te_enabled;
+       int r;
+
+       mutex_lock(&td->lock);
+       r = td->te_enabled;
+       mutex_unlock(&td->lock);
+
+       return r;
 }
 
 static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
@@ -885,6 +947,7 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
 
        dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
 
+       mutex_lock(&td->lock);
        dsi_bus_lock();
 
        if (td->enabled) {
@@ -896,16 +959,24 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
        td->rotate = rotate;
 
        dsi_bus_unlock();
+       mutex_unlock(&td->lock);
        return 0;
 err:
        dsi_bus_unlock();
+       mutex_unlock(&td->lock);
        return r;
 }
 
 static u8 taal_get_rotate(struct omap_dss_device *dssdev)
 {
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       return td->rotate;
+       int r;
+
+       mutex_lock(&td->lock);
+       r = td->rotate;
+       mutex_unlock(&td->lock);
+
+       return r;
 }
 
 static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
@@ -915,6 +986,7 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
 
        dev_dbg(&dssdev->dev, "mirror %d\n", enable);
 
+       mutex_lock(&td->lock);
        dsi_bus_lock();
        if (td->enabled) {
                r = taal_set_addr_mode(td->rotate, enable);
@@ -925,23 +997,33 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
        td->mirror = enable;
 
        dsi_bus_unlock();
+       mutex_unlock(&td->lock);
        return 0;
 err:
        dsi_bus_unlock();
+       mutex_unlock(&td->lock);
        return r;
 }
 
 static bool taal_get_mirror(struct omap_dss_device *dssdev)
 {
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       return td->mirror;
+       int r;
+
+       mutex_lock(&td->lock);
+       r = td->mirror;
+       mutex_unlock(&td->lock);
+
+       return r;
 }
 
 static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
 {
+       struct taal_data *td = dev_get_drvdata(&dssdev->dev);
        u8 id1, id2, id3;
        int r;
 
+       mutex_lock(&td->lock);
        dsi_bus_lock();
 
        r = taal_dcs_read_1(DCS_GET_ID1, &id1);
@@ -955,9 +1037,11 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
                goto err;
 
        dsi_bus_unlock();
+       mutex_unlock(&td->lock);
        return 0;
 err:
        dsi_bus_unlock();
+       mutex_unlock(&td->lock);
        return r;
 }
 
@@ -971,12 +1055,16 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
        unsigned buf_used = 0;
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
 
-       if (!td->enabled)
-               return -ENODEV;
-
        if (size < w * h * 3)
                return -ENOMEM;
 
+       mutex_lock(&td->lock);
+
+       if (!td->enabled) {
+               r = -ENODEV;
+               goto err1;
+       }
+
        size = min(w * h * 3,
                        dssdev->panel.timings.x_res *
                        dssdev->panel.timings.y_res * 3);
@@ -995,7 +1083,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
 
        r = dsi_vc_set_max_rx_packet_size(TCH, plen);
        if (r)
-               goto err0;
+               goto err2;
 
        while (buf_used < size) {
                u8 dcs_cmd = first ? 0x2e : 0x3e;
@@ -1006,7 +1094,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
 
                if (r < 0) {
                        dev_err(&dssdev->dev, "read error\n");
-                       goto err;
+                       goto err3;
                }
 
                buf_used += r;
@@ -1020,16 +1108,18 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
                        dev_err(&dssdev->dev, "signal pending, "
                                        "aborting memory read\n");
                        r = -ERESTARTSYS;
-                       goto err;
+                       goto err3;
                }
        }
 
        r = buf_used;
 
-err:
+err3:
        dsi_vc_set_max_rx_packet_size(TCH, 1);
-err0:
+err2:
        dsi_bus_unlock();
+err1:
+       mutex_unlock(&td->lock);
        return r;
 }
 
@@ -1041,8 +1131,12 @@ static void taal_esd_work(struct work_struct *work)
        u8 state1, state2;
        int r;
 
-       if (!td->enabled)
+       mutex_lock(&td->lock);
+
+       if (!td->enabled) {
+               mutex_unlock(&td->lock);
                return;
+       }
 
        dsi_bus_lock();
 
@@ -1084,16 +1178,19 @@ static void taal_esd_work(struct work_struct *work)
 
        queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
 
+       mutex_unlock(&td->lock);
        return;
 err:
        dev_err(&dssdev->dev, "performing LCD reset\n");
 
-       taal_disable(dssdev);
-       taal_enable(dssdev);
+       taal_power_off(dssdev);
+       taal_power_on(dssdev);
 
        dsi_bus_unlock();
 
        queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+
+       mutex_unlock(&td->lock);
 }
 
 static int taal_set_update_mode(struct omap_dss_device *dssdev,
index 87afb81..43b6440 100644
@@ -36,6 +36,12 @@ config OMAP2_DSS_COLLECT_IRQ_STATS
          <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
          <debugfs>/omapdss/dsi_irq for DSI interrupts.
 
+config OMAP2_DSS_DPI
+       bool "DPI support"
+       default y
+       help
+         DPI Interface. This is the Parallel Display Interface.
+
 config OMAP2_DSS_RFBI
        bool "RFBI support"
         default n
index 980c72c..d71b5d9 100644
@@ -1,5 +1,6 @@
 obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-omapdss-y := core.o dss.o dispc.o dpi.o display.o manager.o overlay.o
+omapdss-y := core.o dss.o dispc.o display.o manager.o overlay.o
+omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
 omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
 omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
 omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
index 7ebe50b..b3a498f 100644
@@ -482,6 +482,14 @@ static void dss_uninitialize_debugfs(void)
        if (dss_debugfs_dir)
                debugfs_remove_recursive(dss_debugfs_dir);
 }
+#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
+static inline int dss_initialize_debugfs(void)
+{
+       return 0;
+}
+static inline void dss_uninitialize_debugfs(void)
+{
+}
 #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 
 /* PLATFORM DEVICE */
@@ -499,7 +507,7 @@ static int omap_dss_probe(struct platform_device *pdev)
 
        r = dss_get_clocks();
        if (r)
-               goto fail0;
+               goto err_clocks;
 
        dss_clk_enable_all_no_ctx();
 
@@ -515,64 +523,64 @@ static int omap_dss_probe(struct platform_device *pdev)
        r = dss_init(skip_init);
        if (r) {
                DSSERR("Failed to initialize DSS\n");
-               goto fail0;
+               goto err_dss;
        }
 
-#ifdef CONFIG_OMAP2_DSS_RFBI
        r = rfbi_init();
        if (r) {
                DSSERR("Failed to initialize rfbi\n");
-               goto fail0;
+               goto err_rfbi;
        }
-#endif
 
        r = dpi_init(pdev);
        if (r) {
                DSSERR("Failed to initialize dpi\n");
-               goto fail0;
+               goto err_dpi;
        }
 
        r = dispc_init();
        if (r) {
                DSSERR("Failed to initialize dispc\n");
-               goto fail0;
+               goto err_dispc;
        }
-#ifdef CONFIG_OMAP2_DSS_VENC
+
        r = venc_init(pdev);
        if (r) {
                DSSERR("Failed to initialize venc\n");
-               goto fail0;
+               goto err_venc;
        }
-#endif
+
        if (cpu_is_omap34xx()) {
-#ifdef CONFIG_OMAP2_DSS_SDI
                r = sdi_init(skip_init);
                if (r) {
                        DSSERR("Failed to initialize SDI\n");
-                       goto fail0;
+                       goto err_sdi;
                }
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
+
                r = dsi_init(pdev);
                if (r) {
                        DSSERR("Failed to initialize DSI\n");
-                       goto fail0;
+                       goto err_dsi;
                }
-#endif
        }
 
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
        r = dss_initialize_debugfs();
        if (r)
-               goto fail0;
-#endif
+               goto err_debugfs;
 
        for (i = 0; i < pdata->num_devices; ++i) {
                struct omap_dss_device *dssdev = pdata->devices[i];
 
                r = omap_dss_register_device(dssdev);
-               if (r)
-                       DSSERR("device reg failed %d\n", i);
+               if (r) {
+                       DSSERR("device %d %s register failed %d\n", i,
+                               dssdev->name ?: "unnamed", r);
+
+                       while (--i >= 0)
+                               omap_dss_unregister_device(pdata->devices[i]);
+
+                       goto err_register;
+               }
 
                if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
                        pdata->default_device = dssdev;
@@ -582,8 +590,29 @@ static int omap_dss_probe(struct platform_device *pdev)
 
        return 0;
 
-       /* XXX fail correctly */
-fail0:
+err_register:
+       dss_uninitialize_debugfs();
+err_debugfs:
+       if (cpu_is_omap34xx())
+               dsi_exit();
+err_dsi:
+       if (cpu_is_omap34xx())
+               sdi_exit();
+err_sdi:
+       venc_exit();
+err_venc:
+       dispc_exit();
+err_dispc:
+       dpi_exit();
+err_dpi:
+       rfbi_exit();
+err_rfbi:
+       dss_exit();
+err_dss:
+       dss_clk_disable_all_no_ctx();
+       dss_put_clocks();
+err_clocks:
+
        return r;
 }
 
@@ -593,25 +622,15 @@ static int omap_dss_remove(struct platform_device *pdev)
        int i;
        int c;
 
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
        dss_uninitialize_debugfs();
-#endif
 
-#ifdef CONFIG_OMAP2_DSS_VENC
        venc_exit();
-#endif
        dispc_exit();
        dpi_exit();
-#ifdef CONFIG_OMAP2_DSS_RFBI
        rfbi_exit();
-#endif
        if (cpu_is_omap34xx()) {
-#ifdef CONFIG_OMAP2_DSS_DSI
                dsi_exit();
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
                sdi_exit();
-#endif
        }
 
        dss_exit();
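The probe rewrite replaces the old catch-all fail0 label (flagged /* XXX fail correctly */, and leaking everything already initialized) with the usual reverse-order unwind ladder. The idiom in miniature, with hypothetical subsystems standing in for rfbi/dpi/dispc and friends:

/* hypothetical subsystems, stubbed so the sketch is self-contained */
static int subsys_a_init(void) { return 0; }
static void subsys_a_exit(void) { }
static int subsys_b_init(void) { return 0; }

static int example_probe(void)
{
	int r;

	r = subsys_a_init();
	if (r)
		goto err_a;
	r = subsys_b_init();
	if (r)
		goto err_b;
	return 0;

err_b:
	subsys_a_exit();	/* undo in strict reverse order */
err_a:
	return r;
}

Each label undoes exactly the steps that succeeded before the failing one, which is why the labels in the real probe read top-to-bottom as the mirror image of the init sequence.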
index 6a74ea1..ef8c852 100644
@@ -392,7 +392,9 @@ void dss_init_device(struct platform_device *pdev,
        int r;
 
        switch (dssdev->type) {
+#ifdef CONFIG_OMAP2_DSS_DPI
        case OMAP_DISPLAY_TYPE_DPI:
+#endif
 #ifdef CONFIG_OMAP2_DSS_RFBI
        case OMAP_DISPLAY_TYPE_DBI:
 #endif
@@ -413,9 +415,11 @@ void dss_init_device(struct platform_device *pdev,
        }
 
        switch (dssdev->type) {
+#ifdef CONFIG_OMAP2_DSS_DPI
        case OMAP_DISPLAY_TYPE_DPI:
                r = dpi_init_display(dssdev);
                break;
+#endif
 #ifdef CONFIG_OMAP2_DSS_RFBI
        case OMAP_DISPLAY_TYPE_DBI:
                r = rfbi_init_display(dssdev);
@@ -541,7 +545,10 @@ int dss_resume_all_devices(void)
 static int dss_disable_device(struct device *dev, void *data)
 {
        struct omap_dss_device *dssdev = to_dss_device(dev);
-       dssdev->driver->disable(dssdev);
+
+       if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+               dssdev->driver->disable(dssdev);
+
        return 0;
 }
 
index 5434418..24b1825 100644
@@ -223,7 +223,13 @@ void dss_dump_clocks(struct seq_file *s)
 
        seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
 
-       seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
+       if (cpu_is_omap3630())
+               seq_printf(s, "dss1_alwon_fclk = %lu / %lu  = %lu\n",
+                       dpll4_ck_rate,
+                       dpll4_ck_rate / dpll4_m4_ck_rate,
+                       dss_clk_get_rate(DSS_CLK_FCK1));
+       else
+               seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
                        dpll4_ck_rate,
                        dpll4_ck_rate / dpll4_m4_ck_rate,
                        dss_clk_get_rate(DSS_CLK_FCK1));
@@ -293,7 +299,8 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
 {
        unsigned long prate;
 
-       if (cinfo->fck_div > 16 || cinfo->fck_div == 0)
+       if (cinfo->fck_div > (cpu_is_omap3630() ? 32 : 16) ||
+                                               cinfo->fck_div == 0)
                return -EINVAL;
 
        prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
@@ -329,7 +336,10 @@ int dss_get_clock_div(struct dss_clock_info *cinfo)
        if (cpu_is_omap34xx()) {
                unsigned long prate;
                prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
-               cinfo->fck_div = prate / (cinfo->fck / 2);
+               if (cpu_is_omap3630())
+                       cinfo->fck_div = prate / (cinfo->fck);
+               else
+                       cinfo->fck_div = prate / (cinfo->fck / 2);
        } else {
                cinfo->fck_div = 0;
        }
@@ -402,10 +412,14 @@ retry:
 
                goto found;
        } else if (cpu_is_omap34xx()) {
-               for (fck_div = 16; fck_div > 0; --fck_div) {
+               for (fck_div = (cpu_is_omap3630() ? 32 : 16);
+                                       fck_div > 0; --fck_div) {
                        struct dispc_clock_info cur_dispc;
 
-                       fck = prate / fck_div * 2;
+                       if (cpu_is_omap3630())
+                               fck = prate / fck_div;
+                       else
+                               fck = prate / fck_div * 2;
 
                        if (fck > DISPC_MAX_FCK)
                                continue;
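All three omap3630 branches encode one hardware difference: on OMAP34xx the DSS functional clock is the DPLL4 rate / M4 divider x 2, while the 3630 drops the fixed doubler. Hence fck = prate / fck_div with no * 2, the divider is recovered without halving fck, and the maximum divider searched doubles from 16 to 32 so comparably low pixel clocks stay reachable.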
index 24326a5..786f433 100644
@@ -242,11 +242,22 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
                struct dispc_clock_info *dispc_cinfo);
 
 /* SDI */
+#ifdef CONFIG_OMAP2_DSS_SDI
 int sdi_init(bool skip_init);
 void sdi_exit(void);
 int sdi_init_display(struct omap_dss_device *display);
+#else
+static inline int sdi_init(bool skip_init)
+{
+       return 0;
+}
+static inline void sdi_exit(void)
+{
+}
+#endif
 
 /* DSI */
+#ifdef CONFIG_OMAP2_DSS_DSI
 int dsi_init(struct platform_device *pdev);
 void dsi_exit(void);
 
@@ -270,11 +281,30 @@ void dsi_pll_uninit(void);
 void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
                u32 fifo_size, enum omap_burst_size *burst_size,
                u32 *fifo_low, u32 *fifo_high);
+#else
+static inline int dsi_init(struct platform_device *pdev)
+{
+       return 0;
+}
+static inline void dsi_exit(void)
+{
+}
+#endif
 
 /* DPI */
+#ifdef CONFIG_OMAP2_DSS_DPI
 int dpi_init(struct platform_device *pdev);
 void dpi_exit(void);
 int dpi_init_display(struct omap_dss_device *dssdev);
+#else
+static inline int dpi_init(struct platform_device *pdev)
+{
+       return 0;
+}
+static inline void dpi_exit(void)
+{
+}
+#endif
 
 /* DISPC */
 int dispc_init(void);
@@ -362,12 +392,23 @@ int dispc_get_clock_div(struct dispc_clock_info *cinfo);
 
 
 /* VENC */
+#ifdef CONFIG_OMAP2_DSS_VENC
 int venc_init(struct platform_device *pdev);
 void venc_exit(void);
 void venc_dump_regs(struct seq_file *s);
 int venc_init_display(struct omap_dss_device *display);
+#else
+static inline int venc_init(struct platform_device *pdev)
+{
+       return 0;
+}
+static inline void venc_exit(void)
+{
+}
+#endif
 
 /* RFBI */
+#ifdef CONFIG_OMAP2_DSS_RFBI
 int rfbi_init(void);
 void rfbi_exit(void);
 void rfbi_dump_regs(struct seq_file *s);
@@ -379,6 +420,15 @@ void rfbi_transfer_area(u16 width, u16 height,
 void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
 unsigned long rfbi_get_max_tx_rate(void);
 int rfbi_init_display(struct omap_dss_device *display);
+#else
+static inline int rfbi_init(void)
+{
+       return 0;
+}
+static inline void rfbi_exit(void)
+{
+}
+#endif
 
 
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
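These stubs are the other half of the core.c cleanup: each optional block (SDI, DSI, DPI, VENC, RFBI) now pairs its real prototypes with empty static inline fallbacks in the #else branch, so omap_dss_probe()/omap_dss_remove() can call every init/exit unconditionally and the #ifdef clutter stays confined to this header.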
index 0820986..9e1fbe5 100644
@@ -843,6 +843,7 @@ static void configure_manager(enum omap_channel channel)
 
        c = &dss_cache.manager_cache[channel];
 
+       dispc_set_default_color(channel, c->default_color);
        dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
        dispc_enable_trans_key(channel, c->trans_enabled);
        dispc_enable_alpha_blending(channel, c->alpha_enabled);
@@ -940,6 +941,22 @@ static int configure_dispc(void)
        return r;
 }
 
+/* Make the coordinates even. There are some strange problems with OMAP and
+ * partial DSI update when the update widths are odd. */
+static void make_even(u16 *x, u16 *w)
+{
+       u16 x1, x2;
+
+       x1 = *x;
+       x2 = *x + *w;
+
+       x1 &= ~1;
+       x2 = ALIGN(x2, 2);
+
+       *x = x1;
+       *w = x2 - x1;
+}
+
 /* Configure dispc for partial update. Return possibly modified update
  * area */
 void dss_setup_partial_planes(struct omap_dss_device *dssdev,
@@ -968,6 +985,8 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
                return;
        }
 
+       make_even(&x, &w);
+
        spin_lock_irqsave(&dss_cache.lock, flags);
 
        /* We need to show the whole overlay if it is scaled. So look for
@@ -1029,6 +1048,8 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
                w = x2 - x1;
                h = y2 - y1;
 
+               make_even(&x, &w);
+
                DSSDBG("changing upd area due to ovl(%d) scaling %d,%d %dx%d\n",
                                i, x, y, w, h);
        }
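Worked through make_even(): a requested update of x = 3, w = 5 covers pixels 3..7; x becomes 3 & ~1 = 2 and the right edge becomes ALIGN(8, 2) = 8, so w = 6 and the window spans 2..7. The region only ever grows outward, which is why it can be applied both to the caller-supplied area and again after the scaling adjustments without losing any requested pixels.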
index 12eb404..ee07a3c 100644
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/regulator/consumer.h>
 
 #include <plat/display.h>
+#include <plat/cpu.h>
 #include "dss.h"
 
 static struct {
        bool skip_init;
        bool update_enabled;
+       struct regulator *vdds_sdi_reg;
 } sdi;
 
 static void sdi_basic_init(void)
@@ -57,6 +60,10 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
                goto err0;
        }
 
+       r = regulator_enable(sdi.vdds_sdi_reg);
+       if (r)
+               goto err1;
+
        /* In case of skip_init sdi_init has already enabled the clocks */
        if (!sdi.skip_init)
                dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -115,19 +122,12 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
 
        dssdev->manager->enable(dssdev->manager);
 
-       if (dssdev->driver->enable) {
-               r = dssdev->driver->enable(dssdev);
-               if (r)
-                       goto err3;
-       }
-
        sdi.skip_init = 0;
 
        return 0;
-err3:
-       dssdev->manager->disable(dssdev->manager);
 err2:
        dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+       regulator_disable(sdi.vdds_sdi_reg);
 err1:
        omap_dss_stop_device(dssdev);
 err0:
@@ -137,15 +137,14 @@ EXPORT_SYMBOL(omapdss_sdi_display_enable);
 
 void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
 {
-       if (dssdev->driver->disable)
-               dssdev->driver->disable(dssdev);
-
        dssdev->manager->disable(dssdev->manager);
 
        dss_sdi_disable();
 
        dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
 
+       regulator_disable(sdi.vdds_sdi_reg);
+
        omap_dss_stop_device(dssdev);
 }
 EXPORT_SYMBOL(omapdss_sdi_display_disable);
@@ -162,6 +161,11 @@ int sdi_init(bool skip_init)
        /* we store this for first display enable, then clear it */
        sdi.skip_init = skip_init;
 
+       sdi.vdds_sdi_reg = dss_get_vdds_sdi();
+       if (IS_ERR(sdi.vdds_sdi_reg)) {
+               DSSERR("can't get VDDS_SDI regulator\n");
+               return PTR_ERR(sdi.vdds_sdi_reg);
+       }
        /*
         * Enable the clocks already here, otherwise they would toggle
         * on and off until sdi_display_enable is called.
index f0ba573..eff3505 100644 (file)
@@ -479,12 +479,6 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
                goto err1;
        }
 
-       if (dssdev->platform_enable) {
-               r = dssdev->platform_enable(dssdev);
-               if (r)
-                       goto err2;
-       }
-
        venc_power_on(dssdev);
 
        venc.wss_data = 0;
@@ -494,13 +488,9 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
        /* wait a couple of vsyncs until enabling the LCD */
        msleep(50);
 
-       mutex_unlock(&venc.venc_lock);
-
-       return r;
-err2:
-       venc_power_off(dssdev);
 err1:
        mutex_unlock(&venc.venc_lock);
+
        return r;
 }
 
@@ -524,9 +514,6 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
        /* wait at least 5 vsyncs after disabling the LCD */
        msleep(100);
 
-       if (dssdev->platform_disable)
-               dssdev->platform_disable(dssdev);
-
        dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
 end:
        mutex_unlock(&venc.venc_lock);
index 1ffa760..9c73618 100644 (file)
@@ -183,13 +183,14 @@ int omapfb_update_window(struct fb_info *fbi,
        struct omapfb2_device *fbdev = ofbi->fbdev;
        int r;
 
+       if (!lock_fb_info(fbi))
+               return -ENODEV;
        omapfb_lock(fbdev);
-       lock_fb_info(fbi);
 
        r = omapfb_update_window_nolock(fbi, x, y, w, h);
 
-       unlock_fb_info(fbi);
        omapfb_unlock(fbdev);
+       unlock_fb_info(fbi);
 
        return r;
 }
index 62bb88f..5179219 100644 (file)
@@ -57,7 +57,8 @@ static ssize_t store_rotate_type(struct device *dev,
        if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
                return -EINVAL;
 
-       lock_fb_info(fbi);
+       if (!lock_fb_info(fbi))
+               return -ENODEV;
 
        r = 0;
        if (rot_type == ofbi->rotation_type)
@@ -105,7 +106,8 @@ static ssize_t store_mirror(struct device *dev,
        if (mirror != 0 && mirror != 1)
                return -EINVAL;
 
-       lock_fb_info(fbi);
+       if (!lock_fb_info(fbi))
+               return -ENODEV;
 
        ofbi->mirror = mirror;
 
@@ -137,8 +139,9 @@ static ssize_t show_overlays(struct device *dev,
        ssize_t l = 0;
        int t;
 
+       if (!lock_fb_info(fbi))
+               return -ENODEV;
        omapfb_lock(fbdev);
-       lock_fb_info(fbi);
 
        for (t = 0; t < ofbi->num_overlays; t++) {
                struct omap_overlay *ovl = ofbi->overlays[t];
@@ -154,8 +157,8 @@ static ssize_t show_overlays(struct device *dev,
 
        l += snprintf(buf + l, PAGE_SIZE - l, "\n");
 
-       unlock_fb_info(fbi);
        omapfb_unlock(fbdev);
+       unlock_fb_info(fbi);
 
        return l;
 }
@@ -195,8 +198,9 @@ static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
        if (buf[len - 1] == '\n')
                len = len - 1;
 
+       if (!lock_fb_info(fbi))
+               return -ENODEV;
        omapfb_lock(fbdev);
-       lock_fb_info(fbi);
 
        if (len > 0) {
                char *p = (char *)buf;
@@ -303,8 +307,8 @@ static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
 
        r = count;
 out:
-       unlock_fb_info(fbi);
        omapfb_unlock(fbdev);
+       unlock_fb_info(fbi);
 
        return r;
 }
@@ -317,7 +321,8 @@ static ssize_t show_overlays_rotate(struct device *dev,
        ssize_t l = 0;
        int t;
 
-       lock_fb_info(fbi);
+       if (!lock_fb_info(fbi))
+               return -ENODEV;
 
        for (t = 0; t < ofbi->num_overlays; t++) {
                l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
@@ -345,7 +350,8 @@ static ssize_t store_overlays_rotate(struct device *dev,
        if (buf[len - 1] == '\n')
                len = len - 1;
 
-       lock_fb_info(fbi);
+       if (!lock_fb_info(fbi))
+               return -ENODEV;
 
        if (len > 0) {
                char *p = (char *)buf;
@@ -416,7 +422,8 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
 
        size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0));
 
-       lock_fb_info(fbi);
+       if (!lock_fb_info(fbi))
+               return -ENODEV;
 
        for (i = 0; i < ofbi->num_overlays; i++) {
                if (ofbi->overlays[i]->info.enabled) {
index 0cadf7a..090aa1a 100644 (file)
@@ -177,7 +177,7 @@ static void vesafb_destroy(struct fb_info *info)
 {
        if (info->screen_base)
                iounmap(info->screen_base);
-       release_mem_region(info->aperture_base, info->aperture_size);
+       release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
        framebuffer_release(info);
 }
 
@@ -295,8 +295,13 @@ static int __init vesafb_probe(struct platform_device *dev)
        info->par = NULL;
 
        /* set vesafb aperture size for generic probing */
-       info->aperture_base = screen_info.lfb_base;
-       info->aperture_size = size_total;
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures) {
+               err = -ENOMEM;
+               goto err;
+       }
+       info->apertures->ranges[0].base = screen_info.lfb_base;
+       info->apertures->ranges[0].size = size_total;
 
        info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
        if (!info->screen_base) {
index bf638a4..149c47a 100644 (file)
@@ -1263,10 +1263,19 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
                vga_imageblit_color(info, image);
 }
 
+static void vga16fb_destroy(struct fb_info *info)
+{
+       iounmap(info->screen_base);
+       fb_dealloc_cmap(&info->cmap);
+       /* XXX unshare VGA regions */
+       framebuffer_release(info);
+}
+
 static struct fb_ops vga16fb_ops = {
        .owner          = THIS_MODULE,
        .fb_open        = vga16fb_open,
        .fb_release     = vga16fb_release,
+       .fb_destroy     = vga16fb_destroy,
        .fb_check_var   = vga16fb_check_var,
        .fb_set_par     = vga16fb_set_par,
        .fb_setcolreg   = vga16fb_setcolreg,
@@ -1306,6 +1315,11 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
                ret = -ENOMEM;
                goto err_fb_alloc;
        }
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures) {
+               ret = -ENOMEM;
+               goto err_ioremap;
+       }
 
        /* XXX share VGA_FB_PHYS and I/O region with vgacon and others */
        info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0);
@@ -1335,7 +1349,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
        info->fix = vga16fb_fix;
        /* supports rectangles with widths of multiples of 8 */
        info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31;
-       info->flags = FBINFO_FLAG_DEFAULT |
+       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
                FBINFO_HWACCEL_YPAN;
 
        i = (info->var.bits_per_pixel == 8) ? 256 : 16;
@@ -1354,6 +1368,9 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
 
        vga16fb_update_fix(info);
 
+       info->apertures->ranges[0].base = VGA_FB_PHYS;
+       info->apertures->ranges[0].size = VGA_FB_PHYS_LEN;
+
        if (register_framebuffer(info) < 0) {
                printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
                ret = -EINVAL;
@@ -1380,13 +1397,8 @@ static int vga16fb_remove(struct platform_device *dev)
 {
        struct fb_info *info = platform_get_drvdata(dev);
 
-       if (info) {
+       if (info)
                unregister_framebuffer(info);
-               iounmap(info->screen_base);
-               fb_dealloc_cmap(&info->cmap);
-       /* XXX unshare VGA regions */
-               framebuffer_release(info);
-       }
 
        return 0;
 }
index f0f2a43..3a84455 100644 (file)
@@ -209,6 +209,6 @@ static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
 const struct file_operations udf_dir_operations = {
        .read                   = generic_read_dir,
        .readdir                = udf_readdir,
-       .ioctl                  = udf_ioctl,
+       .unlocked_ioctl         = udf_ioctl,
        .fsync                  = simple_fsync,
 };
index 6ebc043..baae3a7 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
 #include <linux/aio.h>
+#include <linux/smp_lock.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -144,50 +145,60 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        return retval;
 }
 
-int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
-             unsigned long arg)
+long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
+       struct inode *inode = filp->f_dentry->d_inode;
        long old_block, new_block;
        int result = -EINVAL;
 
+       lock_kernel();
+
        if (file_permission(filp, MAY_READ) != 0) {
-               udf_debug("no permission to access inode %lu\n",
-                         inode->i_ino);
-               return -EPERM;
+               udf_debug("no permission to access inode %lu\n", inode->i_ino);
+               result = -EPERM;
+               goto out;
        }
 
        if (!arg) {
                udf_debug("invalid argument to udf_ioctl\n");
-               return -EINVAL;
+               result = -EINVAL;
+               goto out;
        }
 
        switch (cmd) {
        case UDF_GETVOLIDENT:
                if (copy_to_user((char __user *)arg,
                                 UDF_SB(inode->i_sb)->s_volume_ident, 32))
-                       return -EFAULT;
+                       result = -EFAULT;
                else
-                       return 0;
+                       result = 0;
+               goto out;
        case UDF_RELOCATE_BLOCKS:
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EACCES;
-               if (get_user(old_block, (long __user *)arg))
-                       return -EFAULT;
+               if (!capable(CAP_SYS_ADMIN)) {
+                       result = -EACCES;
+                       goto out;
+               }
+               if (get_user(old_block, (long __user *)arg)) {
+                       result = -EFAULT;
+                       goto out;
+               }
                result = udf_relocate_blocks(inode->i_sb,
                                                old_block, &new_block);
                if (result == 0)
                        result = put_user(new_block, (long __user *)arg);
-               return result;
+               goto out;
        case UDF_GETEASIZE:
                result = put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
-               break;
+               goto out;
        case UDF_GETEABLOCK:
                result = copy_to_user((char __user *)arg,
                                      UDF_I(inode)->i_ext.i_data,
                                      UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
-               break;
+               goto out;
        }
 
+out:
+       unlock_kernel();
        return result;
 }
 
@@ -207,7 +218,7 @@ static int udf_release_file(struct inode *inode, struct file *filp)
 const struct file_operations udf_file_operations = {
        .read                   = do_sync_read,
        .aio_read               = generic_file_aio_read,
-       .ioctl                  = udf_ioctl,
+       .unlocked_ioctl         = udf_ioctl,
        .open                   = dquot_file_open,
        .mmap                   = generic_file_mmap,
        .write                  = do_sync_write,
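
The UDF change above follows the generic recipe of the time for moving a filesystem from .ioctl to .unlocked_ioctl: derive the inode from the file, and take the BKL explicitly inside the handler instead of relying on the VFS to do it. A minimal sketch of that recipe, with my_ioctl_locked() as a hypothetical helper:

	static long my_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	{
		struct inode *inode = filp->f_dentry->d_inode;
		long ret;

		lock_kernel();	/* keep the old BKL-protected semantics for now */
		ret = my_ioctl_locked(inode, filp, cmd, arg);
		unlock_kernel();

		return ret;
	}
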
index 702a114..9079ff7 100644 (file)
@@ -130,8 +130,7 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
                        uint8_t *, uint8_t *);
 
 /* file.c */
-extern int udf_ioctl(struct inode *, struct file *, unsigned int,
-                    unsigned long);
+extern long udf_ioctl(struct file *, unsigned int, unsigned long);
 extern int udf_setattr(struct dentry *dentry, struct iattr *iattr);
 /* inode.c */
 extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
index e5f234a..97e807c 100644 (file)
@@ -28,7 +28,8 @@ KMAP_D(15)    KM_UML_USERCOPY,
 KMAP_D(16)     KM_IRQ_PTE,
 KMAP_D(17)     KM_NMI,
 KMAP_D(18)     KM_NMI_PTE,
-KMAP_D(19)     KM_TYPE_NR
+KMAP_D(19)     KM_KDB,
+KMAP_D(20)     KM_TYPE_NR
 };
 
 #undef KMAP_D
index fc0d575..59c3e5b 100644 (file)
@@ -103,6 +103,23 @@ struct blkcipher_walk {
        unsigned int blocksize;
 };
 
+struct ablkcipher_walk {
+       struct {
+               struct page *page;
+               unsigned int offset;
+       } src, dst;
+
+       struct scatter_walk     in;
+       unsigned int            nbytes;
+       struct scatter_walk     out;
+       unsigned int            total;
+       struct list_head        buffers;
+       u8                      *iv_buffer;
+       u8                      *iv;
+       int                     flags;
+       unsigned int            blocksize;
+};
+
 extern const struct crypto_type crypto_ablkcipher_type;
 extern const struct crypto_type crypto_aead_type;
 extern const struct crypto_type crypto_blkcipher_type;
@@ -173,6 +190,12 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize);
 
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+                        struct ablkcipher_walk *walk, int err);
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+                        struct ablkcipher_walk *walk);
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
+
 static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
 {
        return PTR_ALIGN(crypto_tfm_ctx(tfm),
@@ -283,6 +306,23 @@ static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
        walk->total = nbytes;
 }
 
+static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
+                                       struct scatterlist *dst,
+                                       struct scatterlist *src,
+                                       unsigned int nbytes)
+{
+       walk->in.sg = src;
+       walk->out.sg = dst;
+       walk->total = nbytes;
+       INIT_LIST_HEAD(&walk->buffers);
+}
+
+static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+       if (unlikely(!list_empty(&walk->buffers)))
+               __ablkcipher_walk_complete(walk);
+}
+
 static inline struct crypto_async_request *crypto_get_backlog(
        struct crypto_queue *queue)
 {
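
A hedged sketch of how a driver might drive the new ablkcipher walk, by analogy with the established blkcipher walk loop; mydrv_process_chunk() is hypothetical and stands in for the hardware-specific work on the walk.src/walk.dst pages:

	struct ablkcipher_walk walk;
	int err;

	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, &walk);
	while (!err && walk.nbytes) {
		mydrv_process_chunk(&walk);		/* consume one chunk */
		err = ablkcipher_walk_done(req, &walk, 0);
	}
	ablkcipher_walk_complete(&walk);		/* free any bounce buffers */
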
index 2f3b3a0..c1b9871 100644 (file)
@@ -1428,10 +1428,13 @@ extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 /* Graphics Execution Manager library functions (drm_gem.c) */
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
+void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
 void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                            size_t size);
+int drm_gem_object_init(struct drm_device *dev,
+                       struct drm_gem_object *obj, size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
index 1347524..93a1a31 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/idr.h>
 
 #include <linux/fb.h>
+#include <linux/slow-work.h>
 
 struct drm_device;
 struct drm_mode_set;
@@ -271,8 +272,6 @@ struct drm_framebuffer {
        unsigned int depth;
        int bits_per_pixel;
        int flags;
-       struct fb_info *fbdev;
-       u32 pseudo_palette[17];
        struct list_head filp_head;
        /* if you are using the helper */
        void *helper_private;
@@ -369,9 +368,6 @@ struct drm_crtc_funcs {
  * @enabled: is this CRTC enabled?
  * @x: x position on screen
  * @y: y position on screen
- * @desired_mode: new desired mode
- * @desired_x: desired x for desired_mode
- * @desired_y: desired y for desired_mode
  * @funcs: CRTC control functions
  *
  * Each CRTC may have one or more connectors associated with it.  This structure
@@ -391,8 +387,6 @@ struct drm_crtc {
        struct drm_display_mode mode;
 
        int x, y;
-       struct drm_display_mode *desired_mode;
-       int desired_x, desired_y;
        const struct drm_crtc_funcs *funcs;
 
        /* CRTC gamma size for reporting to userspace */
@@ -467,6 +461,15 @@ enum drm_connector_force {
        DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
 };
 
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
 /**
  * drm_connector - central DRM connector control structure
  * @crtc: CRTC this connector is currently connected to, NULL if none
@@ -511,6 +514,8 @@ struct drm_connector {
        u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
        uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
 
+       uint8_t polled; /* DRM_CONNECTOR_POLL_* */
+
        /* requested DPMS state */
        int dpms;
 
@@ -521,7 +526,6 @@ struct drm_connector {
        uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
        uint32_t force_encoder_id;
        struct drm_encoder *encoder; /* currently active encoder */
-       void *fb_helper_private;
 };
 
 /**
@@ -548,16 +552,10 @@ struct drm_mode_set {
 
 /**
  * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
- * @resize: adjust CRTCs as necessary for the proposed layout
- *
- * Currently only a resize hook is available.  DRM will call back into the
- * driver with a new screen width and height.  If the driver can't support
- * the proposed size, it can return false.  Otherwise it should adjust
- * the CRTC<->connector mappings as needed and update its view of the screen.
  */
 struct drm_mode_config_funcs {
        struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
-       int (*fb_changed)(struct drm_device *dev);
+       void (*output_poll_changed)(struct drm_device *dev);
 };
 
 struct drm_mode_group {
@@ -590,14 +588,15 @@ struct drm_mode_config {
 
        struct list_head property_list;
 
-       /* in-kernel framebuffers - hung of filp_head in drm_framebuffer */
-       struct list_head fb_kernel_list;
-
        int min_width, min_height;
        int max_width, max_height;
        struct drm_mode_config_funcs *funcs;
        resource_size_t fb_base;
 
+       /* output poll support */
+       bool poll_enabled;
+       struct delayed_slow_work output_poll_slow_work;
+
        /* pointers to standard properties */
        struct list_head property_blob_list;
        struct drm_property *edid_property;
@@ -666,8 +665,6 @@ extern void drm_fb_release(struct drm_file *file_priv);
 extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
                                 struct i2c_adapter *adapter);
-extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
-                                unsigned char *buf, int len);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
 extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
@@ -799,8 +796,14 @@ extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
 extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
                                int hdisplay, int vdisplay, int vrefresh,
                                bool interlaced, int margins);
+extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+                               int hdisplay, int vdisplay, int vrefresh,
+                               bool interlaced, int margins, int GTF_M,
+                               int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
                                int hdisplay, int vdisplay);
 
 extern bool drm_edid_is_valid(struct edid *edid);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+                                          int hsize, int vsize, int fresh);
 #endif /* __DRM_CRTC_H__ */
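
A plausible use of the new polling flags, sketched for a hypothetical driver: a connector with a reliable hotplug line asks only for HPD handling, while a DAC-driven analog output, which cannot detect disconnects without flicker, polls for connects only. The helper layer then services these flags from the delayed slow-work item added to drm_mode_config above.

	dvi_connector->polled = DRM_CONNECTOR_POLL_HPD;
	vga_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
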
index b29e201..dc5873c 100644 (file)
@@ -39,7 +39,6 @@
 
 #include <linux/fb.h>
 
-#include "drm_fb_helper.h"
 struct drm_crtc_helper_funcs {
        /*
         * Control power levels on the CRTC.  If the mode passed in is
@@ -96,8 +95,6 @@ struct drm_connector_helper_funcs {
 
 extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
-extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
-extern bool drm_helper_initial_config(struct drm_device *dev);
 extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
 extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
                                     struct drm_display_mode *mode,
@@ -123,12 +120,14 @@ static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
        encoder->helper_private = (void *)funcs;
 }
 
-static inline int drm_connector_helper_add(struct drm_connector *connector,
+static inline void drm_connector_helper_add(struct drm_connector *connector,
                                            const struct drm_connector_helper_funcs *funcs)
 {
        connector->helper_private = (void *)funcs;
-       return drm_fb_helper_add_connector(connector);
 }
 
 extern int drm_helper_resume_force_mode(struct drm_device *dev);
+extern void drm_kms_helper_poll_init(struct drm_device *dev);
+extern void drm_kms_helper_poll_fini(struct drm_device *dev);
+extern void drm_helper_hpd_irq_event(struct drm_device *dev);
 #endif
index b420989..39e2cc5 100644 (file)
@@ -120,7 +120,7 @@ struct detailed_non_pixel {
                struct detailed_data_string str;
                struct detailed_data_monitor_range range;
                struct detailed_data_wpindex color;
-               struct std_timing timings[5];
+               struct std_timing timings[6];
                struct cvt_timing cvt[4];
        } data;
 } __attribute__((packed));
@@ -201,7 +201,4 @@ struct edid {
 
 #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
 
-/* define the number of Extension EDID block */
-#define DRM_MAX_EDID_EXT_NUM 4
-
 #endif /* __DRM_EDID_H__ */
index 58c892a..f0a6afc 100644 (file)
 #ifndef DRM_FB_HELPER_H
 #define DRM_FB_HELPER_H
 
+struct drm_fb_helper;
+
 struct drm_fb_helper_crtc {
        uint32_t crtc_id;
        struct drm_mode_set mode_set;
-};
-
-
-struct drm_fb_helper_funcs {
-       void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
-                         u16 blue, int regno);
-       void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
-                         u16 *blue, int regno);
+       struct drm_display_mode *desired_mode;
 };
 
 /* mode specified on the command line */
@@ -57,8 +52,28 @@ struct drm_fb_helper_cmdline_mode {
        bool margins;
 };
 
+struct drm_fb_helper_surface_size {
+       u32 fb_width;
+       u32 fb_height;
+       u32 surface_width;
+       u32 surface_height;
+       u32 surface_bpp;
+       u32 surface_depth;
+};
+
+struct drm_fb_helper_funcs {
+       void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
+                         u16 blue, int regno);
+       void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
+                         u16 *blue, int regno);
+
+       int (*fb_probe)(struct drm_fb_helper *helper,
+                       struct drm_fb_helper_surface_size *sizes);
+};
+
 struct drm_fb_helper_connector {
        struct drm_fb_helper_cmdline_mode cmdline_mode;
+       struct drm_connector *connector;
 };
 
 struct drm_fb_helper {
@@ -67,24 +82,26 @@ struct drm_fb_helper {
        struct drm_display_mode *mode;
        int crtc_count;
        struct drm_fb_helper_crtc *crtc_info;
+       int connector_count;
+       struct drm_fb_helper_connector **connector_info;
        struct drm_fb_helper_funcs *funcs;
        int conn_limit;
+       struct fb_info *fbdev;
+       u32 pseudo_palette[17];
        struct list_head kernel_fb_list;
+
+       /* we got a hotplug but fbdev wasn't running the console;
+          delay until the next set_par */
+       bool delayed_hotplug;
 };
 
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
-                                 int preferred_bpp,
-                                 int (*fb_create)(struct drm_device *dev,
-                                                  uint32_t fb_width,
-                                                  uint32_t fb_height,
-                                                  uint32_t surface_width,
-                                                  uint32_t surface_height,
-                                                  uint32_t surface_depth,
-                                                  uint32_t surface_bpp,
-                                                  struct drm_framebuffer **fb_ptr));
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count,
-                                 int max_conn);
-void drm_fb_helper_free(struct drm_fb_helper *helper);
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
+                                 int preferred_bpp);
+
+int drm_fb_helper_init(struct drm_device *dev,
+                      struct drm_fb_helper *helper, int crtc_count,
+                      int max_conn);
+void drm_fb_helper_fini(struct drm_fb_helper *helper);
 int drm_fb_helper_blank(int blank, struct fb_info *info);
 int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
                              struct fb_info *info);
@@ -99,13 +116,15 @@ int drm_fb_helper_setcolreg(unsigned regno,
                            struct fb_info *info);
 
 void drm_fb_helper_restore(void);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
                            uint32_t fb_width, uint32_t fb_height);
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
                            uint32_t depth);
 
-int drm_fb_helper_add_connector(struct drm_connector *connector);
-int drm_fb_helper_parse_command_line(struct drm_device *dev);
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
+
 #endif
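
Pieced together from the declarations above, the reworked fbdev setup flow for a KMS driver looks roughly like this; the mydrv_* names and the CRTC/connector counts are hypothetical:

	static struct drm_fb_helper_funcs mydrv_fb_funcs = {
		.gamma_set = mydrv_gamma_set,
		.gamma_get = mydrv_gamma_get,
		.fb_probe  = mydrv_fb_probe,	/* allocates the backing fb */
	};

	/* in driver load */
	mydrv->fb_helper.funcs = &mydrv_fb_funcs;
	drm_fb_helper_init(dev, &mydrv->fb_helper, 2, 4);
	drm_fb_helper_single_add_all_connectors(&mydrv->fb_helper);
	drm_fb_helper_initial_config(&mydrv->fb_helper, 32);
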
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
new file mode 100644 (file)
index 0000000..4a08a66
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
+#ifndef DRM_FIXED_H
+#define DRM_FIXED_H
+
+typedef union dfixed {
+       u32 full;
+} fixed20_12;
+
+
+#define dfixed_const(A) (u32)((A) << 12)
+#define dfixed_const_half(A) (u32)(((A) << 12) + 2048)
+#define dfixed_const_666(A) (u32)(((A) << 12) + 2731)
+#define dfixed_const_8(A) (u32)(((A) << 12) + 3277)
+#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
+#define dfixed_init(A) { .full = dfixed_const((A)) }
+#define dfixed_init_half(A) { .full = dfixed_const_half((A)) }
+#define dfixed_trunc(A) ((A).full >> 12)
+
+static inline u32 dfixed_floor(fixed20_12 A)
+{
+       u32 non_frac = dfixed_trunc(A);
+
+       return dfixed_const(non_frac);
+}
+
+static inline u32 dfixed_ceil(fixed20_12 A)
+{
+       u32 non_frac = dfixed_trunc(A);
+
+       if (A.full > dfixed_const(non_frac))
+               return dfixed_const(non_frac + 1);
+       else
+               return dfixed_const(non_frac);
+}
+
+static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+{
+       u64 tmp = ((u64)A.full << 13);
+
+       do_div(tmp, B.full);
+       tmp += 1;
+       tmp /= 2;
+       return lower_32_bits(tmp);
+}
+#endif
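
A worked example of the 20.12 arithmetic, with hypothetical values:

	fixed20_12 a = dfixed_init(3);		/* a.full = 3 << 12 = 12288 */
	fixed20_12 b = dfixed_init_half(2);	/* 2.5: (2 << 12) + 2048 = 10240 */
	u32 i = dfixed_trunc(a);		/* back to an integer: 3 */

	/* dfixed_mul(a, b) = (12288 * 10240 + 2048) >> 12 = 30720,
	 * i.e. 7.5 in 20.12 form (30720 / 4096 = 7.5). */
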
index 81e614b..3ff9fc0 100644 (file)
@@ -902,6 +902,7 @@ struct drm_radeon_cs {
 #define RADEON_INFO_NUM_GB_PIPES       0x01
 #define RADEON_INFO_NUM_Z_PIPES        0x02
 #define RADEON_INFO_ACCEL_WORKING      0x03
+#define RADEON_INFO_CRTC_FROM_ID       0x04
 
 struct drm_radeon_info {
        uint32_t                request;
index 81eb9f4..267a86c 100644 (file)
@@ -66,6 +66,26 @@ struct ttm_placement {
        const uint32_t  *busy_placement;
 };
 
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr:              mapped virtual address
+ * @base:              bus base address
+ * @is_iomem:          is this io memory?
+ * @size:              size in bytes
+ * @offset:            offset from the base address
+ * @io_reserved:       is the io resource currently reserved?
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+       void            *addr;
+       unsigned long   base;
+       unsigned long   size;
+       unsigned long   offset;
+       bool            is_iomem;
+       bool            io_reserved;
+};
+
 
 /**
  * struct ttm_mem_reg
@@ -75,6 +95,7 @@ struct ttm_placement {
  * @num_pages: Actual size of memory region in pages.
  * @page_alignment: Page alignment.
  * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
  *
  * Structure indicating the placement and space resources used by a
  * buffer object.
@@ -87,6 +108,7 @@ struct ttm_mem_reg {
        uint32_t page_alignment;
        uint32_t mem_type;
        uint32_t placement;
+       struct ttm_bus_placement bus;
 };
 
 /**
@@ -274,6 +296,7 @@ struct ttm_bo_kmap_obj {
                ttm_bo_map_kmap         = 3,
                ttm_bo_map_premapped    = 4 | TTM_BO_MAP_IOMEM_MASK,
        } bo_kmap_type;
+       struct ttm_buffer_object *bo;
 };
 
 /**
@@ -313,7 +336,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait: Return immediately if the buffer is busy.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Changes placement and caching policy of the buffer object
  * according to the proposed placement.
@@ -325,7 +349,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  */
 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
-                               bool interruptible, bool no_wait);
+                               bool interruptible, bool no_wait_reserve,
+                               bool no_wait_gpu);
 
 /**
  * ttm_bo_unref
@@ -336,6 +361,23 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
  */
 extern void ttm_bo_unref(struct ttm_buffer_object **bo);
 
+/**
+ * ttm_bo_lock_delayed_workqueue
+ *
+ * Prevent the delayed workqueue from running.
+ * Returns
+ * true if the workqueue was queued at the time.
+ */
+extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_unlock_delayed_workqueue
+ *
+ * Allows the delayed workqueue to run.
+ */
+extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
+                                           int resched);
+
 /**
  * ttm_bo_synccpu_write_grab
  *
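
With the split of no_wait, a caller of ttm_bo_validate() now decides independently whether to block on a contended reservation and on GPU activity. A hypothetical non-blocking validation:

	int ret;

	/* fail fast if another buffer holds the reservation or the GPU is busy */
	ret = ttm_bo_validate(bo, &placement, false, true, true);
	if (ret)
		return ret;	/* e.g. -EBUSY: the caller may retry later */
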
index 6b9db91..0ea602d 100644 (file)
@@ -176,8 +176,6 @@ struct ttm_tt {
 
 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)        /* Fixed (on-card) PCI memory */
 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)        /* Memory mappable */
-#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2)        /* Fixed memory needs ioremap
-                                                  before kernel access. */
 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)        /* Can't map aperture */
 
 /**
@@ -189,13 +187,6 @@ struct ttm_tt {
  * managed by this memory type.
  * @gpu_offset: If used, the GPU offset of the first managed page of
  * fixed memory or the first managed location in an aperture.
- * @io_offset: The io_offset of the first managed page of IO memory or
- * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
- * memory, this should be set to NULL.
- * @io_size: The size of a managed IO region (fixed memory or aperture).
- * @io_addr: Virtual kernel address if the io region is pre-mapped. For
- * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
- * @io_addr should be set to NULL.
  * @size: Size of the managed region.
  * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
  * as defined in ttm_placement_common.h
@@ -221,9 +212,6 @@ struct ttm_mem_type_manager {
        bool use_type;
        uint32_t flags;
        unsigned long gpu_offset;
-       unsigned long io_offset;
-       unsigned long io_size;
-       void *io_addr;
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;
@@ -311,7 +299,8 @@ struct ttm_bo_driver {
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
-                    bool no_wait, struct ttm_mem_reg *new_mem);
+                    bool no_wait_reserve, bool no_wait_gpu,
+                    struct ttm_mem_reg *new_mem);
 
        /**
         * struct ttm_bo_driver_member verify_access
@@ -351,12 +340,21 @@ struct ttm_bo_driver {
                            struct ttm_mem_reg *new_mem);
        /* notify the driver we are taking a fault on this BO
         * and have reserved it */
-       void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+       int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
 
        /**
         * notify the driver that we're about to swap out this bo
         */
        void (*swap_notify) (struct ttm_buffer_object *bo);
+
+       /**
+        * Driver callback invoked when mapping io memory (for bo_move_memcpy,
+        * for instance). TTM will take care to call io_mem_free whenever
+        * the mapping is no longer in use. io_mem_reserve and io_mem_free
+        * calls are balanced.
+        */
+       int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+       void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
 };
 
 /**
@@ -633,7 +631,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptibly when sleeping.
- * @no_wait: Don't sleep waiting for space to become available.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
  * the placement flags in @mem, potentially evicting other idle buffer objects.
@@ -647,7 +646,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
                                struct ttm_mem_reg *mem,
-                               bool interruptible, bool no_wait);
+                               bool interruptible,
+                               bool no_wait_reserve, bool no_wait_gpu);
 /**
  * ttm_bo_wait_for_cpu
  *
@@ -682,6 +682,11 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                             unsigned long *bus_offset,
                             unsigned long *bus_size);
 
+extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                               struct ttm_mem_reg *mem);
+extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                               struct ttm_mem_reg *mem);
+
 extern void ttm_bo_global_release(struct ttm_global_reference *ref);
 extern int ttm_bo_global_init(struct ttm_global_reference *ref);
 
@@ -798,7 +803,8 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Optimized move function for a buffer object with both old and
@@ -812,15 +818,16 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                          bool evict, bool no_wait,
-                          struct ttm_mem_reg *new_mem);
+                          bool evict, bool no_wait_reserve,
+                          bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Fallback move function for a mappable buffer object in mappable memory.
@@ -834,8 +841,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                             bool evict,
-                             bool no_wait, struct ttm_mem_reg *new_mem);
+                             bool evict, bool no_wait_reserve,
+                             bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_free_old_node
@@ -854,7 +861,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @sync_obj_arg: An argument to pass to the sync object idle / wait
  * functions.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -868,7 +876,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     void *sync_obj,
                                     void *sync_obj_arg,
-                                    bool evict, bool no_wait,
+                                    bool evict, bool no_wait_reserve,
+                                    bool no_wait_gpu,
                                     struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
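
A minimal sketch of the new io_mem_reserve/io_mem_free pair for a driver whose memory type lives in system RAM and needs no bus mapping (drivers with VRAM apertures would instead fill mem->bus with real base/offset values); the mydrv_* names are hypothetical:

	static int mydrv_io_mem_reserve(struct ttm_bo_device *bdev,
					struct ttm_mem_reg *mem)
	{
		mem->bus.addr = NULL;
		mem->bus.offset = 0;
		mem->bus.base = 0;
		mem->bus.size = mem->num_pages << PAGE_SHIFT;
		mem->bus.is_iomem = false;	/* system RAM: nothing to ioremap */
		return 0;
	}

	static void mydrv_io_mem_free(struct ttm_bo_device *bdev,
				      struct ttm_mem_reg *mem)
	{
		/* balanced with io_mem_reserve; nothing was reserved here */
	}
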
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
new file mode 100644 (file)
index 0000000..8bb4de5
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Jerome Glisse <jglisse@redhat.com>
+ */
+#ifndef TTM_PAGE_ALLOC
+#define TTM_PAGE_ALLOC
+
+#include "ttm_bo_driver.h"
+#include "ttm_memory.h"
+
+/**
+ * Get count number of pages from pool to pages list.
+ *
+ * @pages: heado of empty linked list where pages are filled.
+ * @flags: ttm flags for page allocation.
+ * @cstate: ttm caching state for the page.
+ * @count: number of pages to allocate.
+ */
+int ttm_get_pages(struct list_head *pages,
+                 int flags,
+                 enum ttm_caching_state cstate,
+                 unsigned count);
+/**
+ * Return a linked list of pages to the pool.
+ *
+ * @pages: list of pages to free.
+ * @page_count: number of pages in the list. Zero can be passed for unknown
+ * count.
+ * @flags: ttm flags for page allocation.
+ * @cstate: ttm caching state.
+ */
+void ttm_put_pages(struct list_head *pages,
+                  unsigned page_count,
+                  int flags,
+                  enum ttm_caching_state cstate);
+/**
+ * Initialize pool allocator.
+ *
+ * The pool allocator is internally reference counted, so it can be initialized
+ * multiple times, but ttm_page_alloc_fini has to be called the same number of
+ * times.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * Output the state of the pools to a debugfs file.
+ */
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+#endif
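
A hedged usage sketch of the pool API, assuming ttm_page_alloc_init() has already run and that flags and cstate come from the ttm_tt being populated:

	LIST_HEAD(pages);
	int r;

	r = ttm_get_pages(&pages, flags, cstate, 16);	/* draw 16 pages */
	if (r)
		return r;
	/* ... use the pages ... */
	ttm_put_pages(&pages, 16, flags, cstate);	/* hand them back */
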
index c10163b..1296af4 100644 (file)
@@ -403,6 +403,7 @@ struct fb_cursor {
 #include <linux/notifier.h>
 #include <linux/list.h>
 #include <linux/backlight.h>
+#include <linux/slab.h>
 #include <asm/io.h>
 
 struct vm_area_struct;
@@ -862,10 +863,22 @@ struct fb_info {
        /* we need the PCI or similar aperture base/size, not
           smem_start/size, as smem_start may just be an object
           allocated inside the aperture and so may not actually overlap */
-       resource_size_t aperture_base;
-       resource_size_t aperture_size;
+       struct apertures_struct {
+               unsigned int count;
+               struct aperture {
+                       resource_size_t base;
+                       resource_size_t size;
+               } ranges[0];
+       } *apertures;
 };
 
+static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
+       struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct)
+                       + max_num * sizeof(struct aperture), GFP_KERNEL);
+       if (!a)
+               return NULL;    /* kzalloc can fail; don't touch a->count */
+       a->count = max_num;
+       return a;
+}
+
 #ifdef MODULE
 #define FBINFO_DEFAULT FBINFO_MODULE
 #else
@@ -958,6 +971,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
 /* drivers/video/fbmem.c */
 extern int register_framebuffer(struct fb_info *fb_info);
 extern int unregister_framebuffer(struct fb_info *fb_info);
+extern void remove_conflicting_framebuffers(struct apertures_struct *a,
+                               const char *name, bool primary);
 extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
 extern int fb_show_logo(struct fb_info *fb_info, int rotate);
 extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
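
Together with remove_conflicting_framebuffers(), the aperture list lets a native driver evict a firmware framebuffer (such as vesafb or vga16fb above, the latter now marked FBINFO_MISC_FIRMWARE) that overlaps its PCI resource. A hypothetical sketch:

	struct apertures_struct *ap = alloc_apertures(1);

	if (!ap)
		return -ENOMEM;
	ap->ranges[0].base = pci_resource_start(pdev, 0);	/* assumed BAR 0 */
	ap->ranges[0].size = pci_resource_len(pdev, 0);
	remove_conflicting_framebuffers(ap, "mydrmfb", true);
	kfree(ap);
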
diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
deleted file mode 100644 (file)
index 6a87154..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <linux/spinlock.h>
-
-struct cpustate_t {
-       spinlock_t lock;
-       int excl;
-        int open_count;
-       unsigned char cached_val;
-       int inited;
-       unsigned long *set_addr;
-       unsigned long *clr_addr;
-};
-
-
-#define HDPU_CPUSTATE_NAME "hdpu cpustate"
-#define HDPU_NEXUS_NAME "hdpu nexus"
-
-#define CPUSTATE_KERNEL_MAJOR  0x10
-
-#define CPUSTATE_KERNEL_INIT_DRV   0 /* CPU State Driver Initialized */
-#define CPUSTATE_KERNEL_INIT_PCI   1 /* 64360 PCI Busses Init */
-#define CPUSTATE_KERNEL_INIT_REG   2 /* 64360 Bridge Init */
-#define CPUSTATE_KERNEL_CPU1_KICK  3 /* Boot cpu 1 */
-#define CPUSTATE_KERNEL_CPU1_OK    4  /* Cpu 1 has checked in */
-#define CPUSTATE_KERNEL_OK         5 /* Terminal state */
-#define CPUSTATE_KERNEL_RESET   14 /* Board reset via SW*/
-#define CPUSTATE_KERNEL_HALT   15 /* Board halted via SW*/
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
new file mode 100644 (file)
index 0000000..ccb2b3e
--- /dev/null
@@ -0,0 +1,117 @@
+#ifndef _KDB_H
+#define _KDB_H
+
+/*
+ * Kernel Debugger Architecture Independent Global Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
+ */
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#define KDB_POLL_FUNC_MAX      5
+extern int kdb_poll_idx;
+
+/*
+ * kdb_initial_cpu is initialized to -1, and is set to the cpu
+ * number whenever the kernel debugger is entered.
+ */
+extern int kdb_initial_cpu;
+extern atomic_t kdb_event;
+
+/*
+ * kdb_diemsg
+ *
+ *     Contains a pointer to the last string supplied to the
+ *     kernel 'die' panic function.
+ */
+extern const char *kdb_diemsg;
+
+#define KDB_FLAG_EARLYKDB      (1 << 0) /* set from boot parameter kdb=early */
+#define KDB_FLAG_CATASTROPHIC  (1 << 1) /* A catastrophic event has occurred */
+#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */
+#define KDB_FLAG_NOIPI         (1 << 3) /* Do not send IPIs */
+#define KDB_FLAG_ONLY_DO_DUMP  (1 << 4) /* Only do a dump, used when
+                                         * kdb is off */
+#define KDB_FLAG_NO_CONSOLE    (1 << 5) /* No console is available,
+                                         * kdb is disabled */
+#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do
+                                         * not use keyboard */
+#define KDB_FLAG_NO_I8042      (1 << 7) /* No i8042 chip is available, do
+                                         * not use keyboard */
+
+extern int kdb_flags;  /* Global flags, see kdb_state for per cpu state */
+
+extern void kdb_save_flags(void);
+extern void kdb_restore_flags(void);
+
+#define KDB_FLAG(flag)         (kdb_flags & KDB_FLAG_##flag)
+#define KDB_FLAG_SET(flag)     ((void)(kdb_flags |= KDB_FLAG_##flag))
+#define KDB_FLAG_CLEAR(flag)   ((void)(kdb_flags &= ~KDB_FLAG_##flag))
+
+/*
+ * External entry point for the kernel debugger.  The pt_regs
+ * at the time of entry are supplied along with the reason for
+ * entry to the kernel debugger.
+ */
+
+typedef enum {
+       KDB_REASON_ENTER = 1,   /* KDB_ENTER() trap/fault - regs valid */
+       KDB_REASON_ENTER_SLAVE, /* KDB_ENTER_SLAVE() trap/fault - regs valid */
+       KDB_REASON_BREAK,       /* Breakpoint inst. - regs valid */
+       KDB_REASON_DEBUG,       /* Debug Fault - regs valid */
+       KDB_REASON_OOPS,        /* Kernel Oops - regs valid */
+       KDB_REASON_SWITCH,      /* CPU switch - regs valid*/
+       KDB_REASON_KEYBOARD,    /* Keyboard entry - regs valid */
+       KDB_REASON_NMI,         /* Non-maskable interrupt; regs valid */
+       KDB_REASON_RECURSE,     /* Recursive entry to kdb;
+                                * regs probably valid */
+       KDB_REASON_SSTEP,       /* Single Step trap. - regs valid */
+} kdb_reason_t;
+
+extern int kdb_trap_printk;
+extern int vkdb_printf(const char *fmt, va_list args)
+           __attribute__ ((format (printf, 1, 0)));
+extern int kdb_printf(const char *, ...)
+           __attribute__ ((format (printf, 1, 2)));
+typedef int (*kdb_printf_t)(const char *, ...)
+            __attribute__ ((format (printf, 1, 2)));
+
+extern void kdb_init(int level);
+
+/* Access to kdb specific polling devices */
+typedef int (*get_char_func)(void);
+extern get_char_func kdb_poll_funcs[];
+extern int kdb_get_kbd_char(void);
+
+static inline
+int kdb_process_cpu(const struct task_struct *p)
+{
+       unsigned int cpu = task_thread_info(p)->cpu;
+       if (cpu > num_possible_cpus())
+               cpu = 0;
+       return cpu;
+}
+
+/* kdb access to register set for stack dumping */
+extern struct pt_regs *kdb_current_regs;
+
+#else /* ! CONFIG_KGDB_KDB */
+#define kdb_printf(...)
+#define kdb_init(x)
+#endif /* CONFIG_KGDB_KDB */
+enum {
+       KDB_NOT_INITIALIZED,
+       KDB_INIT_EARLY,
+       KDB_INIT_FULL,
+};
+#endif /* !_KDB_H */
index 19ec41a..9340f34 100644 (file)
 #include <linux/serial_8250.h>
 #include <linux/linkage.h>
 #include <linux/init.h>
-
 #include <asm/atomic.h>
+#ifdef CONFIG_HAVE_ARCH_KGDB
 #include <asm/kgdb.h>
+#endif
 
+#ifdef CONFIG_KGDB
 struct pt_regs;
 
 /**
@@ -33,20 +35,6 @@ struct pt_regs;
  */
 extern int kgdb_skipexception(int exception, struct pt_regs *regs);
 
-/**
- *     kgdb_post_primary_code - (optional) Save error vector/code numbers.
- *     @regs: Original pt_regs.
- *     @e_vector: Original error vector.
- *     @err_code: Original error code.
- *
- *     This is usually needed on architectures which support SMP and
- *     KGDB.  This function is called after all the secondary cpus have
- *     been put to a know spin state and the primary CPU has control over
- *     KGDB.
- */
-extern void kgdb_post_primary_code(struct pt_regs *regs, int e_vector,
-                                 int err_code);
-
 /**
  *     kgdb_disable_hw_debug - (optional) Disable hardware debugging hook
  *     @regs: Current &struct pt_regs.
@@ -72,6 +60,7 @@ struct uart_port;
 void kgdb_breakpoint(void);
 
 extern int kgdb_connected;
+extern int kgdb_io_module_registered;
 
 extern atomic_t                        kgdb_setting_breakpoint;
 extern atomic_t                        kgdb_cpu_doing_single_step;
@@ -202,11 +191,33 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
  */
 extern void kgdb_roundup_cpus(unsigned long flags);
 
+/**
+ *     kgdb_arch_set_pc - Generic callback to set the program counter
+ *     @regs: Current &struct pt_regs.
+ *     @pc: The new value for the program counter
+ *
+ *     This function handles updating the program counter and requires an
+ *     architecture specific implementation.
+ */
+extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc);
+
+
 /* Optional functions. */
 extern int kgdb_validate_break_address(unsigned long addr);
 extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
 extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle);
 
+/**
+ *     kgdb_arch_late - Perform any architecture specific initialization.
+ *
+ *     This function will handle the late initialization of any
+ *     architecture specific callbacks.  This is an optional function for
+ *     handling things like late initialization of hw breakpoints.  The
+ *     default implementation does nothing.
+ */
+extern void kgdb_arch_late(void);
+
+
 /**
  * struct kgdb_arch - Describe architecture specific values.
  * @gdb_bpt_instr: The instruction to trigger a breakpoint.
@@ -247,6 +258,8 @@ struct kgdb_arch {
  * the I/O driver.
  * @post_exception: Pointer to a function that will do any cleanup work
  * for the I/O driver.
+ * @is_console: 1 if the end device is a console, 0 if the I/O device is
+ * not a console
  */
 struct kgdb_io {
        const char              *name;
@@ -256,6 +269,7 @@ struct kgdb_io {
        int                     (*init) (void);
        void                    (*pre_exception) (void);
        void                    (*post_exception) (void);
+       int                     is_console;
 };
 
 extern struct kgdb_arch                arch_kgdb_ops;
@@ -264,12 +278,14 @@ extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
 
 extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
 extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
+extern struct kgdb_io *dbg_io_ops;
 
 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
 extern int kgdb_mem2hex(char *mem, char *buf, int count);
 extern int kgdb_hex2mem(char *buf, char *mem, int count);
 
 extern int kgdb_isremovedbreak(unsigned long addr);
+extern void kgdb_schedule_breakpoint(void);
 
 extern int
 kgdb_handle_exception(int ex_vector, int signo, int err_code,
@@ -278,5 +294,12 @@ extern int kgdb_nmicallback(int cpu, void *regs);
 
 extern int                     kgdb_single_step;
 extern atomic_t                        kgdb_active;
-
+#define in_dbg_master() \
+       (raw_smp_processor_id() == atomic_read(&kgdb_active))
+extern bool dbg_is_early;
+extern void __init dbg_late_init(void);
+#else /* ! CONFIG_KGDB */
+#define in_dbg_master() (0)
+#define dbg_late_init()
+#endif /* ! CONFIG_KGDB */
 #endif /* _KGDB_H_ */
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
new file mode 100644 (file)
index 0000000..d11fe0f
--- /dev/null
@@ -0,0 +1,78 @@
+/* include/linux/msm_mdp.h
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_MDP_H_
+#define _MSM_MDP_H_
+
+#include <linux/types.h>
+
+#define MSMFB_IOCTL_MAGIC 'm'
+#define MSMFB_GRP_DISP          _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
+#define MSMFB_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
+
+enum {
+       MDP_RGB_565,            /* RGB 565 planar */
+       MDP_XRGB_8888,          /* RGB 888 padded */
+       MDP_Y_CBCR_H2V2,        /* Y and CbCr, pseudo planar w/ Cb is in MSB */
+       MDP_ARGB_8888,          /* ARGB 888 */
+       MDP_RGB_888,            /* RGB 888 planar */
+       MDP_Y_CRCB_H2V2,        /* Y and CrCb, pseudo planar w/ Cr is in MSB */
+       MDP_YCRYCB_H2V1,        /* YCrYCb interleave */
+       MDP_Y_CRCB_H2V1,        /* Y and CrCb, pseudo planar w/ Cr is in MSB */
+       MDP_Y_CBCR_H2V1,        /* Y and CbCr, pseudo planar w/ Cb is in MSB */
+       MDP_RGBA_8888,          /* ARGB 888 */
+       MDP_BGRA_8888,          /* ABGR 888 */
+       MDP_IMGTYPE_LIMIT       /* Non valid image type after this enum */
+};
+
+enum {
+       PMEM_IMG,
+       FB_IMG,
+};
+
+/* flag values */
+#define MDP_ROT_NOP    0
+#define MDP_FLIP_LR    0x1
+#define MDP_FLIP_UD    0x2
+#define MDP_ROT_90     0x4
+#define MDP_ROT_180    (MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_ROT_270    (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_DITHER     0x8
+#define MDP_BLUR       0x10
+
+#define MDP_TRANSP_NOP 0xffffffff
+#define MDP_ALPHA_NOP  0xff
+
+struct mdp_rect {
+       u32 x, y, w, h;
+};
+
+struct mdp_img {
+       u32 width, height, format, offset;
+       int memory_id;          /* the file descriptor */
+};
+
+struct mdp_blit_req {
+       struct mdp_img src;
+       struct mdp_img dst;
+       struct mdp_rect src_rect;
+       struct mdp_rect dst_rect;
+       u32 alpha, transp_mask, flags;
+};
+
+struct mdp_blit_req_list {
+       u32 count;
+       struct mdp_blit_req req[];
+};
+
+#endif /* _MSM_MDP_H_ */
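
A sketch of driving the new ioctl from userspace with a single blit
request; fb_fd and src_fd are assumed to be open descriptors for the msm
framebuffer and a source buffer, and a real build may need the header's
u32 types mapped to the exported __u32 equivalents:

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/msm_mdp.h>

	static int blit_320x240(int fb_fd, int src_fd)
	{
		struct mdp_blit_req_list *list;
		struct mdp_blit_req *req;
		int ret;

		list = calloc(1, sizeof(*list) + sizeof(*req));
		if (!list)
			return -1;
		list->count = 1;

		req = &list->req[0];
		req->src = (struct mdp_img) { .width = 320, .height = 240,
					      .format = MDP_RGB_565,
					      .memory_id = src_fd };
		req->dst = req->src;
		req->dst.memory_id = fb_fd;
		req->src_rect = (struct mdp_rect) { .x = 0, .y = 0,
						    .w = 320, .h = 240 };
		req->dst_rect = req->src_rect;
		req->alpha = MDP_ALPHA_NOP;		/* no constant alpha */
		req->transp_mask = MDP_TRANSP_NOP;	/* no color keying */
		req->flags = MDP_ROT_NOP;		/* no rotation or flip */

		ret = ioctl(fb_fd, MSMFB_BLIT, list);
		free(list);
		return ret;
	}
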
index 51611da..8d84062 100644 (file)
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/timer.h>
 
+/**
+ * struct padata_priv -  Embedded to the users data structure.
+ *
+ * @list: List entry, to attach to the padata lists.
+ * @pd: Pointer to the internal control structure.
+ * @cb_cpu: Callback cpu for serialization.
+ * @seq_nr: Sequence number of the parallelized data object.
+ * @info: Used to pass information from the parallel to the serial function.
+ * @parallel: Parallel execution function.
+ * @serial: Serial complete function.
+ */
 struct padata_priv {
        struct list_head        list;
        struct parallel_data    *pd;
@@ -35,11 +47,29 @@ struct padata_priv {
        void                    (*serial)(struct padata_priv *padata);
 };
 
+/**
+ * struct padata_list
+ *
+ * @list: List head.
+ * @lock: List lock.
+ */
 struct padata_list {
        struct list_head        list;
        spinlock_t              lock;
 };
 
+/**
+ * struct padata_queue - The percpu padata queues.
+ *
+ * @parallel: List to wait for parallelization.
+ * @reorder: List to wait for reordering after parallel processing.
+ * @serial: List to wait for serialization after reordering.
+ * @pwork: work struct for parallelization.
+ * @swork: work struct for serialization.
+ * @pd: Backpointer to the internal control structure.
+ * @num_obj: Number of objects that are processed by this cpu.
+ * @cpu_index: Index of the cpu.
+ */
 struct padata_queue {
        struct padata_list      parallel;
        struct padata_list      reorder;
@@ -51,6 +81,20 @@ struct padata_queue {
        int                     cpu_index;
 };
 
+/**
+ * struct parallel_data - Internal control structure, covers everything
+ * that depends on the cpumask in use.
+ *
+ * @pinst: padata instance.
+ * @queue: percpu padata queues.
+ * @seq_nr: The sequence number that will be attached to the next object.
+ * @reorder_objects: Number of objects waiting in the reorder queues.
+ * @refcnt: Number of objects holding a reference on this parallel_data.
+ * @max_seq_nr:  Maximal used sequence number.
+ * @cpumask: cpumask in use.
+ * @lock: Reorder lock.
+ * @timer: Reorder timer.
+ */
 struct parallel_data {
        struct padata_instance  *pinst;
        struct padata_queue     *queue;
@@ -60,8 +104,19 @@ struct parallel_data {
        unsigned int            max_seq_nr;
        cpumask_var_t           cpumask;
        spinlock_t              lock;
+       struct timer_list       timer;
 };
 
+/**
+ * struct padata_instance - The overall control structure.
+ *
+ * @cpu_notifier: cpu hotplug notifier.
+ * @wq: The workqueue in use.
+ * @pd: The internal control structure.
+ * @cpumask: User supplied cpumask.
+ * @lock: padata instance lock.
+ * @flags: padata flags.
+ */
 struct padata_instance {
        struct notifier_block   cpu_notifier;
        struct workqueue_struct *wq;
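
The usage pattern the kernel-doc above describes, in sketch form: users
embed padata_priv in their own object, do the heavy lifting in the
parallel callback, and hand the object back for in-order completion.
The padata_do_serial() entry point is part of the padata API but outside
the hunk shown here, and process() stands in for the caller's work:

	struct my_work {
		struct padata_priv padata;	/* must be embedded */
		/* ... caller's payload ... */
	};

	static void my_parallel(struct padata_priv *padata)
	{
		struct my_work *w =
			container_of(padata, struct my_work, padata);

		/* runs concurrently on some cpu in the instance's cpumask */
		process(w);
		padata_do_serial(padata);	/* queue for ordered completion */
	}

	static void my_serial(struct padata_priv *padata)
	{
		/* runs on @cb_cpu, in the original submission order */
	}
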
index 09d0d2d..f10db6e 100644 (file)
@@ -250,6 +250,7 @@ struct uart_ops {
 #endif
 };
 
+#define NO_POLL_CHAR           0x00ff0000
 #define UART_CONFIG_TYPE       (1 << 0)
 #define UART_CONFIG_IRQ                (1 << 1)
 
index 5c85402..22881b5 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/idr.h>
+#include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
 #include <linux/kmemcheck.h>
@@ -675,6 +676,7 @@ asmlinkage void __init start_kernel(void)
        buffer_init();
        key_init();
        security_init();
+       dbg_late_init();
        vfs_caches_init(totalram_pages);
        signals_init();
        /* rootfs populating might need page-writeback */
index 149e18e..057472f 100644 (file)
@@ -75,7 +75,7 @@ obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
 obj-$(CONFIG_GCOV_KERNEL) += gcov/
 obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KGDB) += debug/
 obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
 obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
diff --git a/kernel/debug/Makefile b/kernel/debug/Makefile
new file mode 100644 (file)
index 0000000..a85edc3
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Makefile for the linux kernel debugger
+#
+
+obj-$(CONFIG_KGDB) += debug_core.o gdbstub.o
+obj-$(CONFIG_KGDB_KDB) += kdb/
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
new file mode 100644 (file)
index 0000000..5cb7cd1
--- /dev/null
@@ -0,0 +1,983 @@
+/*
+ * Kernel Debug Core
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (C) 2000-2001 VERITAS Software Corporation.
+ * Copyright (C) 2002-2004 Timesys Corporation
+ * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
+ * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
+ * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
+ * Copyright (C) 2005-2009 Wind River Systems, Inc.
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Contributors at various stages not listed above:
+ *  Jason Wessel ( jason.wessel@windriver.com )
+ *  George Anzinger <george@mvista.com>
+ *  Anurekh Saxena (anurekh.saxena@timesys.com)
+ *  Lake Stevens Instrument Division (Glenn Engel)
+ *  Jim Kingdon, Cygnus Support.
+ *
+ * Original KGDB stub: David Grothe <dave@gcom.com>,
+ * Tigran Aivazian <tigran@sco.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#include <linux/pid_namespace.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/console.h>
+#include <linux/threads.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/pid.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#include "debug_core.h"
+
+static int kgdb_break_asap;
+
+struct debuggerinfo_struct kgdb_info[NR_CPUS];
+
+/**
+ * kgdb_connected - Is a host GDB connected to us?
+ */
+int                            kgdb_connected;
+EXPORT_SYMBOL_GPL(kgdb_connected);
+
+/* All the KGDB handlers are installed */
+int                    kgdb_io_module_registered;
+
+/* Guard for recursive entry */
+static int                     exception_level;
+
+struct kgdb_io         *dbg_io_ops;
+static DEFINE_SPINLOCK(kgdb_registration_lock);
+
+/* kgdb console driver is loaded */
+static int kgdb_con_registered;
+/* determine if kgdb console output should be used */
+static int kgdb_use_con;
+/* Flag for alternate operations for early debugging */
+bool dbg_is_early = true;
+/* Next cpu to become the master debug core */
+int dbg_switch_cpu;
+
+/* Use kdb or gdbserver mode */
+int dbg_kdb_mode = 1;
+
+static int __init opt_kgdb_con(char *str)
+{
+       kgdb_use_con = 1;
+       return 0;
+}
+
+early_param("kgdbcon", opt_kgdb_con);
+
+module_param(kgdb_use_con, int, 0644);
+
+/*
+ * Holds information about breakpoints in a kernel. These breakpoints are
+ * added and removed by gdb.
+ */
+static struct kgdb_bkpt                kgdb_break[KGDB_MAX_BREAKPOINTS] = {
+       [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
+};
+
+/*
+ * The CPU# of the active CPU, or -1 if none:
+ */
+atomic_t                       kgdb_active = ATOMIC_INIT(-1);
+EXPORT_SYMBOL_GPL(kgdb_active);
+
+/*
+ * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
+ * bootup code (which might not have percpu set up yet):
+ */
+static atomic_t                        passive_cpu_wait[NR_CPUS];
+static atomic_t                        cpu_in_kgdb[NR_CPUS];
+static atomic_t                        kgdb_break_tasklet_var;
+atomic_t                       kgdb_setting_breakpoint;
+
+struct task_struct             *kgdb_usethread;
+struct task_struct             *kgdb_contthread;
+
+int                            kgdb_single_step;
+static pid_t                   kgdb_sstep_pid;
+
+/* to keep track of the CPU which is doing the single stepping */
+atomic_t                       kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+
+/*
+ * If you are debugging a problem where roundup (the collection of
+ * all other CPUs) is a problem [this should be extremely rare],
+ * then use the nokgdbroundup option to avoid roundup. In that case
+ * the other CPUs might interfere with your debugging context, so
+ * use this with care:
+ */
+static int kgdb_do_roundup = 1;
+
+static int __init opt_nokgdbroundup(char *str)
+{
+       kgdb_do_roundup = 0;
+
+       return 0;
+}
+
+early_param("nokgdbroundup", opt_nokgdbroundup);
+
+/*
+ * Finally, some KGDB code :-)
+ */
+
+/*
+ * Weak aliases for breakpoint management,
+ * can be overridden by architectures when needed:
+ */
+int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
+{
+       int err;
+
+       err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
+       if (err)
+               return err;
+
+       return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+                                 BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+{
+       return probe_kernel_write((char *)addr,
+                                 (char *)bundle, BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_validate_break_address(unsigned long addr)
+{
+       char tmp_variable[BREAK_INSTR_SIZE];
+       int err;
+       /* Validate setting the breakpoint and then removing it.  If the
+        * remove fails, the kernel needs to emit a bad message because we
+        * are in deep trouble not being able to put things back the way we
+        * found them.
+        */
+       err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+       if (err)
+               return err;
+       err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+       if (err)
+               printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
+                  "memory destroyed at: %lx", addr);
+       return err;
+}
+
+unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+       return instruction_pointer(regs);
+}
+
+int __weak kgdb_arch_init(void)
+{
+       return 0;
+}
+
+int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+       return 0;
+}
+
+/**
+ *     kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
+ *     @regs: Current &struct pt_regs.
+ *
+ *     This function will be called if the particular architecture must
+ *     disable hardware debugging while it is processing gdb packets or
+ *     handling an exception.
+ */
+void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
+{
+}
+
+/*
+ * Some architectures need cache flushes when we set/clear a
+ * breakpoint:
+ */
+static void kgdb_flush_swbreak_addr(unsigned long addr)
+{
+       if (!CACHE_FLUSH_IS_SAFE)
+               return;
+
+       if (current->mm && current->mm->mmap_cache) {
+               flush_cache_range(current->mm->mmap_cache,
+                                 addr, addr + BREAK_INSTR_SIZE);
+       }
+       /* Force flush instruction cache if it was outside the mm */
+       flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+}
+
+/*
+ * SW breakpoint management:
+ */
+int dbg_activate_sw_breakpoints(void)
+{
+       unsigned long addr;
+       int error;
+       int ret = 0;
+       int i;
+
+       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+               if (kgdb_break[i].state != BP_SET)
+                       continue;
+
+               addr = kgdb_break[i].bpt_addr;
+               error = kgdb_arch_set_breakpoint(addr,
+                               kgdb_break[i].saved_instr);
+               if (error) {
+                       ret = error;
+                       printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+                       continue;
+               }
+
+               kgdb_flush_swbreak_addr(addr);
+               kgdb_break[i].state = BP_ACTIVE;
+       }
+       return ret;
+}
+
+int dbg_set_sw_break(unsigned long addr)
+{
+       int err = kgdb_validate_break_address(addr);
+       int breakno = -1;
+       int i;
+
+       if (err)
+               return err;
+
+       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+               if ((kgdb_break[i].state == BP_SET) &&
+                                       (kgdb_break[i].bpt_addr == addr))
+                       return -EEXIST;
+       }
+       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+               if (kgdb_break[i].state == BP_REMOVED &&
+                                       kgdb_break[i].bpt_addr == addr) {
+                       breakno = i;
+                       break;
+               }
+       }
+
+       if (breakno == -1) {
+               for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+                       if (kgdb_break[i].state == BP_UNDEFINED) {
+                               breakno = i;
+                               break;
+                       }
+               }
+       }
+
+       if (breakno == -1)
+               return -E2BIG;
+
+       kgdb_break[breakno].state = BP_SET;
+       kgdb_break[breakno].type = BP_BREAKPOINT;
+       kgdb_break[breakno].bpt_addr = addr;
+
+       return 0;
+}
+
+int dbg_deactivate_sw_breakpoints(void)
+{
+       unsigned long addr;
+       int error;
+       int ret = 0;
+       int i;
+
+       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+               if (kgdb_break[i].state != BP_ACTIVE)
+                       continue;
+               addr = kgdb_break[i].bpt_addr;
+               error = kgdb_arch_remove_breakpoint(addr,
+                                       kgdb_break[i].saved_instr);
+               if (error) {
+                       printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+                       ret = error;
+               }
+
+               kgdb_flush_swbreak_addr(addr);
+               kgdb_break[i].state = BP_SET;
+       }
+       return ret;
+}
+
+int dbg_remove_sw_break(unsigned long addr)
+{
+       int i;
+
+       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+               if ((kgdb_break[i].state == BP_SET) &&
+                               (kgdb_break[i].bpt_addr == addr)) {
+                       kgdb_break[i].state = BP_REMOVED;
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
+int kgdb_isremovedbreak(unsigned long addr)
+{
+       int i;
+
+       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+               if ((kgdb_break[i].state == BP_REMOVED) &&
+                                       (kgdb_break[i].bpt_addr == addr))
+                       return 1;
+       }
+       return 0;
+}
+
+int dbg_remove_all_break(void)
+{
+       unsigned long addr;
+       int error;
+       int i;
+
+       /* Clear memory breakpoints. */
+       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+               if (kgdb_break[i].state != BP_ACTIVE)
+                       goto setundefined;
+               addr = kgdb_break[i].bpt_addr;
+               error = kgdb_arch_remove_breakpoint(addr,
+                               kgdb_break[i].saved_instr);
+               if (error)
+                       printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+                          addr);
+setundefined:
+               kgdb_break[i].state = BP_UNDEFINED;
+       }
+
+       /* Clear hardware breakpoints. */
+       if (arch_kgdb_ops.remove_all_hw_break)
+               arch_kgdb_ops.remove_all_hw_break();
+
+       return 0;
+}
+
+/*
+ * Return true if there is a valid kgdb I/O module.  Also if no
+ * debugger is attached a message can be printed to the console about
+ * waiting for the debugger to attach.
+ *
+ * The print_wait argument is only to be true when called from inside
+ * the core kgdb_handle_exception, because it will wait for the
+ * debugger to attach.
+ */
+static int kgdb_io_ready(int print_wait)
+{
+       if (!dbg_io_ops)
+               return 0;
+       if (kgdb_connected)
+               return 1;
+       if (atomic_read(&kgdb_setting_breakpoint))
+               return 1;
+       if (print_wait) {
+#ifdef CONFIG_KGDB_KDB
+               if (!dbg_kdb_mode)
+                       printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+#else
+               printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+#endif
+       }
+       return 1;
+}
+
+static int kgdb_reenter_check(struct kgdb_state *ks)
+{
+       unsigned long addr;
+
+       if (atomic_read(&kgdb_active) != raw_smp_processor_id())
+               return 0;
+
+       /* Panic on recursive debugger calls: */
+       exception_level++;
+       addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
+       dbg_deactivate_sw_breakpoints();
+
+       /*
+        * If the breakpoint was removed successfully at the place the
+        * exception occurred, try to recover and print a warning to the end
+        * user because the user planted a breakpoint in a place that
+        * KGDB needs in order to function.
+        */
+       if (dbg_remove_sw_break(addr) == 0) {
+               exception_level = 0;
+               kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+               dbg_activate_sw_breakpoints();
+               printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
+                       addr);
+               WARN_ON_ONCE(1);
+
+               return 1;
+       }
+       dbg_remove_all_break();
+       kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+
+       if (exception_level > 1) {
+               dump_stack();
+               panic("Recursive entry to debugger");
+       }
+
+       printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+#ifdef CONFIG_KGDB_KDB
+       /* Allow kdb to debug itself one level */
+       return 0;
+#endif
+       dump_stack();
+       panic("Recursive entry to debugger");
+
+       return 1;
+}
+
+static void dbg_cpu_switch(int cpu, int next_cpu)
+{
+       /* Mark the cpu we are switching away from as a slave when it
+        * holds the kgdb_active token.  This must be done so that the
+        * loop that all the cpus wait in for the debug core will not be
+        * entered again as the master. */
+       if (cpu == atomic_read(&kgdb_active)) {
+               kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+               kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER;
+       }
+       kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER;
+}
+
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
+{
+       unsigned long flags;
+       int sstep_tries = 100;
+       int error;
+       int i, cpu;
+       int trace_on = 0;
+acquirelock:
+       /*
+        * Interrupts will be restored by the 'trap return' code, except when
+        * single stepping.
+        */
+       local_irq_save(flags);
+
+       cpu = ks->cpu;
+       kgdb_info[cpu].debuggerinfo = regs;
+       kgdb_info[cpu].task = current;
+       kgdb_info[cpu].ret_state = 0;
+       kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
+       /*
+        * Make sure the above info reaches the primary CPU before
+        * our cpu_in_kgdb[] flag setting does:
+        */
+       atomic_inc(&cpu_in_kgdb[cpu]);
+
+       if (exception_level == 1)
+               goto cpu_master_loop;
+
+       /*
+        * CPU will loop if it is a slave or request to become a kgdb
+        * master cpu and acquire the kgdb_active lock:
+        */
+       while (1) {
+cpu_loop:
+               if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
+                       kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
+                       goto cpu_master_loop;
+               } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+                       if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+                               break;
+               } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+                       if (!atomic_read(&passive_cpu_wait[cpu]))
+                               goto return_normal;
+               } else {
+return_normal:
+                       /* Return to normal operation by executing any
+                        * hw breakpoint fixup.
+                        */
+                       if (arch_kgdb_ops.correct_hw_break)
+                               arch_kgdb_ops.correct_hw_break();
+                       if (trace_on)
+                               tracing_on();
+                       atomic_dec(&cpu_in_kgdb[cpu]);
+                       touch_softlockup_watchdog_sync();
+                       clocksource_touch_watchdog();
+                       local_irq_restore(flags);
+                       return 0;
+               }
+               cpu_relax();
+       }
+
+       /*
+        * For single stepping, try to only enter on the processor
+        * that was single stepping.  To guard against a deadlock, the
+        * kernel will only try for the value of sstep_tries before
+        * giving up and continuing on.
+        */
+       if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
+           (kgdb_info[cpu].task &&
+            kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
+               atomic_set(&kgdb_active, -1);
+               touch_softlockup_watchdog_sync();
+               clocksource_touch_watchdog();
+               local_irq_restore(flags);
+
+               goto acquirelock;
+       }
+
+       if (!kgdb_io_ready(1)) {
+               kgdb_info[cpu].ret_state = 1;
+               goto kgdb_restore; /* No I/O connection, resume the system */
+       }
+
+       /*
+        * Don't enter if we have hit a removed breakpoint.
+        */
+       if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
+               goto kgdb_restore;
+
+       /* Call the I/O driver's pre_exception routine */
+       if (dbg_io_ops->pre_exception)
+               dbg_io_ops->pre_exception();
+
+       kgdb_disable_hw_debug(ks->linux_regs);
+
+       /*
+        * Get the passive CPU lock which will hold all the non-primary
+        * CPU in a spin state while the debugger is active
+        */
+       if (!kgdb_single_step) {
+               for (i = 0; i < NR_CPUS; i++)
+                       atomic_inc(&passive_cpu_wait[i]);
+       }
+
+#ifdef CONFIG_SMP
+       /* Signal the other CPUs to enter kgdb_wait() */
+       if ((!kgdb_single_step) && kgdb_do_roundup)
+               kgdb_roundup_cpus(flags);
+#endif
+
+       /*
+        * Wait for the other CPUs to be notified and be waiting for us:
+        */
+       for_each_online_cpu(i) {
+               while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i]))
+                       cpu_relax();
+       }
+
+       /*
+        * At this point the primary processor is completely
+        * in the debugger and all secondary CPUs are quiescent
+        */
+       dbg_deactivate_sw_breakpoints();
+       kgdb_single_step = 0;
+       kgdb_contthread = current;
+       exception_level = 0;
+       trace_on = tracing_is_on();
+       if (trace_on)
+               tracing_off();
+
+       while (1) {
+cpu_master_loop:
+               if (dbg_kdb_mode) {
+                       kgdb_connected = 1;
+                       error = kdb_stub(ks);
+               } else {
+                       error = gdb_serial_stub(ks);
+               }
+
+               if (error == DBG_PASS_EVENT) {
+                       dbg_kdb_mode = !dbg_kdb_mode;
+                       kgdb_connected = 0;
+               } else if (error == DBG_SWITCH_CPU_EVENT) {
+                       dbg_cpu_switch(cpu, dbg_switch_cpu);
+                       goto cpu_loop;
+               } else {
+                       kgdb_info[cpu].ret_state = error;
+                       break;
+               }
+       }
+
+       /* Call the I/O driver's post_exception routine */
+       if (dbg_io_ops->post_exception)
+               dbg_io_ops->post_exception();
+
+       atomic_dec(&cpu_in_kgdb[ks->cpu]);
+
+       if (!kgdb_single_step) {
+               for (i = NR_CPUS-1; i >= 0; i--)
+                       atomic_dec(&passive_cpu_wait[i]);
+               /*
+                * Wait till all the CPUs have quit from the debugger,
+                * but allow a CPU that hit an exception and is
+                * waiting to become the master to remain in the debug
+                * core.
+                */
+               for_each_online_cpu(i) {
+                       while (kgdb_do_roundup &&
+                              atomic_read(&cpu_in_kgdb[i]) &&
+                              !(kgdb_info[i].exception_state &
+                                DCPU_WANT_MASTER))
+                               cpu_relax();
+               }
+       }
+
+kgdb_restore:
+       if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+               int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+               if (kgdb_info[sstep_cpu].task)
+                       kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+               else
+                       kgdb_sstep_pid = 0;
+       }
+       if (trace_on)
+               tracing_on();
+       /* Free kgdb_active */
+       atomic_set(&kgdb_active, -1);
+       touch_softlockup_watchdog_sync();
+       clocksource_touch_watchdog();
+       local_irq_restore(flags);
+
+       return kgdb_info[cpu].ret_state;
+}
+
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ *     interface locks, if any (begin_session)
+ *     kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+       struct kgdb_state kgdb_var;
+       struct kgdb_state *ks = &kgdb_var;
+       int ret;
+
+       ks->cpu                 = raw_smp_processor_id();
+       ks->ex_vector           = evector;
+       ks->signo               = signo;
+       ks->err_code            = ecode;
+       ks->kgdb_usethreadid    = 0;
+       ks->linux_regs          = regs;
+
+       if (kgdb_reenter_check(ks))
+               return 0; /* Ouch, double exception ! */
+       kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+       ret = kgdb_cpu_enter(ks, regs);
+       kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER |
+                                               DCPU_IS_SLAVE);
+       return ret;
+}
+
+int kgdb_nmicallback(int cpu, void *regs)
+{
+#ifdef CONFIG_SMP
+       struct kgdb_state kgdb_var;
+       struct kgdb_state *ks = &kgdb_var;
+
+       memset(ks, 0, sizeof(struct kgdb_state));
+       ks->cpu                 = cpu;
+       ks->linux_regs          = regs;
+
+       if (!atomic_read(&cpu_in_kgdb[cpu]) &&
+           atomic_read(&kgdb_active) != -1 &&
+           atomic_read(&kgdb_active) != cpu) {
+               kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+               kgdb_cpu_enter(ks, regs);
+               kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
+               return 0;
+       }
+#endif
+       return 1;
+}
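
The architecture half of the roundup lives outside this file; a typical
kgdb_roundup_cpus() sends an IPI whose handler funnels each secondary cpu
into kgdb_nmicallback(), roughly like this sketch (illustrative only):

	#include <linux/smp.h>
	#include <asm/irq_regs.h>

	static void my_kgdb_ipi(void *unused)
	{
		kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
	}

	void kgdb_roundup_cpus(unsigned long flags)
	{
		smp_call_function(my_kgdb_ipi, NULL, 0);	/* do not wait */
	}
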
+
+static void kgdb_console_write(struct console *co, const char *s,
+   unsigned count)
+{
+       unsigned long flags;
+
+       /* If we're debugging, or KGDB has not connected, don't try
+        * to print. */
+       if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
+               return;
+
+       local_irq_save(flags);
+       gdbstub_msg_write(s, count);
+       local_irq_restore(flags);
+}
+
+static struct console kgdbcons = {
+       .name           = "kgdb",
+       .write          = kgdb_console_write,
+       .flags          = CON_PRINTBUFFER | CON_ENABLED,
+       .index          = -1,
+};
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+{
+       if (!dbg_io_ops) {
+               printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+               return;
+       }
+       if (!kgdb_connected) {
+#ifdef CONFIG_KGDB_KDB
+               if (!dbg_kdb_mode)
+                       printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+#else
+               printk(KERN_CRIT "Entering KGDB\n");
+#endif
+       }
+
+       kgdb_breakpoint();
+}
+
+static struct sysrq_key_op sysrq_dbg_op = {
+       .handler        = sysrq_handle_dbg,
+       .help_msg       = "debug(G)",
+       .action_msg     = "DEBUG",
+};
+#endif
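
With CONFIG_MAGIC_SYSRQ enabled this provides a convenient runtime entry
point: SysRq-g on the console, or "echo g > /proc/sysrq-trigger", drops a
running system into the debugger.
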
+
+static int kgdb_panic_event(struct notifier_block *self,
+                           unsigned long val,
+                           void *data)
+{
+       if (dbg_kdb_mode)
+               kdb_printf("PANIC: %s\n", (char *)data);
+       kgdb_breakpoint();
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block kgdb_panic_event_nb = {
+       .notifier_call  = kgdb_panic_event,
+       .priority       = INT_MAX,
+};
+
+void __weak kgdb_arch_late(void)
+{
+}
+
+void __init dbg_late_init(void)
+{
+       dbg_is_early = false;
+       if (kgdb_io_module_registered)
+               kgdb_arch_late();
+       kdb_init(KDB_INIT_FULL);
+}
+
+static void kgdb_register_callbacks(void)
+{
+       if (!kgdb_io_module_registered) {
+               kgdb_io_module_registered = 1;
+               kgdb_arch_init();
+               if (!dbg_is_early)
+                       kgdb_arch_late();
+               atomic_notifier_chain_register(&panic_notifier_list,
+                                              &kgdb_panic_event_nb);
+#ifdef CONFIG_MAGIC_SYSRQ
+               register_sysrq_key('g', &sysrq_dbg_op);
+#endif
+               if (kgdb_use_con && !kgdb_con_registered) {
+                       register_console(&kgdbcons);
+                       kgdb_con_registered = 1;
+               }
+       }
+}
+
+static void kgdb_unregister_callbacks(void)
+{
+       /*
+        * When this routine is called KGDB should unregister from the
+        * panic handler and clean up, making sure it is not handling any
+        * break exceptions at the time.
+        */
+       if (kgdb_io_module_registered) {
+               kgdb_io_module_registered = 0;
+               atomic_notifier_chain_unregister(&panic_notifier_list,
+                                              &kgdb_panic_event_nb);
+               kgdb_arch_exit();
+#ifdef CONFIG_MAGIC_SYSRQ
+               unregister_sysrq_key('g', &sysrq_dbg_op);
+#endif
+               if (kgdb_con_registered) {
+                       unregister_console(&kgdbcons);
+                       kgdb_con_registered = 0;
+               }
+       }
+}
+
+/*
+ * There are times when a tasklet must be used instead of a compiled-in
+ * breakpoint so that the exception is raised outside a kgdb I/O module,
+ * such as is the case with kgdboe, where calling a breakpoint in the
+ * I/O driver itself would be fatal.
+ */
+static void kgdb_tasklet_bpt(unsigned long ing)
+{
+       kgdb_breakpoint();
+       atomic_set(&kgdb_break_tasklet_var, 0);
+}
+
+static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+
+void kgdb_schedule_breakpoint(void)
+{
+       if (atomic_read(&kgdb_break_tasklet_var) ||
+               atomic_read(&kgdb_active) != -1 ||
+               atomic_read(&kgdb_setting_breakpoint))
+               return;
+       atomic_inc(&kgdb_break_tasklet_var);
+       tasklet_schedule(&kgdb_tasklet_breakpoint);
+}
+EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
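
A sketch of the intended caller: an I/O module that notices a break
request while in interrupt context (the kgdboe case described above)
defers the actual trap to the tasklet instead of executing a breakpoint
inside its own receive path.  The receive hook below is hypothetical:

	static void my_dbg_rx(const char *buf, int len)
	{
		/* gdb interrupts the target with a single 0x03 (^C) */
		if (len == 1 && buf[0] == 0x03)
			kgdb_schedule_breakpoint();
	}
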
+
+static void kgdb_initial_breakpoint(void)
+{
+       kgdb_break_asap = 0;
+
+       printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+       kgdb_breakpoint();
+}
+
+/**
+ *     kgdb_register_io_module - register KGDB IO module
+ *     @new_dbg_io_ops: the io ops vector
+ *
+ *     Register it with the KGDB core.
+ */
+int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
+{
+       int err;
+
+       spin_lock(&kgdb_registration_lock);
+
+       if (dbg_io_ops) {
+               spin_unlock(&kgdb_registration_lock);
+
+               printk(KERN_ERR "kgdb: Another I/O driver is already "
+                               "registered with KGDB.\n");
+               return -EBUSY;
+       }
+
+       if (new_dbg_io_ops->init) {
+               err = new_dbg_io_ops->init();
+               if (err) {
+                       spin_unlock(&kgdb_registration_lock);
+                       return err;
+               }
+       }
+
+       dbg_io_ops = new_dbg_io_ops;
+
+       spin_unlock(&kgdb_registration_lock);
+
+       printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
+              new_dbg_io_ops->name);
+
+       /* Arm KGDB now. */
+       kgdb_register_callbacks();
+
+       if (kgdb_break_asap)
+               kgdb_initial_breakpoint();
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kgdb_register_io_module);
+
+/**
+ *     kgdb_unregister_io_module - unregister KGDB IO module
+ *     @old_dbg_io_ops: the io ops vector
+ *
+ *     Unregister it from the KGDB core.
+ */
+void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
+{
+       BUG_ON(kgdb_connected);
+
+       /*
+        * KGDB is no longer able to communicate out, so
+        * unregister our callbacks and reset state.
+        */
+       kgdb_unregister_callbacks();
+
+       spin_lock(&kgdb_registration_lock);
+
+       WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
+       dbg_io_ops = NULL;
+
+       spin_unlock(&kgdb_registration_lock);
+
+       printk(KERN_INFO
+               "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+               old_dbg_io_ops->name);
+}
+EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
+
+int dbg_io_get_char(void)
+{
+       int ret = dbg_io_ops->read_char();
+       if (ret == NO_POLL_CHAR)
+               return -1;
+       if (!dbg_kdb_mode)
+               return ret;
+       if (ret == 127)
+               return 8;       /* map DEL to backspace for kdb */
+       return ret;
+}
+
+/**
+ * kgdb_breakpoint - generate breakpoint exception
+ *
+ * This function will generate a breakpoint exception.  It is used at the
+ * beginning of a program to sync up with a debugger and can be used
+ * otherwise as a quick means to stop program execution and "break" into
+ * the debugger.
+ */
+void kgdb_breakpoint(void)
+{
+       atomic_inc(&kgdb_setting_breakpoint);
+       wmb(); /* Sync point before breakpoint */
+       arch_kgdb_breakpoint();
+       wmb(); /* Sync point after breakpoint */
+       atomic_dec(&kgdb_setting_breakpoint);
+}
+EXPORT_SYMBOL_GPL(kgdb_breakpoint);
+
+static int __init opt_kgdb_wait(char *str)
+{
+       kgdb_break_asap = 1;
+
+       kdb_init(KDB_INIT_EARLY);
+       if (kgdb_io_module_registered)
+               kgdb_initial_breakpoint();
+
+       return 0;
+}
+
+early_param("kgdbwait", opt_kgdb_wait);
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
new file mode 100644 (file)
index 0000000..c5d753d
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _DEBUG_CORE_H_
+#define _DEBUG_CORE_H_
+/*
+ * These are the private implementation headers between the kernel
+ * debugger core and the debugger front end code.
+ */
+
+/* kernel debug core data structures */
+struct kgdb_state {
+       int                     ex_vector;
+       int                     signo;
+       int                     err_code;
+       int                     cpu;
+       int                     pass_exception;
+       unsigned long           thr_query;
+       unsigned long           threadid;
+       long                    kgdb_usethreadid;
+       struct pt_regs          *linux_regs;
+};
+
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE    0x4 /* Slave cpu enter exception */
+#define DCPU_SSTEP       0x8 /* CPU is single stepping */
+
+struct debuggerinfo_struct {
+       void                    *debuggerinfo;
+       struct task_struct      *task;
+       int                     exception_state;
+       int                     ret_state;
+       int                     irq_depth;
+};
+
+extern struct debuggerinfo_struct kgdb_info[];
+
+/* kernel debug core break point routines */
+extern int dbg_remove_all_break(void);
+extern int dbg_set_sw_break(unsigned long addr);
+extern int dbg_remove_sw_break(unsigned long addr);
+extern int dbg_activate_sw_breakpoints(void);
+extern int dbg_deactivate_sw_breakpoints(void);
+
+/* polled character access to i/o module */
+extern int dbg_io_get_char(void);
+
+/* stub return value for switching between the gdbstub and kdb */
+#define DBG_PASS_EVENT -12345
+/* Switch from one cpu to another */
+#define DBG_SWITCH_CPU_EVENT -123456
+extern int dbg_switch_cpu;
+
+/* gdbstub interface functions */
+extern int gdb_serial_stub(struct kgdb_state *ks);
+extern void gdbstub_msg_write(const char *s, int len);
+
+/* gdbstub functions used for kdb <-> gdbstub transition */
+extern int gdbstub_state(struct kgdb_state *ks, char *cmd);
+extern int dbg_kdb_mode;
+
+#ifdef CONFIG_KGDB_KDB
+extern int kdb_stub(struct kgdb_state *ks);
+extern int kdb_parse(const char *cmdstr);
+#else /* ! CONFIG_KGDB_KDB */
+static inline int kdb_stub(struct kgdb_state *ks)
+{
+       return DBG_PASS_EVENT;
+}
+#endif /* CONFIG_KGDB_KDB */
+
+#endif /* _DEBUG_CORE_H_ */
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
new file mode 100644 (file)
index 0000000..4b17b32
--- /dev/null
@@ -0,0 +1,1017 @@
+/*
+ * Kernel Debug Core
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (C) 2000-2001 VERITAS Software Corporation.
+ * Copyright (C) 2002-2004 Timesys Corporation
+ * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
+ * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
+ * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
+ * Copyright (C) 2005-2009 Wind River Systems, Inc.
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Contributors at various stages not listed above:
+ *  Jason Wessel ( jason.wessel@windriver.com )
+ *  George Anzinger <george@mvista.com>
+ *  Anurekh Saxena (anurekh.saxena@timesys.com)
+ *  Lake Stevens Instrument Division (Glenn Engel)
+ *  Jim Kingdon, Cygnus Support.
+ *
+ * Original KGDB stub: David Grothe <dave@gcom.com>,
+ * Tigran Aivazian <tigran@sco.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/unaligned.h>
+#include "debug_core.h"
+
+#define KGDB_MAX_THREAD_QUERY 17
+
+/* Our I/O buffers. */
+static char                    remcom_in_buffer[BUFMAX];
+static char                    remcom_out_buffer[BUFMAX];
+
+/* Storage for the registers, in GDB format. */
+static unsigned long           gdb_regs[(NUMREGBYTES +
+                                       sizeof(unsigned long) - 1) /
+                                       sizeof(unsigned long)];
+
+/*
+ * GDB remote protocol parser:
+ */
+
+static int hex(char ch)
+{
+       if ((ch >= 'a') && (ch <= 'f'))
+               return ch - 'a' + 10;
+       if ((ch >= '0') && (ch <= '9'))
+               return ch - '0';
+       if ((ch >= 'A') && (ch <= 'F'))
+               return ch - 'A' + 10;
+       return -1;
+}
+
+#ifdef CONFIG_KGDB_KDB
+static int gdbstub_read_wait(void)
+{
+       int ret = -1;
+       int i;
+
+       /* poll any additional I/O interfaces that are defined */
+       while (ret < 0)
+               for (i = 0; kdb_poll_funcs[i] != NULL; i++) {
+                       ret = kdb_poll_funcs[i]();
+                       if (ret > 0)
+                               break;
+               }
+       return ret;
+}
+#else
+static int gdbstub_read_wait(void)
+{
+       int ret = dbg_io_ops->read_char();
+       while (ret == NO_POLL_CHAR)
+               ret = dbg_io_ops->read_char();
+       return ret;
+}
+#endif
+/* scan for the sequence $<data>#<checksum> */
+static void get_packet(char *buffer)
+{
+       unsigned char checksum;
+       unsigned char xmitcsum;
+       int count;
+       char ch;
+
+       do {
+               /*
+                * Spin and wait around for the start character, ignore all
+                * other characters:
+                */
+               while ((ch = (gdbstub_read_wait())) != '$')
+                       /* nothing */;
+
+               kgdb_connected = 1;
+               checksum = 0;
+               xmitcsum = -1;
+
+               count = 0;
+
+               /*
+                * now, read until a # or end of buffer is found:
+                */
+               while (count < (BUFMAX - 1)) {
+                       ch = gdbstub_read_wait();
+                       if (ch == '#')
+                               break;
+                       checksum = checksum + ch;
+                       buffer[count] = ch;
+                       count = count + 1;
+               }
+               buffer[count] = 0;
+
+               if (ch == '#') {
+                       xmitcsum = hex(gdbstub_read_wait()) << 4;
+                       xmitcsum += hex(gdbstub_read_wait());
+
+                       if (checksum != xmitcsum)
+                               /* failed checksum */
+                               dbg_io_ops->write_char('-');
+                       else
+                               /* successful transfer */
+                               dbg_io_ops->write_char('+');
+                       if (dbg_io_ops->flush)
+                               dbg_io_ops->flush();
+               }
+       } while (checksum != xmitcsum);
+}
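
A concrete frame, for reference: gdb's "get registers" request arrives as
$g#67, since the checksum is the byte sum of the payload modulo 256
printed as two hex digits and 'g' is 0x67.  The stub ACKs with '+' and
then sends its own framed reply via put_packet() below.
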
+
+/*
+ * Send the packet in buffer, retrying until gdb acknowledges it or
+ * starts sending a new packet of its own.
+ */
+static void put_packet(char *buffer)
+{
+       unsigned char checksum;
+       int count;
+       char ch;
+
+       /*
+        * $<packet info>#<checksum>.
+        */
+       while (1) {
+               dbg_io_ops->write_char('$');
+               checksum = 0;
+               count = 0;
+
+               while ((ch = buffer[count])) {
+                       dbg_io_ops->write_char(ch);
+                       checksum += ch;
+                       count++;
+               }
+
+               dbg_io_ops->write_char('#');
+               dbg_io_ops->write_char(hex_asc_hi(checksum));
+               dbg_io_ops->write_char(hex_asc_lo(checksum));
+               if (dbg_io_ops->flush)
+                       dbg_io_ops->flush();
+
+               /* Now see what we get in reply. */
+               ch = gdbstub_read_wait();
+
+               if (ch == 3)
+                       ch = gdbstub_read_wait();
+
+               /* If we get an ACK, we are done. */
+               if (ch == '+')
+                       return;
+
+               /*
+                * If we get the start of another packet, this means
+                * that GDB is attempting to reconnect.  We will NAK
+                * the packet being sent, and stop trying to send this
+                * packet.
+                */
+               if (ch == '$') {
+                       dbg_io_ops->write_char('-');
+                       if (dbg_io_ops->flush)
+                               dbg_io_ops->flush();
+                       return;
+               }
+       }
+}
+
+static char gdbmsgbuf[BUFMAX + 1];
+
+void gdbstub_msg_write(const char *s, int len)
+{
+       char *bufptr;
+       int wcount;
+       int i;
+
+       if (len == 0)
+               len = strlen(s);
+
+       /* 'O'utput */
+       gdbmsgbuf[0] = 'O';
+
+       /* Fill and send buffers... */
+       while (len > 0) {
+               bufptr = gdbmsgbuf + 1;
+
+               /* Calculate how many this time */
+               if ((len << 1) > (BUFMAX - 2))
+                       wcount = (BUFMAX - 2) >> 1;
+               else
+                       wcount = len;
+
+               /* Pack in hex chars */
+               for (i = 0; i < wcount; i++)
+                       bufptr = pack_hex_byte(bufptr, s[i]);
+               *bufptr = '\0';
+
+               /* Move up */
+               s += wcount;
+               len -= wcount;
+
+               /* Write packet */
+               put_packet(gdbmsgbuf);
+       }
+}
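
Worked example: gdbstub_msg_write("hi", 2) emits $O6869#2c, where 'O'
marks console output, 68 69 are the hex-encoded message bytes, and 0x2c
is the byte sum of "O6869" modulo 256.
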
+
+/*
+ * Convert the memory pointed to by mem into hex, placing result in
+ * buf.  Return 0 on success or a negative error code if the memory
+ * could not be read.
+ */
+int kgdb_mem2hex(char *mem, char *buf, int count)
+{
+       char *tmp;
+       int err;
+
+       /*
+        * We use the upper half of buf as an intermediate buffer for the
+        * raw memory copy.  Hex conversion will work against this one.
+        */
+       tmp = buf + count;
+
+       err = probe_kernel_read(tmp, mem, count);
+       if (!err) {
+               while (count > 0) {
+                       buf = pack_hex_byte(buf, *tmp);
+                       tmp++;
+                       count--;
+               }
+
+               *buf = 0;
+       }
+
+       return err;
+}
+
+/*
+ * Convert the hex array pointed to by buf into binary to be placed in
+ * mem.  Return 0 on success or a negative error code if the memory
+ * could not be written.
+ */
+int kgdb_hex2mem(char *buf, char *mem, int count)
+{
+       char *tmp_raw;
+       char *tmp_hex;
+
+       /*
+        * We use the upper half of buf as an intermediate buffer for the
+        * raw memory that is converted from hex.
+        */
+       tmp_raw = buf + count * 2;
+
+       tmp_hex = tmp_raw - 1;
+       while (tmp_hex >= buf) {
+               tmp_raw--;
+               *tmp_raw = hex(*tmp_hex--);
+               *tmp_raw |= hex(*tmp_hex--) << 4;
+       }
+
+       return probe_kernel_write(mem, tmp_raw, count);
+}
+
+/*
+ * While we find nice hex chars, build a long_val.
+ * Return number of chars processed.
+ */
+int kgdb_hex2long(char **ptr, unsigned long *long_val)
+{
+       int hex_val;
+       int num = 0;
+       int negate = 0;
+
+       *long_val = 0;
+
+       if (**ptr == '-') {
+               negate = 1;
+               (*ptr)++;
+       }
+       while (**ptr) {
+               hex_val = hex(**ptr);
+               if (hex_val < 0)
+                       break;
+
+               *long_val = (*long_val << 4) | hex_val;
+               num++;
+               (*ptr)++;
+       }
+
+       if (negate)
+               *long_val = -*long_val;
+
+       return num;
+}
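
Worked example: with *ptr pointing at "c0ffee,", the function consumes
six hex digits, leaves *ptr at the comma, stores 0xc0ffee in *long_val
and returns 6.
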
+
+/*
+ * Copy the binary array pointed to by buf into mem.  The characters
+ * $, #, and 0x7d arrive escaped with 0x7d and are un-escaped here.
+ * Return -EFAULT on failure or 0 on success.  The input buf is
+ * overwritten with the result to write to mem.
+ */
+static int kgdb_ebin2mem(char *buf, char *mem, int count)
+{
+       int size = 0;
+       char *c = buf;
+
+       while (count-- > 0) {
+               c[size] = *buf++;
+               if (c[size] == 0x7d)
+                       c[size] = *buf++ ^ 0x20;
+               size++;
+       }
+
+       return probe_kernel_write(mem, c, size);
+}
+
+/* Write memory due to an 'M' or 'X' packet. */
+static int write_mem_msg(int binary)
+{
+       char *ptr = &remcom_in_buffer[1];
+       unsigned long addr;
+       unsigned long length;
+       int err;
+
+       if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' &&
+           kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') {
+               if (binary)
+                       err = kgdb_ebin2mem(ptr, (char *)addr, length);
+               else
+                       err = kgdb_hex2mem(ptr, (char *)addr, length);
+               if (err)
+                       return err;
+               if (CACHE_FLUSH_IS_SAFE)
+                       flush_icache_range(addr, addr + length);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static void error_packet(char *pkt, int error)
+{
+       error = -error;
+       pkt[0] = 'E';
+       pkt[1] = hex_asc[(error / 10)];
+       pkt[2] = hex_asc[(error % 10)];
+       pkt[3] = '\0';
+}
+
+/*
+ * Thread ID accessors. We represent a flat TID space to GDB, where
+ * the per CPU idle threads (which under Linux all have PID 0) are
+ * remapped to negative TIDs.
+ */
+
+#define BUF_THREAD_ID_SIZE     16
+
+static char *pack_threadid(char *pkt, unsigned char *id)
+{
+       char *limit;
+
+       limit = pkt + BUF_THREAD_ID_SIZE;
+       while (pkt < limit)
+               pkt = pack_hex_byte(pkt, *id++);
+
+       return pkt;
+}
+
+static void int_to_threadref(unsigned char *id, int value)
+{
+       unsigned char *scan;
+       int i = 4;
+
+       scan = (unsigned char *)id;
+       while (i--)
+               *scan++ = 0;
+       put_unaligned_be32(value, scan);
+}
+
+static struct task_struct *getthread(struct pt_regs *regs, int tid)
+{
+       /*
+        * Non-positive TIDs are remapped to the cpu shadow information
+        */
+       if (tid == 0 || tid == -1)
+               tid = -atomic_read(&kgdb_active) - 2;
+       if (tid < -1 && tid > -NR_CPUS - 2) {
+               if (kgdb_info[-tid - 2].task)
+                       return kgdb_info[-tid - 2].task;
+               else
+                       return idle_task(-tid - 2);
+       }
+       if (tid <= 0) {
+               printk(KERN_ERR "KGDB: Internal thread select error\n");
+               dump_stack();
+               return NULL;
+       }
+
+       /*
+        * find_task_by_pid_ns() does not take the tasklist lock anymore
+        * but is nicely RCU locked - hence is a pretty resilient
+        * thing to use:
+        */
+       return find_task_by_pid_ns(tid, &init_pid_ns);
+}
+
+
+/*
+ * Remap normal tasks to their real PID,
+ * CPU shadow threads are mapped to -CPU - 2
+ */
+static inline int shadow_pid(int realpid)
+{
+       if (realpid)
+               return realpid;
+
+       return -raw_smp_processor_id() - 2;
+}
+
+/*
+ * All the functions that start with gdb_cmd are the various
+ * operations to implement the handlers for the gdbserial protocol
+ * where KGDB is communicating with an external debugger
+ */
+
+/* Handle the '?' status packets */
+static void gdb_cmd_status(struct kgdb_state *ks)
+{
+       /*
+        * We know that this packet is only sent
+        * during initial connect.  So to be safe,
+        * we clear out our breakpoints now in case
+        * GDB is reconnecting.
+        */
+       dbg_remove_all_break();
+
+       remcom_out_buffer[0] = 'S';
+       pack_hex_byte(&remcom_out_buffer[1], ks->signo);
+}
+
+/* Handle the 'g' get registers request */
+static void gdb_cmd_getregs(struct kgdb_state *ks)
+{
+       struct task_struct *thread;
+       void *local_debuggerinfo;
+       int i;
+
+       thread = kgdb_usethread;
+       if (!thread) {
+               thread = kgdb_info[ks->cpu].task;
+               local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
+       } else {
+               local_debuggerinfo = NULL;
+               for_each_online_cpu(i) {
+                       /*
+                        * Try to find the task on some other
+                        * or possibly this node.  If we do not
+                        * find the matching task then we try
+                        * to approximate the results.
+                        */
+                       if (thread == kgdb_info[i].task)
+                               local_debuggerinfo = kgdb_info[i].debuggerinfo;
+               }
+       }
+
+       /*
+        * All threads that don't have debuggerinfo should be
+        * in schedule() sleeping, since all other CPUs
+        * are in kgdb_wait, and thus have debuggerinfo.
+        */
+       if (local_debuggerinfo) {
+               pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo);
+       } else {
+               /*
+                * Pull stuff saved during switch_to; nothing
+                * else is accessible (or even particularly
+                * relevant).
+                *
+                * This should be enough for a stack trace.
+                */
+               sleeping_thread_to_gdb_regs(gdb_regs, thread);
+       }
+       kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES);
+}
+
+/* Handle the 'G' set registers request */
+static void gdb_cmd_setregs(struct kgdb_state *ks)
+{
+       kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES);
+
+       if (kgdb_usethread && kgdb_usethread != current) {
+               error_packet(remcom_out_buffer, -EINVAL);
+       } else {
+               gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs);
+               strcpy(remcom_out_buffer, "OK");
+       }
+}
+
+/* Handle the 'm' memory read bytes */
+static void gdb_cmd_memread(struct kgdb_state *ks)
+{
+       char *ptr = &remcom_in_buffer[1];
+       unsigned long length;
+       unsigned long addr;
+       int err;
+
+       if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' &&
+                                       kgdb_hex2long(&ptr, &length) > 0) {
+               err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length);
+               if (err)
+                       error_packet(remcom_out_buffer, err);
+       } else {
+               error_packet(remcom_out_buffer, -EINVAL);
+       }
+}
+
+/* Handle the 'M' memory write bytes */
+static void gdb_cmd_memwrite(struct kgdb_state *ks)
+{
+       int err = write_mem_msg(0);
+
+       if (err)
+               error_packet(remcom_out_buffer, err);
+       else
+               strcpy(remcom_out_buffer, "OK");
+}
+
+/* Handle the 'X' memory binary write bytes */
+static void gdb_cmd_binwrite(struct kgdb_state *ks)
+{
+       int err = write_mem_msg(1);
+
+       if (err)
+               error_packet(remcom_out_buffer, err);
+       else
+               strcpy(remcom_out_buffer, "OK");
+}
+
+/* Handle the 'D' or 'k', detach or kill packets */
+static void gdb_cmd_detachkill(struct kgdb_state *ks)
+{
+       int error;
+
+       /* The detach case */
+       if (remcom_in_buffer[0] == 'D') {
+               error = dbg_remove_all_break();
+               if (error < 0) {
+                       error_packet(remcom_out_buffer, error);
+               } else {
+                       strcpy(remcom_out_buffer, "OK");
+                       kgdb_connected = 0;
+               }
+               put_packet(remcom_out_buffer);
+       } else {
+               /*
+                * Assume the kill case: there is no exit code to check,
+                * so just force the debugger to detach:
+                */
+               dbg_remove_all_break();
+               kgdb_connected = 0;
+       }
+}
+
+/* Handle the 'R' reboot packets */
+static int gdb_cmd_reboot(struct kgdb_state *ks)
+{
+       /* For now, only honor R0 */
+       if (strcmp(remcom_in_buffer, "R0") == 0) {
+               printk(KERN_CRIT "Executing emergency reboot\n");
+               strcpy(remcom_out_buffer, "OK");
+               put_packet(remcom_out_buffer);
+
+               /*
+                * Execution should not return from
+                * machine_emergency_restart()
+                */
+               machine_emergency_restart();
+               kgdb_connected = 0;
+
+               return 1;
+       }
+       return 0;
+}
+
+/* Handle the 'q' query packets */
+static void gdb_cmd_query(struct kgdb_state *ks)
+{
+       struct task_struct *g;
+       struct task_struct *p;
+       unsigned char thref[8];
+       char *ptr;
+       int i;
+       int cpu;
+       int finished = 0;
+
+       switch (remcom_in_buffer[1]) {
+       case 's':
+       case 'f':
+               if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
+                       error_packet(remcom_out_buffer, -EINVAL);
+                       break;
+               }
+
+               i = 0;
+               remcom_out_buffer[0] = 'm';
+               ptr = remcom_out_buffer + 1;
+               if (remcom_in_buffer[1] == 'f') {
+                       /* Each cpu is a shadow thread */
+                       for_each_online_cpu(cpu) {
+                               ks->thr_query = 0;
+                               int_to_threadref(thref, -cpu - 2);
+                               pack_threadid(ptr, thref);
+                               ptr += BUF_THREAD_ID_SIZE;
+                               *(ptr++) = ',';
+                               i++;
+                       }
+               }
+
+               do_each_thread(g, p) {
+                       if (i >= ks->thr_query && !finished) {
+                               int_to_threadref(thref, p->pid);
+                               pack_threadid(ptr, thref);
+                               ptr += BUF_THREAD_ID_SIZE;
+                               *(ptr++) = ',';
+                               ks->thr_query++;
+                               if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
+                                       finished = 1;
+                       }
+                       i++;
+               } while_each_thread(g, p);
+
+               *(--ptr) = '\0';
+               break;
+
+       case 'C':
+               /* Current thread id */
+               strcpy(remcom_out_buffer, "QC");
+               ks->threadid = shadow_pid(current->pid);
+               int_to_threadref(thref, ks->threadid);
+               pack_threadid(remcom_out_buffer + 2, thref);
+               break;
+       case 'T':
+               if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
+                       error_packet(remcom_out_buffer, -EINVAL);
+                       break;
+               }
+               ks->threadid = 0;
+               ptr = remcom_in_buffer + 17;
+               kgdb_hex2long(&ptr, &ks->threadid);
+               if (!getthread(ks->linux_regs, ks->threadid)) {
+                       error_packet(remcom_out_buffer, -EINVAL);
+                       break;
+               }
+               if ((int)ks->threadid > 0) {
+                       kgdb_mem2hex(getthread(ks->linux_regs,
+                                       ks->threadid)->comm,
+                                       remcom_out_buffer, 16);
+               } else {
+                       static char tmpstr[23 + BUF_THREAD_ID_SIZE];
+
+                       sprintf(tmpstr, "shadowCPU%d",
+                                       (int)(-ks->threadid - 2));
+                       kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
+               }
+               break;
+#ifdef CONFIG_KGDB_KDB
+       case 'R':
+               if (strncmp(remcom_in_buffer, "qRcmd,", 6) == 0) {
+                       int len = strlen(remcom_in_buffer + 6);
+
+                       if ((len % 2) != 0) {
+                               strcpy(remcom_out_buffer, "E01");
+                               break;
+                       }
+                       kgdb_hex2mem(remcom_in_buffer + 6,
+                                    remcom_out_buffer, len);
+                       len = len / 2;
+                       remcom_out_buffer[len++] = 0;
+
+                       kdb_parse(remcom_out_buffer);
+                       strcpy(remcom_out_buffer, "OK");
+               }
+               break;
+#endif
+       }
+}
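+
+/*
+ * Illustrative example (not part of the stub): with CONFIG_KGDB_KDB set,
+ * gdb's "monitor ps" arrives as the packet "qRcmd,7073" ("ps" hex
+ * encoded); the 'R' branch above decodes it and hands "ps" to
+ * kdb_parse().
+ */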
+
+/* Handle the 'H' task query packets */
+static void gdb_cmd_task(struct kgdb_state *ks)
+{
+       struct task_struct *thread;
+       char *ptr;
+
+       switch (remcom_in_buffer[1]) {
+       case 'g':
+               ptr = &remcom_in_buffer[2];
+               kgdb_hex2long(&ptr, &ks->threadid);
+               thread = getthread(ks->linux_regs, ks->threadid);
+               if (!thread && ks->threadid > 0) {
+                       error_packet(remcom_out_buffer, -EINVAL);
+                       break;
+               }
+               kgdb_usethread = thread;
+               ks->kgdb_usethreadid = ks->threadid;
+               strcpy(remcom_out_buffer, "OK");
+               break;
+       case 'c':
+               ptr = &remcom_in_buffer[2];
+               kgdb_hex2long(&ptr, &ks->threadid);
+               if (!ks->threadid) {
+                       kgdb_contthread = NULL;
+               } else {
+                       thread = getthread(ks->linux_regs, ks->threadid);
+                       if (!thread && ks->threadid > 0) {
+                               error_packet(remcom_out_buffer, -EINVAL);
+                               break;
+                       }
+                       kgdb_contthread = thread;
+               }
+               strcpy(remcom_out_buffer, "OK");
+               break;
+       }
+}
+
+/* Handle the 'T' thread query packets */
+static void gdb_cmd_thread(struct kgdb_state *ks)
+{
+       char *ptr = &remcom_in_buffer[1];
+       struct task_struct *thread;
+
+       kgdb_hex2long(&ptr, &ks->threadid);
+       thread = getthread(ks->linux_regs, ks->threadid);
+       if (thread)
+               strcpy(remcom_out_buffer, "OK");
+       else
+               error_packet(remcom_out_buffer, -EINVAL);
+}
+
+/* Handle the 'z' or 'Z' breakpoint remove or set packets */
+static void gdb_cmd_break(struct kgdb_state *ks)
+{
+       /*
+        * Since GDB 5.3, the remote protocol has defined '0' as a
+        * software breakpoint and '1' as a hardware breakpoint, so
+        * follow that convention here.
+        */
+       char *bpt_type = &remcom_in_buffer[1];
+       char *ptr = &remcom_in_buffer[2];
+       unsigned long addr;
+       unsigned long length;
+       int error = 0;
+
+       if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') {
+               /* Unsupported */
+               if (*bpt_type > '4')
+                       return;
+       } else {
+               if (*bpt_type != '0' && *bpt_type != '1')
+                       /* Unsupported. */
+                       return;
+       }
+
+       /*
+        * Test if this is a hardware breakpoint, and
+        * if we support it:
+        */
+       if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT))
+               /* Unsupported. */
+               return;
+
+       if (*(ptr++) != ',') {
+               error_packet(remcom_out_buffer, -EINVAL);
+               return;
+       }
+       if (!kgdb_hex2long(&ptr, &addr)) {
+               error_packet(remcom_out_buffer, -EINVAL);
+               return;
+       }
+       if (*(ptr++) != ',' ||
+               !kgdb_hex2long(&ptr, &length)) {
+               error_packet(remcom_out_buffer, -EINVAL);
+               return;
+       }
+
+       if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0')
+               error = dbg_set_sw_break(addr);
+       else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0')
+               error = dbg_remove_sw_break(addr);
+       else if (remcom_in_buffer[0] == 'Z')
+               error = arch_kgdb_ops.set_hw_breakpoint(addr,
+                       (int)length, *bpt_type - '0');
+       else if (remcom_in_buffer[0] == 'z')
+               error = arch_kgdb_ops.remove_hw_breakpoint(addr,
+                       (int) length, *bpt_type - '0');
+
+       if (error == 0)
+               strcpy(remcom_out_buffer, "OK");
+       else
+               error_packet(remcom_out_buffer, error);
+}
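+
+/*
+ * Illustrative example (not part of the stub): "Z0,c0123456,4" requests
+ * a software breakpoint at 0xc0123456 and is routed above to
+ * dbg_set_sw_break(); the matching "z0,c0123456,4" removes it through
+ * dbg_remove_sw_break().
+ */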
+
+/* Handle the 'C' signal / exception passing packets */
+static int gdb_cmd_exception_pass(struct kgdb_state *ks)
+{
+       /*
+        * C09 == pass exception
+        * C15 == detach kgdb, pass exception
+        */
+       if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') {
+
+               ks->pass_exception = 1;
+               remcom_in_buffer[0] = 'c';
+
+       } else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') {
+
+               ks->pass_exception = 1;
+               remcom_in_buffer[0] = 'D';
+               dbg_remove_all_break();
+               kgdb_connected = 0;
+               return 1;
+
+       } else {
+               gdbstub_msg_write("KGDB only knows signal 9 (pass)"
+                       " and 15 (pass and disconnect)\n"
+                       "Executing a continue without signal passing\n", 0);
+               remcom_in_buffer[0] = 'c';
+       }
+
+       /* Indicate fall through */
+       return -1;
+}
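+
+/*
+ * Illustrative example (not part of the stub): gdb's "signal SIGKILL"
+ * arrives as "C09"; the handler above sets pass_exception and rewrites
+ * the packet to a plain 'c' so the continue path resumes with the
+ * exception passed along.
+ */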
+
+/*
+ * This function performs all gdbserial command processing
+ */
+int gdb_serial_stub(struct kgdb_state *ks)
+{
+       int error = 0;
+       int tmp;
+
+       /* Clear the out buffer. */
+       memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
+
+       if (kgdb_connected) {
+               unsigned char thref[8];
+               char *ptr;
+
+               /* Reply to host that an exception has occurred */
+               ptr = remcom_out_buffer;
+               *ptr++ = 'T';
+               ptr = pack_hex_byte(ptr, ks->signo);
+               ptr += strlen(strcpy(ptr, "thread:"));
+               int_to_threadref(thref, shadow_pid(current->pid));
+               ptr = pack_threadid(ptr, thref);
+               *ptr++ = ';';
+               put_packet(remcom_out_buffer);
+       }
+
+       kgdb_usethread = kgdb_info[ks->cpu].task;
+       ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
+       ks->pass_exception = 0;
+
+       while (1) {
+               error = 0;
+
+               /* Clear the out buffer. */
+               memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
+
+               get_packet(remcom_in_buffer);
+
+               switch (remcom_in_buffer[0]) {
+               case '?': /* gdbserial status */
+                       gdb_cmd_status(ks);
+                       break;
+               case 'g': /* return the value of the CPU registers */
+                       gdb_cmd_getregs(ks);
+                       break;
+               case 'G': /* set the value of the CPU registers - return OK */
+                       gdb_cmd_setregs(ks);
+                       break;
+               case 'm': /* mAA..AA,LLLL  Read LLLL bytes at address AA..AA */
+                       gdb_cmd_memread(ks);
+                       break;
+               case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */
+                       gdb_cmd_memwrite(ks);
+                       break;
+               case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */
+                       gdb_cmd_binwrite(ks);
+                       break;
+                       /* kill or detach. KGDB should treat this like a
+                        * continue.
+                        */
+               case 'D': /* Debugger detach */
+               case 'k': /* Debugger detach via kill */
+                       gdb_cmd_detachkill(ks);
+                       goto default_handle;
+               case 'R': /* Reboot */
+                       if (gdb_cmd_reboot(ks))
+                               goto default_handle;
+                       break;
+               case 'q': /* query command */
+                       gdb_cmd_query(ks);
+                       break;
+               case 'H': /* task related */
+                       gdb_cmd_task(ks);
+                       break;
+               case 'T': /* Query thread status */
+                       gdb_cmd_thread(ks);
+                       break;
+               case 'z': /* Break point remove */
+               case 'Z': /* Break point set */
+                       gdb_cmd_break(ks);
+                       break;
+#ifdef CONFIG_KGDB_KDB
+               case '3': /* Escape back into kdb */
+                       if (remcom_in_buffer[1] == '\0') {
+                               gdb_cmd_detachkill(ks);
+                               return DBG_PASS_EVENT;
+                       }
+                       /* Otherwise fall through to exception passing */
+#endif
+               case 'C': /* Exception passing */
+                       tmp = gdb_cmd_exception_pass(ks);
+                       if (tmp > 0)
+                               goto default_handle;
+                       if (tmp == 0)
+                               break;
+                       /* Fall through on tmp < 0 */
+               case 'c': /* Continue packet */
+               case 's': /* Single step packet */
+                       if (kgdb_contthread && kgdb_contthread != current) {
+                               /* Can't switch threads in kgdb */
+                               error_packet(remcom_out_buffer, -EINVAL);
+                               break;
+                       }
+                       dbg_activate_sw_breakpoints();
+                       /* Fall through to default processing */
+               default:
+default_handle:
+                       error = kgdb_arch_handle_exception(ks->ex_vector,
+                                               ks->signo,
+                                               ks->err_code,
+                                               remcom_in_buffer,
+                                               remcom_out_buffer,
+                                               ks->linux_regs);
+                       /*
+                        * Leave cmd processing on error, detach,
+                        * kill, continue, or single step.
+                        */
+                       if (error >= 0 || remcom_in_buffer[0] == 'D' ||
+                           remcom_in_buffer[0] == 'k') {
+                               error = 0;
+                               goto kgdb_exit;
+                       }
+
+               }
+
+               /* reply to the request */
+               put_packet(remcom_out_buffer);
+       }
+
+kgdb_exit:
+       if (ks->pass_exception)
+               error = 1;
+       return error;
+}
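+
+/*
+ * Illustrative example (not part of the stub): if a debugger is already
+ * attached when pid 42 traps with SIGTRAP, the loop above first sends the
+ * stop reply "T05thread:000000000000002a;" (assuming the 16-character
+ * packed thread id produced by pack_threadid()) and then services packets
+ * until a continue, step, detach, or kill command ends the session.
+ */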
+
+int gdbstub_state(struct kgdb_state *ks, char *cmd)
+{
+       int error;
+
+       switch (cmd[0]) {
+       case 'e':
+               error = kgdb_arch_handle_exception(ks->ex_vector,
+                                                  ks->signo,
+                                                  ks->err_code,
+                                                  remcom_in_buffer,
+                                                  remcom_out_buffer,
+                                                  ks->linux_regs);
+               return error;
+       case 's':
+       case 'c':
+               strcpy(remcom_in_buffer, cmd);
+               return 0;
+       case '?':
+               gdb_cmd_status(ks);
+               break;
+       case '\0':
+               strcpy(remcom_out_buffer, "");
+               break;
+       }
+       dbg_io_ops->write_char('+');
+       put_packet(remcom_out_buffer);
+       return 0;
+}
diff --git a/kernel/debug/kdb/.gitignore b/kernel/debug/kdb/.gitignore
new file mode 100644 (file)
index 0000000..396d12e
--- /dev/null
@@ -0,0 +1 @@
+gen-kdb_cmds.c
diff --git a/kernel/debug/kdb/Makefile b/kernel/debug/kdb/Makefile
new file mode 100644 (file)
index 0000000..d4fc58f
--- /dev/null
@@ -0,0 +1,25 @@
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (c) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+# Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+#
+
+CCVERSION      := $(shell $(CC) -v 2>&1 | sed -ne '$$p')
+obj-y := kdb_io.o kdb_main.o kdb_support.o kdb_bt.o gen-kdb_cmds.o kdb_bp.o kdb_debugger.o
+obj-$(CONFIG_KDB_KEYBOARD)    += kdb_keyboard.o
+
+clean-files := gen-kdb_cmds.c
+
+quiet_cmd_gen-kdb = GENKDB  $@
+      cmd_gen-kdb = $(AWK) 'BEGIN {print "\#include <linux/stddef.h>"; print "\#include <linux/init.h>"} \
+               /^\#/{next} \
+               /^[ \t]*$$/{next} \
+               {gsub(/"/, "\\\"", $$0); \
+                 print "static __initdata char kdb_cmd" cmds++ "[] = \"" $$0 "\\n\";"} \
+               END {print "extern char *kdb_cmds[]; char __initdata *kdb_cmds[] = {"; for (i = 0; i < cmds; ++i) {print "  kdb_cmd" i ","}; print("  NULL\n};");}' \
+               $(filter-out %/Makefile,$^) > $@#
+
+$(obj)/gen-kdb_cmds.c: $(src)/kdb_cmds $(src)/Makefile
+       $(call cmd,gen-kdb)
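+
+# Illustrative example (not part of the rule): a kdb_cmds line such as
+# `set LINES 10000' is emitted by the awk script above as
+#   static __initdata char kdb_cmd0[] = "set LINES 10000\n";
+# and collected into the NULL-terminated kdb_cmds[] array.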
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
new file mode 100644 (file)
index 0000000..75bd9b3
--- /dev/null
@@ -0,0 +1,564 @@
+/*
+ * Kernel Debugger Architecture Independent Breakpoint Handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kdb.h>
+#include <linux/kgdb.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include "kdb_private.h"
+
+/*
+ * Table of kdb_breakpoints
+ */
+kdb_bp_t kdb_breakpoints[KDB_MAXBPT];
+
+static void kdb_setsinglestep(struct pt_regs *regs)
+{
+       KDB_STATE_SET(DOING_SS);
+}
+
+static char *kdb_rwtypes[] = {
+       "Instruction(i)",
+       "Instruction(Register)",
+       "Data Write",
+       "I/O",
+       "Data Access"
+};
+
+static char *kdb_bptype(kdb_bp_t *bp)
+{
+       if (bp->bp_type < 0 || bp->bp_type > 4)
+               return "";
+
+       return kdb_rwtypes[bp->bp_type];
+}
+
+static int kdb_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp)
+{
+       int nextarg = *nextargp;
+       int diag;
+
+       bp->bph_length = 1;
+       if ((argc + 1) != nextarg) {
+               if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0)
+                       bp->bp_type = BP_ACCESS_WATCHPOINT;
+               else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0)
+                       bp->bp_type = BP_WRITE_WATCHPOINT;
+               else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0)
+                       bp->bp_type = BP_HARDWARE_BREAKPOINT;
+               else
+                       return KDB_ARGCOUNT;
+
+               bp->bph_length = 1;
+
+               nextarg++;
+
+               if ((argc + 1) != nextarg) {
+                       unsigned long len;
+
+                       diag = kdbgetularg((char *)argv[nextarg],
+                                          &len);
+                       if (diag)
+                               return diag;
+
+                       if (len > 8)
+                               return KDB_BADLENGTH;
+
+                       bp->bph_length = len;
+                       nextarg++;
+               }
+
+               if ((argc + 1) != nextarg)
+                       return KDB_ARGCOUNT;
+       }
+
+       *nextargp = nextarg;
+       return 0;
+}
+
+static int _kdb_bp_remove(kdb_bp_t *bp)
+{
+       int ret = 1;
+       if (!bp->bp_installed)
+               return ret;
+       if (!bp->bp_type)
+               ret = dbg_remove_sw_break(bp->bp_addr);
+       else
+               ret = arch_kgdb_ops.remove_hw_breakpoint(bp->bp_addr,
+                        bp->bph_length,
+                        bp->bp_type);
+       if (ret == 0)
+               bp->bp_installed = 0;
+       return ret;
+}
+
+static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp)
+{
+       if (KDB_DEBUG(BP))
+               kdb_printf("regs->ip = 0x%lx\n", instruction_pointer(regs));
+
+       /*
+        * Setup single step
+        */
+       kdb_setsinglestep(regs);
+
+       /*
+        * Reset delay attribute
+        */
+       bp->bp_delay = 0;
+       bp->bp_delayed = 1;
+}
+
+static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp)
+{
+       int ret;
+       /*
+        * Install the breakpoint, if it is not already installed.
+        */
+
+       if (KDB_DEBUG(BP))
+               kdb_printf("%s: bp_installed %d\n",
+                          __func__, bp->bp_installed);
+       if (!KDB_STATE(SSBPT))
+               bp->bp_delay = 0;
+       if (bp->bp_installed)
+               return 1;
+       if (bp->bp_delay || (bp->bp_delayed && KDB_STATE(DOING_SS))) {
+               if (KDB_DEBUG(BP))
+                       kdb_printf("%s: delayed bp\n", __func__);
+               kdb_handle_bp(regs, bp);
+               return 0;
+       }
+       if (!bp->bp_type)
+               ret = dbg_set_sw_break(bp->bp_addr);
+       else
+               ret = arch_kgdb_ops.set_hw_breakpoint(bp->bp_addr,
+                        bp->bph_length,
+                        bp->bp_type);
+       if (ret == 0) {
+               bp->bp_installed = 1;
+       } else {
+               kdb_printf("%s: failed to set breakpoint at 0x%lx\n",
+                          __func__, bp->bp_addr);
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * kdb_bp_install
+ *
+ *     Install kdb_breakpoints prior to returning from the
+ *     kernel debugger.  This allows the kdb_breakpoints to be set
+ *     upon functions that are used internally by kdb, such as
+ *     printk().  This function is only called once per kdb session.
+ */
+void kdb_bp_install(struct pt_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < KDB_MAXBPT; i++) {
+               kdb_bp_t *bp = &kdb_breakpoints[i];
+
+               if (KDB_DEBUG(BP)) {
+                       kdb_printf("%s: bp %d bp_enabled %d\n",
+                                  __func__, i, bp->bp_enabled);
+               }
+               if (bp->bp_enabled)
+                       _kdb_bp_install(regs, bp);
+       }
+}
+
+/*
+ * kdb_bp_remove
+ *
+ *     Remove kdb_breakpoints upon entry to the kernel debugger.
+ *
+ * Parameters:
+ *     None.
+ * Outputs:
+ *     None.
+ * Returns:
+ *     None.
+ * Locking:
+ *     None.
+ * Remarks:
+ */
+void kdb_bp_remove(void)
+{
+       int i;
+
+       for (i = KDB_MAXBPT - 1; i >= 0; i--) {
+               kdb_bp_t *bp = &kdb_breakpoints[i];
+
+               if (KDB_DEBUG(BP)) {
+                       kdb_printf("%s: bp %d bp_enabled %d\n",
+                                  __func__, i, bp->bp_enabled);
+               }
+               if (bp->bp_enabled)
+                       _kdb_bp_remove(bp);
+       }
+}
+
+
+/*
+ * kdb_printbp
+ *
+ *     Internal function to format and print a breakpoint entry.
+ *
+ * Parameters:
+ *     None.
+ * Outputs:
+ *     None.
+ * Returns:
+ *     None.
+ * Locking:
+ *     None.
+ * Remarks:
+ */
+
+static void kdb_printbp(kdb_bp_t *bp, int i)
+{
+       kdb_printf("%s ", kdb_bptype(bp));
+       kdb_printf("BP #%d at ", i);
+       kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT);
+
+       if (bp->bp_enabled)
+               kdb_printf("\n    is enabled");
+       else
+               kdb_printf("\n    is disabled");
+
+       kdb_printf("\taddr at %016lx, hardtype=%d installed=%d\n",
+                  bp->bp_addr, bp->bp_type, bp->bp_installed);
+
+       kdb_printf("\n");
+}
+
+/*
+ * kdb_bp
+ *
+ *     Handle the bp commands.
+ *
+ *     [bp|bph] <addr-expression> [DATAR|DATAW]
+ *
+ * Parameters:
+ *     argc    Count of arguments in argv
+ *     argv    Space delimited command line arguments
+ * Outputs:
+ *     None.
+ * Returns:
+ *     Zero for success, a kdb diagnostic if failure.
+ * Locking:
+ *     None.
+ * Remarks:
+ *
+ *     bp      Set breakpoint on all cpus.  Only use hardware assist if
+ *             needed.
+ *     bph     Set breakpoint on all cpus.  Force use of a hardware
+ *             register.
+ */
+
+static int kdb_bp(int argc, const char **argv)
+{
+       int i, bpno;
+       kdb_bp_t *bp, *bp_check;
+       int diag;
+       int free;
+       char *symname = NULL;
+       long offset = 0ul;
+       int nextarg;
+       kdb_bp_t template = {0};
+
+       if (argc == 0) {
+               /*
+                * Display breakpoint table
+                */
+               for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT;
+                    bpno++, bp++) {
+                       if (bp->bp_free)
+                               continue;
+                       kdb_printbp(bp, bpno);
+               }
+
+               return 0;
+       }
+
+       nextarg = 1;
+       diag = kdbgetaddrarg(argc, argv, &nextarg, &template.bp_addr,
+                            &offset, &symname);
+       if (diag)
+               return diag;
+       if (!template.bp_addr)
+               return KDB_BADINT;
+
+       /*
+        * Find an empty bp structure to allocate
+        */
+       free = KDB_MAXBPT;
+       for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
+               if (bp->bp_free)
+                       break;
+       }
+
+       if (bpno == KDB_MAXBPT)
+               return KDB_TOOMANYBPT;
+
+       if (strcmp(argv[0], "bph") == 0) {
+               template.bp_type = BP_HARDWARE_BREAKPOINT;
+               diag = kdb_parsebp(argc, argv, &nextarg, &template);
+               if (diag)
+                       return diag;
+       } else {
+               template.bp_type = BP_BREAKPOINT;
+       }
+
+       /*
+        * Check for clashing breakpoints.
+        *
+        * Note, in this design we can't have hardware breakpoints
+        * enabled for both read and write on the same address.
+        */
+       for (i = 0, bp_check = kdb_breakpoints; i < KDB_MAXBPT;
+            i++, bp_check++) {
+               if (!bp_check->bp_free &&
+                   bp_check->bp_addr == template.bp_addr) {
+                       kdb_printf("You already have a breakpoint at "
+                                  kdb_bfd_vma_fmt0 "\n", template.bp_addr);
+                       return KDB_DUPBPT;
+               }
+       }
+
+       template.bp_enabled = 1;
+
+       /*
+        * Actually allocate the breakpoint found earlier
+        */
+       *bp = template;
+       bp->bp_free = 0;
+
+       kdb_printbp(bp, bpno);
+
+       return 0;
+}
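+
+/*
+ * Illustrative usage (not part of the handler): at the kdb prompt, "bp"
+ * alone displays the breakpoint table, "bp schedule" sets a software
+ * breakpoint on schedule(), and "bph schedule dataw 4" requests a 4-byte
+ * write watchpoint through the hardware-assist parsing above.
+ */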
+
+/*
+ * kdb_bc
+ *
+ *     Handles the 'bc', 'be', and 'bd' commands
+ *
+ *     [bd|bc|be] <breakpoint-number>
+ *     [bd|bc|be] *
+ *
+ * Parameters:
+ *     argc    Count of arguments in argv
+ *     argv    Space delimited command line arguments
+ * Outputs:
+ *     None.
+ * Returns:
+ *     Zero for success, a kdb diagnostic for failure
+ * Locking:
+ *     None.
+ * Remarks:
+ */
+static int kdb_bc(int argc, const char **argv)
+{
+       unsigned long addr;
+       kdb_bp_t *bp = NULL;
+       int lowbp = KDB_MAXBPT;
+       int highbp = 0;
+       int done = 0;
+       int i;
+       int diag = 0;
+
+       int cmd;                        /* KDBCMD_B? */
+#define KDBCMD_BC      0
+#define KDBCMD_BE      1
+#define KDBCMD_BD      2
+
+       if (strcmp(argv[0], "be") == 0)
+               cmd = KDBCMD_BE;
+       else if (strcmp(argv[0], "bd") == 0)
+               cmd = KDBCMD_BD;
+       else
+               cmd = KDBCMD_BC;
+
+       if (argc != 1)
+               return KDB_ARGCOUNT;
+
+       if (strcmp(argv[1], "*") == 0) {
+               lowbp = 0;
+               highbp = KDB_MAXBPT;
+       } else {
+               diag = kdbgetularg(argv[1], &addr);
+               if (diag)
+                       return diag;
+
+               /*
+                * For addresses less than the maximum breakpoint number,
+                * assume that the breakpoint number is desired.
+                */
+               if (addr < KDB_MAXBPT) {
+                       bp = &kdb_breakpoints[addr];
+                       lowbp = highbp = addr;
+                       highbp++;
+               } else {
+                       for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT;
+                           i++, bp++) {
+                               if (bp->bp_addr == addr) {
+                                       lowbp = highbp = i;
+                                       highbp++;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       /*
+        * Now operate on the set of breakpoints matching the input
+        * criteria (either '*' for all, or an individual breakpoint).
+        */
+       for (bp = &kdb_breakpoints[lowbp], i = lowbp;
+           i < highbp;
+           i++, bp++) {
+               if (bp->bp_free)
+                       continue;
+
+               done++;
+
+               switch (cmd) {
+               case KDBCMD_BC:
+                       bp->bp_enabled = 0;
+
+                       kdb_printf("Breakpoint %d at "
+                                  kdb_bfd_vma_fmt " cleared\n",
+                                  i, bp->bp_addr);
+
+                       bp->bp_addr = 0;
+                       bp->bp_free = 1;
+
+                       break;
+               case KDBCMD_BE:
+                       bp->bp_enabled = 1;
+
+                       kdb_printf("Breakpoint %d at "
+                                  kdb_bfd_vma_fmt " enabled",
+                                  i, bp->bp_addr);
+
+                       kdb_printf("\n");
+                       break;
+               case KDBCMD_BD:
+                       if (!bp->bp_enabled)
+                               break;
+
+                       bp->bp_enabled = 0;
+
+                       kdb_printf("Breakpoint %d at "
+                                  kdb_bfd_vma_fmt " disabled\n",
+                                  i, bp->bp_addr);
+
+                       break;
+               }
+               if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) {
+                       bp->bp_delay = 0;
+                       KDB_STATE_CLEAR(SSBPT);
+               }
+       }
+
+       return (!done) ? KDB_BPTNOTFOUND : 0;
+}
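+
+/*
+ * Illustrative usage (not part of the handler): "bc 0" clears breakpoint
+ * number 0, "bd 2" disables breakpoint 2 without freeing its slot, and
+ * "bc *" applies the clear to every allocated entry, exactly as the loop
+ * above iterates the lowbp..highbp range.
+ */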
+
+/*
+ * kdb_ss
+ *
+ *     Process the 'ss' (Single Step) and 'ssb' (Single Step to Branch)
+ *     commands.
+ *
+ *     ss
+ *     ssb
+ *
+ * Parameters:
+ *     argc    Argument count
+ *     argv    Argument vector
+ * Outputs:
+ *     None.
+ * Returns:
+ *     KDB_CMD_SS[B] for success, a kdb error if failure.
+ * Locking:
+ *     None.
+ * Remarks:
+ *
+ *     Set the arch specific option to trigger a debug trap after the next
+ *     instruction.
+ *
+ *     For 'ssb', set the trace flag in the debug trap handler
+ *     after printing the current insn and return directly without
+ *     invoking the kdb command processor, until a branch instruction
+ *     is encountered.
+ */
+
+static int kdb_ss(int argc, const char **argv)
+{
+       int ssb = (strcmp(argv[0], "ssb") == 0);
+
+       if (argc != 0)
+               return KDB_ARGCOUNT;
+       /*
+        * Set trace flag and go.
+        */
+       KDB_STATE_SET(DOING_SS);
+       if (ssb) {
+               KDB_STATE_SET(DOING_SSB);
+               return KDB_CMD_SSB;
+       }
+       return KDB_CMD_SS;
+}
+
+/* Initialize the breakpoint table and register breakpoint commands. */
+
+void __init kdb_initbptab(void)
+{
+       int i;
+       kdb_bp_t *bp;
+
+       /*
+        * First time initialization.
+        */
+       memset(&kdb_breakpoints, '\0', sizeof(kdb_breakpoints));
+
+       for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
+               bp->bp_free = 1;
+
+       kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
+               "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+       kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
+               "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+       if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
+               kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
+               "[datar [length]|dataw [length]]   Set hw brk", 0, KDB_REPEAT_NO_ARGS);
+       kdb_register_repeat("bc", kdb_bc, "<bpnum>",
+               "Clear Breakpoint", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("be", kdb_bc, "<bpnum>",
+               "Enable Breakpoint", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("bd", kdb_bc, "<bpnum>",
+               "Disable Breakpoint", 0, KDB_REPEAT_NONE);
+
+       kdb_register_repeat("ss", kdb_ss, "",
+               "Single Step", 1, KDB_REPEAT_NO_ARGS);
+       kdb_register_repeat("ssb", kdb_ss, "",
+               "Single step to branch/call", 0, KDB_REPEAT_NO_ARGS);
+       /*
+        * Architecture dependent initialization.
+        */
+}
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
new file mode 100644 (file)
index 0000000..2f62fe8
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * Kernel Debugger Architecture Independent Stack Traceback
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kdb.h>
+#include <linux/nmi.h>
+#include <asm/system.h>
+#include "kdb_private.h"
+
+
+static void kdb_show_stack(struct task_struct *p, void *addr)
+{
+       int old_lvl = console_loglevel;
+       console_loglevel = 15;
+       kdb_trap_printk++;
+       kdb_set_current_task(p);
+       if (addr) {
+               show_stack(p, addr);
+       } else if (kdb_current_regs) {
+#ifdef CONFIG_X86
+               show_stack(p, &kdb_current_regs->sp);
+#else
+               show_stack(p, NULL);
+#endif
+       } else {
+               show_stack(p, NULL);
+       }
+       console_loglevel = old_lvl;
+       kdb_trap_printk--;
+}
+
+/*
+ * kdb_bt
+ *
+ *     This function implements the 'bt' command.  Print a stack
+ *     traceback.
+ *
+ *     bt [<address-expression>]       (addr-exp is for alternate stacks)
+ *     btp <pid>                       Kernel stack for <pid>
+ *     btt <address-expression>        Kernel stack for task structure at
+ *                                     <address-expression>
+ *     bta [DRSTCZEUIMA]               All useful processes, optionally
+ *                                     filtered by state
+ *     btc [<cpu>]                     The current process on one cpu,
+ *                                     default is all cpus
+ *
+ *     bt <address-expression> refers to an address on the stack; that
+ *     location is assumed to contain a return address.
+ *
+ *     btt <address-expression> refers to the address of a struct task.
+ *
+ * Inputs:
+ *     argc    argument count
+ *     argv    argument vector
+ * Outputs:
+ *     None.
+ * Returns:
+ *     zero for success, a kdb diagnostic if error
+ * Locking:
+ *     none.
+ * Remarks:
+ *     Backtracking works best when the code uses frame pointers.  But even
+ *     without frame pointers we should get a reasonable trace.
+ *
+ *     mds comes in handy when examining the stack to do a manual traceback or
+ *     to get a starting point for bt <address-expression>.
+ */
+
+static int
+kdb_bt1(struct task_struct *p, unsigned long mask,
+       int argcount, int btaprompt)
+{
+       char buffer[2];
+       if (kdb_getarea(buffer[0], (unsigned long)p) ||
+           kdb_getarea(buffer[0], (unsigned long)(p+1)-1))
+               return KDB_BADADDR;
+       if (!kdb_task_state(p, mask))
+               return 0;
+       kdb_printf("Stack traceback for pid %d\n", p->pid);
+       kdb_ps1(p);
+       kdb_show_stack(p, NULL);
+       if (btaprompt) {
+               kdb_getstr(buffer, sizeof(buffer),
+                          "Enter <q> to end, <cr> to continue:");
+               if (buffer[0] == 'q') {
+                       kdb_printf("\n");
+                       return 1;
+               }
+       }
+       touch_nmi_watchdog();
+       return 0;
+}
+
+int
+kdb_bt(int argc, const char **argv)
+{
+       int diag;
+       int argcount = 5;
+       int btaprompt = 1;
+       int nextarg;
+       unsigned long addr;
+       long offset;
+
+       kdbgetintenv("BTARGS", &argcount);      /* Arguments to print */
+       kdbgetintenv("BTAPROMPT", &btaprompt);  /* Prompt after each
+                                                * proc in bta */
+
+       if (strcmp(argv[0], "bta") == 0) {
+               struct task_struct *g, *p;
+               unsigned long cpu;
+               unsigned long mask = kdb_task_state_string(argc ? argv[1] :
+                                                          NULL);
+               if (argc == 0)
+                       kdb_ps_suppressed();
+               /* Run the active tasks first */
+               for_each_online_cpu(cpu) {
+                       p = kdb_curr_task(cpu);
+                       if (kdb_bt1(p, mask, argcount, btaprompt))
+                               return 0;
+               }
+               /* Now the inactive tasks */
+               kdb_do_each_thread(g, p) {
+                       if (task_curr(p))
+                               continue;
+                       if (kdb_bt1(p, mask, argcount, btaprompt))
+                               return 0;
+               } kdb_while_each_thread(g, p);
+       } else if (strcmp(argv[0], "btp") == 0) {
+               struct task_struct *p;
+               unsigned long pid;
+               if (argc != 1)
+                       return KDB_ARGCOUNT;
+               diag = kdbgetularg((char *)argv[1], &pid);
+               if (diag)
+                       return diag;
+               p = find_task_by_pid_ns(pid, &init_pid_ns);
+               if (p) {
+                       kdb_set_current_task(p);
+                       return kdb_bt1(p, ~0UL, argcount, 0);
+               }
+               kdb_printf("No process with pid == %ld found\n", pid);
+               return 0;
+       } else if (strcmp(argv[0], "btt") == 0) {
+               if (argc != 1)
+                       return KDB_ARGCOUNT;
+               diag = kdbgetularg((char *)argv[1], &addr);
+               if (diag)
+                       return diag;
+               kdb_set_current_task((struct task_struct *)addr);
+               return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0);
+       } else if (strcmp(argv[0], "btc") == 0) {
+               unsigned long cpu = ~0;
+               struct task_struct *save_current_task = kdb_current_task;
+               char buf[80];
+               if (argc > 1)
+                       return KDB_ARGCOUNT;
+               if (argc == 1) {
+                       diag = kdbgetularg((char *)argv[1], &cpu);
+                       if (diag)
+                               return diag;
+               }
+               /* Recursive use of kdb_parse, do not use argv after
+                * this point */
+               argv = NULL;
+               if (cpu != ~0) {
+                       if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
+                               kdb_printf("no process for cpu %ld\n", cpu);
+                               return 0;
+                       }
+                       sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+                       kdb_parse(buf);
+                       return 0;
+               }
+               kdb_printf("btc: cpu status: ");
+               kdb_parse("cpu\n");
+               for_each_online_cpu(cpu) {
+                       sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+                       kdb_parse(buf);
+                       touch_nmi_watchdog();
+               }
+               kdb_set_current_task(save_current_task);
+               return 0;
+       } else {
+               if (argc) {
+                       nextarg = 1;
+                       diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
+                                            &offset, NULL);
+                       if (diag)
+                               return diag;
+                       kdb_show_stack(kdb_current_task, (void *)addr);
+                       return 0;
+               } else {
+                       return kdb_bt1(kdb_current_task, ~0UL, argcount, 0);
+               }
+       }
+
+       /* NOTREACHED */
+       return 0;
+}
diff --git a/kernel/debug/kdb/kdb_cmds b/kernel/debug/kdb/kdb_cmds
new file mode 100644 (file)
index 0000000..56c88e4
--- /dev/null
@@ -0,0 +1,35 @@
+# Initial commands for kdb, alter to suit your needs.
+# These commands are executed in kdb_init() context, no SMP, no
+# processes.  Commands that require process data (including stack or
+# registers) are not reliable this early.  set and bp commands should
+# be safe.  Global breakpoint commands affect each cpu as it is booted.
+
+# Standard debugging information for first level support: just type dumpall
+# or dumpcpu at the kdb prompt.
+
+defcmd dumpcommon "" "Common kdb debugging"
+  set BTAPROMPT 0
+  set LINES 10000
+  -summary
+  -cpu
+  -ps
+  -dmesg 600
+  -bt
+endefcmd
+
+defcmd dumpall "" "First line debugging"
+  set BTSYMARG 1
+  set BTARGS 9
+  pid R
+  -dumpcommon
+  -bta
+endefcmd
+
+defcmd dumpcpu "" "Same as dumpall but only tasks on cpus"
+  set BTSYMARG 1
+  set BTARGS 9
+  pid R
+  -dumpcommon
+  -btc
+endefcmd
+
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
new file mode 100644 (file)
index 0000000..bf6e827
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/kdebug.h>
+#include "kdb_private.h"
+#include "../debug_core.h"
+
+/*
+ * KDB interface to KGDB internals
+ */
+get_char_func kdb_poll_funcs[] = {
+       dbg_io_get_char,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+};
+EXPORT_SYMBOL_GPL(kdb_poll_funcs);
+
+int kdb_poll_idx = 1;
+EXPORT_SYMBOL_GPL(kdb_poll_idx);
+
+int kdb_stub(struct kgdb_state *ks)
+{
+       int error = 0;
+       kdb_bp_t *bp;
+       unsigned long addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
+       kdb_reason_t reason = KDB_REASON_OOPS;
+       kdb_dbtrap_t db_result = KDB_DB_NOBPT;
+       int i;
+
+       if (KDB_STATE(REENTRY)) {
+               reason = KDB_REASON_SWITCH;
+               KDB_STATE_CLEAR(REENTRY);
+               addr = instruction_pointer(ks->linux_regs);
+       }
+       ks->pass_exception = 0;
+       if (atomic_read(&kgdb_setting_breakpoint))
+               reason = KDB_REASON_KEYBOARD;
+
+       for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
+               if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
+                       reason = KDB_REASON_BREAK;
+                       db_result = KDB_DB_BPT;
+                       if (addr != instruction_pointer(ks->linux_regs))
+                               kgdb_arch_set_pc(ks->linux_regs, addr);
+                       break;
+               }
+       }
+       if (reason == KDB_REASON_BREAK || reason == KDB_REASON_SWITCH) {
+               for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
+                       if (bp->bp_free)
+                               continue;
+                       if (bp->bp_addr == addr) {
+                               bp->bp_delay = 1;
+                               bp->bp_delayed = 1;
+       /*
+        * SSBPT is set when the kernel debugger must single step a
+        * task in order to re-establish an instruction breakpoint
+        * which uses the instruction replacement mechanism.  It is
+        * cleared by any action that removes the need to single-step
+        * the breakpoint.
+        */
+                               reason = KDB_REASON_BREAK;
+                               db_result = KDB_DB_BPT;
+                               KDB_STATE_SET(SSBPT);
+                               break;
+                       }
+               }
+       }
+
+       if (reason != KDB_REASON_BREAK && ks->ex_vector == 0 &&
+               ks->signo == SIGTRAP) {
+               reason = KDB_REASON_SSTEP;
+               db_result = KDB_DB_BPT;
+       }
+       /* Set initial kdb state variables */
+       KDB_STATE_CLEAR(KGDB_TRANS);
+       kdb_initial_cpu = ks->cpu;
+       kdb_current_task = kgdb_info[ks->cpu].task;
+       kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+       /* Remove any breakpoints as needed by kdb and clear single step */
+       kdb_bp_remove();
+       KDB_STATE_CLEAR(DOING_SS);
+       KDB_STATE_CLEAR(DOING_SSB);
+       KDB_STATE_SET(PAGER);
+       /* zero out any offline cpu data */
+       for_each_present_cpu(i) {
+               if (!cpu_online(i)) {
+                       kgdb_info[i].debuggerinfo = NULL;
+                       kgdb_info[i].task = NULL;
+               }
+       }
+       if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
+               ks->pass_exception = 1;
+               KDB_FLAG_SET(CATASTROPHIC);
+       }
+       kdb_initial_cpu = ks->cpu;
+       if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
+               KDB_STATE_CLEAR(SSBPT);
+               KDB_STATE_CLEAR(DOING_SS);
+       } else {
+               /* Start kdb main loop */
+               error = kdb_main_loop(KDB_REASON_ENTER, reason,
+                                     ks->err_code, db_result, ks->linux_regs);
+       }
+       /*
+        * Upon exit from the kdb main loop setup break points and restart
+        * the system based on the requested continue state
+        */
+       kdb_initial_cpu = -1;
+       kdb_current_task = NULL;
+       kdb_current_regs = NULL;
+       KDB_STATE_CLEAR(PAGER);
+       kdbnearsym_cleanup();
+       if (error == KDB_CMD_KGDB) {
+               if (KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)) {
+       /*
+        * This is the interface glue that allows kdb to transition
+        * into the gdb stub.  In order to do this the '?' or '' gdb
+        * serial packet response is processed here, and then control
+        * is passed to the gdbstub.
+        */
+                       if (KDB_STATE(DOING_KGDB))
+                               gdbstub_state(ks, "?");
+                       else
+                               gdbstub_state(ks, "");
+                       KDB_STATE_CLEAR(DOING_KGDB);
+                       KDB_STATE_CLEAR(DOING_KGDB2);
+               }
+               return DBG_PASS_EVENT;
+       }
+       kdb_bp_install(ks->linux_regs);
+       dbg_activate_sw_breakpoints();
+       /* Set the exit state to a single step or a continue */
+       if (KDB_STATE(DOING_SS))
+               gdbstub_state(ks, "s");
+       else
+               gdbstub_state(ks, "c");
+
+       KDB_FLAG_CLEAR(CATASTROPHIC);
+
+       /* Invoke arch specific exception handling prior to system resume */
+       kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
+       if (ks->pass_exception)
+               kgdb_info[ks->cpu].ret_state = 1;
+       if (error == KDB_CMD_CPU) {
+               KDB_STATE_SET(REENTRY);
+               /*
+                * Force clear the single step bit because kdb emulates this
+                * differently vs the gdbstub
+                */
+               kgdb_single_step = 0;
+               dbg_deactivate_sw_breakpoints();
+               return DBG_SWITCH_CPU_EVENT;
+       }
+       return kgdb_info[ks->cpu].ret_state;
+}
+
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
new file mode 100644 (file)
index 0000000..c9b7f4f
--- /dev/null
@@ -0,0 +1,826 @@
+/*
+ * Kernel Debugger Architecture Independent Console I/O handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/kallsyms.h>
+#include "kdb_private.h"
+
+#define CMD_BUFLEN 256
+char kdb_prompt_str[CMD_BUFLEN];
+
+int kdb_trap_printk;
+
+static void kgdb_transition_check(char *buffer)
+{
+       int slen = strlen(buffer);
+       if (strncmp(buffer, "$?#3f", slen) != 0 &&
+           strncmp(buffer, "$qSupported#37", slen) != 0 &&
+           strncmp(buffer, "+$qSupported#37", slen) != 0) {
+               KDB_STATE_SET(KGDB_TRANS);
+               kdb_printf("%s", buffer);
+       }
+}
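+
+/*
+ * Illustrative note (checksums worked out by hand, not taken from this
+ * file): "$?#3f" and "$qSupported#37" are the framed forms of the '?'
+ * status query and the qSupported feature probe that gdb sends when it
+ * attaches, which is what the prefix comparisons above match against.
+ */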
+
+static int kdb_read_get_key(char *buffer, size_t bufsize)
+{
+#define ESCAPE_UDELAY 1000
+#define ESCAPE_DELAY (2*1000000/ESCAPE_UDELAY) /* 2 seconds worth of udelays */
+       char escape_data[5];    /* longest vt100 escape sequence is 4 bytes */
+       char *ped = escape_data;
+       int escape_delay = 0;
+       get_char_func *f, *f_escape = NULL;
+       int key;
+
+       for (f = &kdb_poll_funcs[0]; ; ++f) {
+               if (*f == NULL) {
+                       /* Reset NMI watchdog once per poll loop */
+                       touch_nmi_watchdog();
+                       f = &kdb_poll_funcs[0];
+               }
+               if (escape_delay == 2) {
+                       *ped = '\0';
+                       ped = escape_data;
+                       --escape_delay;
+               }
+               if (escape_delay == 1) {
+                       key = *ped++;
+                       if (!*ped)
+                               --escape_delay;
+                       break;
+               }
+               key = (*f)();
+               if (key == -1) {
+                       if (escape_delay) {
+                               udelay(ESCAPE_UDELAY);
+                               --escape_delay;
+                       }
+                       continue;
+               }
+               if (bufsize <= 2) {
+                       if (key == '\r')
+                               key = '\n';
+                       *buffer++ = key;
+                       *buffer = '\0';
+                       return -1;
+               }
+               if (escape_delay == 0 && key == '\e') {
+                       escape_delay = ESCAPE_DELAY;
+                       ped = escape_data;
+                       f_escape = f;
+               }
+               if (escape_delay) {
+                       *ped++ = key;
+                       if (f_escape != f) {
+                               escape_delay = 2;
+                               continue;
+                       }
+                       if (ped - escape_data == 1) {
+                               /* \e */
+                               continue;
+                       } else if (ped - escape_data == 2) {
+                               /* \e<something> */
+                               if (key != '[')
+                                       escape_delay = 2;
+                               continue;
+                       } else if (ped - escape_data == 3) {
+                               /* \e[<something> */
+                               int mapkey = 0;
+                               switch (key) {
+                               case 'A': /* \e[A, up arrow */
+                                       mapkey = 16;
+                                       break;
+                               case 'B': /* \e[B, down arrow */
+                                       mapkey = 14;
+                                       break;
+                               case 'C': /* \e[C, right arrow */
+                                       mapkey = 6;
+                                       break;
+                               case 'D': /* \e[D, left arrow */
+                                       mapkey = 2;
+                                       break;
+                               case '1': /* fall through */
+                               case '3': /* fall through */
+                               /* \e[<1,3,4>], may be home, del, end */
+                               case '4':
+                                       mapkey = -1;
+                                       break;
+                               }
+                               if (mapkey != -1) {
+                                       if (mapkey > 0) {
+                                               escape_data[0] = mapkey;
+                                               escape_data[1] = '\0';
+                                       }
+                                       escape_delay = 2;
+                               }
+                               continue;
+                       } else if (ped - escape_data == 4) {
+                               /* \e[<1,3,4><something> */
+                               int mapkey = 0;
+                               if (key == '~') {
+                                       switch (escape_data[2]) {
+                                       case '1': /* \e[1~, home */
+                                               mapkey = 1;
+                                               break;
+                                       case '3': /* \e[3~, del */
+                                               mapkey = 4;
+                                               break;
+                                       case '4': /* \e[4~, end */
+                                               mapkey = 5;
+                                               break;
+                                       }
+                               }
+                               if (mapkey > 0) {
+                                       escape_data[0] = mapkey;
+                                       escape_data[1] = '\0';
+                               }
+                               escape_delay = 2;
+                               continue;
+                       }
+               }
+               break;  /* A key to process */
+       }
+       return key;
+}
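+
+/*
+ * Illustrative example (not part of the loop): a left-arrow key arrives
+ * as the three bytes '\e', '[', 'D' across successive polls; the state
+ * machine above collapses the sequence into the single key code 2, which
+ * the line editor treats as cursor-left.
+ */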
+
+/*
+ * kdb_read
+ *
+ *     This function reads a string of characters, terminated by
+ *     a newline, or by reaching the end of the supplied buffer,
+ *     from the current kernel debugger console device.
+ * Parameters:
+ *     buffer  - Address of character buffer to receive input characters.
+ *     bufsize - size, in bytes, of the character buffer
+ * Returns:
+ *     Returns a pointer to the buffer containing the received
+ *     character string.  This string will be terminated by a
+ *     newline character.
+ * Locking:
+ *     No locks are required to be held upon entry to this
+ *     function.  It is not reentrant - it relies on the fact
+ *     that kdb runs on only one "master debug" cpu at a time.
+ * Remarks:
+ *
+ * The buffer size must be >= 2.  A buffer size of 2 means that the caller only
+ * wants a single key.
+ *
+ * An escape key could be the start of a vt100 control sequence such as \e[D
+ * (left arrow) or it could be a character in its own right.  The standard
+ * method for detecting the difference is to wait for 2 seconds to see if there
+ * are any other characters.  kdb is complicated by the lack of a timer service
+ * (interrupts are off), by multiple input sources and by the need to sometimes
+ * return after just one key.  Escape sequence processing has to be done as
+ * states in the polling loop.
+ */
+
+static char *kdb_read(char *buffer, size_t bufsize)
+{
+       char *cp = buffer;
+       char *bufend = buffer+bufsize-2;        /* Reserve space for newline
+                                                * and null byte */
+       char *lastchar;
+       char *p_tmp;
+       char tmp;
+       static char tmpbuffer[CMD_BUFLEN];
+       int len = strlen(buffer);
+       int len_tmp;
+       int tab = 0;
+       int count;
+       int i;
+       int diag, dtab_count;
+       int key;
+
+       diag = kdbgetintenv("DTABCOUNT", &dtab_count);
+       if (diag)
+               dtab_count = 30;
+
+       if (len > 0) {
+               cp += len;
+               if (*(buffer+len-1) == '\n')
+                       cp--;
+       }
+
+       lastchar = cp;
+       *cp = '\0';
+       kdb_printf("%s", buffer);
+poll_again:
+       key = kdb_read_get_key(buffer, bufsize);
+       if (key == -1)
+               return buffer;
+       if (key != 9)
+               tab = 0;
+       switch (key) {
+       case 8: /* backspace */
+               if (cp > buffer) {
+                       if (cp < lastchar) {
+                               memcpy(tmpbuffer, cp, lastchar - cp);
+                               memcpy(cp-1, tmpbuffer, lastchar - cp);
+                       }
+                       *(--lastchar) = '\0';
+                       --cp;
+                       kdb_printf("\b%s \r", cp);
+                       tmp = *cp;
+                       *cp = '\0';
+                       kdb_printf(kdb_prompt_str);
+                       kdb_printf("%s", buffer);
+                       *cp = tmp;
+               }
+               break;
+       case 13: /* enter */
+               *lastchar++ = '\n';
+               *lastchar++ = '\0';
+               kdb_printf("\n");
+               return buffer;
+       case 4: /* Del */
+               if (cp < lastchar) {
+                       memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
+                       memcpy(cp, tmpbuffer, lastchar - cp - 1);
+                       *(--lastchar) = '\0';
+                       kdb_printf("%s \r", cp);
+                       tmp = *cp;
+                       *cp = '\0';
+                       kdb_printf(kdb_prompt_str);
+                       kdb_printf("%s", buffer);
+                       *cp = tmp;
+               }
+               break;
+       case 1: /* Home */
+               if (cp > buffer) {
+                       kdb_printf("\r");
+                       kdb_printf(kdb_prompt_str);
+                       cp = buffer;
+               }
+               break;
+       case 5: /* End */
+               if (cp < lastchar) {
+                       kdb_printf("%s", cp);
+                       cp = lastchar;
+               }
+               break;
+       case 2: /* Left */
+               if (cp > buffer) {
+                       kdb_printf("\b");
+                       --cp;
+               }
+               break;
+       case 14: /* Down */
+               memset(tmpbuffer, ' ',
+                      strlen(kdb_prompt_str) + (lastchar-buffer));
+               *(tmpbuffer+strlen(kdb_prompt_str) +
+                 (lastchar-buffer)) = '\0';
+               kdb_printf("\r%s\r", tmpbuffer);
+               *lastchar = (char)key;
+               *(lastchar+1) = '\0';
+               return lastchar;
+       case 6: /* Right */
+               if (cp < lastchar) {
+                       kdb_printf("%c", *cp);
+                       ++cp;
+               }
+               break;
+       case 16: /* Up */
+               memset(tmpbuffer, ' ',
+                      strlen(kdb_prompt_str) + (lastchar-buffer));
+               *(tmpbuffer+strlen(kdb_prompt_str) +
+                 (lastchar-buffer)) = '\0';
+               kdb_printf("\r%s\r", tmpbuffer);
+               *lastchar = (char)key;
+               *(lastchar+1) = '\0';
+               return lastchar;
+       case 9: /* Tab */
+               if (tab < 2)
+                       ++tab;
+               p_tmp = buffer;
+               while (*p_tmp == ' ')
+                       p_tmp++;
+               if (p_tmp > cp)
+                       break;
+               memcpy(tmpbuffer, p_tmp, cp-p_tmp);
+               *(tmpbuffer + (cp-p_tmp)) = '\0';
+               p_tmp = strrchr(tmpbuffer, ' ');
+               if (p_tmp)
+                       ++p_tmp;
+               else
+                       p_tmp = tmpbuffer;
+               len = strlen(p_tmp);
+               count = kallsyms_symbol_complete(p_tmp,
+                                                sizeof(tmpbuffer) -
+                                                (p_tmp - tmpbuffer));
+               if (tab == 2 && count > 0) {
+                       kdb_printf("\n%d symbols are found.", count);
+                       if (count > dtab_count) {
+                               count = dtab_count;
+                               kdb_printf(" But only the first %d symbols"
+                                          " will be printed.\nYou can change"
+                                          " the environment variable"
+                                          " DTABCOUNT.",
+                                          count);
+                       }
+                       kdb_printf("\n");
+                       for (i = 0; i < count; i++) {
+                               if (kallsyms_symbol_next(p_tmp, i) < 0)
+                                       break;
+                               kdb_printf("%s ", p_tmp);
+                               *(p_tmp + len) = '\0';
+                       }
+                       if (i >= dtab_count)
+                               kdb_printf("...");
+                       kdb_printf("\n");
+                       kdb_printf(kdb_prompt_str);
+                       kdb_printf("%s", buffer);
+               } else if (tab != 2 && count > 0) {
+                       len_tmp = strlen(p_tmp);
+                       strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
+                       len_tmp = strlen(p_tmp);
+                       strncpy(cp, p_tmp+len, len_tmp-len + 1);
+                       len = len_tmp - len;
+                       kdb_printf("%s", cp);
+                       cp += len;
+                       lastchar += len;
+               }
+               kdb_nextline = 1; /* reset output line number */
+               break;
+       default:
+               if (key >= 32 && lastchar < bufend) {
+                       if (cp < lastchar) {
+                               memcpy(tmpbuffer, cp, lastchar - cp);
+                               memcpy(cp+1, tmpbuffer, lastchar - cp);
+                               *++lastchar = '\0';
+                               *cp = key;
+                               kdb_printf("%s\r", cp);
+                               ++cp;
+                               tmp = *cp;
+                               *cp = '\0';
+                               kdb_printf(kdb_prompt_str);
+                               kdb_printf("%s", buffer);
+                               *cp = tmp;
+                       } else {
+                               *++lastchar = '\0';
+                               *cp++ = key;
+                               /* The kgdb transition check will hide
+                                * printed characters if we think that
+                                * kgdb is connecting, until the check
+                                * fails */
+                               if (!KDB_STATE(KGDB_TRANS))
+                                       kgdb_transition_check(buffer);
+                               else
+                                       kdb_printf("%c", key);
+                       }
+                       /* Special escape to kgdb */
+                       if (lastchar - buffer >= 5 &&
+                           strcmp(lastchar - 5, "$?#3f") == 0) {
+                               strcpy(buffer, "kgdb");
+                               KDB_STATE_SET(DOING_KGDB);
+                               return buffer;
+                       }
+                       if (lastchar - buffer >= 14 &&
+                           strcmp(lastchar - 14, "$qSupported#37") == 0) {
+                               strcpy(buffer, "kgdb");
+                               KDB_STATE_SET(DOING_KGDB2);
+                               return buffer;
+                       }
+               }
+               break;
+       }
+       goto poll_again;
+}
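+
+/*
+ * Usage sketch for the bufsize == 2 contract above (illustrative; it
+ * mirrors what the pager in vkdb_printf() does to wait for a single
+ * keypress):
+ *
+ *     char buf1[2] = "";
+ *     kdb_read(buf1, sizeof(buf1));
+ *     if (buf1[0] == 'q' || buf1[0] == 'Q')
+ *             KDB_FLAG_SET(CMD_INTERRUPT);
+ */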
+
+/*
+ * kdb_getstr
+ *
+ *     Print the prompt string and read a command from the
+ *     input device.
+ *
+ * Parameters:
+ *     buffer  Address of buffer to receive command
+ *     bufsize Size of buffer in bytes
+ *     prompt  Pointer to string to use as prompt string
+ * Returns:
+ *     Pointer to command buffer.
+ * Locking:
+ *     None.
+ * Remarks:
+ *     For SMP kernels, the processor number will be
+ *     substituted for %d, %x or %o in the prompt.
+ */
+
+char *kdb_getstr(char *buffer, size_t bufsize, char *prompt)
+{
+       if (prompt && kdb_prompt_str != prompt)
+               strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
+       kdb_printf(kdb_prompt_str);
+       kdb_nextline = 1;       /* Prompt and input resets line number */
+       return kdb_read(buffer, bufsize);
+}
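+
+/*
+ * A minimal calling sketch (the prompt string here is illustrative;
+ * real callers pass the PROMPT/MOREPROMPT environment values):
+ *
+ *     char cmdbuf[CMD_BUFLEN];
+ *     kdb_getstr(cmdbuf, sizeof(cmdbuf), "kdb> ");
+ */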
+
+/*
+ * kdb_input_flush
+ *
+ *     Get rid of any buffered console input.
+ *
+ * Parameters:
+ *     none
+ * Returns:
+ *     nothing
+ * Locking:
+ *     none
+ * Remarks:
+ *     Call this function whenever you want to flush input.  If there is any
+ *     outstanding input, it ignores all characters until there has been no
+ *     data for approximately 1ms.
+ */
+
+static void kdb_input_flush(void)
+{
+       get_char_func *f;
+       int res;
+       int flush_delay = 1;
+       while (flush_delay) {
+               flush_delay--;
+empty:
+               touch_nmi_watchdog();
+               for (f = &kdb_poll_funcs[0]; *f; ++f) {
+                       res = (*f)();
+                       if (res != -1) {
+                               flush_delay = 1;
+                               goto empty;
+                       }
+               }
+               if (flush_delay)
+                       mdelay(1);
+       }
+}
+
+/*
+ * kdb_printf
+ *
+ *     Print a string to the output device(s).
+ *
+ * Parameters:
+ *     printf-like format and optional args.
+ * Returns:
+ *     0
+ * Locking:
+ *     None.
+ * Remarks:
+ *     use 'kdbcons->write()' to avoid polluting 'log_buf' with
+ *     kdb output.
+ *
+ *  If the user is doing a cmd args | grep srch
+ *  then kdb_grepping_flag is set.
+ *  In that case we need to accumulate full lines (ending in \n) before
+ *  searching for the pattern.
+ */
+
+static char kdb_buffer[256];   /* A bit too big to go on stack */
+static char *next_avail = kdb_buffer;
+static int  size_avail;
+static int  suspend_grep;
+
+/*
+ * search arg1 to see if it contains arg2
+ * (kdb_main.c provides flags for ^pat and pat$)
+ *
+ * return 1 for found, 0 for not found
+ */
+static int kdb_search_string(char *searched, char *searchfor)
+{
+       char firstchar, *cp;
+       int len1, len2;
+
+       /* not counting the newline at the end of "searched" */
+       len1 = strlen(searched)-1;
+       len2 = strlen(searchfor);
+       if (len1 < len2)
+               return 0;
+       if (kdb_grep_leading && kdb_grep_trailing && len1 != len2)
+               return 0;
+       if (kdb_grep_leading) {
+               if (!strncmp(searched, searchfor, len2))
+                       return 1;
+       } else if (kdb_grep_trailing) {
+               if (!strncmp(searched+len1-len2, searchfor, len2))
+                       return 1;
+       } else {
+               firstchar = *searchfor;
+               cp = searched;
+               while ((cp = strchr(cp, firstchar))) {
+                       if (!strncmp(cp, searchfor, len2))
+                               return 1;
+                       cp++;
+               }
+       }
+       return 0;
+}
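+
+/*
+ * Examples of the anchoring rules above (illustrative), given the
+ * flags set by parse_grep() in kdb_main.c:
+ *
+ *     cmd | grep foo      matches any line containing "foo"
+ *     cmd | grep ^foo     matches only lines starting with "foo"
+ *     cmd | grep foo$     matches only lines ending with "foo"
+ *     cmd | grep ^foo$    matches only lines that are exactly "foo"
+ */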
+
+int vkdb_printf(const char *fmt, va_list ap)
+{
+       int diag;
+       int linecount;
+       int logging, saved_loglevel = 0;
+       int saved_trap_printk;
+       int got_printf_lock = 0;
+       int retlen = 0;
+       int fnd, len;
+       char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
+       char *moreprompt = "more> ";
+       struct console *c = console_drivers;
+       static DEFINE_SPINLOCK(kdb_printf_lock);
+       unsigned long uninitialized_var(flags);
+
+       preempt_disable();
+       saved_trap_printk = kdb_trap_printk;
+       kdb_trap_printk = 0;
+
+       /* Serialize kdb_printf if multiple cpus try to write at once.
+        * But if any cpu goes recursive in kdb, just print the output,
+        * even if it is interleaved with any other text.
+        */
+       if (!KDB_STATE(PRINTF_LOCK)) {
+               KDB_STATE_SET(PRINTF_LOCK);
+               spin_lock_irqsave(&kdb_printf_lock, flags);
+               got_printf_lock = 1;
+               atomic_inc(&kdb_event);
+       } else {
+               __acquire(kdb_printf_lock);
+       }
+
+       diag = kdbgetintenv("LINES", &linecount);
+       if (diag || linecount <= 1)
+               linecount = 24;
+
+       diag = kdbgetintenv("LOGGING", &logging);
+       if (diag)
+               logging = 0;
+
+       if (!kdb_grepping_flag || suspend_grep) {
+               /* normally, every vsnprintf starts a new buffer */
+               next_avail = kdb_buffer;
+               size_avail = sizeof(kdb_buffer);
+       }
+       vsnprintf(next_avail, size_avail, fmt, ap);
+
+       /*
+        * If kdb_parse() found that the command was cmd xxx | grep yyy
+        * then kdb_grepping_flag is set, and kdb_grep_string contains yyy
+        *
+        * Accumulate the print data up to a newline before searching it.
+        * (vsnprintf does null-terminate the string that it generates)
+        */
+
+       /* skip the search if prints are temporarily unconditional */
+       if (!suspend_grep && kdb_grepping_flag) {
+               cp = strchr(kdb_buffer, '\n');
+               if (!cp) {
+                       /*
+                        * Special cases that don't end with newlines
+                        * but should be written without one:
+                        *   The "[nn]kdb> " prompt should
+                        *   appear at the front of the buffer.
+                        *
+                        *   The "[nn]more> " prompt (MOREPROMPT ->
+                        *   moreprompt) should also be written without
+                        *   one, but we print that ourselves and set
+                        *   the suspend_grep flag to make it
+                        *   unconditional.
+                        */
+                       if (next_avail == kdb_buffer) {
+                               /*
+                                * these should occur after a newline,
+                                * so they will be at the front of the
+                                * buffer
+                                */
+                               cp2 = kdb_buffer;
+                               len = strlen(kdb_prompt_str);
+                               if (!strncmp(cp2, kdb_prompt_str, len)) {
+                                       /*
+                                        * We're about to start a new
+                                        * command, so we can go back
+                                        * to normal mode.
+                                        */
+                                       kdb_grepping_flag = 0;
+                                       goto kdb_printit;
+                               }
+                       }
+                       /* no newline; don't search/write the buffer
+                          until one is there */
+                       len = strlen(kdb_buffer);
+                       next_avail = kdb_buffer + len;
+                       size_avail = sizeof(kdb_buffer) - len;
+                       goto kdb_print_out;
+               }
+
+               /*
+                * The newline is present; print through it or discard
+                * it, depending on the results of the search.
+                */
+               cp++;                /* to byte after the newline */
+               replaced_byte = *cp; /* remember what/where it was */
+               cphold = cp;
+               *cp = '\0';          /* end the string for our search */
+
+               /*
+                * We now have a newline at the end of the string
+                * Only continue with this output if it contains the
+                * search string.
+                */
+               fnd = kdb_search_string(kdb_buffer, kdb_grep_string);
+               if (!fnd) {
+                       /*
+                        * At this point the complete line at the start
+                        * of kdb_buffer can be discarded, as it does
+                        * not contain what the user is looking for.
+                        * Shift the buffer left.
+                        */
+                       *cphold = replaced_byte;
+                       strcpy(kdb_buffer, cphold);
+                       len = strlen(kdb_buffer);
+                       next_avail = kdb_buffer + len;
+                       size_avail = sizeof(kdb_buffer) - len;
+                       goto kdb_print_out;
+               }
+               /*
+                * at this point the string is a full line and
+                * should be printed, up to the null.
+                */
+       }
+kdb_printit:
+
+       /*
+        * Write to all consoles.
+        */
+       retlen = strlen(kdb_buffer);
+       if (!dbg_kdb_mode && kgdb_connected) {
+               gdbstub_msg_write(kdb_buffer, retlen);
+       } else {
+               if (!dbg_io_ops->is_console) {
+                       len = strlen(kdb_buffer);
+                       cp = kdb_buffer;
+                       while (len--) {
+                               dbg_io_ops->write_char(*cp);
+                               cp++;
+                       }
+               }
+               while (c) {
+                       c->write(c, kdb_buffer, retlen);
+                       touch_nmi_watchdog();
+                       c = c->next;
+               }
+       }
+       if (logging) {
+               saved_loglevel = console_loglevel;
+               console_loglevel = 0;
+               printk(KERN_INFO "%s", kdb_buffer);
+       }
+
+       if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n'))
+               kdb_nextline++;
+
+       /* check for having reached the LINES number of printed lines */
+       if (kdb_nextline == linecount) {
+               char buf1[16] = "";
+#if defined(CONFIG_SMP)
+               char buf2[32];
+#endif
+
+               /* Watch out for recursion here.  Any routine that calls
+                * kdb_printf will come back through here.  And kdb_read
+                * uses kdb_printf to echo on serial consoles ...
+                */
+               kdb_nextline = 1;       /* In case of recursion */
+
+               /*
+                * Pause until cr.
+                */
+               moreprompt = kdbgetenv("MOREPROMPT");
+               if (moreprompt == NULL)
+                       moreprompt = "more> ";
+
+#if defined(CONFIG_SMP)
+               if (strchr(moreprompt, '%')) {
+                       sprintf(buf2, moreprompt, get_cpu());
+                       put_cpu();
+                       moreprompt = buf2;
+               }
+#endif
+
+               kdb_input_flush();
+               c = console_drivers;
+
+               if (!dbg_io_ops->is_console) {
+                       len = strlen(moreprompt);
+                       cp = moreprompt;
+                       while (len--) {
+                               dbg_io_ops->write_char(*cp);
+                               cp++;
+                       }
+               }
+               while (c) {
+                       c->write(c, moreprompt, strlen(moreprompt));
+                       touch_nmi_watchdog();
+                       c = c->next;
+               }
+
+               if (logging)
+                       printk("%s", moreprompt);
+
+               kdb_read(buf1, 2); /* '2' indicates to return
+                                   * immediately after getting one key. */
+               kdb_nextline = 1;       /* Really set output line 1 */
+
+               /* empty and reset the buffer: */
+               kdb_buffer[0] = '\0';
+               next_avail = kdb_buffer;
+               size_avail = sizeof(kdb_buffer);
+               if ((buf1[0] == 'q') || (buf1[0] == 'Q')) {
+                       /* user hit q or Q */
+                       KDB_FLAG_SET(CMD_INTERRUPT); /* command interrupted */
+                       KDB_STATE_CLEAR(PAGER);
+                       /* end of command output; back to normal mode */
+                       kdb_grepping_flag = 0;
+                       kdb_printf("\n");
+               } else if (buf1[0] == ' ') {
+                       kdb_printf("\n");
+                       suspend_grep = 1; /* for this recursion */
+               } else if (buf1[0] == '\n') {
+                       kdb_nextline = linecount - 1;
+                       kdb_printf("\r");
+                       suspend_grep = 1; /* for this recursion */
+               } else if (buf1[0] && buf1[0] != '\n') {
+                       /* user hit something other than enter */
+                       suspend_grep = 1; /* for this recursion */
+                       kdb_printf("\nOnly 'q' or 'Q' are processed at more "
+                                  "prompt, input ignored\n");
+               } else if (kdb_grepping_flag) {
+                       /* user hit enter */
+                       suspend_grep = 1; /* for this recursion */
+                       kdb_printf("\n");
+               }
+               kdb_input_flush();
+       }
+
+       /*
+        * For grep searches, shift the printed string left.
+        *  replaced_byte contains the character that was overwritten with
+        *  the terminating null, and cphold points to the null.
+        * Then adjust the notion of available space in the buffer.
+        */
+       if (kdb_grepping_flag && !suspend_grep) {
+               *cphold = replaced_byte;
+               strcpy(kdb_buffer, cphold);
+               len = strlen(kdb_buffer);
+               next_avail = kdb_buffer + len;
+               size_avail = sizeof(kdb_buffer) - len;
+       }
+
+kdb_print_out:
+       suspend_grep = 0; /* end of what may have been a recursive call */
+       if (logging)
+               console_loglevel = saved_loglevel;
+       if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) {
+               got_printf_lock = 0;
+               spin_unlock_irqrestore(&kdb_printf_lock, flags);
+               KDB_STATE_CLEAR(PRINTF_LOCK);
+               atomic_dec(&kdb_event);
+       } else {
+               __release(kdb_printf_lock);
+       }
+       kdb_trap_printk = saved_trap_printk;
+       preempt_enable();
+       return retlen;
+}
+
+int kdb_printf(const char *fmt, ...)
+{
+       va_list ap;
+       int r;
+
+       va_start(ap, fmt);
+       r = vkdb_printf(fmt, ap);
+       va_end(ap);
+
+       return r;
+}
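+
+/*
+ * kdb_printf() is used like printk(); a sketch (the format arguments
+ * are illustrative):
+ *
+ *     kdb_printf("cpu %d: pc=0x%lx\n", cpu, pc);
+ *
+ * Output is paged against the LINES environment variable and filtered
+ * by any active "| grep" pipe, as implemented in vkdb_printf() above.
+ */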
+
diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c
new file mode 100644 (file)
index 0000000..4bca634
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * Kernel Debugger Architecture Dependent Console I/O handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.
+ *
+ * Copyright (c) 1999-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/kdb.h>
+#include <linux/keyboard.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+/* Keyboard Controller Registers on normal PCs. */
+
+#define KBD_STATUS_REG         0x64    /* Status register (R) */
+#define KBD_DATA_REG           0x60    /* Keyboard data register (R/W) */
+
+/* Status Register Bits */
+
+#define KBD_STAT_OBF           0x01    /* Keyboard output buffer full */
+#define KBD_STAT_MOUSE_OBF     0x20    /* Mouse output buffer full */
+
+static int kbd_exists;
+
+/*
+ * Check if the keyboard controller has a keypress for us.
+ * Some parts (Enter Release, LED change) are still polled here in a
+ * blocking fashion, but hopefully they are all short.
+ */
+int kdb_get_kbd_char(void)
+{
+       int scancode, scanstatus;
+       static int shift_lock;  /* CAPS LOCK state (0-off, 1-on) */
+       static int shift_key;   /* Shift next keypress */
+       static int ctrl_key;
+       u_short keychar;
+
+       if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) ||
+           (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) {
+               kbd_exists = 0;
+               return -1;
+       }
+       kbd_exists = 1;
+
+       if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+               return -1;
+
+       /*
+        * Fetch the scancode
+        */
+       scancode = inb(KBD_DATA_REG);
+       scanstatus = inb(KBD_STATUS_REG);
+
+       /*
+        * Ignore mouse events.
+        */
+       if (scanstatus & KBD_STAT_MOUSE_OBF)
+               return -1;
+
+       /*
+        * Ignore release, trigger on make
+        * (except for shift keys, where we want to
+        *  keep the shift state so long as the key is
+        *  held down).
+        */
+
+       if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) {
+               /*
+                * Next key may use shift table
+                */
+               if ((scancode & 0x80) == 0)
+                       shift_key = 1;
+               else
+                       shift_key = 0;
+               return -1;
+       }
+
+       if ((scancode&0x7f) == 0x1d) {
+               /*
+                * Left ctrl key
+                */
+               if ((scancode & 0x80) == 0)
+                       ctrl_key = 1;
+               else
+                       ctrl_key = 0;
+               return -1;
+       }
+
+       if ((scancode & 0x80) != 0)
+               return -1;
+
+       scancode &= 0x7f;
+
+       /*
+        * Translate scancode
+        */
+
+       if (scancode == 0x3a) {
+               /*
+                * Toggle caps lock
+                */
+               shift_lock ^= 1;
+
+#ifdef KDB_BLINK_LED
+               kdb_toggleled(0x4);
+#endif
+               return -1;
+       }
+
+       if (scancode == 0x0e) {
+               /*
+                * Backspace
+                */
+               return 8;
+       }
+
+       /* Special Key */
+       switch (scancode) {
+       case 0xF: /* Tab */
+               return 9;
+       case 0x53: /* Del */
+               return 4;
+       case 0x47: /* Home */
+               return 1;
+       case 0x4F: /* End */
+               return 5;
+       case 0x4B: /* Left */
+               return 2;
+       case 0x48: /* Up */
+               return 16;
+       case 0x50: /* Down */
+               return 14;
+       case 0x4D: /* Right */
+               return 6;
+       }
+
+       if (scancode == 0xe0)
+               return -1;
+
+       /*
+        * For Japanese 86/106 keyboards
+        *      See comment in drivers/char/pc_keyb.c.
+        *      - Masahiro Adegawa
+        */
+       if (scancode == 0x73)
+               scancode = 0x59;
+       else if (scancode == 0x7d)
+               scancode = 0x7c;
+
+       if (!shift_lock && !shift_key && !ctrl_key) {
+               keychar = plain_map[scancode];
+       } else if ((shift_lock || shift_key) && key_maps[1]) {
+               keychar = key_maps[1][scancode];
+       } else if (ctrl_key && key_maps[4]) {
+               keychar = key_maps[4][scancode];
+       } else {
+               keychar = 0x0020;
+               kdb_printf("Unknown state/scancode (%d)\n", scancode);
+       }
+       keychar &= 0x0fff;
+       if (keychar == '\t')
+               keychar = ' ';
+       switch (KTYP(keychar)) {
+       case KT_LETTER:
+       case KT_LATIN:
+               if (isprint(keychar))
+                       break;          /* printable characters */
+               /* drop through */
+       case KT_SPEC:
+               if (keychar == K_ENTER)
+                       break;
+               /* drop through */
+       default:
+               return -1;      /* ignore unprintables */
+       }
+
+       if ((scancode & 0x7f) == 0x1c) {
+               /*
+                * enter key.  All done.  Absorb the release scancode.
+                */
+               while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+                       ;
+
+               /*
+                * Fetch the scancode
+                */
+               scancode = inb(KBD_DATA_REG);
+               scanstatus = inb(KBD_STATUS_REG);
+
+               while (scanstatus & KBD_STAT_MOUSE_OBF) {
+                       scancode = inb(KBD_DATA_REG);
+                       scanstatus = inb(KBD_STATUS_REG);
+               }
+
+               if (scancode != 0x9c) {
+                       /*
+                        * Wasn't an enter-release,  why not?
+                        */
+                       kdb_printf("kdb: expected enter, got 0x%x status 0x%x\n",
+                              scancode, scanstatus);
+               }
+
+               return 13;
+       }
+
+       return keychar & 0xff;
+}
+EXPORT_SYMBOL_GPL(kdb_get_kbd_char);
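+
+/*
+ * Sketch of how this poller is consumed (illustrative): it is one
+ * entry in the kdb_poll_funcs[] table that kdb_read_get_key() and
+ * kdb_input_flush() cycle through; -1 means "no key", anything else
+ * is one of the single-byte codes above (8 backspace, 13 enter, ...):
+ *
+ *     int key = kdb_get_kbd_char();
+ *     if (key != -1)
+ *             handle_key(key);        (handle_key() is hypothetical)
+ */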
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
new file mode 100644 (file)
index 0000000..b724c79
--- /dev/null
@@ -0,0 +1,2849 @@
+/*
+ * Kernel Debugger Architecture Independent Main Code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ * Xscale (R) modifications copyright (C) 2003 Intel Corporation.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/smp.h>
+#include <linux/utsname.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nmi.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/sysctl.h>
+#include <linux/cpu.h>
+#include <linux/kdebug.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include "kdb_private.h"
+
+#define GREP_LEN 256
+char kdb_grep_string[GREP_LEN];
+int kdb_grepping_flag;
+EXPORT_SYMBOL(kdb_grepping_flag);
+int kdb_grep_leading;
+int kdb_grep_trailing;
+
+/*
+ * Kernel debugger state flags
+ */
+int kdb_flags;
+atomic_t kdb_event;
+
+/*
+ * kdb_lock protects updates to kdb_initial_cpu.  Used to
+ * single thread processors through the kernel debugger.
+ */
+int kdb_initial_cpu = -1;      /* cpu number that owns kdb */
+int kdb_nextline = 1;
+int kdb_state;                 /* General KDB state */
+
+struct task_struct *kdb_current_task;
+EXPORT_SYMBOL(kdb_current_task);
+struct pt_regs *kdb_current_regs;
+
+const char *kdb_diemsg;
+static int kdb_go_count;
+#ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC
+static unsigned int kdb_continue_catastrophic =
+       CONFIG_KDB_CONTINUE_CATASTROPHIC;
+#else
+static unsigned int kdb_continue_catastrophic;
+#endif
+
+/* kdb_commands describes the available commands. */
+static kdbtab_t *kdb_commands;
+#define KDB_BASE_CMD_MAX 50
+static int kdb_max_commands = KDB_BASE_CMD_MAX;
+static kdbtab_t kdb_base_commands[KDB_BASE_CMD_MAX];
+#define for_each_kdbcmd(cmd, num)                                      \
+       for ((cmd) = kdb_base_commands, (num) = 0;                      \
+            num < kdb_max_commands;                                    \
+            num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++, num++)
+
+typedef struct _kdbmsg {
+       int     km_diag;        /* kdb diagnostic */
+       char    *km_msg;        /* Corresponding message text */
+} kdbmsg_t;
+
+#define KDBMSG(msgnum, text) \
+       { KDB_##msgnum, text }
+
+static kdbmsg_t kdbmsgs[] = {
+       KDBMSG(NOTFOUND, "Command Not Found"),
+       KDBMSG(ARGCOUNT, "Improper argument count, see usage."),
+       KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD; use 1, 2, 4 or 8 "
+              "(8 is only allowed on 64 bit systems)"),
+       KDBMSG(BADRADIX, "Illegal value for RADIX; use 8, 10 or 16"),
+       KDBMSG(NOTENV, "Cannot find environment variable"),
+       KDBMSG(NOENVVALUE, "Environment variable should have value"),
+       KDBMSG(NOTIMP, "Command not implemented"),
+       KDBMSG(ENVFULL, "Environment full"),
+       KDBMSG(ENVBUFFULL, "Environment buffer full"),
+       KDBMSG(TOOMANYBPT, "Too many breakpoints defined"),
+#ifdef CONFIG_CPU_XSCALE
+       KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"),
+#else
+       KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"),
+#endif
+       KDBMSG(DUPBPT, "Duplicate breakpoint address"),
+       KDBMSG(BPTNOTFOUND, "Breakpoint not found"),
+       KDBMSG(BADMODE, "Invalid IDMODE"),
+       KDBMSG(BADINT, "Illegal numeric value"),
+       KDBMSG(INVADDRFMT, "Invalid symbolic address format"),
+       KDBMSG(BADREG, "Invalid register name"),
+       KDBMSG(BADCPUNUM, "Invalid cpu number"),
+       KDBMSG(BADLENGTH, "Invalid length field"),
+       KDBMSG(NOBP, "No Breakpoint exists"),
+       KDBMSG(BADADDR, "Invalid address"),
+};
+#undef KDBMSG
+
+static const int __nkdb_err = ARRAY_SIZE(kdbmsgs);
+
+
+/*
+ * Initial environment.   This is all kept static and local to
+ * this file.   We don't want to rely on the memory allocation
+ * mechanisms in the kernel, so we use a very limited allocate-only
+ * heap for new and altered environment variables.  The entire
+ * environment is limited to a fixed number of entries (add more
+ * to __env[] if required) and a fixed amount of heap (add more to
+ * KDB_ENVBUFSIZE if required).
+ */
+
+static char *__env[] = {
+#if defined(CONFIG_SMP)
+ "PROMPT=[%d]kdb> ",
+ "MOREPROMPT=[%d]more> ",
+#else
+ "PROMPT=kdb> ",
+ "MOREPROMPT=more> ",
+#endif
+ "RADIX=16",
+ "MDCOUNT=8",                  /* lines of md output */
+ "BTARGS=9",                   /* 9 possible args in bt */
+ KDB_PLATFORM_ENV,
+ "DTABCOUNT=30",
+ "NOSECT=1",
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+};
+
+static const int __nenv = ARRAY_SIZE(__env);
+
+struct task_struct *kdb_curr_task(int cpu)
+{
+       struct task_struct *p = curr_task(cpu);
+#ifdef _TIF_MCA_INIT
+       if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu))
+               p = KDB_TSK(cpu);
+#endif
+       return p;
+}
+
+/*
+ * kdbgetenv - This function will return the character string value of
+ *     an environment variable.
+ * Parameters:
+ *     match   A character string representing an environment variable.
+ * Returns:
+ *     NULL    No environment variable matches 'match'
+ *     char*   Pointer to string value of environment variable.
+ */
+char *kdbgetenv(const char *match)
+{
+       char **ep = __env;
+       int matchlen = strlen(match);
+       int i;
+
+       for (i = 0; i < __nenv; i++) {
+               char *e = *ep++;
+
+               if (!e)
+                       continue;
+
+               if ((strncmp(match, e, matchlen) == 0)
+                && ((e[matchlen] == '\0')
+                  || (e[matchlen] == '='))) {
+                       char *cp = strchr(e, '=');
+                       return cp ? ++cp : "";
+               }
+       }
+       return NULL;
+}
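+
+/*
+ * For example, with the initial __env[] above:
+ *
+ *     kdbgetenv("RADIX")      returns "16"
+ *     kdbgetenv("NOSECT")     returns "1"
+ *     kdbgetenv("FOO")        returns NULL
+ */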
+
+/*
+ * kdballocenv - This function is used to allocate bytes for
+ *     environment entries.
+ * Parameters:
+ *     bytes   The number of bytes requested.
+ * Returns:
+ *     A pointer to the allocated space, or NULL if the static
+ *     environment buffer is exhausted.
+ * Remarks:
+ *     We use a static environment buffer (envbuffer) to hold the values
+ *     of dynamically generated environment variables (see kdb_set).  Buffer
+ *     space once allocated is never free'd, so over time, the amount of space
+ *     (currently 512 bytes) will be exhausted if env variables are changed
+ *     frequently.
+ */
+static char *kdballocenv(size_t bytes)
+{
+#define        KDB_ENVBUFSIZE  512
+       static char envbuffer[KDB_ENVBUFSIZE];
+       static int envbufsize;
+       char *ep = NULL;
+
+       if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) {
+               ep = &envbuffer[envbufsize];
+               envbufsize += bytes;
+       }
+       return ep;
+}
+
+/*
+ * kdbgetulenv - This function will return the value of an unsigned
+ *     long-valued environment variable.
+ * Parameters:
+ *     match   A character string representing a numeric value
+ * Outputs:
+ *     *value  the unsigned long representation of the env variable 'match'
+ * Returns:
+ *     Zero on success, a kdb diagnostic on failure.
+ */
+static int kdbgetulenv(const char *match, unsigned long *value)
+{
+       char *ep;
+
+       ep = kdbgetenv(match);
+       if (!ep)
+               return KDB_NOTENV;
+       if (strlen(ep) == 0)
+               return KDB_NOENVVALUE;
+
+       *value = simple_strtoul(ep, NULL, 0);
+
+       return 0;
+}
+
+/*
+ * kdbgetintenv - This function will return the value of an
+ *     integer-valued environment variable.
+ * Parameters:
+ *     match   A character string representing an integer-valued env variable
+ * Outputs:
+ *     *value  the integer representation of the environment variable 'match'
+ * Returns:
+ *     Zero on success, a kdb diagnostic on failure.
+ */
+int kdbgetintenv(const char *match, int *value)
+{
+       unsigned long val;
+       int diag;
+
+       diag = kdbgetulenv(match, &val);
+       if (!diag)
+               *value = (int) val;
+       return diag;
+}
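+
+/*
+ * Typical use (sketch, mirroring the LINES lookup in vkdb_printf()):
+ * a nonzero diagnostic means "not set", and callers fall back to a
+ * default:
+ *
+ *     int linecount;
+ *     if (kdbgetintenv("LINES", &linecount))
+ *             linecount = 24;
+ */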
+
+/*
+ * kdbgetularg - This function will convert a numeric string into an
+ *     unsigned long value.
+ * Parameters:
+ *     arg     A character string representing a numeric value
+ * Outputs:
+ *     *value  the unsigned long representation of arg.
+ * Returns:
+ *     Zero on success, a kdb diagnostic on failure.
+ */
+int kdbgetularg(const char *arg, unsigned long *value)
+{
+       char *endp;
+       unsigned long val;
+
+       val = simple_strtoul(arg, &endp, 0);
+
+       if (endp == arg) {
+               /*
+                * Try base 16, for us folks too lazy to type the
+                * leading 0x...
+                */
+               val = simple_strtoul(arg, &endp, 16);
+               if (endp == arg)
+                       return KDB_BADINT;
+       }
+
+       *value = val;
+
+       return 0;
+}
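+
+/*
+ * Examples (illustrative) of the base-16 fallback above:
+ *
+ *     kdbgetularg("0x1f", &val)   sets val to 31
+ *     kdbgetularg("f3", &val)     sets val to 0xf3 via the fallback
+ *     kdbgetularg("zz", &val)     returns KDB_BADINT
+ */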
+
+/*
+ * kdb_set - This function implements the 'set' command.  Alter an
+ *     existing environment variable or create a new one.
+ */
+int kdb_set(int argc, const char **argv)
+{
+       int i;
+       char *ep;
+       size_t varlen, vallen;
+
+       /*
+        * we can be invoked two ways:
+        *   set var=value    argv[1]="var", argv[2]="value"
+        *   set var = value  argv[1]="var", argv[2]="=", argv[3]="value"
+        * - if the latter, shift 'em down.
+        */
+       if (argc == 3) {
+               argv[2] = argv[3];
+               argc--;
+       }
+
+       if (argc != 2)
+               return KDB_ARGCOUNT;
+
+       /*
+        * Check for internal variables
+        */
+       if (strcmp(argv[1], "KDBDEBUG") == 0) {
+               unsigned int debugflags;
+               char *cp;
+
+               debugflags = simple_strtoul(argv[2], &cp, 0);
+               if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) {
+                       kdb_printf("kdb: illegal debug flags '%s'\n",
+                                   argv[2]);
+                       return 0;
+               }
+               kdb_flags = (kdb_flags &
+                            ~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT))
+                       | (debugflags << KDB_DEBUG_FLAG_SHIFT);
+
+               return 0;
+       }
+
+       /*
+        * Tokenizer squashed the '=' sign.  argv[1] is variable
+        * name, argv[2] = value.
+        */
+       varlen = strlen(argv[1]);
+       vallen = strlen(argv[2]);
+       ep = kdballocenv(varlen + vallen + 2);
+       if (ep == (char *)0)
+               return KDB_ENVBUFFULL;
+
+       sprintf(ep, "%s=%s", argv[1], argv[2]);
+
+       ep[varlen+vallen+1] = '\0';
+
+       for (i = 0; i < __nenv; i++) {
+               if (__env[i]
+                && ((strncmp(__env[i], argv[1], varlen) == 0)
+                  && ((__env[i][varlen] == '\0')
+                   || (__env[i][varlen] == '=')))) {
+                       __env[i] = ep;
+                       return 0;
+               }
+       }
+
+       /*
+        * Wasn't existing variable.  Fit into slot.
+        */
+       for (i = 0; i < __nenv-1; i++) {
+               if (__env[i] == (char *)0) {
+                       __env[i] = ep;
+                       return 0;
+               }
+       }
+
+       return KDB_ENVFULL;
+}
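+
+/*
+ * Both spellings described in the comment above are accepted, e.g.:
+ *
+ *     kdb> set BTARGS=5
+ *     kdb> set BTARGS = 5
+ *
+ * Either form replaces the BTARGS entry in __env[] with a new string
+ * carved out of the kdballocenv() buffer.
+ */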
+
+static int kdb_check_regs(void)
+{
+       if (!kdb_current_regs) {
+               kdb_printf("No current kdb registers."
+                          "  You may need to select another task\n");
+               return KDB_BADREG;
+       }
+       return 0;
+}
+
+/*
+ * kdbgetaddrarg - This function is responsible for parsing an
+ *     address-expression and returning the value of the expression,
+ *     symbol name, and offset to the caller.
+ *
+ *     The argument may consist of a numeric value (decimal or
+ *     hexadecimal), a symbol name, a register name (preceded by the
+ *     percent sign), an environment variable with a numeric value
+ *     (preceded by a dollar sign) or a simple arithmetic expression
+ *     consisting of a symbol name, +/-, and a numeric constant value
+ *     (offset).
+ * Parameters:
+ *     argc    - count of arguments in argv
+ *     argv    - argument vector
+ *     *nextarg - index to next unparsed argument in argv[]
+ *     regs    - Register state at time of KDB entry
+ * Outputs:
+ *     *value  - receives the value of the address-expression
+ *     *offset - receives the offset specified, if any
+ *     *name   - receives the symbol name, if any
+ *     *nextarg - index to next unparsed argument in argv[]
+ * Returns:
+ *     zero is returned on success, a kdb diagnostic code is
+ *      returned on error.
+ */
+int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
+                 unsigned long *value,  long *offset,
+                 char **name)
+{
+       unsigned long addr;
+       unsigned long off = 0;
+       int positive;
+       int diag;
+       int found = 0;
+       char *symname;
+       char symbol = '\0';
+       char *cp;
+       kdb_symtab_t symtab;
+
+       /*
+        * Process arguments which follow the following syntax:
+        *
+        *  symbol | numeric-address [+/- numeric-offset]
+        *  %register
+        *  $environment-variable
+        */
+
+       if (*nextarg > argc)
+               return KDB_ARGCOUNT;
+
+       symname = (char *)argv[*nextarg];
+
+       /*
+        * If there is no whitespace between the symbol
+        * or address and the '+' or '-' symbols, we
+        * remember the character and replace it with a
+        * null so the symbol/value can be properly parsed
+        */
+       cp = strpbrk(symname, "+-");
+       if (cp != NULL) {
+               symbol = *cp;
+               *cp++ = '\0';
+       }
+
+       if (symname[0] == '$') {
+               diag = kdbgetulenv(&symname[1], &addr);
+               if (diag)
+                       return diag;
+       } else if (symname[0] == '%') {
+               diag = kdb_check_regs();
+               if (diag)
+                       return diag;
+               /* Implement register values with % at a later time as it is
+                * arch optional.
+                */
+               return KDB_NOTIMP;
+       } else {
+               found = kdbgetsymval(symname, &symtab);
+               if (found) {
+                       addr = symtab.sym_start;
+               } else {
+                       diag = kdbgetularg(argv[*nextarg], &addr);
+                       if (diag)
+                               return diag;
+               }
+       }
+
+       if (!found)
+               found = kdbnearsym(addr, &symtab);
+
+       (*nextarg)++;
+
+       if (name)
+               *name = symname;
+       if (value)
+               *value = addr;
+       if (offset && name && *name)
+               *offset = addr - symtab.sym_start;
+
+       if ((*nextarg > argc)
+        && (symbol == '\0'))
+               return 0;
+
+       /*
+        * check for +/- and offset
+        */
+
+       if (symbol == '\0') {
+               if ((argv[*nextarg][0] != '+')
+                && (argv[*nextarg][0] != '-')) {
+                       /*
+                        * Not our argument.  Return.
+                        */
+                       return 0;
+               } else {
+                       positive = (argv[*nextarg][0] == '+');
+                       (*nextarg)++;
+               }
+       } else
+               positive = (symbol == '+');
+
+       /*
+        * Now there must be an offset!
+        */
+       if ((*nextarg > argc)
+        && (symbol == '\0')) {
+               return KDB_INVADDRFMT;
+       }
+
+       if (!symbol) {
+               cp = (char *)argv[*nextarg];
+               (*nextarg)++;
+       }
+
+       diag = kdbgetularg(cp, &off);
+       if (diag)
+               return diag;
+
+       if (!positive)
+               off = -off;
+
+       if (offset)
+               *offset += off;
+
+       if (value)
+               *value += off;
+
+       return 0;
+}
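+
+/*
+ * Examples of accepted address-expressions (illustrative; "schedule"
+ * stands in for any resolvable symbol):
+ *
+ *     0xc0105abc          plain numeric address
+ *     schedule            symbol, resolved via kdbgetsymval()
+ *     schedule+0x10       symbol plus offset, no whitespace
+ *     schedule + 0x10     same, split across three arguments
+ *     $BTARGS             numeric environment variable
+ */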
+
+static void kdb_cmderror(int diag)
+{
+       int i;
+
+       if (diag >= 0) {
+               kdb_printf("no error detected (diagnostic is %d)\n", diag);
+               return;
+       }
+
+       for (i = 0; i < __nkdb_err; i++) {
+               if (kdbmsgs[i].km_diag == diag) {
+                       kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg);
+                       return;
+               }
+       }
+
+       kdb_printf("Unknown diag %d\n", -diag);
+}
+
+/*
+ * kdb_defcmd, kdb_defcmd2 - This function implements the 'defcmd'
+ *     command which defines one command as a set of other commands,
+ *     terminated by endefcmd.  kdb_defcmd processes the initial
+ *     'defcmd' command, kdb_defcmd2 is invoked from kdb_parse for
+ *     the following commands until 'endefcmd'.
+ * Inputs:
+ *     argc    argument count
+ *     argv    argument vector
+ * Returns:
+ *     zero for success, a kdb diagnostic if error
+ */
+struct defcmd_set {
+       int count;
+       int usable;
+       char *name;
+       char *usage;
+       char *help;
+       char **command;
+};
+static struct defcmd_set *defcmd_set;
+static int defcmd_set_count;
+static int defcmd_in_progress;
+
+/* Forward references */
+static int kdb_exec_defcmd(int argc, const char **argv);
+
+static int kdb_defcmd2(const char *cmdstr, const char *argv0)
+{
+       struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
+       char **save_command = s->command;
+       if (strcmp(argv0, "endefcmd") == 0) {
+               defcmd_in_progress = 0;
+               if (!s->count)
+                       s->usable = 0;
+               if (s->usable)
+                       kdb_register(s->name, kdb_exec_defcmd,
+                                    s->usage, s->help, 0);
+               return 0;
+       }
+       if (!s->usable)
+               return KDB_NOTIMP;
+       s->command = kmalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
+       if (!s->command) {
+               kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
+                          cmdstr);
+               s->usable = 0;
+               return KDB_NOTIMP;
+       }
+       memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
+       s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB);
+       kfree(save_command);
+       return 0;
+}
+
+static int kdb_defcmd(int argc, const char **argv)
+{
+       struct defcmd_set *save_defcmd_set = defcmd_set, *s;
+       if (defcmd_in_progress) {
+               kdb_printf("kdb: nested defcmd detected, assuming missing "
+                          "endefcmd\n");
+               kdb_defcmd2("endefcmd", "endefcmd");
+       }
+       if (argc == 0) {
+               int i;
+               for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) {
+                       kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name,
+                                  s->usage, s->help);
+                       for (i = 0; i < s->count; ++i)
+                               kdb_printf("%s", s->command[i]);
+                       kdb_printf("endefcmd\n");
+               }
+               return 0;
+       }
+       if (argc != 3)
+               return KDB_ARGCOUNT;
+       defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
+                            GFP_KDB);
+       if (!defcmd_set) {
+               kdb_printf("Could not allocate new defcmd_set entry for %s\n",
+                          argv[1]);
+               defcmd_set = save_defcmd_set;
+               return KDB_NOTIMP;
+       }
+       memcpy(defcmd_set, save_defcmd_set,
+              defcmd_set_count * sizeof(*defcmd_set));
+       kfree(save_defcmd_set);
+       s = defcmd_set + defcmd_set_count;
+       memset(s, 0, sizeof(*s));
+       s->usable = 1;
+       s->name = kdb_strdup(argv[1], GFP_KDB);
+       s->usage = kdb_strdup(argv[2], GFP_KDB);
+       s->help = kdb_strdup(argv[3], GFP_KDB);
+       if (s->usage[0] == '"') {
+               /* strip the surrounding quotes; the copy overlaps,
+                * so use memmove() rather than strcpy() */
+               memmove(s->usage, s->usage+1, strlen(s->usage));
+               s->usage[strlen(s->usage)-1] = '\0';
+       }
+       if (s->help[0] == '"') {
+               memmove(s->help, s->help+1, strlen(s->help));
+               s->help[strlen(s->help)-1] = '\0';
+       }
+       ++defcmd_set_count;
+       defcmd_in_progress = 1;
+       return 0;
+}
+
+/*
+ * kdb_exec_defcmd - Execute the set of commands associated with this
+ *     defcmd name.
+ * Inputs:
+ *     argc    argument count
+ *     argv    argument vector
+ * Returns:
+ *     zero for success, a kdb diagnostic if error
+ */
+static int kdb_exec_defcmd(int argc, const char **argv)
+{
+       int i, ret;
+       struct defcmd_set *s;
+       if (argc != 0)
+               return KDB_ARGCOUNT;
+       for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) {
+               if (strcmp(s->name, argv[0]) == 0)
+                       break;
+       }
+       if (i == defcmd_set_count) {
+               kdb_printf("kdb_exec_defcmd: could not find commands for %s\n",
+                          argv[0]);
+               return KDB_NOTIMP;
+       }
+       for (i = 0; i < s->count; ++i) {
+               /* Recursive use of kdb_parse, do not use argv after
+                * this point */
+               argv = NULL;
+               kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]);
+               ret = kdb_parse(s->command[i]);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
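+
+/*
+ * An illustrative defcmd session:
+ *
+ *     kdb> defcmd diag "" "dump useful state"
+ *     kdb> ps
+ *     kdb> bt
+ *     kdb> endefcmd
+ *     kdb> diag
+ *     [diag]kdb> ps
+ *     ...
+ *     [diag]kdb> bt
+ *     ...
+ *
+ * The final "diag" replays the saved commands through
+ * kdb_exec_defcmd() above, echoing each with the "[diag]kdb> " prefix.
+ */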
+
+/* Command history */
+#define KDB_CMD_HISTORY_COUNT  32
+#define CMD_BUFLEN             200     /* kdb_printf: max printline
+                                        * size == 256 */
+static unsigned int cmd_head, cmd_tail;
+static unsigned int cmdptr;
+static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN];
+static char cmd_cur[CMD_BUFLEN];
+
+/*
+ * The "str" argument may point to something like  | grep xyz
+ */
+static void parse_grep(const char *str)
+{
+       int     len;
+       char    *cp = (char *)str, *cp2;
+
+       /* sanity check: we should have been called with the '|' first */
+       if (*cp != '|')
+               return;
+       cp++;
+       while (isspace(*cp))
+               cp++;
+       if (strncmp(cp, "grep ", 5)) {
+               kdb_printf("invalid 'pipe', see grephelp\n");
+               return;
+       }
+       cp += 5;
+       while (isspace(*cp))
+               cp++;
+       cp2 = strchr(cp, '\n');
+       if (cp2)
+               *cp2 = '\0'; /* remove the trailing newline */
+       len = strlen(cp);
+       if (len == 0) {
+               kdb_printf("invalid 'pipe', see grephelp\n");
+               return;
+       }
+       /* now cp points to a nonzero length search string */
+       if (*cp == '"') {
+               /* allow it to be "x y z" by removing the "'s - there
+                  must be two of them */
+               cp++;
+               cp2 = strchr(cp, '"');
+               if (!cp2) {
+                       kdb_printf("invalid quoted string, see grephelp\n");
+                       return;
+               }
+               *cp2 = '\0'; /* end the string where the 2nd " was */
+       }
+       kdb_grep_leading = 0;
+       if (*cp == '^') {
+               kdb_grep_leading = 1;
+               cp++;
+       }
+       len = strlen(cp);
+       kdb_grep_trailing = 0;
+       if (*(cp+len-1) == '$') {
+               kdb_grep_trailing = 1;
+               *(cp+len-1) = '\0';
+       }
+       len = strlen(cp);
+       if (!len)
+               return;
+       if (len >= GREP_LEN) {
+               kdb_printf("search string too long\n");
+               return;
+       }
+       strcpy(kdb_grep_string, cp);
+       kdb_grepping_flag++;
+       return;
+}
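+
+/*
+ * Examples of the single pipe stage handled above (illustrative):
+ *
+ *     ps | grep idle          substring match
+ *     ps | grep "idle task"   quoted pattern may contain spaces
+ *     bt | grep ^kdb          anchored to the start of the line
+ *
+ * Anything other than "| grep ..." prints
+ * "invalid 'pipe', see grephelp".
+ */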
+
+/*
+ * kdb_parse - Parse the command line, search the command table for a
+ *     matching command and invoke the command function.  This
+ *     function may be called recursively, if it is, the second call
+ *     will overwrite argv and cbuf.  It is the caller's
+ *     responsibility to save their argv if they recursively call
+ *     kdb_parse().
+ * Parameters:
+ *     cmdstr  The input command line to be parsed.
+ * Returns:
+ *     Zero for success, a kdb diagnostic if failure.
+ * Remarks:
+ *     Limited to 20 tokens.
+ *
+ *     Real rudimentary tokenization. Basically only whitespace
+ *     is considered a token delimiter (but special consideration
+ *     is taken of the '=' sign as used by the 'set' command).
+ *
+ *     The algorithm used to tokenize the input string relies on
+ *     there being at least one whitespace (or otherwise useless)
+ *     character between tokens as the character immediately following
+ *     the token is altered in-place to a null-byte to terminate the
+ *     token string.
+ */
+
+#define MAXARGC        20
+
+int kdb_parse(const char *cmdstr)
+{
+       static char *argv[MAXARGC];
+       static int argc;
+       static char cbuf[CMD_BUFLEN+2];
+       char *cp;
+       char *cpp, quoted;
+       kdbtab_t *tp;
+       int i, escaped, ignore_errors = 0, check_grep;
+
+       /*
+        * First tokenize the command string.
+        */
+       cp = (char *)cmdstr;
+       kdb_grepping_flag = check_grep = 0;
+
+       if (KDB_FLAG(CMD_INTERRUPT)) {
+               /* Previous command was interrupted; a newline must not
+                * repeat the command */
+               KDB_FLAG_CLEAR(CMD_INTERRUPT);
+               KDB_STATE_SET(PAGER);
+               argc = 0;       /* no repeat */
+       }
+
+       if (*cp != '\n' && *cp != '\0') {
+               argc = 0;
+               cpp = cbuf;
+               while (*cp) {
+                       /* skip whitespace */
+                       while (isspace(*cp))
+                               cp++;
+                       if ((*cp == '\0') || (*cp == '\n') ||
+                           (*cp == '#' && !defcmd_in_progress))
+                               break;
+                       /* special case: check for | grep pattern */
+                       if (*cp == '|') {
+                               check_grep++;
+                               break;
+                       }
+                       if (cpp >= cbuf + CMD_BUFLEN) {
+                               kdb_printf("kdb_parse: command buffer "
+                                          "overflow, command ignored\n%s\n",
+                                          cmdstr);
+                               return KDB_NOTFOUND;
+                       }
+                       if (argc >= MAXARGC - 1) {
+                               kdb_printf("kdb_parse: too many arguments, "
+                                          "command ignored\n%s\n", cmdstr);
+                               return KDB_NOTFOUND;
+                       }
+                       argv[argc++] = cpp;
+                       escaped = 0;
+                       quoted = '\0';
+                       /* Copy to next unquoted and unescaped
+                        * whitespace or '=' */
+                       while (*cp && *cp != '\n' &&
+                              (escaped || quoted || !isspace(*cp))) {
+                               if (cpp >= cbuf + CMD_BUFLEN)
+                                       break;
+                               if (escaped) {
+                                       escaped = 0;
+                                       *cpp++ = *cp++;
+                                       continue;
+                               }
+                               if (*cp == '\\') {
+                                       escaped = 1;
+                                       ++cp;
+                                       continue;
+                               }
+                               if (*cp == quoted)
+                                       quoted = '\0';
+                               else if (*cp == '\'' || *cp == '"')
+                                       quoted = *cp;
+                               *cpp = *cp++;
+                               if (*cpp == '=' && !quoted)
+                                       break;
+                               ++cpp;
+                       }
+                       *cpp++ = '\0';  /* Squash a ws or '=' character */
+               }
+       }
+       if (!argc)
+               return 0;
+       if (check_grep)
+               parse_grep(cp);
+       if (defcmd_in_progress) {
+               int result = kdb_defcmd2(cmdstr, argv[0]);
+               if (!defcmd_in_progress) {
+                       argc = 0;       /* avoid repeat on endefcmd */
+                       *(argv[0]) = '\0';
+               }
+               return result;
+       }
+       if (argv[0][0] == '-' && argv[0][1] &&
+           (argv[0][1] < '0' || argv[0][1] > '9')) {
+               ignore_errors = 1;
+               ++argv[0];
+       }
+
+       for_each_kdbcmd(tp, i) {
+               if (tp->cmd_name) {
+                       /*
+                        * If this command is allowed to be abbreviated,
+                        * check to see if this is it.
+                        */
+
+                       if (tp->cmd_minlen
+                        && (strlen(argv[0]) <= tp->cmd_minlen)) {
+                               if (strncmp(argv[0],
+                                           tp->cmd_name,
+                                           tp->cmd_minlen) == 0) {
+                                       break;
+                               }
+                       }
+
+                       if (strcmp(argv[0], tp->cmd_name) == 0)
+                               break;
+               }
+       }
+
+       /*
+        * If we don't find a command by this name, see if the first
+        * few characters of this match any of the known commands.
+        * e.g., md1c20 should match md.
+        */
+       if (i == kdb_max_commands) {
+               for_each_kdbcmd(tp, i) {
+                       if (tp->cmd_name) {
+                               if (strncmp(argv[0],
+                                           tp->cmd_name,
+                                           strlen(tp->cmd_name)) == 0) {
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       if (i < kdb_max_commands) {
+               int result;
+               KDB_STATE_SET(CMD);
+               result = (*tp->cmd_func)(argc-1, (const char **)argv);
+               if (result && ignore_errors && result > KDB_CMD_GO)
+                       result = 0;
+               KDB_STATE_CLEAR(CMD);
+               switch (tp->cmd_repeat) {
+               case KDB_REPEAT_NONE:
+                       argc = 0;
+                       if (argv[0])
+                               *(argv[0]) = '\0';
+                       break;
+               case KDB_REPEAT_NO_ARGS:
+                       argc = 1;
+                       if (argv[1])
+                               *(argv[1]) = '\0';
+                       break;
+               case KDB_REPEAT_WITH_ARGS:
+                       break;
+               }
+               return result;
+       }
+
+       /*
+        * If the input with which we were presented does not
+        * map to an existing command, attempt to parse it as an
+        * address argument and display the result.   Useful for
+        * obtaining the address of a variable, or the nearest symbol
+        * to an address contained in a register.
+        */
+       {
+               unsigned long value;
+               char *name = NULL;
+               long offset;
+               int nextarg = 0;
+
+               if (kdbgetaddrarg(0, (const char **)argv, &nextarg,
+                                 &value, &offset, &name)) {
+                       return KDB_NOTFOUND;
+               }
+
+               kdb_printf("%s = ", argv[0]);
+               kdb_symbol_print(value, NULL, KDB_SP_DEFAULT);
+               kdb_printf("\n");
+               return 0;
+       }
+}
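+
+/*
+ * Tokenization example (a sketch): kdb_parse("set LOGGING=1") yields
+ * argv = { "set", "LOGGING", "1" } because an unquoted '=' terminates
+ * a token just like whitespace.  kdb_parse("md4c8 deadbeef") matches
+ * no command exactly, then matches "md" by prefix, so the handler
+ * sees the width/count suffix in argv[0] itself.
+ */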
+
+
+static int handle_ctrl_cmd(char *cmd)
+{
+#define CTRL_P 16
+#define CTRL_N 14
+
+       /* initial situation */
+       if (cmd_head == cmd_tail)
+               return 0;
+       switch (*cmd) {
+       case CTRL_P:
+               if (cmdptr != cmd_tail)
+                       cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
+               strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+               return 1;
+       case CTRL_N:
+               if (cmdptr != cmd_head)
+                       cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
+               strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+               return 1;
+       }
+       return 0;
+}
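+
+/*
+ * History sketch (illustrative): after the user enters "md" and then
+ * "bt", cmd_head points one slot past "bt"; a first ctrl-p steps
+ * cmdptr back and copies "bt" into cmd_cur, a second ctrl-p recalls
+ * "md".  ctrl-n walks the same ring forward again.
+ */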
+
+/*
+ * kdb_reboot - This function implements the 'reboot' command.  Reboot
+ *     the system immediately, or loop forever on failure.
+ */
+static int kdb_reboot(int argc, const char **argv)
+{
+       emergency_restart();
+       kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n");
+       while (1)
+               cpu_relax();
+       /* NOTREACHED */
+       return 0;
+}
+
+static void kdb_dumpregs(struct pt_regs *regs)
+{
+       int old_lvl = console_loglevel;
+       console_loglevel = 15;
+       kdb_trap_printk++;
+       show_regs(regs);
+       kdb_trap_printk--;
+       kdb_printf("\n");
+       console_loglevel = old_lvl;
+}
+
+void kdb_set_current_task(struct task_struct *p)
+{
+       kdb_current_task = p;
+
+       if (kdb_task_has_cpu(p)) {
+               kdb_current_regs = KDB_TSKREGS(kdb_process_cpu(p));
+               return;
+       }
+       kdb_current_regs = NULL;
+}
+
+/*
+ * kdb_local - The main code for kdb.  This routine is invoked on a
+ *     specific processor, it is not global.  The main kdb() routine
+ *     ensures that only one processor at a time is in this routine.
+ *     This code is called with the real reason code on the first
+ *     entry to a kdb session, thereafter it is called with reason
+ *     SWITCH, even if the user goes back to the original cpu.
+ * Inputs:
+ *     reason          The reason KDB was invoked
+ *     error           The hardware-defined error code
+ *     regs            The exception frame at time of fault/breakpoint.
+ *     db_result       Result code from the break or debug point.
+ * Returns:
+ *     0       KDB was invoked for an event for which it was not responsible.
+ *     1       KDB handled the event for which it was invoked.
+ *     KDB_CMD_GO      User typed 'go'.
+ *     KDB_CMD_CPU     User switched to another cpu.
+ *     KDB_CMD_SS      Single step.
+ *     KDB_CMD_SSB     Single step until branch.
+ */
+static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+                    kdb_dbtrap_t db_result)
+{
+       char *cmdbuf;
+       int diag;
+       struct task_struct *kdb_current =
+               kdb_curr_task(raw_smp_processor_id());
+
+       KDB_DEBUG_STATE("kdb_local 1", reason);
+       kdb_go_count = 0;
+       if (reason == KDB_REASON_DEBUG) {
+               /* special case below */
+       } else {
+               kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
+                          kdb_current, kdb_current->pid);
+#if defined(CONFIG_SMP)
+               kdb_printf("on processor %d ", raw_smp_processor_id());
+#endif
+       }
+
+       switch (reason) {
+       case KDB_REASON_DEBUG:
+       {
+               /*
+                * If re-entering kdb after a single step
+                * command, don't print the message.
+                */
+               switch (db_result) {
+               case KDB_DB_BPT:
+                       kdb_printf("\nEntering kdb (0x%p, pid %d) ",
+                                  kdb_current, kdb_current->pid);
+#if defined(CONFIG_SMP)
+                       kdb_printf("on processor %d ", raw_smp_processor_id());
+#endif
+                       kdb_printf("due to Debug @ " kdb_machreg_fmt "\n",
+                                  instruction_pointer(regs));
+                       break;
+               case KDB_DB_SSB:
+                       /*
+                        * In the midst of ssb command. Just return.
+                        */
+                       KDB_DEBUG_STATE("kdb_local 3", reason);
+                       return KDB_CMD_SSB;     /* Continue with SSB command */
+               case KDB_DB_SS:
+                       break;
+               case KDB_DB_SSBPT:
+                       KDB_DEBUG_STATE("kdb_local 4", reason);
+                       return 1;       /* kdba_db_trap did the work */
+               default:
+                       kdb_printf("kdb: Bad result from kdba_db_trap: %d\n",
+                                  db_result);
+                       break;
+               }
+
+       }
+               break;
+       case KDB_REASON_ENTER:
+               if (KDB_STATE(KEYBOARD))
+                       kdb_printf("due to Keyboard Entry\n");
+               else
+                       kdb_printf("due to KDB_ENTER()\n");
+               break;
+       case KDB_REASON_KEYBOARD:
+               KDB_STATE_SET(KEYBOARD);
+               kdb_printf("due to Keyboard Entry\n");
+               break;
+       case KDB_REASON_ENTER_SLAVE:
+               /* fall through, slaves only get released via cpu switch */
+       case KDB_REASON_SWITCH:
+               kdb_printf("due to cpu switch\n");
+               break;
+       case KDB_REASON_OOPS:
+               kdb_printf("Oops: %s\n", kdb_diemsg);
+               kdb_printf("due to oops @ " kdb_machreg_fmt "\n",
+                          instruction_pointer(regs));
+               kdb_dumpregs(regs);
+               break;
+       case KDB_REASON_NMI:
+               kdb_printf("due to NonMaskable Interrupt @ "
+                          kdb_machreg_fmt "\n",
+                          instruction_pointer(regs));
+               kdb_dumpregs(regs);
+               break;
+       case KDB_REASON_SSTEP:
+       case KDB_REASON_BREAK:
+               kdb_printf("due to %s @ " kdb_machreg_fmt "\n",
+                          reason == KDB_REASON_BREAK ?
+                          "Breakpoint" : "SS trap", instruction_pointer(regs));
+               /*
+                * Determine if this breakpoint is one that we
+                * are interested in.
+                */
+               if (db_result != KDB_DB_BPT) {
+                       kdb_printf("kdb: error return from kdba_bp_trap: %d\n",
+                                  db_result);
+                       KDB_DEBUG_STATE("kdb_local 6", reason);
+                       return 0;       /* Not for us, dismiss it */
+               }
+               break;
+       case KDB_REASON_RECURSE:
+               kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n",
+                          instruction_pointer(regs));
+               break;
+       default:
+               kdb_printf("kdb: unexpected reason code: %d\n", reason);
+               KDB_DEBUG_STATE("kdb_local 8", reason);
+               return 0;       /* Not for us, dismiss it */
+       }
+
+       while (1) {
+               /*
+                * Initialize pager context.
+                */
+               kdb_nextline = 1;
+               KDB_STATE_CLEAR(SUPPRESS);
+
+               cmdbuf = cmd_cur;
+               *cmdbuf = '\0';
+               *(cmd_hist[cmd_head]) = '\0';
+
+               if (KDB_FLAG(ONLY_DO_DUMP)) {
+                       /* kdb is off but a catastrophic error requires a dump.
+                        * Take the dump and reboot.
+                        * Turn on logging so the kdb output appears in the log
+                        * buffer in the dump.
+                        */
+                       const char *setargs[] = { "set", "LOGGING", "1" };
+                       kdb_set(2, setargs);
+                       kdb_reboot(0, NULL);
+                       /*NOTREACHED*/
+               }
+
+do_full_getstr:
+#if defined(CONFIG_SMP)
+               snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+                        raw_smp_processor_id());
+#else
+               snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
+#endif
+               /* strncat's bound is chars to append, not the buffer size */
+               if (defcmd_in_progress)
+                       strncat(kdb_prompt_str, "[defcmd]",
+                               CMD_BUFLEN - strlen(kdb_prompt_str) - 1);
+
+               /*
+                * Fetch command from keyboard
+                */
+               cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str);
+               if (*cmdbuf != '\n') {
+                       if (*cmdbuf < 32) {
+                               if (cmdptr == cmd_head) {
+                                       strncpy(cmd_hist[cmd_head], cmd_cur,
+                                               CMD_BUFLEN);
+                                       *(cmd_hist[cmd_head] +
+                                         strlen(cmd_hist[cmd_head])-1) = '\0';
+                               }
+                               if (!handle_ctrl_cmd(cmdbuf))
+                                       *(cmd_cur+strlen(cmd_cur)-1) = '\0';
+                               cmdbuf = cmd_cur;
+                               goto do_full_getstr;
+                       } else {
+                               strncpy(cmd_hist[cmd_head], cmd_cur,
+                                       CMD_BUFLEN);
+                       }
+
+                       cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT;
+                       if (cmd_head == cmd_tail)
+                               cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT;
+               }
+
+               cmdptr = cmd_head;
+               diag = kdb_parse(cmdbuf);
+               if (diag == KDB_NOTFOUND) {
+                       kdb_printf("Unknown kdb command: '%s'\n", cmdbuf);
+                       diag = 0;
+               }
+               if (diag == KDB_CMD_GO
+                || diag == KDB_CMD_CPU
+                || diag == KDB_CMD_SS
+                || diag == KDB_CMD_SSB
+                || diag == KDB_CMD_KGDB)
+                       break;
+
+               if (diag)
+                       kdb_cmderror(diag);
+       }
+       KDB_DEBUG_STATE("kdb_local 9", diag);
+       return diag;
+}
+
+
+/*
+ * kdb_print_state - Print the state data for the current processor
+ *     for debugging.
+ * Inputs:
+ *     text            Identifies the debug point
+ *     value           Any integer value to be printed, e.g. reason code.
+ */
+void kdb_print_state(const char *text, int value)
+{
+       kdb_printf("state: %s cpu %d value %d initial %d state %x\n",
+                  text, raw_smp_processor_id(), value, kdb_initial_cpu,
+                  kdb_state);
+}
+
+/*
+ * kdb_main_loop - After initial setup and assignment of the
+ *     controlling cpu, all cpus are in this loop.  One cpu is in
+ *     control and will issue the kdb prompt, the others will spin
+ *     until 'go' or cpu switch.
+ *
+ *     To get a consistent view of the kernel stacks for all
+ *     processes, this routine is invoked from the main kdb code via
+ *     an architecture specific routine.  kdba_main_loop is
+ *     responsible for making the kernel stacks consistent for all
+ *     processes, there should be no difference between a blocked
+ *     process and a running process as far as kdb is concerned.
+ * Inputs:
+ *     reason          The reason KDB was invoked
+ *     error           The hardware-defined error code
+ *     reason2         kdb's current reason code.
+ *                     Initially error but can change
+ *                     according to kdb state.
+ *     db_result       Result code from break or debug point.
+ *     regs            The exception frame at time of fault/breakpoint.
+ *                     Should always be valid.
+ * Returns:
+ *     0       KDB was invoked for an event for which it was not responsible.
+ *     1       KDB handled the event for which it was invoked.
+ */
+int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
+             kdb_dbtrap_t db_result, struct pt_regs *regs)
+{
+       int result = 1;
+       /* Stay in kdb() until 'go', 'ss[b]' or an error */
+       while (1) {
+               /*
+                * All processors except the one that is in control
+                * will spin here.
+                */
+               KDB_DEBUG_STATE("kdb_main_loop 1", reason);
+               while (KDB_STATE(HOLD_CPU)) {
+                       /* state KDB is turned off by kdb_cpu to see if the
+                        * other cpus are still live; each cpu in this loop
+                        * turns it back on.
+                        */
+                       if (!KDB_STATE(KDB))
+                               KDB_STATE_SET(KDB);
+               }
+
+               KDB_STATE_CLEAR(SUPPRESS);
+               KDB_DEBUG_STATE("kdb_main_loop 2", reason);
+               if (KDB_STATE(LEAVING))
+                       break;  /* Another cpu said 'go' */
+               /* Still using kdb, this processor is in control */
+               result = kdb_local(reason2, error, regs, db_result);
+               KDB_DEBUG_STATE("kdb_main_loop 3", result);
+
+               if (result == KDB_CMD_CPU)
+                       break;
+
+               if (result == KDB_CMD_SS) {
+                       KDB_STATE_SET(DOING_SS);
+                       break;
+               }
+
+               if (result == KDB_CMD_SSB) {
+                       KDB_STATE_SET(DOING_SS);
+                       KDB_STATE_SET(DOING_SSB);
+                       break;
+               }
+
+               if (result == KDB_CMD_KGDB) {
+                       if (!(KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)))
+                               kdb_printf("Entering kgdb; please attach "
+                                          "debugger or use $D#44+ or $3#33\n");
+                       break;
+               }
+               if (result && result != 1 && result != KDB_CMD_GO)
+                       kdb_printf("\nUnexpected kdb_local return code %d\n",
+                                  result);
+               KDB_DEBUG_STATE("kdb_main_loop 4", reason);
+               break;
+       }
+       if (KDB_STATE(DOING_SS))
+               KDB_STATE_CLEAR(SSBPT);
+
+       return result;
+}
+
+/*
+ * kdb_mdr - This function implements the guts of the 'mdr', memory
+ * read command.
+ *     mdr  <addr arg>,<byte count>
+ * Inputs:
+ *     addr    Start address
+ *     count   Number of bytes
+ * Returns:
+ *     Always 0.  Any errors are detected and printed by kdb_getarea.
+ */
+static int kdb_mdr(unsigned long addr, unsigned int count)
+{
+       unsigned char c;
+       while (count--) {
+               if (kdb_getarea(c, addr))
+                       return 0;
+               kdb_printf("%02x", c);
+               addr++;
+       }
+       kdb_printf("\n");
+       return 0;
+}
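+
+/*
+ * Example (illustrative address): "mdr c0254ba0 8" prints the 8 bytes
+ * at c0254ba0 as one unbroken run of hex digits, e.g.
+ * "00112233aabbccdd", with no address prefix or ASCII column.
+ */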
+
+/*
+ * kdb_md - This function implements the 'md', 'md1', 'md2', 'md4',
+ *     'md8' 'mdr' and 'mds' commands.
+ *
+ *     md|mds  [<addr arg> [<line count> [<radix>]]]
+ *     mdWcN   [<addr arg> [<line count> [<radix>]]]
+ *             where W is the width (1, 2, 4 or 8) and N is the count.
+ *             e.g., md1c20 reads 20 bytes, 1 at a time.
+ *     mdr  <addr arg>,<byte count>
+ */
+static void kdb_md_line(const char *fmtstr, unsigned long addr,
+                       int symbolic, int nosect, int bytesperword,
+                       int num, int repeat, int phys)
+{
+       /* print just one line of data */
+       kdb_symtab_t symtab;
+       char cbuf[32];
+       char *c = cbuf;
+       int i;
+       unsigned long word;
+
+       memset(cbuf, '\0', sizeof(cbuf));
+       if (phys)
+               kdb_printf("phys " kdb_machreg_fmt0 " ", addr);
+       else
+               kdb_printf(kdb_machreg_fmt0 " ", addr);
+
+       for (i = 0; i < num && repeat--; i++) {
+               if (phys) {
+                       if (kdb_getphysword(&word, addr, bytesperword))
+                               break;
+               } else if (kdb_getword(&word, addr, bytesperword))
+                       break;
+               kdb_printf(fmtstr, word);
+               if (symbolic)
+                       kdbnearsym(word, &symtab);
+               else
+                       memset(&symtab, 0, sizeof(symtab));
+               if (symtab.sym_name) {
+                       kdb_symbol_print(word, &symtab, 0);
+                       if (!nosect) {
+                               kdb_printf("\n");
+                               kdb_printf("                       %s %s "
+                                          kdb_machreg_fmt " "
+                                          kdb_machreg_fmt " "
+                                          kdb_machreg_fmt, symtab.mod_name,
+                                          symtab.sec_name, symtab.sec_start,
+                                          symtab.sym_start, symtab.sym_end);
+                       }
+                       addr += bytesperword;
+               } else {
+                       union {
+                               u64 word;
+                               unsigned char c[8];
+                       } wc;
+                       unsigned char *cp;
+#ifdef __BIG_ENDIAN
+                       cp = wc.c + 8 - bytesperword;
+#else
+                       cp = wc.c;
+#endif
+                       wc.word = word;
+#define printable_char(c) \
+       ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
+                       switch (bytesperword) {
+                       case 8:
+                               *c++ = printable_char(*cp++);
+                               *c++ = printable_char(*cp++);
+                               *c++ = printable_char(*cp++);
+                               *c++ = printable_char(*cp++);
+                               addr += 4;
+                               /* fall through */
+                       case 4:
+                               *c++ = printable_char(*cp++);
+                               *c++ = printable_char(*cp++);
+                               addr += 2;
+                               /* fall through */
+                       case 2:
+                               *c++ = printable_char(*cp++);
+                               addr++;
+                               /* fall through */
+                       case 1:
+                               *c++ = printable_char(*cp++);
+                               addr++;
+                               break;
+                       }
+#undef printable_char
+               }
+       }
+       kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1),
+                  " ", cbuf);
+}
+
+static int kdb_md(int argc, const char **argv)
+{
+       static unsigned long last_addr;
+       static int last_radix, last_bytesperword, last_repeat;
+       int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat;
+       int nosect = 0;
+       char fmtchar, fmtstr[64];
+       unsigned long addr;
+       unsigned long word;
+       long offset = 0;
+       int symbolic = 0;
+       int valid = 0;
+       int phys = 0;
+
+       kdbgetintenv("MDCOUNT", &mdcount);
+       kdbgetintenv("RADIX", &radix);
+       kdbgetintenv("BYTESPERWORD", &bytesperword);
+
+       /* Assume 'md <addr>' and start with environment values */
+       repeat = mdcount * 16 / bytesperword;
+
+       if (strcmp(argv[0], "mdr") == 0) {
+               if (argc != 2)
+                       return KDB_ARGCOUNT;
+               valid = 1;
+       } else if (isdigit(argv[0][2])) {
+               bytesperword = (int)(argv[0][2] - '0');
+               if (bytesperword == 0) {
+                       bytesperword = last_bytesperword;
+                       if (bytesperword == 0)
+                               bytesperword = 4;
+               }
+               last_bytesperword = bytesperword;
+               repeat = mdcount * 16 / bytesperword;
+               if (!argv[0][3])
+                       valid = 1;
+               else if (argv[0][3] == 'c' && argv[0][4]) {
+                       char *p;
+                       repeat = simple_strtoul(argv[0] + 4, &p, 10);
+                       mdcount = ((repeat * bytesperword) + 15) / 16;
+                       valid = !*p;
+               }
+               last_repeat = repeat;
+       } else if (strcmp(argv[0], "md") == 0)
+               valid = 1;
+       else if (strcmp(argv[0], "mds") == 0)
+               valid = 1;
+       else if (strcmp(argv[0], "mdp") == 0) {
+               phys = valid = 1;
+       }
+       if (!valid)
+               return KDB_NOTFOUND;
+
+       if (argc == 0) {
+               if (last_addr == 0)
+                       return KDB_ARGCOUNT;
+               addr = last_addr;
+               radix = last_radix;
+               bytesperword = last_bytesperword;
+               repeat = last_repeat;
+               mdcount = ((repeat * bytesperword) + 15) / 16;
+       }
+
+       if (argc) {
+               unsigned long val;
+               int diag, nextarg = 1;
+               diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
+                                    &offset, NULL);
+               if (diag)
+                       return diag;
+               if (argc > nextarg+2)
+                       return KDB_ARGCOUNT;
+
+               if (argc >= nextarg) {
+                       diag = kdbgetularg(argv[nextarg], &val);
+                       if (!diag) {
+                               mdcount = (int) val;
+                               repeat = mdcount * 16 / bytesperword;
+                       }
+               }
+               if (argc >= nextarg+1) {
+                       diag = kdbgetularg(argv[nextarg+1], &val);
+                       if (!diag)
+                               radix = (int) val;
+               }
+       }
+
+       if (strcmp(argv[0], "mdr") == 0)
+               return kdb_mdr(addr, mdcount);
+
+       switch (radix) {
+       case 10:
+               fmtchar = 'd';
+               break;
+       case 16:
+               fmtchar = 'x';
+               break;
+       case 8:
+               fmtchar = 'o';
+               break;
+       default:
+               return KDB_BADRADIX;
+       }
+
+       last_radix = radix;
+
+       if (bytesperword > KDB_WORD_SIZE)
+               return KDB_BADWIDTH;
+
+       switch (bytesperword) {
+       case 8:
+               sprintf(fmtstr, "%%16.16l%c ", fmtchar);
+               break;
+       case 4:
+               sprintf(fmtstr, "%%8.8l%c ", fmtchar);
+               break;
+       case 2:
+               sprintf(fmtstr, "%%4.4l%c ", fmtchar);
+               break;
+       case 1:
+               sprintf(fmtstr, "%%2.2l%c ", fmtchar);
+               break;
+       default:
+               return KDB_BADWIDTH;
+       }
+
+       last_repeat = repeat;
+       last_bytesperword = bytesperword;
+
+       if (strcmp(argv[0], "mds") == 0) {
+               symbolic = 1;
+               /* Do not save these changes as last_*, they are temporary mds
+                * overrides.
+                */
+               bytesperword = KDB_WORD_SIZE;
+               repeat = mdcount;
+               kdbgetintenv("NOSECT", &nosect);
+       }
+
+       /* Round address down modulo BYTESPERWORD */
+
+       addr &= ~(bytesperword-1);
+
+       while (repeat > 0) {
+               unsigned long a;
+               int n, z, num = (symbolic ? 1 : (16 / bytesperword));
+
+               if (KDB_FLAG(CMD_INTERRUPT))
+                       return 0;
+               for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) {
+                       if (phys) {
+                               if (kdb_getphysword(&word, a, bytesperword)
+                                               || word)
+                                       break;
+                       } else if (kdb_getword(&word, a, bytesperword) || word)
+                               break;
+               }
+               n = min(num, repeat);
+               kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword,
+                           num, repeat, phys);
+               addr += bytesperword * n;
+               repeat -= n;
+               z = (z + num - 1) / num;
+               if (z > 2) {
+                       int s = num * (z-2);
+                       kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0
+                                  " zero suppressed\n",
+                               addr, addr + bytesperword * s - 1);
+                       addr += bytesperword * s;
+                       repeat -= s;
+               }
+       }
+       last_addr = addr;
+
+       return 0;
+}
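+
+/*
+ * Usage sketch for the md family (addresses are illustrative):
+ *
+ *     kdb> md c0254ba0          dump using MDCOUNT/RADIX/BYTESPERWORD
+ *     kdb> md4c8 c0254ba0       read 8 4-byte words
+ *     kdb> md1c20 c0254ba0      read 20 bytes, 1 at a time
+ *     kdb> mds c0254ba0         one word per line with symbolic decode
+ */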
+
+/*
+ * kdb_mm - This function implements the 'mm' command.
+ *     mm address-expression new-value
+ * Remarks:
+ *     mm works on machine words; mmW (e.g. mm1, mm2, mm4) works on
+ *     W-byte quantities.
+ */
+static int kdb_mm(int argc, const char **argv)
+{
+       int diag;
+       unsigned long addr;
+       long offset = 0;
+       unsigned long contents;
+       int nextarg;
+       int width;
+
+       if (argv[0][2] && !isdigit(argv[0][2]))
+               return KDB_NOTFOUND;
+
+       if (argc < 2)
+               return KDB_ARGCOUNT;
+
+       nextarg = 1;
+       diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+       if (diag)
+               return diag;
+
+       if (nextarg > argc)
+               return KDB_ARGCOUNT;
+       diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL);
+       if (diag)
+               return diag;
+
+       if (nextarg != argc + 1)
+               return KDB_ARGCOUNT;
+
+       width = argv[0][2] ? (argv[0][2] - '0') : (KDB_WORD_SIZE);
+       diag = kdb_putword(addr, contents, width);
+       if (diag)
+               return diag;
+
+       kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents);
+
+       return 0;
+}
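+
+/*
+ * Usage sketch for mm (illustrative values):
+ *
+ *     kdb> mm c0254ba0 0x1      modify one machine word
+ *     kdb> mm1 c0254ba0 0xff    modify a single byte (W = 1)
+ */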
+
+/*
+ * kdb_go - This function implements the 'go' command.
+ *     go [address-expression]
+ */
+static int kdb_go(int argc, const char **argv)
+{
+       unsigned long addr;
+       int diag;
+       int nextarg;
+       long offset;
+
+       if (argc == 1) {
+               if (raw_smp_processor_id() != kdb_initial_cpu) {
+                       kdb_printf("go <address> must be issued from the "
+                                  "initial cpu, do cpu %d first\n",
+                                  kdb_initial_cpu);
+                       return KDB_ARGCOUNT;
+               }
+               nextarg = 1;
+               diag = kdbgetaddrarg(argc, argv, &nextarg,
+                                    &addr, &offset, NULL);
+               if (diag)
+                       return diag;
+       } else if (argc) {
+               return KDB_ARGCOUNT;
+       }
+
+       diag = KDB_CMD_GO;
+       if (KDB_FLAG(CATASTROPHIC)) {
+               kdb_printf("Catastrophic error detected\n");
+               kdb_printf("kdb_continue_catastrophic=%d, ",
+                       kdb_continue_catastrophic);
+               if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) {
+                       kdb_printf("type go a second time if you really want "
+                                  "to continue\n");
+                       return 0;
+               }
+               if (kdb_continue_catastrophic == 2) {
+                       kdb_printf("forcing reboot\n");
+                       kdb_reboot(0, NULL);
+               }
+               kdb_printf("attempting to continue\n");
+       }
+       return diag;
+}
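+
+/*
+ * Usage sketch for go: a bare "go" resumes execution where kdb was
+ * entered; "go <address>" must be issued from the initial cpu.  After
+ * a catastrophic error with kdb_continue_catastrophic == 0, the first
+ * "go" only warns and a second consecutive "go" actually continues.
+ */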
+
+/*
+ * kdb_rd - This function implements the 'rd' command.
+ */
+static int kdb_rd(int argc, const char **argv)
+{
+       int diag = kdb_check_regs();
+       if (diag)
+               return diag;
+
+       kdb_dumpregs(kdb_current_regs);
+       return 0;
+}
+
+/*
+ * kdb_rm - This function implements the 'rm' (register modify)  command.
+ *     rm register-name new-contents
+ * Remarks:
+ *     Currently doesn't allow modification of control or
+ *     debug registers.
+ */
+static int kdb_rm(int argc, const char **argv)
+{
+       int diag;
+       int ind = 0;
+       unsigned long contents;
+
+       if (argc != 2)
+               return KDB_ARGCOUNT;
+       /*
+        * Allow presence or absence of leading '%' symbol.
+        */
+       if (argv[1][0] == '%')
+               ind = 1;
+
+       diag = kdbgetularg(argv[2], &contents);
+       if (diag)
+               return diag;
+
+       diag = kdb_check_regs();
+       if (diag)
+               return diag;
+       kdb_printf("ERROR: Register set currently not implemented\n");
+       return 0;
+}
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+/*
+ * kdb_sr - This function implements the 'sr' (SYSRQ key) command
+ *     which interfaces to the soi-disant MAGIC SYSRQ functionality.
+ *             sr <magic-sysrq-code>
+ */
+static int kdb_sr(int argc, const char **argv)
+{
+       if (argc != 1)
+               return KDB_ARGCOUNT;
+       sysrq_toggle_support(1);
+       kdb_trap_printk++;
+       handle_sysrq(*argv[1], NULL);
+       kdb_trap_printk--;
+
+       return 0;
+}
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+/*
+ * kdb_ef - This function implements the 'regs' (display exception
+ *     frame) command.  This command takes an address and expects to
+ *     find an exception frame at that address, formats and prints
+ *     it.
+ *             regs address-expression
+ * Remarks:
+ *     Not done yet.
+ */
+static int kdb_ef(int argc, const char **argv)
+{
+       int diag;
+       unsigned long addr;
+       long offset;
+       int nextarg;
+
+       if (argc != 1)
+               return KDB_ARGCOUNT;
+
+       nextarg = 1;
+       diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+       if (diag)
+               return diag;
+       show_regs((struct pt_regs *)addr);
+       return 0;
+}
+
+#if defined(CONFIG_MODULES)
+/* modules using other modules */
+struct module_use {
+       struct list_head list;
+       struct module *module_which_uses;
+};
+
+/*
+ * kdb_lsmod - This function implements the 'lsmod' command.  Lists
+ *     currently loaded kernel modules.
+ *     Mostly taken from userland lsmod.
+ */
+static int kdb_lsmod(int argc, const char **argv)
+{
+       struct module *mod;
+
+       if (argc != 0)
+               return KDB_ARGCOUNT;
+
+       kdb_printf("Module                  Size  modstruct     Used by\n");
+       list_for_each_entry(mod, kdb_modules, list) {
+
+               kdb_printf("%-20s%8u  0x%p ", mod->name,
+                          mod->core_size, (void *)mod);
+#ifdef CONFIG_MODULE_UNLOAD
+               kdb_printf("%4d ", module_refcount(mod));
+#endif
+               if (mod->state == MODULE_STATE_GOING)
+                       kdb_printf(" (Unloading)");
+               else if (mod->state == MODULE_STATE_COMING)
+                       kdb_printf(" (Loading)");
+               else
+                       kdb_printf(" (Live)");
+
+#ifdef CONFIG_MODULE_UNLOAD
+               {
+                       struct module_use *use;
+                       kdb_printf(" [ ");
+                       list_for_each_entry(use, &mod->modules_which_use_me,
+                                           list)
+                               kdb_printf("%s ", use->module_which_uses->name);
+                       kdb_printf("]\n");
+               }
+#endif
+       }
+
+       return 0;
+}
+
+#endif /* CONFIG_MODULES */
+
+/*
+ * kdb_env - This function implements the 'env' command.  Display the
+ *     current environment variables.
+ */
+
+static int kdb_env(int argc, const char **argv)
+{
+       int i;
+
+       for (i = 0; i < __nenv; i++) {
+               if (__env[i])
+                       kdb_printf("%s\n", __env[i]);
+       }
+
+       if (KDB_DEBUG(MASK))
+               kdb_printf("KDBFLAGS=0x%x\n", kdb_flags);
+
+       return 0;
+}
+
+#ifdef CONFIG_PRINTK
+/*
+ * kdb_dmesg - This function implements the 'dmesg' command to display
+ *     the contents of the syslog buffer.
+ *             dmesg [lines] [adjust]
+ */
+static int kdb_dmesg(int argc, const char **argv)
+{
+       char *syslog_data[4], *start, *end, c = '\0', *p;
+       int diag, logging, logsize, lines = 0, adjust = 0, n;
+
+       if (argc > 2)
+               return KDB_ARGCOUNT;
+       if (argc) {
+               char *cp;
+               lines = simple_strtol(argv[1], &cp, 0);
+               if (*cp)
+                       lines = 0;
+               if (argc > 1) {
+                       adjust = simple_strtoul(argv[2], &cp, 0);
+                       if (*cp || adjust < 0)
+                               adjust = 0;
+               }
+       }
+
+       /* disable LOGGING if set */
+       diag = kdbgetintenv("LOGGING", &logging);
+       if (!diag && logging) {
+               const char *setargs[] = { "set", "LOGGING", "0" };
+               kdb_set(2, setargs);
+       }
+
+       /* syslog_data[0,1] physical start, end+1.  syslog_data[2,3]
+        * logical start, end+1. */
+       kdb_syslog_data(syslog_data);
+       if (syslog_data[2] == syslog_data[3])
+               return 0;
+       logsize = syslog_data[1] - syslog_data[0];
+       start = syslog_data[2];
+       end = syslog_data[3];
+#define KDB_WRAP(p) ((((p) - syslog_data[0]) % logsize) + syslog_data[0])
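+       /*
+        * Wrap example (illustrative sizes): with a 16 byte log at
+        * syslog_data[0], logical position syslog_data[0] + 19 maps to
+        * physical syslog_data[0] + 3, i.e. 19 % 16 into the buffer.
+        */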
+       for (n = 0, p = start; p < end; ++p) {
+               c = *KDB_WRAP(p);
+               if (c == '\n')
+                       ++n;
+       }
+       if (c != '\n')
+               ++n;
+       if (lines < 0) {
+               if (adjust >= n)
+                       kdb_printf("buffer only contains %d lines, nothing "
+                                  "printed\n", n);
+               else if (adjust - lines >= n)
+                       kdb_printf("buffer only contains %d lines, last %d "
+                                  "lines printed\n", n, n - adjust);
+               if (adjust) {
+                       for (; start < end && adjust; ++start) {
+                               if (*KDB_WRAP(start) == '\n')
+                                       --adjust;
+                       }
+                       if (start < end)
+                               ++start;
+               }
+               for (p = start; p < end && lines; ++p) {
+                       if (*KDB_WRAP(p) == '\n')
+                               ++lines;
+               }
+               end = p;
+       } else if (lines > 0) {
+               int skip = n - (adjust + lines);
+               if (adjust >= n) {
+                       kdb_printf("buffer only contains %d lines, "
+                                  "nothing printed\n", n);
+                       skip = n;
+               } else if (skip < 0) {
+                       lines += skip;
+                       skip = 0;
+                       kdb_printf("buffer only contains %d lines, first "
+                                  "%d lines printed\n", n, lines);
+               }
+               for (; start < end && skip; ++start) {
+                       if (*KDB_WRAP(start) == '\n')
+                               --skip;
+               }
+               for (p = start; p < end && lines; ++p) {
+                       if (*KDB_WRAP(p) == '\n')
+                               --lines;
+               }
+               end = p;
+       }
+       /* Do a line at a time (max 200 chars) to reduce protocol overhead */
+       c = '\n';
+       while (start != end) {
+               char buf[201];
+               p = buf;
+               if (KDB_FLAG(CMD_INTERRUPT))
+                       return 0;
+               while (start < end && (c = *KDB_WRAP(start)) &&
+                      (p - buf) < sizeof(buf)-1) {
+                       ++start;
+                       *p++ = c;
+                       if (c == '\n')
+                               break;
+               }
+               *p = '\0';
+               kdb_printf("%s", buf);
+       }
+       if (c != '\n')
+               kdb_printf("\n");
+
+       return 0;
+}
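+
+/*
+ * Usage sketch: "dmesg 10" prints the last 10 lines of the log and
+ * "dmesg -10" the first 10; the optional second argument shifts the
+ * window, e.g. "dmesg 10 100" shows 10 lines ending 100 lines before
+ * the end of the buffer.
+ */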
+#endif /* CONFIG_PRINTK */
+/*
+ * kdb_cpu - This function implements the 'cpu' command.
+ *     cpu     [<cpunum>]
+ * Returns:
+ *     KDB_CMD_CPU for success, a kdb diagnostic if error
+ */
+static void kdb_cpu_status(void)
+{
+       int i, start_cpu, first_print = 1;
+       char state, prev_state = '?';
+
+       kdb_printf("Currently on cpu %d\n", raw_smp_processor_id());
+       kdb_printf("Available cpus: ");
+       for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
+               if (!cpu_online(i)) {
+                       state = 'F';    /* cpu is offline */
+               } else {
+                       state = ' ';    /* cpu is responding to kdb */
+                       if (kdb_task_state_char(KDB_TSK(i)) == 'I')
+                               state = 'I';    /* idle task */
+               }
+               if (state != prev_state) {
+                       if (prev_state != '?') {
+                               if (!first_print)
+                                       kdb_printf(", ");
+                               first_print = 0;
+                               kdb_printf("%d", start_cpu);
+                               if (start_cpu < i-1)
+                                       kdb_printf("-%d", i-1);
+                               if (prev_state != ' ')
+                                       kdb_printf("(%c)", prev_state);
+                       }
+                       prev_state = state;
+                       start_cpu = i;
+               }
+       }
+       /* print the trailing cpus, ignoring them if they are all offline */
+       if (prev_state != 'F') {
+               if (!first_print)
+                       kdb_printf(", ");
+               kdb_printf("%d", start_cpu);
+               if (start_cpu < i-1)
+                       kdb_printf("-%d", i-1);
+               if (prev_state != ' ')
+                       kdb_printf("(%c)", prev_state);
+       }
+       kdb_printf("\n");
+}
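+
+/*
+ * Sample of the summary printed above (illustrative): with kdb on cpu
+ * 0, cpus 1-3 in the idle task and cpus 4-7 offline, the line reads
+ *
+ *     Available cpus: 0, 1-3(I)
+ *
+ * trailing offline cpus are suppressed entirely.
+ */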
+
+static int kdb_cpu(int argc, const char **argv)
+{
+       unsigned long cpunum;
+       int diag;
+
+       if (argc == 0) {
+               kdb_cpu_status();
+               return 0;
+       }
+
+       if (argc != 1)
+               return KDB_ARGCOUNT;
+
+       diag = kdbgetularg(argv[1], &cpunum);
+       if (diag)
+               return diag;
+
+       /*
+        * Validate cpunum
+        */
+       if ((cpunum >= NR_CPUS) || !cpu_online(cpunum))
+               return KDB_BADCPUNUM;
+
+       dbg_switch_cpu = cpunum;
+
+       /*
+        * Switch to other cpu
+        */
+       return KDB_CMD_CPU;
+}
+
+/* The user may not realize that ps/bta with no parameters does not print idle
+ * or sleeping system daemon processes, so tell them how many were suppressed.
+ */
+void kdb_ps_suppressed(void)
+{
+       int idle = 0, daemon = 0;
+       unsigned long mask_I = kdb_task_state_string("I"),
+                     mask_M = kdb_task_state_string("M");
+       unsigned long cpu;
+       const struct task_struct *p, *g;
+       for_each_online_cpu(cpu) {
+               p = kdb_curr_task(cpu);
+               if (kdb_task_state(p, mask_I))
+                       ++idle;
+       }
+       kdb_do_each_thread(g, p) {
+               if (kdb_task_state(p, mask_M))
+                       ++daemon;
+       } kdb_while_each_thread(g, p);
+       if (idle || daemon) {
+               if (idle)
+                       kdb_printf("%d idle process%s (state I)%s\n",
+                                  idle, idle == 1 ? "" : "es",
+                                  daemon ? " and " : "");
+               if (daemon)
+                       kdb_printf("%d sleeping system daemon (state M) "
+                                  "process%s", daemon,
+                                  daemon == 1 ? "" : "es");
+               kdb_printf(" suppressed,\nuse 'ps A' to see all.\n");
+       }
+}
+
+/*
+ * kdb_ps - This function implements the 'ps' command which shows a
+ *     list of the active processes.
+ *             ps [DRSTCZEUIMA]   All processes, optionally filtered by state
+ */
+void kdb_ps1(const struct task_struct *p)
+{
+       int cpu;
+       unsigned long tmp;
+
+       if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
+               return;
+
+       cpu = kdb_process_cpu(p);
+       kdb_printf("0x%p %8d %8d  %d %4d   %c  0x%p %c%s\n",
+                  (void *)p, p->pid, p->parent->pid,
+                  kdb_task_has_cpu(p), kdb_process_cpu(p),
+                  kdb_task_state_char(p),
+                  (void *)(&p->thread),
+                  p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ',
+                  p->comm);
+       if (kdb_task_has_cpu(p)) {
+               if (!KDB_TSK(cpu)) {
+                       kdb_printf("  Error: no saved data for this cpu\n");
+               } else {
+                       if (KDB_TSK(cpu) != p)
+                               kdb_printf("  Error: does not match running "
+                                  "process table (0x%p)\n", KDB_TSK(cpu));
+               }
+       }
+}
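+
+/*
+ * An output line from the format above might read (values illustrative):
+ *
+ *     0xc7c9c000     1031        1  0    0   S  0xc7c9c1a0  bash
+ *
+ * where the column before the command name (blank here) carries '*'
+ * for the task kdb currently considers "current".
+ */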
+
+static int kdb_ps(int argc, const char **argv)
+{
+       struct task_struct *g, *p;
+       unsigned long mask, cpu;
+
+       if (argc == 0)
+               kdb_ps_suppressed();
+       kdb_printf("%-*s      Pid   Parent [*] cpu State %-*s Command\n",
+               (int)(2*sizeof(void *))+2, "Task Addr",
+               (int)(2*sizeof(void *))+2, "Thread");
+       mask = kdb_task_state_string(argc ? argv[1] : NULL);
+       /* Run the active tasks first */
+       for_each_online_cpu(cpu) {
+               if (KDB_FLAG(CMD_INTERRUPT))
+                       return 0;
+               p = kdb_curr_task(cpu);
+               if (kdb_task_state(p, mask))
+                       kdb_ps1(p);
+       }
+       kdb_printf("\n");
+       /* Now the real tasks */
+       kdb_do_each_thread(g, p) {
+               if (KDB_FLAG(CMD_INTERRUPT))
+                       return 0;
+               if (kdb_task_state(p, mask))
+                       kdb_ps1(p);
+       } kdb_while_each_thread(g, p);
+
+       return 0;
+}
+
+/*
+ * kdb_pid - This function implements the 'pid' command which switches
+ *     the currently active process.
+ *             pid [<pid> | R]
+ */
+static int kdb_pid(int argc, const char **argv)
+{
+       struct task_struct *p;
+       unsigned long val;
+       int diag;
+
+       if (argc > 1)
+               return KDB_ARGCOUNT;
+
+       if (argc) {
+               if (strcmp(argv[1], "R") == 0) {
+                       p = KDB_TSK(kdb_initial_cpu);
+               } else {
+                       diag = kdbgetularg(argv[1], &val);
+                       if (diag)
+                               return KDB_BADINT;
+
+                       p = find_task_by_pid_ns((pid_t)val, &init_pid_ns);
+                       if (!p) {
+                               kdb_printf("No task with pid=%d\n", (pid_t)val);
+                               return 0;
+                       }
+               }
+               kdb_set_current_task(p);
+       }
+       kdb_printf("KDB current process is %s(pid=%d)\n",
+                  kdb_current_task->comm,
+                  kdb_current_task->pid);
+
+       return 0;
+}
+
+/*
+ * kdb_ll - This function implements the 'll' command which follows a
+ *     linked list and executes an arbitrary command for each
+ *     element.
+ */
+static int kdb_ll(int argc, const char **argv)
+{
+       int diag;
+       unsigned long addr;
+       long offset = 0;
+       unsigned long va;
+       unsigned long linkoffset;
+       int nextarg;
+       const char *command;
+
+       if (argc != 3)
+               return KDB_ARGCOUNT;
+
+       nextarg = 1;
+       diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+       if (diag)
+               return diag;
+
+       diag = kdbgetularg(argv[2], &linkoffset);
+       if (diag)
+               return diag;
+
+       /*
+        * Use the starting address as the first element in the list,
+        * and assume that the list ends with a null pointer.
+        */
+
+       va = addr;
+       command = kdb_strdup(argv[3], GFP_KDB);
+       if (!command) {
+               kdb_printf("%s: cannot duplicate command\n", __func__);
+               return 0;
+       }
+       /* Recursive use of kdb_parse, do not use argv after this point */
+       argv = NULL;
+
+       while (va) {
+               char buf[80];
+
+               sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
+               diag = kdb_parse(buf);
+               if (diag)
+                       break;  /* free 'command' before returning */
+
+               addr = va + linkoffset;
+               if (kdb_getword(&va, addr, sizeof(va))) {
+                       diag = 0;
+                       break;
+               }
+       }
+       kfree(command);
+
+       return diag;
+}
+
+static int kdb_kgdb(int argc, const char **argv)
+{
+       return KDB_CMD_KGDB;
+}
+
+/*
+ * kdb_help - This function implements the 'help' and '?' commands.
+ */
+static int kdb_help(int argc, const char **argv)
+{
+       kdbtab_t *kt;
+       int i;
+
+       kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description");
+       kdb_printf("-----------------------------"
+                  "-----------------------------\n");
+       for_each_kdbcmd(kt, i) {
+               if (kt->cmd_name)
+                       kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name,
+                                  kt->cmd_usage, kt->cmd_help);
+               if (KDB_FLAG(CMD_INTERRUPT))
+                       return 0;
+       }
+       return 0;
+}
+
+/*
+ * kdb_kill - This function implements the 'kill' commands.
+ */
+static int kdb_kill(int argc, const char **argv)
+{
+       long sig, pid;
+       char *endp;
+       struct task_struct *p;
+       struct siginfo info;
+
+       if (argc != 2)
+               return KDB_ARGCOUNT;
+
+       sig = simple_strtol(argv[1], &endp, 0);
+       if (*endp)
+               return KDB_BADINT;
+       if (sig >= 0) {
+               kdb_printf("Invalid signal parameter, use <-signal>\n");
+               return 0;
+       }
+       sig = -sig;
+
+       pid = simple_strtol(argv[2], &endp, 0);
+       if (*endp)
+               return KDB_BADINT;
+       if (pid <= 0) {
+               kdb_printf("Process ID must be larger than 0.\n");
+               return 0;
+       }
+
+       /* Find the process. */
+       p = find_task_by_pid_ns(pid, &init_pid_ns);
+       if (!p) {
+               kdb_printf("The specified process was not found.\n");
+               return 0;
+       }
+       p = p->group_leader;
+       info.si_signo = sig;
+       info.si_errno = 0;
+       info.si_code = SI_USER;
+       info.si_pid = pid;  /* same capabilities as process being signalled */
+       info.si_uid = 0;    /* kdb has root authority */
+       kdb_send_sig_info(p, &info);
+       return 0;
+}
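+
+/*
+ * Usage sketch (illustrative pid): "kill -9 1234" sends SIGKILL to the
+ * thread group leader of pid 1234; the signal must be given negated,
+ * mirroring shell kill syntax.
+ */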
+
+struct kdb_tm {
+       int tm_sec;     /* seconds */
+       int tm_min;     /* minutes */
+       int tm_hour;    /* hours */
+       int tm_mday;    /* day of the month */
+       int tm_mon;     /* month */
+       int tm_year;    /* year */
+};
+
+static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
+{
+       /* This will work from 1970 to 2099; 2100 is not a leap year */
+       static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31,
+                                31, 30, 31, 30, 31 };
+       memset(tm, 0, sizeof(*tm));
+       tm->tm_sec  = tv->tv_sec % (24 * 60 * 60);
+       tm->tm_mday = tv->tv_sec / (24 * 60 * 60) +
+               (2 * 365 + 1); /* shift base from 1970 to 1968 */
+       tm->tm_min =  tm->tm_sec / 60 % 60;
+       tm->tm_hour = tm->tm_sec / 60 / 60;
+       tm->tm_sec =  tm->tm_sec % 60;
+       tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1));
+       tm->tm_mday %= (4*365+1);
+       mon_day[1] = 29;
+       while (tm->tm_mday >= mon_day[tm->tm_mon]) {
+               tm->tm_mday -= mon_day[tm->tm_mon];
+               if (++tm->tm_mon == 12) {
+                       tm->tm_mon = 0;
+                       ++tm->tm_year;
+                       mon_day[1] = 28;
+               }
+       }
+       ++tm->tm_mday;
+}
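+
+/*
+ * Worked example for the conversion above: tv_sec = 86399 gives
+ * tm_hour:tm_min:tm_sec = 23:59:59 and, after walking the month table
+ * from the 1968 base, tm_year = 70, tm_mon = 0, tm_mday = 1, which is
+ * printed as 1970-01-01 23:59:59.
+ */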
+
+/*
+ * Most of this code has been lifted from kernel/timer.c::sys_sysinfo().
+ * I cannot call that code directly from kdb; it has an unconditional
+ * cli()/sti() and calls routines that take locks which can stop the debugger.
+ */
+static void kdb_sysinfo(struct sysinfo *val)
+{
+       struct timespec uptime;
+       do_posix_clock_monotonic_gettime(&uptime);
+       memset(val, 0, sizeof(*val));
+       val->uptime = uptime.tv_sec;
+       val->loads[0] = avenrun[0];
+       val->loads[1] = avenrun[1];
+       val->loads[2] = avenrun[2];
+       val->procs = nr_threads-1;
+       si_meminfo(val);
+
+       return;
+}
+
+/*
+ * kdb_summary - This function implements the 'summary' command.
+ */
+static int kdb_summary(int argc, const char **argv)
+{
+       struct kdb_tm tm;
+       struct sysinfo val;
+
+       if (argc)
+               return KDB_ARGCOUNT;
+
+       kdb_printf("sysname    %s\n", init_uts_ns.name.sysname);
+       kdb_printf("release    %s\n", init_uts_ns.name.release);
+       kdb_printf("version    %s\n", init_uts_ns.name.version);
+       kdb_printf("machine    %s\n", init_uts_ns.name.machine);
+       kdb_printf("nodename   %s\n", init_uts_ns.name.nodename);
+       kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
+       kdb_printf("ccversion  %s\n", __stringify(CCVERSION));
+
+       kdb_gmtime(&xtime, &tm);
+       kdb_printf("date       %04d-%02d-%02d %02d:%02d:%02d "
+                  "tz_minuteswest %d\n",
+               1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
+               tm.tm_hour, tm.tm_min, tm.tm_sec,
+               sys_tz.tz_minuteswest);
+
+       kdb_sysinfo(&val);
+       kdb_printf("uptime     ");
+       if (val.uptime > (24*60*60)) {
+               int days = val.uptime / (24*60*60);
+               val.uptime %= (24*60*60);
+               kdb_printf("%d day%s ", days, days == 1 ? "" : "s");
+       }
+       kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
+
+       /* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+       kdb_printf("load avg   %ld.%02ld %ld.%02ld %ld.%02ld\n",
+               LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
+               LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
+               LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
+#undef LOAD_INT
+#undef LOAD_FRAC
+       /* Display in kilobytes */
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+       kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
+                  "Buffers:        %8lu kB\n",
+                  K(val.totalram), K(val.freeram), K(val.bufferram));
+       return 0;
+}
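+
+/*
+ * Illustrative 'summary' output (all values are hypothetical):
+ *
+ *   sysname    Linux
+ *   release    2.6.34
+ *   machine    i686
+ *   uptime     2 days 03:42
+ *   load avg   0.61 0.32 0.11
+ *   MemTotal:    517080 kB
+ */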
+
+/*
+ * kdb_per_cpu - This function implements the 'per_cpu' command.
+ */
+static int kdb_per_cpu(int argc, const char **argv)
+{
+       char buf[256], fmtstr[64];
+       kdb_symtab_t symtab;
+       cpumask_t suppress = CPU_MASK_NONE;
+       int cpu, diag;
+       unsigned long addr, val, bytesperword = 0, whichcpu = ~0UL;
+
+       if (argc < 1 || argc > 3)
+               return KDB_ARGCOUNT;
+
+       snprintf(buf, sizeof(buf), "per_cpu__%s", argv[1]);
+       if (!kdbgetsymval(buf, &symtab)) {
+               kdb_printf("%s is not a per_cpu variable\n", argv[1]);
+               return KDB_BADADDR;
+       }
+       if (argc >= 2) {
+               diag = kdbgetularg(argv[2], &bytesperword);
+               if (diag)
+                       return diag;
+       }
+       if (!bytesperword)
+               bytesperword = KDB_WORD_SIZE;
+       else if (bytesperword > KDB_WORD_SIZE)
+               return KDB_BADWIDTH;
+       sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword));
+       if (argc >= 3) {
+               diag = kdbgetularg(argv[3], &whichcpu);
+               if (diag)
+                       return diag;
+               if (!cpu_online(whichcpu)) {
+                       kdb_printf("cpu %ld is not online\n", whichcpu);
+                       return KDB_BADCPUNUM;
+               }
+       }
+
+       /* Most architectures use __per_cpu_offset[cpu], some use
+        * __per_cpu_offset(cpu); non-SMP builds have no __per_cpu_offset
+        * at all.
+        */
+#ifdef __per_cpu_offset
+#define KDB_PCU(cpu) __per_cpu_offset(cpu)
+#else
+#ifdef CONFIG_SMP
+#define KDB_PCU(cpu) __per_cpu_offset[cpu]
+#else
+#define KDB_PCU(cpu) 0
+#endif
+#endif
+
+       for_each_online_cpu(cpu) {
+               if (whichcpu != ~0UL && whichcpu != cpu)
+                       continue;
+               addr = symtab.sym_start + KDB_PCU(cpu);
+               diag = kdb_getword(&val, addr, bytesperword);
+               if (diag) {
+                       kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to "
+                                  "read, diag=%d\n", cpu, addr, diag);
+                       continue;
+               }
+#ifdef CONFIG_SMP
+               if (!val) {
+                       cpu_set(cpu, suppress);
+                       continue;
+               }
+#endif /* CONFIG_SMP */
+               kdb_printf("%5d ", cpu);
+               kdb_md_line(fmtstr, addr,
+                       bytesperword == KDB_WORD_SIZE,
+                       1, bytesperword, 1, 1, 0);
+       }
+       if (cpus_weight(suppress) == 0)
+               return 0;
+       kdb_printf("Zero suppressed cpu(s):");
+       for (cpu = first_cpu(suppress); cpu < num_possible_cpus();
+            cpu = next_cpu(cpu, suppress)) {
+               kdb_printf(" %d", cpu);
+               if (cpu == num_possible_cpus() - 1 ||
+                   next_cpu(cpu, suppress) != cpu + 1)
+                       continue;
+               while (cpu < num_possible_cpus() &&
+                      next_cpu(cpu, suppress) == cpu + 1)
+                       ++cpu;
+               kdb_printf("-%d", cpu);
+       }
+       kdb_printf("\n");
+
+#undef KDB_PCU
+
+       return 0;
+}
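+
+/*
+ * Usage sketch, assuming the kernel defines a per-cpu symbol named
+ * 'runqueues':
+ *
+ *   kdb> per_cpu runqueues          one word per online cpu
+ *   kdb> per_cpu runqueues 8 2      8 bytes, cpu 2 only
+ *
+ * The command resolves 'per_cpu__runqueues' via kdbgetsymval() and
+ * adds the per-cpu offset of each selected cpu.
+ */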
+
+/*
+ * display help for the use of cmd | grep pattern
+ */
+static int kdb_grep_help(int argc, const char **argv)
+{
+       kdb_printf("Usage of  cmd args | grep pattern:\n");
+       kdb_printf("  Any command's output may be filtered through an ");
+       kdb_printf("emulated 'pipe'.\n");
+       kdb_printf("  'grep' is just a key word.\n");
+       kdb_printf("  The pattern may include a very limited set of "
+                  "metacharacters:\n");
+       kdb_printf("   pattern or ^pattern or pattern$ or ^pattern$\n");
+       kdb_printf("  And if there are spaces in the pattern, you may "
+                  "quote it:\n");
+       kdb_printf("   \"pat tern\" or \"^pat tern\" or \"pat tern$\""
+                  " or \"^pat tern$\"\n");
+       return 0;
+}
+
+/*
+ * kdb_register_repeat - This function is used to register a kernel
+ *     debugger command.
+ * Inputs:
+ *     cmd     Command name
+ *     func    Function to execute the command
+ *     usage   A simple usage string showing arguments
+ *     help    A simple help string describing command
+ *     minlen  Minimum number of characters required to match the command
+ *     repeat  Does the command auto repeat on enter?
+ * Returns:
+ *     zero for success, one for a duplicate command or if the command
+ *     table cannot be grown.
+ */
+#define kdb_command_extend 50  /* arbitrary */
+int kdb_register_repeat(char *cmd,
+                       kdb_func_t func,
+                       char *usage,
+                       char *help,
+                       short minlen,
+                       kdb_repeat_t repeat)
+{
+       int i;
+       kdbtab_t *kp;
+
+       /*
+        *  Brute force method to determine duplicates
+        */
+       for_each_kdbcmd(kp, i) {
+               if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
+                       kdb_printf("Duplicate kdb command registered: "
+                               "%s, func %p help %s\n", cmd, func, help);
+                       return 1;
+               }
+       }
+
+       /*
+        * Insert command into first available location in table
+        */
+       for_each_kdbcmd(kp, i) {
+               if (kp->cmd_name == NULL)
+                       break;
+       }
+
+       if (i >= kdb_max_commands) {
+               kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX +
+                        kdb_command_extend) * sizeof(*new), GFP_KDB);
+               if (!new) {
+                       kdb_printf("Could not allocate new kdb_command "
+                                  "table\n");
+                       return 1;
+               }
+               if (kdb_commands) {
+                       memcpy(new, kdb_commands,
+                              kdb_max_commands * sizeof(*new));
+                       kfree(kdb_commands);
+               }
+               memset(new + kdb_max_commands, 0,
+                      kdb_command_extend * sizeof(*new));
+               kdb_commands = new;
+               kp = kdb_commands + kdb_max_commands;
+               kdb_max_commands += kdb_command_extend;
+       }
+
+       kp->cmd_name   = cmd;
+       kp->cmd_func   = func;
+       kp->cmd_usage  = usage;
+       kp->cmd_help   = help;
+       kp->cmd_flags  = 0;
+       kp->cmd_minlen = minlen;
+       kp->cmd_repeat = repeat;
+
+       return 0;
+}
+
+/*
+ * kdb_register - Compatibility register function for commands that do
+ *     not need to specify a repeat state.  Equivalent to
+ *     kdb_register_repeat with KDB_REPEAT_NONE.
+ * Inputs:
+ *     cmd     Command name
+ *     func    Function to execute the command
+ *     usage   A simple usage string showing arguments
+ *     help    A simple help string describing command
+ *     minlen  Minimum number of characters required to match the command
+ * Returns:
+ *     zero for success, one if a duplicate command.
+ */
+int kdb_register(char *cmd,
+            kdb_func_t func,
+            char *usage,
+            char *help,
+            short minlen)
+{
+       return kdb_register_repeat(cmd, func, usage, help, minlen,
+                                  KDB_REPEAT_NONE);
+}
+
+/*
+ * kdb_unregister - This function is used to unregister a kernel
+ *     debugger command.  It is generally called when a module which
+ *     implements kdb commands is unloaded.
+ * Inputs:
+ *     cmd     Command name
+ * Returns:
+ *     zero for success, one if the command was not registered.
+ */
+int kdb_unregister(char *cmd)
+{
+       int i;
+       kdbtab_t *kp;
+
+       /*
+        *  find the command.
+        */
+       for (i = 0, kp = kdb_commands; i < kdb_max_commands; i++, kp++) {
+               if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
+                       kp->cmd_name = NULL;
+                       return 0;
+               }
+       }
+
+       /* Couldn't find it.  */
+       return 1;
+}
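+
+/*
+ * Registration sketch for a loadable module; the command name,
+ * handler and module hooks below are hypothetical:
+ *
+ *   static int kdb_hello(int argc, const char **argv)
+ *   {
+ *           if (argc)
+ *                   return KDB_ARGCOUNT;
+ *           kdb_printf("hello from kdb\n");
+ *           return 0;
+ *   }
+ *
+ *   static int __init hello_init(void)
+ *   {
+ *           return kdb_register("hello", kdb_hello, "",
+ *                               "Print a greeting", 0);
+ *   }
+ *
+ *   static void __exit hello_exit(void)
+ *   {
+ *           kdb_unregister("hello");
+ *   }
+ */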
+
+/* Initialize the kdb command table. */
+static void __init kdb_inittab(void)
+{
+       int i;
+       kdbtab_t *kp;
+
+       for_each_kdbcmd(kp, i)
+               kp->cmd_name = NULL;
+
+       kdb_register_repeat("md", kdb_md, "<vaddr>",
+         "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
+                           KDB_REPEAT_NO_ARGS);
+       kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
+         "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
+       kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
+         "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
+       kdb_register_repeat("mds", kdb_md, "<vaddr>",
+         "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
+       kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
+         "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
+       kdb_register_repeat("go", kdb_go, "[<vaddr>]",
+         "Continue Execution", 1, KDB_REPEAT_NONE);
+       kdb_register_repeat("rd", kdb_rd, "",
+         "Display Registers", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
+         "Modify Registers", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("ef", kdb_ef, "<vaddr>",
+         "Display exception frame", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
+         "Stack traceback", 1, KDB_REPEAT_NONE);
+       kdb_register_repeat("btp", kdb_bt, "<pid>",
+         "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]",
+         "Display stack all processes", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("btc", kdb_bt, "",
+         "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+         "Backtrace process given its struct task address", 0,
+                           KDB_REPEAT_NONE);
+       kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>",
+         "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("env", kdb_env, "",
+         "Show environment variables", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("set", kdb_set, "",
+         "Set environment variables", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("help", kdb_help, "",
+         "Display Help Message", 1, KDB_REPEAT_NONE);
+       kdb_register_repeat("?", kdb_help, "",
+         "Display Help Message", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
+         "Switch to new cpu", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("kgdb", kdb_kgdb, "",
+         "Enter kgdb mode", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
+         "Display active task list", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("pid", kdb_pid, "<pidnum>",
+         "Switch to another task", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("reboot", kdb_reboot, "",
+         "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+#if defined(CONFIG_MODULES)
+       kdb_register_repeat("lsmod", kdb_lsmod, "",
+         "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+#endif
+#if defined(CONFIG_MAGIC_SYSRQ)
+       kdb_register_repeat("sr", kdb_sr, "<key>",
+         "Magic SysRq key", 0, KDB_REPEAT_NONE);
+#endif
+#if defined(CONFIG_PRINTK)
+       kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
+         "Display syslog buffer", 0, KDB_REPEAT_NONE);
+#endif
+       kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+         "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
+         "Send a signal to a process", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("summary", kdb_summary, "",
+         "Summarize the system", 4, KDB_REPEAT_NONE);
+       kdb_register_repeat("per_cpu", kdb_per_cpu, "",
+         "Display per_cpu variables", 3, KDB_REPEAT_NONE);
+       kdb_register_repeat("grephelp", kdb_grep_help, "",
+         "Display help on | grep", 0, KDB_REPEAT_NONE);
+}
+
+/* Execute any commands defined in kdb_cmds.  */
+static void __init kdb_cmd_init(void)
+{
+       int i, diag;
+       for (i = 0; kdb_cmds[i]; ++i) {
+               diag = kdb_parse(kdb_cmds[i]);
+               if (diag)
+                       kdb_printf("kdb command %s failed, kdb diag %d\n",
+                               kdb_cmds[i], diag);
+       }
+       if (defcmd_in_progress) {
+               kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n");
+               kdb_parse("endefcmd");
+       }
+}
+
+/* Initialize kdb_printf, breakpoint tables and kdb state */
+void __init kdb_init(int lvl)
+{
+       static int kdb_init_lvl = KDB_NOT_INITIALIZED;
+       int i;
+
+       if (kdb_init_lvl == KDB_INIT_FULL || lvl <= kdb_init_lvl)
+               return;
+       for (i = kdb_init_lvl; i < lvl; i++) {
+               switch (i) {
+               case KDB_NOT_INITIALIZED:
+                       kdb_inittab();          /* Initialize Command Table */
+                       kdb_initbptab();        /* Initialize Breakpoints */
+                       break;
+               case KDB_INIT_EARLY:
+                       kdb_cmd_init();         /* Build kdb_cmds tables */
+                       break;
+               }
+       }
+       kdb_init_lvl = lvl;
+}
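+
+/*
+ * Expected call sequence (sketch): the debug core first calls
+ * kdb_init(KDB_INIT_EARLY), which builds the command and breakpoint
+ * tables, and later calls kdb_init(KDB_INIT_FULL), which runs the
+ * kdb_cmds boot-time scripts.  Each level is applied at most once.
+ */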
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
new file mode 100644 (file)
index 0000000..97d3ba6
--- /dev/null
@@ -0,0 +1,300 @@
+#ifndef _KDBPRIVATE_H
+#define _KDBPRIVATE_H
+
+/*
+ * Kernel Debugger Architecture Independent Private Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ */
+
+#include <linux/kgdb.h>
+#include "../debug_core.h"
+
+/* Kernel Debugger Error codes.  Must not overlap with command codes. */
+#define KDB_NOTFOUND   (-1)
+#define KDB_ARGCOUNT   (-2)
+#define KDB_BADWIDTH   (-3)
+#define KDB_BADRADIX   (-4)
+#define KDB_NOTENV     (-5)
+#define KDB_NOENVVALUE (-6)
+#define KDB_NOTIMP     (-7)
+#define KDB_ENVFULL    (-8)
+#define KDB_ENVBUFFULL (-9)
+#define KDB_TOOMANYBPT (-10)
+#define KDB_TOOMANYDBREGS (-11)
+#define KDB_DUPBPT     (-12)
+#define KDB_BPTNOTFOUND        (-13)
+#define KDB_BADMODE    (-14)
+#define KDB_BADINT     (-15)
+#define KDB_INVADDRFMT  (-16)
+#define KDB_BADREG      (-17)
+#define KDB_BADCPUNUM   (-18)
+#define KDB_BADLENGTH  (-19)
+#define KDB_NOBP       (-20)
+#define KDB_BADADDR    (-21)
+
+/* Kernel Debugger Command codes.  Must not overlap with error codes. */
+#define KDB_CMD_GO     (-1001)
+#define KDB_CMD_CPU    (-1002)
+#define KDB_CMD_SS     (-1003)
+#define KDB_CMD_SSB    (-1004)
+#define KDB_CMD_KGDB   (-1005)
+#define KDB_CMD_KGDB2  (-1006)
+
+/* Internal debug flags */
+#define KDB_DEBUG_FLAG_BP      0x0002  /* Breakpoint subsystem debug */
+#define KDB_DEBUG_FLAG_BB_SUMM 0x0004  /* Basic block analysis, summary only */
+#define KDB_DEBUG_FLAG_AR      0x0008  /* Activation record, generic */
+#define KDB_DEBUG_FLAG_ARA     0x0010  /* Activation record, arch specific */
+#define KDB_DEBUG_FLAG_BB      0x0020  /* All basic block analysis */
+#define KDB_DEBUG_FLAG_STATE   0x0040  /* State flags */
+#define KDB_DEBUG_FLAG_MASK    0xffff  /* All debug flags */
+#define KDB_DEBUG_FLAG_SHIFT   16      /* Shift factor for dbflags */
+
+#define KDB_DEBUG(flag)        (kdb_flags & \
+       (KDB_DEBUG_FLAG_##flag << KDB_DEBUG_FLAG_SHIFT))
+#define KDB_DEBUG_STATE(text, value) if (KDB_DEBUG(STATE)) \
+               kdb_print_state(text, value)
+
+#if BITS_PER_LONG == 32
+
+#define KDB_PLATFORM_ENV       "BYTESPERWORD=4"
+
+#define kdb_machreg_fmt                "0x%lx"
+#define kdb_machreg_fmt0       "0x%08lx"
+#define kdb_bfd_vma_fmt                "0x%lx"
+#define kdb_bfd_vma_fmt0       "0x%08lx"
+#define kdb_elfw_addr_fmt      "0x%x"
+#define kdb_elfw_addr_fmt0     "0x%08x"
+#define kdb_f_count_fmt                "%d"
+
+#elif BITS_PER_LONG == 64
+
+#define KDB_PLATFORM_ENV       "BYTESPERWORD=8"
+
+#define kdb_machreg_fmt                "0x%lx"
+#define kdb_machreg_fmt0       "0x%016lx"
+#define kdb_bfd_vma_fmt                "0x%lx"
+#define kdb_bfd_vma_fmt0       "0x%016lx"
+#define kdb_elfw_addr_fmt      "0x%x"
+#define kdb_elfw_addr_fmt0     "0x%016x"
+#define kdb_f_count_fmt                "%ld"
+
+#endif
+
+/*
+ * KDB_MAXBPT describes the total number of breakpoints
+ * supported by this architecure.
+ */
+#define KDB_MAXBPT     16
+
+/* Maximum number of arguments to a function  */
+#define KDB_MAXARGS    16
+
+typedef enum {
+       KDB_REPEAT_NONE = 0,    /* Do not repeat this command */
+       KDB_REPEAT_NO_ARGS,     /* Repeat the command without arguments */
+       KDB_REPEAT_WITH_ARGS,   /* Repeat the command including its arguments */
+} kdb_repeat_t;
+
+typedef int (*kdb_func_t)(int, const char **);
+
+/* Symbol table format returned by kallsyms. */
+typedef struct __ksymtab {
+       unsigned long value;    /* Address of symbol */
+       const char *mod_name;   /* Module containing symbol or "kernel" */
+       unsigned long mod_start;
+       unsigned long mod_end;
+       const char *sec_name;   /* Section containing symbol */
+       unsigned long sec_start;
+       unsigned long sec_end;
+       const char *sym_name;   /* Full symbol name, including any version */
+       unsigned long sym_start;
+       unsigned long sym_end;
+} kdb_symtab_t;
+extern int kallsyms_symbol_next(char *prefix_name, int flag);
+extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
+
+/* Exported Symbols for kernel loadable modules to use. */
+extern int kdb_register(char *, kdb_func_t, char *, char *, short);
+extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
+                              short, kdb_repeat_t);
+extern int kdb_unregister(char *);
+
+extern int kdb_getarea_size(void *, unsigned long, size_t);
+extern int kdb_putarea_size(unsigned long, void *, size_t);
+
+/*
+ * Like get_user and put_user, kdb_getarea and kdb_putarea take variable
+ * names, not pointers.  The underlying *_size functions take pointers.
+ */
+#define kdb_getarea(x, addr) kdb_getarea_size(&(x), addr, sizeof((x)))
+#define kdb_putarea(addr, x) kdb_putarea_size(addr, &(x), sizeof((x)))
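+
+/*
+ * Usage sketch: kdb_getarea() takes the destination variable itself
+ * while the *_size variants take a pointer and an explicit length
+ * (addr below is illustrative):
+ *
+ *   unsigned int insn;
+ *   char buf[64];
+ *   diag = kdb_getarea(insn, addr);
+ *   diag = kdb_getarea_size(buf, addr, sizeof(buf));
+ */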
+
+extern int kdb_getphysword(unsigned long *word,
+                       unsigned long addr, size_t size);
+extern int kdb_getword(unsigned long *, unsigned long, size_t);
+extern int kdb_putword(unsigned long, unsigned long, size_t);
+
+extern int kdbgetularg(const char *, unsigned long *);
+extern int kdb_set(int, const char **);
+extern char *kdbgetenv(const char *);
+extern int kdbgetintenv(const char *, int *);
+extern int kdbgetaddrarg(int, const char **, int*, unsigned long *,
+                        long *, char **);
+extern int kdbgetsymval(const char *, kdb_symtab_t *);
+extern int kdbnearsym(unsigned long, kdb_symtab_t *);
+extern void kdbnearsym_cleanup(void);
+extern char *kdb_strdup(const char *str, gfp_t type);
+extern void kdb_symbol_print(unsigned long, const kdb_symtab_t *, unsigned int);
+
+/* Routine for debugging the debugger state. */
+extern void kdb_print_state(const char *, int);
+
+extern int kdb_state;
+#define KDB_STATE_KDB          0x00000001      /* Cpu is inside kdb */
+#define KDB_STATE_LEAVING      0x00000002      /* Cpu is leaving kdb */
+#define KDB_STATE_CMD          0x00000004      /* Running a kdb command */
+#define KDB_STATE_KDB_CONTROL  0x00000008      /* This cpu is under
+                                                * kdb control */
+#define KDB_STATE_HOLD_CPU     0x00000010      /* Hold this cpu inside kdb */
+#define KDB_STATE_DOING_SS     0x00000020      /* Doing ss command */
+#define KDB_STATE_DOING_SSB    0x00000040      /* Doing ssb command,
+                                                * DOING_SS is also set */
+#define KDB_STATE_SSBPT                0x00000080      /* Install breakpoint
+                                                * after one ss, independent of
+                                                * DOING_SS */
+#define KDB_STATE_REENTRY      0x00000100      /* Valid re-entry into kdb */
+#define KDB_STATE_SUPPRESS     0x00000200      /* Suppress error messages */
+#define KDB_STATE_PAGER                0x00000400      /* pager is available */
+#define KDB_STATE_GO_SWITCH    0x00000800      /* go is switching
+                                                * back to initial cpu */
+#define KDB_STATE_PRINTF_LOCK  0x00001000      /* Holds kdb_printf lock */
+#define KDB_STATE_WAIT_IPI     0x00002000      /* Waiting for kdb_ipi() NMI */
+#define KDB_STATE_RECURSE      0x00004000      /* Recursive entry to kdb */
+#define KDB_STATE_IP_ADJUSTED  0x00008000      /* Restart IP has been
+                                                * adjusted */
+#define KDB_STATE_GO1          0x00010000      /* go only releases one cpu */
+#define KDB_STATE_KEYBOARD     0x00020000      /* kdb entered via
+                                                * keyboard on this cpu */
+#define KDB_STATE_KEXEC                0x00040000      /* kexec issued */
+#define KDB_STATE_DOING_KGDB   0x00080000      /* kgdb enter now issued */
+#define KDB_STATE_DOING_KGDB2  0x00100000      /* kgdb enter now issued */
+#define KDB_STATE_KGDB_TRANS   0x00200000      /* Transition to kgdb */
+#define KDB_STATE_ARCH         0xff000000      /* Reserved for arch
+                                                * specific use */
+
+#define KDB_STATE(flag) (kdb_state & KDB_STATE_##flag)
+#define KDB_STATE_SET(flag) ((void)(kdb_state |= KDB_STATE_##flag))
+#define KDB_STATE_CLEAR(flag) ((void)(kdb_state &= ~KDB_STATE_##flag))
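+
+/*
+ * Usage sketch:
+ *
+ *   if (KDB_STATE(SUPPRESS))        test a state bit
+ *           ...
+ *   KDB_STATE_SET(CMD);             set a bit while running a command
+ *   KDB_STATE_CLEAR(CMD);           clear it again afterwards
+ */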
+
+extern int kdb_nextline; /* Current number of lines displayed */
+
+typedef struct _kdb_bp {
+       unsigned long   bp_addr;        /* Address breakpoint is present at */
+       unsigned int    bp_free:1;      /* This entry is available */
+       unsigned int    bp_enabled:1;   /* Breakpoint is active in register */
+       unsigned int    bp_type:4;      /* Uses hardware register */
+       unsigned int    bp_installed:1; /* Breakpoint is installed */
+       unsigned int    bp_delay:1;     /* Do delayed bp handling */
+       unsigned int    bp_delayed:1;   /* Delayed breakpoint */
+       unsigned int    bph_length;     /* HW break length */
+} kdb_bp_t;
+
+#ifdef CONFIG_KGDB_KDB
+extern kdb_bp_t kdb_breakpoints[/* KDB_MAXBPT */];
+
+/* The KDB shell command table */
+typedef struct _kdbtab {
+       char    *cmd_name;              /* Command name */
+       kdb_func_t cmd_func;            /* Function to execute command */
+       char    *cmd_usage;             /* Usage String for this command */
+       char    *cmd_help;              /* Help message for this command */
+       short    cmd_flags;             /* Parsing flags */
+       short    cmd_minlen;            /* Minimum legal # command
+                                        * chars required */
+       kdb_repeat_t cmd_repeat;        /* Does command auto repeat on enter? */
+} kdbtab_t;
+
+extern int kdb_bt(int, const char **); /* KDB display back trace */
+
+/* KDB breakpoint management functions */
+extern void kdb_initbptab(void);
+extern void kdb_bp_install(struct pt_regs *);
+extern void kdb_bp_remove(void);
+
+typedef enum {
+       KDB_DB_BPT,     /* Breakpoint */
+       KDB_DB_SS,      /* Single-step trap */
+       KDB_DB_SSB,     /* Single step to branch */
+       KDB_DB_SSBPT,   /* Single step over breakpoint */
+       KDB_DB_NOBPT    /* Spurious breakpoint */
+} kdb_dbtrap_t;
+
+extern int kdb_main_loop(kdb_reason_t, kdb_reason_t,
+                        int, kdb_dbtrap_t, struct pt_regs *);
+
+/* Miscellaneous functions and data areas */
+extern int kdb_grepping_flag;
+extern char kdb_grep_string[];
+extern int kdb_grep_leading;
+extern int kdb_grep_trailing;
+extern char *kdb_cmds[];
+extern void kdb_syslog_data(char *syslog_data[]);
+extern unsigned long kdb_task_state_string(const char *);
+extern char kdb_task_state_char(const struct task_struct *);
+extern unsigned long kdb_task_state(const struct task_struct *p,
+                                   unsigned long mask);
+extern void kdb_ps_suppressed(void);
+extern void kdb_ps1(const struct task_struct *p);
+extern void kdb_print_nameval(const char *name, unsigned long val);
+extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
+extern void kdb_meminfo_proc_show(void);
+extern const char *kdb_walk_kallsyms(loff_t *pos);
+extern char *kdb_getstr(char *, size_t, char *);
+
+/* Defines for kdb_symbol_print */
+#define KDB_SP_SPACEB  0x0001          /* Space before string */
+#define KDB_SP_SPACEA  0x0002          /* Space after string */
+#define KDB_SP_PAREN   0x0004          /* Parenthesis around string */
+#define KDB_SP_VALUE   0x0008          /* Print the value of the address */
+#define KDB_SP_SYMSIZE 0x0010          /* Print the size of the symbol */
+#define KDB_SP_NEWLINE 0x0020          /* Newline after string */
+#define KDB_SP_DEFAULT (KDB_SP_VALUE|KDB_SP_PAREN)
+
+#define KDB_TSK(cpu) kgdb_info[cpu].task
+#define KDB_TSKREGS(cpu) kgdb_info[cpu].debuggerinfo
+
+extern struct task_struct *kdb_curr_task(int);
+
+#define kdb_task_has_cpu(p) (task_curr(p))
+
+/* Simplify coexistence with NPTL */
+#define        kdb_do_each_thread(g, p) do_each_thread(g, p)
+#define        kdb_while_each_thread(g, p) while_each_thread(g, p)
+
+#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
+
+extern void *debug_kmalloc(size_t size, gfp_t flags);
+extern void debug_kfree(void *);
+extern void debug_kusage(void);
+
+extern void kdb_set_current_task(struct task_struct *);
+extern struct task_struct *kdb_current_task;
+#ifdef CONFIG_MODULES
+extern struct list_head *kdb_modules;
+#endif /* CONFIG_MODULES */
+
+extern char kdb_prompt_str[];
+
+#define        KDB_WORD_SIZE   ((int)sizeof(unsigned long))
+
+#endif /* CONFIG_KGDB_KDB */
+#endif /* !_KDBPRIVATE_H */
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
new file mode 100644 (file)
index 0000000..45344d5
--- /dev/null
@@ -0,0 +1,927 @@
+/*
+ * Kernel Debugger Architecture Independent Support Functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
+ * 03/02/13    added new 2.5 kallsyms <xavier.bru@bull.net>
+ */
+
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kallsyms.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/kdb.h>
+#include <linux/slab.h>
+#include "kdb_private.h"
+
+/*
+ * kdbgetsymval - Return the address of the given symbol.
+ *
+ * Parameters:
+ *     symname Character string containing symbol name
+ *      symtab  Structure to receive results
+ * Returns:
+ *     0       Symbol not found, symtab zero filled
+ *     1       Symbol mapped to module/symbol/section, data in symtab
+ */
+int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
+{
+       if (KDB_DEBUG(AR))
+               kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
+                          symtab);
+       memset(symtab, 0, sizeof(*symtab));
+       symtab->sym_start = kallsyms_lookup_name(symname);
+       if (symtab->sym_start) {
+               if (KDB_DEBUG(AR))
+                       kdb_printf("kdbgetsymval: returns 1, "
+                                  "symtab->sym_start=0x%lx\n",
+                                  symtab->sym_start);
+               return 1;
+       }
+       if (KDB_DEBUG(AR))
+               kdb_printf("kdbgetsymval: returns 0\n");
+       return 0;
+}
+EXPORT_SYMBOL(kdbgetsymval);
+
+static char *kdb_name_table[100];      /* arbitrary size */
+
+/*
+ * kdbnearsym -        Return the name of the symbol with the nearest address
+ *     less than 'addr'.
+ *
+ * Parameters:
+ *     addr    Address to check for symbol near
+ *     symtab  Structure to receive results
+ * Returns:
+ *     0       No sections contain this address, symtab zero filled
+ *     1       Address mapped to module/symbol/section, data in symtab
+ * Remarks:
+ *     2.6 kallsyms has a "feature" where it unpacks the name into a
+ *     string.  If that string is reused before the caller expects it
+ *     then the caller sees its string change without warning.  To
+ *     avoid cluttering up the main kdb code with lots of kdb_strdup,
+ *     tests and kfree calls, kdbnearsym maintains an LRU list of the
+ *     last few unique strings.  The list is sized large enough to
+ *     hold active strings; no kdb caller of kdbnearsym makes more
+ *     than ~20 further calls before using a saved value.
+ */
+int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
+{
+       int ret = 0;
+       unsigned long symbolsize;
+       unsigned long offset;
+#define knt1_size 128          /* must be >= kallsyms table size */
+       char *knt1 = NULL;
+
+       if (KDB_DEBUG(AR))
+               kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
+       memset(symtab, 0, sizeof(*symtab));
+
+       if (addr < 4096)
+               goto out;
+       knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
+       if (!knt1) {
+               kdb_printf("kdbnearsym: addr=0x%lx cannot kmalloc knt1\n",
+                          addr);
+               goto out;
+       }
+       symtab->sym_name = kallsyms_lookup(addr, &symbolsize, &offset,
+                               (char **)(&symtab->mod_name), knt1);
+       if (offset > 8*1024*1024) {
+               symtab->sym_name = NULL;
+               addr = offset = symbolsize = 0;
+       }
+       symtab->sym_start = addr - offset;
+       symtab->sym_end = symtab->sym_start + symbolsize;
+       ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';
+
+       if (ret) {
+               int i;
+               /* Another 2.6 kallsyms "feature".  Sometimes the sym_name is
+                * set but the buffer passed into kallsyms_lookup is not used,
+                * so it contains garbage.  The caller has to work out which
+                * buffer needs to be saved.
+                *
+                * What was Rusty smoking when he wrote that code?
+                */
+               if (symtab->sym_name != knt1) {
+                       strncpy(knt1, symtab->sym_name, knt1_size);
+                       knt1[knt1_size-1] = '\0';
+               }
+               for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
+                       if (kdb_name_table[i] &&
+                           strcmp(kdb_name_table[i], knt1) == 0)
+                               break;
+               }
+               if (i >= ARRAY_SIZE(kdb_name_table)) {
+                       debug_kfree(kdb_name_table[0]);
+                       memcpy(kdb_name_table, kdb_name_table+1,
+                              sizeof(kdb_name_table[0]) *
+                              (ARRAY_SIZE(kdb_name_table)-1));
+               } else {
+                       debug_kfree(knt1);
+                       knt1 = kdb_name_table[i];
+                       memcpy(kdb_name_table+i, kdb_name_table+i+1,
+                              sizeof(kdb_name_table[0]) *
+                              (ARRAY_SIZE(kdb_name_table)-i-1));
+               }
+               i = ARRAY_SIZE(kdb_name_table) - 1;
+               kdb_name_table[i] = knt1;
+               symtab->sym_name = kdb_name_table[i];
+               knt1 = NULL;
+       }
+
+       if (symtab->mod_name == NULL)
+               symtab->mod_name = "kernel";
+       if (KDB_DEBUG(AR))
+               kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
+                  "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
+                  symtab->sym_start, symtab->mod_name, symtab->sym_name,
+                  symtab->sym_name);
+
+out:
+       debug_kfree(knt1);
+       return ret;
+}
+
+void kdbnearsym_cleanup(void)
+{
+       int i;
+       for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
+               if (kdb_name_table[i]) {
+                       debug_kfree(kdb_name_table[i]);
+                       kdb_name_table[i] = NULL;
+               }
+       }
+}
+
+static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1];
+
+/*
+ * kallsyms_symbol_complete
+ *
+ * Parameters:
+ *     prefix_name     prefix of a symbol name to lookup
+ *     max_len         maximum length that can be returned
+ * Returns:
+ *     Number of symbols which match the given prefix.
+ * Notes:
+ *     prefix_name is changed to contain the longest unique prefix that
+ *     starts with this prefix (tab completion).
+ */
+int kallsyms_symbol_complete(char *prefix_name, int max_len)
+{
+       loff_t pos = 0;
+       int prefix_len = strlen(prefix_name), prev_len = 0;
+       int i, number = 0;
+       const char *name;
+
+       while ((name = kdb_walk_kallsyms(&pos))) {
+               if (strncmp(name, prefix_name, prefix_len) == 0) {
+                       strcpy(ks_namebuf, name);
+                       /* Work out the longest name that matches the prefix */
+                       if (++number == 1) {
+                               prev_len = min_t(int, max_len-1,
+                                                strlen(ks_namebuf));
+                               memcpy(ks_namebuf_prev, ks_namebuf, prev_len);
+                               ks_namebuf_prev[prev_len] = '\0';
+                               continue;
+                       }
+                       for (i = 0; i < prev_len; i++) {
+                               if (ks_namebuf[i] != ks_namebuf_prev[i]) {
+                                       prev_len = i;
+                                       ks_namebuf_prev[i] = '\0';
+                                       break;
+                               }
+                       }
+               }
+       }
+       if (prev_len > prefix_len)
+               memcpy(prefix_name, ks_namebuf_prev, prev_len+1);
+       return number;
+}
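+
+/*
+ * Illustrative completion: with prefix "kdb_prin", matching symbols
+ * such as kdb_printf and kdb_print_state share the longer prefix
+ * "kdb_print", so prefix_name is extended to "kdb_print" and the
+ * number of matches is returned.
+ */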
+
+/*
+ * kallsyms_symbol_next
+ *
+ * Parameters:
+ *     prefix_name     prefix of a symbol name to lookup
+ *     flag    0 means search from the head, 1 means continue search.
+ * Returns:
+ *     1 if a symbol matches the given prefix.
+ *     0 if no string found
+ */
+int kallsyms_symbol_next(char *prefix_name, int flag)
+{
+       int prefix_len = strlen(prefix_name);
+       static loff_t pos;
+       const char *name;
+
+       if (!flag)
+               pos = 0;
+
+       while ((name = kdb_walk_kallsyms(&pos))) {
+               if (strncmp(name, prefix_name, prefix_len) == 0) {
+                       strncpy(prefix_name, name, strlen(name)+1);
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * kdb_symbol_print - Standard method for printing a symbol name and offset.
+ * Inputs:
+ *     addr    Address to be printed.
+ *     symtab  Address of symbol data, if NULL this routine does its
+ *             own lookup.
+ *     punc    Punctuation for string, bit field.
+ * Remarks:
+ *     The string and its punctuation are only printed if the address
+ *     is inside the kernel, except that the value is always printed
+ *     when requested.
+ */
+void kdb_symbol_print(unsigned long addr, const kdb_symtab_t *symtab_p,
+                     unsigned int punc)
+{
+       kdb_symtab_t symtab, *symtab_p2;
+       if (symtab_p) {
+               symtab_p2 = (kdb_symtab_t *)symtab_p;
+       } else {
+               symtab_p2 = &symtab;
+               kdbnearsym(addr, symtab_p2);
+       }
+       if (!(symtab_p2->sym_name || (punc & KDB_SP_VALUE)))
+               return;
+       if (punc & KDB_SP_SPACEB)
+               kdb_printf(" ");
+       if (punc & KDB_SP_VALUE)
+               kdb_printf(kdb_machreg_fmt0, addr);
+       if (symtab_p2->sym_name) {
+               if (punc & KDB_SP_VALUE)
+                       kdb_printf(" ");
+               if (punc & KDB_SP_PAREN)
+                       kdb_printf("(");
+               if (strcmp(symtab_p2->mod_name, "kernel"))
+                       kdb_printf("[%s]", symtab_p2->mod_name);
+               kdb_printf("%s", symtab_p2->sym_name);
+               if (addr != symtab_p2->sym_start)
+                       kdb_printf("+0x%lx", addr - symtab_p2->sym_start);
+               if (punc & KDB_SP_SYMSIZE)
+                       kdb_printf("/0x%lx",
+                                  symtab_p2->sym_end - symtab_p2->sym_start);
+               if (punc & KDB_SP_PAREN)
+                       kdb_printf(")");
+       }
+       if (punc & KDB_SP_SPACEA)
+               kdb_printf(" ");
+       if (punc & KDB_SP_NEWLINE)
+               kdb_printf("\n");
+}
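+
+/*
+ * Illustrative output for a hypothetical module symbol my_func at
+ * 0xc0123460, printed with KDB_SP_DEFAULT (value plus parentheses)
+ * for addr = 0xc0123464:
+ *
+ *   0xc0123464 ([mymod]my_func+0x4)
+ *
+ * The [mymod] prefix appears only when mod_name is not "kernel" and
+ * the +0x4 offset only when addr is past sym_start.
+ */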
+
+/*
+ * kdb_strdup - kdb equivalent of strdup, for disasm code.
+ * Inputs:
+ *     str     The string to duplicate.
+ *     type    Flags to kmalloc for the new string.
+ * Returns:
+ *     Address of the new string, NULL if storage could not be allocated.
+ * Remarks:
+ *     This is not in lib/string.c because it uses kmalloc which is not
+ *     available when string.o is used in boot loaders.
+ */
+char *kdb_strdup(const char *str, gfp_t type)
+{
+       int n = strlen(str)+1;
+       char *s = kmalloc(n, type);
+       if (!s)
+               return NULL;
+       return strcpy(s, str);
+}
+
+/*
+ * kdb_getarea_size - Read an area of data.  The kdb equivalent of
+ *     copy_from_user, with kdb messages for invalid addresses.
+ * Inputs:
+ *     res     Pointer to the area to receive the result.
+ *     addr    Address of the area to copy.
+ *     size    Size of the area.
+ * Returns:
+ *     0 for success, < 0 for error.
+ */
+int kdb_getarea_size(void *res, unsigned long addr, size_t size)
+{
+       int ret = probe_kernel_read((char *)res, (char *)addr, size);
+       if (ret) {
+               if (!KDB_STATE(SUPPRESS)) {
+                       kdb_printf("kdb_getarea: Bad address 0x%lx\n", addr);
+                       KDB_STATE_SET(SUPPRESS);
+               }
+               ret = KDB_BADADDR;
+       } else {
+               KDB_STATE_CLEAR(SUPPRESS);
+       }
+       return ret;
+}
+
+/*
+ * kdb_putarea_size - Write an area of data.  The kdb equivalent of
+ *     copy_to_user, with kdb messages for invalid addresses.
+ * Inputs:
+ *     addr    Address of the area to write to.
+ *     res     Pointer to the area holding the data.
+ *     size    Size of the area.
+ * Returns:
+ *     0 for success, < 0 for error.
+ */
+int kdb_putarea_size(unsigned long addr, void *res, size_t size)
+{
+       int ret = probe_kernel_write((char *)addr, (char *)res, size);
+       if (ret) {
+               if (!KDB_STATE(SUPPRESS)) {
+                       kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr);
+                       KDB_STATE_SET(SUPPRESS);
+               }
+               ret = KDB_BADADDR;
+       } else {
+               KDB_STATE_CLEAR(SUPPRESS);
+       }
+       return ret;
+}
+
+/*
+ * kdb_getphys - Read data from a physical address.  Validates that
+ *     the address is in range, then uses kmap_atomic() to copy the
+ *     data; like kdb_getarea(), but for physical addresses.
+ * Inputs:
+ *     res     Pointer to the word to receive the result
+ *     addr    Physical address of the area to copy
+ *     size    Size of the area
+ * Returns:
+ *     0 for success, non-zero for error.
+ */
+static int kdb_getphys(void *res, unsigned long addr, size_t size)
+{
+       unsigned long pfn;
+       void *vaddr;
+       struct page *page;
+
+       pfn = (addr >> PAGE_SHIFT);
+       if (!pfn_valid(pfn))
+               return 1;
+       page = pfn_to_page(pfn);
+       vaddr = kmap_atomic(page, KM_KDB);
+       memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
+       kunmap_atomic(vaddr, KM_KDB);
+
+       return 0;
+}
+
+/*
+ * kdb_getphysword
+ * Inputs:
+ *     word    Pointer to the word to receive the result.
+ *     addr    Address of the area to copy.
+ *     size    Size of the area.
+ * Returns:
+ *     0 for success, < 0 for error.
+ */
+int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
+{
+       int diag;
+       __u8  w1;
+       __u16 w2;
+       __u32 w4;
+       __u64 w8;
+       *word = 0;      /* Default value if addr or size is invalid */
+
+       switch (size) {
+       case 1:
+               diag = kdb_getphys(&w1, addr, sizeof(w1));
+               if (!diag)
+                       *word = w1;
+               break;
+       case 2:
+               diag = kdb_getphys(&w2, addr, sizeof(w2));
+               if (!diag)
+                       *word = w2;
+               break;
+       case 4:
+               diag = kdb_getphys(&w4, addr, sizeof(w4));
+               if (!diag)
+                       *word = w4;
+               break;
+       case 8:
+               if (size <= sizeof(*word)) {
+                       diag = kdb_getphys(&w8, addr, sizeof(w8));
+                       if (!diag)
+                               *word = w8;
+                       break;
+               }
+               /* drop through */
+       default:
+               diag = KDB_BADWIDTH;
+               kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
+       }
+       return diag;
+}
+
+/*
+ * kdb_getword - Read a binary value.  Unlike kdb_getarea, this treats
+ *     data as numbers.
+ * Inputs:
+ *     word    Pointer to the word to receive the result.
+ *     addr    Address of the area to copy.
+ *     size    Size of the area.
+ * Returns:
+ *     0 for success, < 0 for error.
+ */
+int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
+{
+       int diag;
+       __u8  w1;
+       __u16 w2;
+       __u32 w4;
+       __u64 w8;
+       *word = 0;      /* Default value if addr or size is invalid */
+       switch (size) {
+       case 1:
+               diag = kdb_getarea(w1, addr);
+               if (!diag)
+                       *word = w1;
+               break;
+       case 2:
+               diag = kdb_getarea(w2, addr);
+               if (!diag)
+                       *word = w2;
+               break;
+       case 4:
+               diag = kdb_getarea(w4, addr);
+               if (!diag)
+                       *word = w4;
+               break;
+       case 8:
+               if (size <= sizeof(*word)) {
+                       diag = kdb_getarea(w8, addr);
+                       if (!diag)
+                               *word = w8;
+                       break;
+               }
+               /* drop through */
+       default:
+               diag = KDB_BADWIDTH;
+               kdb_printf("kdb_getword: bad width %ld\n", (long) size);
+       }
+       return diag;
+}
+
+/*
+ * kdb_putword - Write a binary value.  Unlike kdb_putarea, this
+ *     treats data as numbers.
+ * Inputs:
+ *     addr    Address of the area to write to.
+ *     word    The value to set.
+ *     size    Size of the area.
+ * Returns:
+ *     0 for success, < 0 for error.
+ */
+int kdb_putword(unsigned long addr, unsigned long word, size_t size)
+{
+       int diag;
+       __u8  w1;
+       __u16 w2;
+       __u32 w4;
+       __u64 w8;
+       switch (size) {
+       case 1:
+               w1 = word;
+               diag = kdb_putarea(addr, w1);
+               break;
+       case 2:
+               w2 = word;
+               diag = kdb_putarea(addr, w2);
+               break;
+       case 4:
+               w4 = word;
+               diag = kdb_putarea(addr, w4);
+               break;
+       case 8:
+               if (size <= sizeof(word)) {
+                       w8 = word;
+                       diag = kdb_putarea(addr, w8);
+                       break;
+               }
+               /* drop through */
+       default:
+               diag = KDB_BADWIDTH;
+               kdb_printf("kdb_putword: bad width %ld\n", (long) size);
+       }
+       return diag;
+}
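+
+/*
+ * Read-modify-write sketch using the word accessors; addr and the
+ * mask are illustrative:
+ *
+ *   unsigned long word;
+ *   if (!kdb_getword(&word, addr, 4))
+ *           kdb_putword(addr, word | 0x1, 4);
+ */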
+
+/*
+ * kdb_task_state_string - Convert a string containing any of the
+ *     letters DRSTCZEUIMA to a mask for the process state field and
+ *     return the value.  If no argument is supplied, return the mask
+ *     that corresponds to environment variable PS, DRSTCZEU by
+ *     default.
+ * Inputs:
+ *     s       String to convert
+ * Returns:
+ *     Mask for process state.
+ * Notes:
+ *     The mask folds data from several sources into a single long value, so
+ *     be careful not to overlap the bits.  TASK_* bits are in the LSB,
+ *     special cases like UNRUNNABLE are in the MSB.  As of 2.6.10-rc1 there
+ *     is no overlap between TASK_* and EXIT_* but that may not always be
+ *     true, so EXIT_* bits are shifted left 16 bits before being stored in
+ *     the mask.
+ */
+
+/* unrunnable is < 0 */
+#define UNRUNNABLE     (1UL << (8*sizeof(unsigned long) - 1))
+#define RUNNING                (1UL << (8*sizeof(unsigned long) - 2))
+#define IDLE           (1UL << (8*sizeof(unsigned long) - 3))
+#define DAEMON         (1UL << (8*sizeof(unsigned long) - 4))
+
+unsigned long kdb_task_state_string(const char *s)
+{
+       long res = 0;
+       if (!s) {
+               s = kdbgetenv("PS");
+               if (!s)
+                       s = "DRSTCZEU"; /* default value for ps */
+       }
+       while (*s) {
+               switch (*s) {
+               case 'D':
+                       res |= TASK_UNINTERRUPTIBLE;
+                       break;
+               case 'R':
+                       res |= RUNNING;
+                       break;
+               case 'S':
+                       res |= TASK_INTERRUPTIBLE;
+                       break;
+               case 'T':
+                       res |= TASK_STOPPED;
+                       break;
+               case 'C':
+                       res |= TASK_TRACED;
+                       break;
+               case 'Z':
+                       res |= EXIT_ZOMBIE << 16;
+                       break;
+               case 'E':
+                       res |= EXIT_DEAD << 16;
+                       break;
+               case 'U':
+                       res |= UNRUNNABLE;
+                       break;
+               case 'I':
+                       res |= IDLE;
+                       break;
+               case 'M':
+                       res |= DAEMON;
+                       break;
+               case 'A':
+                       res = ~0UL;
+                       break;
+               default:
+                       kdb_printf("%s: unknown flag '%c' ignored\n",
+                                  __func__, *s);
+                       break;
+               }
+               ++s;
+       }
+       return res;
+}
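+
+/*
+ * Example: kdb_task_state_string("RZ") returns RUNNING |
+ * (EXIT_ZOMBIE << 16), a mask matching running tasks and zombies,
+ * while 'A' sets the mask to ~0UL and so matches every task.
+ */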
+
+/*
+ * kdb_task_state_char - Return the character that represents the task state.
+ * Inputs:
+ *     p       struct task for the process
+ * Returns:
+ *     One character to represent the task state.
+ */
+char kdb_task_state_char(const struct task_struct *p)
+{
+       int cpu;
+       char state;
+       unsigned long tmp;
+
+       if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
+               return 'E';
+
+       cpu = kdb_process_cpu(p);
+       state = (p->state == 0) ? 'R' :
+               (p->state < 0) ? 'U' :
+               (p->state & TASK_UNINTERRUPTIBLE) ? 'D' :
+               (p->state & TASK_STOPPED) ? 'T' :
+               (p->state & TASK_TRACED) ? 'C' :
+               (p->exit_state & EXIT_ZOMBIE) ? 'Z' :
+               (p->exit_state & EXIT_DEAD) ? 'E' :
+               (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+       if (p->pid == 0) {
+               /* Idle task.  Is it really idle, apart from the kdb
+                * interrupt? */
+               if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
+                       if (cpu != kdb_initial_cpu)
+                               state = 'I';    /* idle task */
+               }
+       } else if (!p->mm && state == 'S') {
+               state = 'M';    /* sleeping system daemon */
+       }
+       return state;
+}
+
+/*
+ * kdb_task_state - Return true if a process has the desired state
+ *     given by the mask.
+ * Inputs:
+ *     p       struct task for the process
+ *     mask    mask from kdb_task_state_string to select processes
+ * Returns:
+ *     True if the process matches at least one criteria defined by the mask.
+ */
+unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask)
+{
+       char state[] = { kdb_task_state_char(p), '\0' };
+       return (mask & kdb_task_state_string(state)) != 0;
+}
+
+/*
+ * kdb_print_nameval - Print a name and its value, converting the
+ *     value to a symbol lookup if possible.
+ * Inputs:
+ *     name    field name to print
+ *     val     value of field
+ */
+void kdb_print_nameval(const char *name, unsigned long val)
+{
+       kdb_symtab_t symtab;
+       kdb_printf("  %-11.11s ", name);
+       if (kdbnearsym(val, &symtab))
+               kdb_symbol_print(val, &symtab,
+                                KDB_SP_VALUE|KDB_SP_SYMSIZE|KDB_SP_NEWLINE);
+       else
+               kdb_printf("0x%lx\n", val);
+}
+
+/* Last ditch allocator for debugging, so we can still debug even when
+ * the GFP_ATOMIC pool has been exhausted.  The algorithms are tuned
+ * for space usage, not for speed.  One smallish memory pool, the free
+ * chain is always in ascending address order to allow coalescing,
+ * allocations are done in brute force best fit.
+ */
+
+struct debug_alloc_header {
+       u32 next;       /* offset of next header from start of pool */
+       u32 size;
+       void *caller;
+};
+
+/* The memory returned by this allocator must be aligned, which means
+ * so must the header size.  Do not assume that sizeof(struct
+ * debug_alloc_header) is a multiple of the alignment, explicitly
+ * calculate the overhead of this header, including the alignment.
+ * The rest of this code must not use sizeof() on any header or
+ * pointer to a header.
+ */
+#define dah_align 8
+#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align)
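+
+/*
+ * Worked example (assuming 32-bit pointers, so the raw header is 12
+ * bytes): dah_overhead rounds up to 16, and a debug_kmalloc(20)
+ * request is padded to ALIGN(20, 8) = 24 bytes, consuming 16 + 24 =
+ * 40 bytes of the pool.
+ */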
+
+static u64 debug_alloc_pool_aligned[256*1024/dah_align];       /* 256K pool */
+static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned;
+static u32 dah_first, dah_first_call = 1, dah_used, dah_used_max;
+
+/* Locking is awkward.  The debug code is called from all contexts,
+ * including non-maskable interrupts.  A normal spinlock is not safe
+ * in NMI context.  Try to get the debug allocator lock; if it cannot
+ * be obtained within a second then give up.  If the lock could not be
+ * previously obtained on this cpu then only try once.
+ *
+ * sparse has no annotation for "this function _sometimes_ acquires a
+ * lock", so fudge the acquire/release notation.
+ */
+static DEFINE_SPINLOCK(dap_lock);
+static int get_dap_lock(void)
+       __acquires(dap_lock)
+{
+       static int dap_locked = -1;
+       int count;
+       if (dap_locked == smp_processor_id())
+               count = 1;
+       else
+               count = 1000;
+       while (1) {
+               if (spin_trylock(&dap_lock)) {
+                       dap_locked = -1;
+                       return 1;
+               }
+               if (!count--)
+                       break;
+               udelay(1000);
+       }
+       dap_locked = smp_processor_id();
+       __acquire(dap_lock);
+       return 0;
+}
+
+void *debug_kmalloc(size_t size, gfp_t flags)
+{
+       unsigned int rem, h_offset;
+       struct debug_alloc_header *best, *bestprev, *prev, *h;
+       void *p = NULL;
+       if (!get_dap_lock()) {
+               __release(dap_lock);    /* we never actually got it */
+               return NULL;
+       }
+       h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
+       if (dah_first_call) {
+               h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead;
+               dah_first_call = 0;
+       }
+       size = ALIGN(size, dah_align);
+       prev = best = bestprev = NULL;
+       while (1) {
+               if (h->size >= size && (!best || h->size < best->size)) {
+                       best = h;
+                       bestprev = prev;
+                       if (h->size == size)
+                               break;
+               }
+               if (!h->next)
+                       break;
+               prev = h;
+               h = (struct debug_alloc_header *)(debug_alloc_pool + h->next);
+       }
+       if (!best)
+               goto out;
+       rem = best->size - size;
+       /* The pool must always contain at least one header */
+       if (best->next == 0 && bestprev == NULL && rem < dah_overhead)
+               goto out;
+       if (rem >= dah_overhead) {
+               best->size = size;
+               h_offset = ((char *)best - debug_alloc_pool) +
+                          dah_overhead + best->size;
+               h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset);
+               h->size = rem - dah_overhead;
+               h->next = best->next;
+       } else
+               h_offset = best->next;
+       best->caller = __builtin_return_address(0);
+       dah_used += best->size;
+       dah_used_max = max(dah_used, dah_used_max);
+       if (bestprev)
+               bestprev->next = h_offset;
+       else
+               dah_first = h_offset;
+       p = (char *)best + dah_overhead;
+       memset(p, POISON_INUSE, best->size - 1);
+       *((char *)p + best->size - 1) = POISON_END;
+out:
+       spin_unlock(&dap_lock);
+       return p;
+}
+
+void debug_kfree(void *p)
+{
+       struct debug_alloc_header *h;
+       unsigned int h_offset;
+       if (!p)
+               return;
+       if ((char *)p < debug_alloc_pool ||
+           (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
+               kfree(p);
+               return;
+       }
+       if (!get_dap_lock()) {
+               __release(dap_lock);    /* we never actually got it */
+               return;         /* memory leak, cannot be helped */
+       }
+       h = (struct debug_alloc_header *)((char *)p - dah_overhead);
+       memset(p, POISON_FREE, h->size - 1);
+       *((char *)p + h->size - 1) = POISON_END;
+       h->caller = NULL;
+       dah_used -= h->size;
+       h_offset = (char *)h - debug_alloc_pool;
+       if (h_offset < dah_first) {
+               h->next = dah_first;
+               dah_first = h_offset;
+       } else {
+               struct debug_alloc_header *prev;
+               unsigned int prev_offset;
+               prev = (struct debug_alloc_header *)(debug_alloc_pool +
+                                                    dah_first);
+               while (1) {
+                       if (!prev->next || prev->next > h_offset)
+                               break;
+                       prev = (struct debug_alloc_header *)
+                               (debug_alloc_pool + prev->next);
+               }
+               prev_offset = (char *)prev - debug_alloc_pool;
+               if (prev_offset + dah_overhead + prev->size == h_offset) {
+                       prev->size += dah_overhead + h->size;
+                       memset(h, POISON_FREE, dah_overhead - 1);
+                       *((char *)h + dah_overhead - 1) = POISON_END;
+                       h = prev;
+                       h_offset = prev_offset;
+               } else {
+                       h->next = prev->next;
+                       prev->next = h_offset;
+               }
+       }
+       if (h_offset + dah_overhead + h->size == h->next) {
+               struct debug_alloc_header *next;
+               next = (struct debug_alloc_header *)
+                       (debug_alloc_pool + h->next);
+               h->size += dah_overhead + next->size;
+               h->next = next->next;
+               memset(next, POISON_FREE, dah_overhead - 1);
+               *((char *)next + dah_overhead - 1) = POISON_END;
+       }
+       spin_unlock(&dap_lock);
+}
+
+void debug_kusage(void)
+{
+       struct debug_alloc_header *h_free, *h_used;
+#ifdef CONFIG_IA64
+       /* FIXME: using dah for ia64 unwind always results in a memory leak.
+        * Fix that memory leak first, then set debug_kusage_one_time = 1 for
+        * all architectures.
+        */
+       static int debug_kusage_one_time;
+#else
+       static int debug_kusage_one_time = 1;
+#endif
+       if (!get_dap_lock()) {
+               __release(dap_lock);    /* we never actually got it */
+               return;
+       }
+       h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
+       if (dah_first == 0 &&
+           (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead ||
+            dah_first_call))
+               goto out;
+       if (!debug_kusage_one_time)
+               goto out;
+       debug_kusage_one_time = 0;
+       kdb_printf("%s: debug_kmalloc memory leak dah_first %d\n",
+                  __func__, dah_first);
+       if (dah_first) {
+               h_used = (struct debug_alloc_header *)debug_alloc_pool;
+               kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
+                          h_used->size);
+       }
+       do {
+               h_used = (struct debug_alloc_header *)
+                         ((char *)h_free + dah_overhead + h_free->size);
+               kdb_printf("%s: h_used %p size %d caller %p\n",
+                          __func__, h_used, h_used->size, h_used->caller);
+               h_free = (struct debug_alloc_header *)
+                         (debug_alloc_pool + h_free->next);
+       } while (h_free->next);
+       h_used = (struct debug_alloc_header *)
+                 ((char *)h_free + dah_overhead + h_free->size);
+       if ((char *)h_used - debug_alloc_pool !=
+           sizeof(debug_alloc_pool_aligned))
+               kdb_printf("%s: h_used %p size %d caller %p\n",
+                          __func__, h_used, h_used->size, h_used->caller);
+out:
+       spin_unlock(&dap_lock);
+}
+
+/* Maintain a small stack of kdb_flags to allow recursion without disturbing
+ * the global kdb state.
+ */
+
+static int kdb_flags_stack[4], kdb_flags_index;
+
+void kdb_save_flags(void)
+{
+       BUG_ON(kdb_flags_index >= ARRAY_SIZE(kdb_flags_stack));
+       kdb_flags_stack[kdb_flags_index++] = kdb_flags;
+}
+
+void kdb_restore_flags(void)
+{
+       BUG_ON(kdb_flags_index <= 0);
+       kdb_flags = kdb_flags_stack[--kdb_flags_index];
+}
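
For reference, kdb_save_flags() and kdb_restore_flags() implement a small
LIFO stack (four entries deep), so a nested debugger entry can change
kdb_flags temporarily without clobbering the outer session. A minimal
usage sketch, assuming kdb context; the function name below is
illustrative, not part of this patch:

    /* Hypothetical nested entry: bracket the work with save/restore. */
    static void example_nested_kdb_entry(void)
    {
            kdb_save_flags();       /* push the current kdb_flags */
            /* ... run a nested command that may modify kdb_flags ... */
            kdb_restore_flags();    /* pop, restoring the outer state */
    }
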
index 13aff29..6f6d091 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/fs.h>
+#include <linux/kdb.h>
 #include <linux/err.h>
 #include <linux/proc_fs.h>
 #include <linux/sched.h>       /* for cond_resched */
@@ -516,6 +517,26 @@ static int kallsyms_open(struct inode *inode, struct file *file)
        return ret;
 }
 
+#ifdef CONFIG_KGDB_KDB
+const char *kdb_walk_kallsyms(loff_t *pos)
+{
+       static struct kallsym_iter kdb_walk_kallsyms_iter;
+       if (*pos == 0) {
+               memset(&kdb_walk_kallsyms_iter, 0,
+                      sizeof(kdb_walk_kallsyms_iter));
+               reset_iter(&kdb_walk_kallsyms_iter, 0);
+       }
+       while (1) {
+               if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
+                       return NULL;
+               ++*pos;
+               /* Some debugging symbols have no name.  Ignore them. */
+               if (kdb_walk_kallsyms_iter.name[0])
+                       return kdb_walk_kallsyms_iter.name;
+       }
+}
+#endif /* CONFIG_KGDB_KDB */
+
 static const struct file_operations kallsyms_operations = {
        .open = kallsyms_open,
        .read = seq_read,
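
kdb_walk_kallsyms() is a pull-style iterator: the caller owns the loff_t
cursor, passes *pos == 0 to (re)start, and calls until NULL is returned.
A minimal consumer sketch, assuming kdb context; the function name below
is illustrative, not part of this patch:

    /* Hypothetical consumer: print every named kernel symbol via kdb. */
    static void example_dump_symbols(void)
    {
            loff_t pos = 0;
            const char *name;

            while ((name = kdb_walk_kallsyms(&pos)) != NULL)
                    kdb_printf("%s\n", name);
    }
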
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
deleted file mode 100644 (file)
index 11f3515..0000000
+++ /dev/null
@@ -1,1764 +0,0 @@
-/*
- * KGDB stub.
- *
- * Maintainer: Jason Wessel <jason.wessel@windriver.com>
- *
- * Copyright (C) 2000-2001 VERITAS Software Corporation.
- * Copyright (C) 2002-2004 Timesys Corporation
- * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
- * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
- * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
- * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
- * Copyright (C) 2005-2008 Wind River Systems, Inc.
- * Copyright (C) 2007 MontaVista Software, Inc.
- * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *
- * Contributors at various stages not listed above:
- *  Jason Wessel ( jason.wessel@windriver.com )
- *  George Anzinger <george@mvista.com>
- *  Anurekh Saxena (anurekh.saxena@timesys.com)
- *  Lake Stevens Instrument Division (Glenn Engel)
- *  Jim Kingdon, Cygnus Support.
- *
- * Original KGDB stub: David Grothe <dave@gcom.com>,
- * Tigran Aivazian <tigran@sco.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#include <linux/pid_namespace.h>
-#include <linux/clocksource.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/console.h>
-#include <linux/threads.h>
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ptrace.h>
-#include <linux/reboot.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/sysrq.h>
-#include <linux/init.h>
-#include <linux/kgdb.h>
-#include <linux/pid.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/cacheflush.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/unaligned.h>
-
-static int kgdb_break_asap;
-
-#define KGDB_MAX_THREAD_QUERY 17
-struct kgdb_state {
-       int                     ex_vector;
-       int                     signo;
-       int                     err_code;
-       int                     cpu;
-       int                     pass_exception;
-       unsigned long           thr_query;
-       unsigned long           threadid;
-       long                    kgdb_usethreadid;
-       struct pt_regs          *linux_regs;
-};
-
-/* Exception state values */
-#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
-#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
-#define DCPU_IS_SLAVE    0x4 /* Slave cpu entered an exception */
-#define DCPU_SSTEP       0x8 /* CPU is single stepping */
-
-static struct debuggerinfo_struct {
-       void                    *debuggerinfo;
-       struct task_struct      *task;
-       int                     exception_state;
-} kgdb_info[NR_CPUS];
-
-/**
- * kgdb_connected - Is a host GDB connected to us?
- */
-int                            kgdb_connected;
-EXPORT_SYMBOL_GPL(kgdb_connected);
-
-/* All the KGDB handlers are installed */
-static int                     kgdb_io_module_registered;
-
-/* Guard for recursive entry */
-static int                     exception_level;
-
-static struct kgdb_io          *kgdb_io_ops;
-static DEFINE_SPINLOCK(kgdb_registration_lock);
-
-/* kgdb console driver is loaded */
-static int kgdb_con_registered;
-/* determine if kgdb console output should be used */
-static int kgdb_use_con;
-
-static int __init opt_kgdb_con(char *str)
-{
-       kgdb_use_con = 1;
-       return 0;
-}
-
-early_param("kgdbcon", opt_kgdb_con);
-
-module_param(kgdb_use_con, int, 0644);
-
-/*
- * Holds information about breakpoints in the kernel. These breakpoints are
- * added and removed by gdb.
- */
-static struct kgdb_bkpt                kgdb_break[KGDB_MAX_BREAKPOINTS] = {
-       [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
-};
-
-/*
- * The CPU# of the active CPU, or -1 if none:
- */
-atomic_t                       kgdb_active = ATOMIC_INIT(-1);
-
-/*
- * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
- * bootup code (which might not have percpu set up yet):
- */
-static atomic_t                        passive_cpu_wait[NR_CPUS];
-static atomic_t                        cpu_in_kgdb[NR_CPUS];
-atomic_t                       kgdb_setting_breakpoint;
-
-struct task_struct             *kgdb_usethread;
-struct task_struct             *kgdb_contthread;
-
-int                            kgdb_single_step;
-pid_t                          kgdb_sstep_pid;
-
-/* Our I/O buffers. */
-static char                    remcom_in_buffer[BUFMAX];
-static char                    remcom_out_buffer[BUFMAX];
-
-/* Storage for the registers, in GDB format. */
-static unsigned long           gdb_regs[(NUMREGBYTES +
-                                       sizeof(unsigned long) - 1) /
-                                       sizeof(unsigned long)];
-
-/* To keep track of the CPU which is doing the single stepping */
-atomic_t                       kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
-
-/*
- * If you are debugging a problem where roundup (the collection of
- * all other CPUs) is a problem [this should be extremely rare],
- * then use the nokgdbroundup option to avoid roundup. In that case
- * the other CPUs might interfere with your debugging context, so
- * use this with care:
- */
-static int kgdb_do_roundup = 1;
-
-static int __init opt_nokgdbroundup(char *str)
-{
-       kgdb_do_roundup = 0;
-
-       return 0;
-}
-
-early_param("nokgdbroundup", opt_nokgdbroundup);
-
-/*
- * Finally, some KGDB code :-)
- */
-
-/*
- * Weak aliases for breakpoint management,
- * can be overridden by architectures when needed:
- */
-int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
-{
-       int err;
-
-       err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
-       if (err)
-               return err;
-
-       return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
-                                 BREAK_INSTR_SIZE);
-}
-
-int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
-{
-       return probe_kernel_write((char *)addr,
-                                 (char *)bundle, BREAK_INSTR_SIZE);
-}
-
-int __weak kgdb_validate_break_address(unsigned long addr)
-{
-       char tmp_variable[BREAK_INSTR_SIZE];
-       int err;
-       /* Validate setting the breakpoint and then removing it.  If the
-        * remove fails, the kernel needs to emit a bad message because we
-        * are in deep trouble, not being able to put things back the way we
-        * found them.
-        */
-       err = kgdb_arch_set_breakpoint(addr, tmp_variable);
-       if (err)
-               return err;
-       err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
-       if (err)
-               printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
-                  "memory destroyed at: %lx", addr);
-       return err;
-}
-
-unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
-{
-       return instruction_pointer(regs);
-}
-
-int __weak kgdb_arch_init(void)
-{
-       return 0;
-}
-
-int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
-{
-       return 0;
-}
-
-void __weak
-kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
-       return;
-}
-
-/**
- *     kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
- *     @regs: Current &struct pt_regs.
- *
- *     This function will be called if the particular architecture must
- *     disable hardware debugging while it is processing gdb packets or
- *     handling an exception.
- */
-void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
-{
-}
-
-/*
- * GDB remote protocol parser:
- */
-
-static int hex(char ch)
-{
-       if ((ch >= 'a') && (ch <= 'f'))
-               return ch - 'a' + 10;
-       if ((ch >= '0') && (ch <= '9'))
-               return ch - '0';
-       if ((ch >= 'A') && (ch <= 'F'))
-               return ch - 'A' + 10;
-       return -1;
-}
-
-/* scan for the sequence $<data>#<checksum> */
-static void get_packet(char *buffer)
-{
-       unsigned char checksum;
-       unsigned char xmitcsum;
-       int count;
-       char ch;
-
-       do {
-               /*
-                * Spin and wait around for the start character, ignore all
-                * other characters:
-                */
-               while ((ch = (kgdb_io_ops->read_char())) != '$')
-                       /* nothing */;
-
-               kgdb_connected = 1;
-               checksum = 0;
-               xmitcsum = -1;
-
-               count = 0;
-
-               /*
-                * now, read until a # or end of buffer is found:
-                */
-               while (count < (BUFMAX - 1)) {
-                       ch = kgdb_io_ops->read_char();
-                       if (ch == '#')
-                               break;
-                       checksum = checksum + ch;
-                       buffer[count] = ch;
-                       count = count + 1;
-               }
-               buffer[count] = 0;
-
-               if (ch == '#') {
-                       xmitcsum = hex(kgdb_io_ops->read_char()) << 4;
-                       xmitcsum += hex(kgdb_io_ops->read_char());
-
-                       if (checksum != xmitcsum)
-                               /* failed checksum */
-                               kgdb_io_ops->write_char('-');
-                       else
-                               /* successful transfer */
-                               kgdb_io_ops->write_char('+');
-                       if (kgdb_io_ops->flush)
-                               kgdb_io_ops->flush();
-               }
-       } while (checksum != xmitcsum);
-}
-
-/*
- * Send the packet in buffer.
- * Check for gdb connection if asked for.
- */
-static void put_packet(char *buffer)
-{
-       unsigned char checksum;
-       int count;
-       char ch;
-
-       /*
-        * $<packet info>#<checksum>.
-        */
-       while (1) {
-               kgdb_io_ops->write_char('$');
-               checksum = 0;
-               count = 0;
-
-               while ((ch = buffer[count])) {
-                       kgdb_io_ops->write_char(ch);
-                       checksum += ch;
-                       count++;
-               }
-
-               kgdb_io_ops->write_char('#');
-               kgdb_io_ops->write_char(hex_asc_hi(checksum));
-               kgdb_io_ops->write_char(hex_asc_lo(checksum));
-               if (kgdb_io_ops->flush)
-                       kgdb_io_ops->flush();
-
-               /* Now see what we get in reply. */
-               ch = kgdb_io_ops->read_char();
-
-               if (ch == 3)
-                       ch = kgdb_io_ops->read_char();
-
-               /* If we get an ACK, we are done. */
-               if (ch == '+')
-                       return;
-
-               /*
-                * If we get the start of another packet, this means
-                * that GDB is attempting to reconnect.  We will NAK
-                * the packet being sent, and stop trying to send this
-                * packet.
-                */
-               if (ch == '$') {
-                       kgdb_io_ops->write_char('-');
-                       if (kgdb_io_ops->flush)
-                               kgdb_io_ops->flush();
-                       return;
-               }
-       }
-}
-
-/*
- * Convert the memory pointed to by mem into hex, placing result in buf.
- * Return 0 on success or an error code on failure.
- */
-int kgdb_mem2hex(char *mem, char *buf, int count)
-{
-       char *tmp;
-       int err;
-
-       /*
-        * We use the upper half of buf as an intermediate buffer for the
-        * raw memory copy.  Hex conversion will work against this one.
-        */
-       tmp = buf + count;
-
-       err = probe_kernel_read(tmp, mem, count);
-       if (!err) {
-               while (count > 0) {
-                       buf = pack_hex_byte(buf, *tmp);
-                       tmp++;
-                       count--;
-               }
-
-               *buf = 0;
-       }
-
-       return err;
-}
-
-/*
- * Copy the binary array pointed to by buf into mem.  Unescape $, #, and
- * 0x7d (each escaped with 0x7d).  Return -EFAULT on failure or 0 on
- * success.  The input buf is overwritten with the result to write to mem.
- */
-static int kgdb_ebin2mem(char *buf, char *mem, int count)
-{
-       int size = 0;
-       char *c = buf;
-
-       while (count-- > 0) {
-               c[size] = *buf++;
-               if (c[size] == 0x7d)
-                       c[size] = *buf++ ^ 0x20;
-               size++;
-       }
-
-       return probe_kernel_write(mem, c, size);
-}
-
-/*
- * Convert the hex array pointed to by buf into binary to be placed in mem.
- * Return 0 on success or an error code on failure.
- */
-int kgdb_hex2mem(char *buf, char *mem, int count)
-{
-       char *tmp_raw;
-       char *tmp_hex;
-
-       /*
-        * We use the upper half of buf as an intermediate buffer for the
-        * raw memory that is converted from hex.
-        */
-       tmp_raw = buf + count * 2;
-
-       tmp_hex = tmp_raw - 1;
-       while (tmp_hex >= buf) {
-               tmp_raw--;
-               *tmp_raw = hex(*tmp_hex--);
-               *tmp_raw |= hex(*tmp_hex--) << 4;
-       }
-
-       return probe_kernel_write(mem, tmp_raw, count);
-}
-
-/*
- * While we find nice hex chars, build a long_val.
- * Return number of chars processed.
- */
-int kgdb_hex2long(char **ptr, unsigned long *long_val)
-{
-       int hex_val;
-       int num = 0;
-       int negate = 0;
-
-       *long_val = 0;
-
-       if (**ptr == '-') {
-               negate = 1;
-               (*ptr)++;
-       }
-       while (**ptr) {
-               hex_val = hex(**ptr);
-               if (hex_val < 0)
-                       break;
-
-               *long_val = (*long_val << 4) | hex_val;
-               num++;
-               (*ptr)++;
-       }
-
-       if (negate)
-               *long_val = -*long_val;
-
-       return num;
-}
-
-/* Write memory due to an 'M' or 'X' packet. */
-static int write_mem_msg(int binary)
-{
-       char *ptr = &remcom_in_buffer[1];
-       unsigned long addr;
-       unsigned long length;
-       int err;
-
-       if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' &&
-           kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') {
-               if (binary)
-                       err = kgdb_ebin2mem(ptr, (char *)addr, length);
-               else
-                       err = kgdb_hex2mem(ptr, (char *)addr, length);
-               if (err)
-                       return err;
-               if (CACHE_FLUSH_IS_SAFE)
-                       flush_icache_range(addr, addr + length);
-               return 0;
-       }
-
-       return -EINVAL;
-}
-
-static void error_packet(char *pkt, int error)
-{
-       error = -error;
-       pkt[0] = 'E';
-       pkt[1] = hex_asc[(error / 10)];
-       pkt[2] = hex_asc[(error % 10)];
-       pkt[3] = '\0';
-}
-
-/*
- * Thread ID accessors. We represent a flat TID space to GDB, where
- * the per CPU idle threads (which under Linux all have PID 0) are
- * remapped to negative TIDs.
- */
-
-#define BUF_THREAD_ID_SIZE     16
-
-static char *pack_threadid(char *pkt, unsigned char *id)
-{
-       char *limit;
-
-       limit = pkt + BUF_THREAD_ID_SIZE;
-       while (pkt < limit)
-               pkt = pack_hex_byte(pkt, *id++);
-
-       return pkt;
-}
-
-static void int_to_threadref(unsigned char *id, int value)
-{
-       unsigned char *scan;
-       int i = 4;
-
-       scan = (unsigned char *)id;
-       while (i--)
-               *scan++ = 0;
-       put_unaligned_be32(value, scan);
-}
-
-static struct task_struct *getthread(struct pt_regs *regs, int tid)
-{
-       /*
-        * Non-positive TIDs are remapped to the cpu shadow information
-        */
-       if (tid == 0 || tid == -1)
-               tid = -atomic_read(&kgdb_active) - 2;
-       if (tid < -1 && tid > -NR_CPUS - 2) {
-               if (kgdb_info[-tid - 2].task)
-                       return kgdb_info[-tid - 2].task;
-               else
-                       return idle_task(-tid - 2);
-       }
-       if (tid <= 0) {
-               printk(KERN_ERR "KGDB: Internal thread select error\n");
-               dump_stack();
-               return NULL;
-       }
-
-       /*
-        * find_task_by_pid_ns() does not take the tasklist lock anymore
-        * but is nicely RCU locked - hence is a pretty resilient
-        * thing to use:
-        */
-       return find_task_by_pid_ns(tid, &init_pid_ns);
-}
-
-/*
- * Some architectures need cache flushes when we set/clear a
- * breakpoint:
- */
-static void kgdb_flush_swbreak_addr(unsigned long addr)
-{
-       if (!CACHE_FLUSH_IS_SAFE)
-               return;
-
-       if (current->mm && current->mm->mmap_cache) {
-               flush_cache_range(current->mm->mmap_cache,
-                                 addr, addr + BREAK_INSTR_SIZE);
-       }
-       /* Force flush instruction cache if it was outside the mm */
-       flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
-}
-
-/*
- * SW breakpoint management:
- */
-static int kgdb_activate_sw_breakpoints(void)
-{
-       unsigned long addr;
-       int error;
-       int ret = 0;
-       int i;
-
-       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-               if (kgdb_break[i].state != BP_SET)
-                       continue;
-
-               addr = kgdb_break[i].bpt_addr;
-               error = kgdb_arch_set_breakpoint(addr,
-                               kgdb_break[i].saved_instr);
-               if (error) {
-                       ret = error;
-                       printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
-                       continue;
-               }
-
-               kgdb_flush_swbreak_addr(addr);
-               kgdb_break[i].state = BP_ACTIVE;
-       }
-       return ret;
-}
-
-static int kgdb_set_sw_break(unsigned long addr)
-{
-       int err = kgdb_validate_break_address(addr);
-       int breakno = -1;
-       int i;
-
-       if (err)
-               return err;
-
-       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-               if ((kgdb_break[i].state == BP_SET) &&
-                                       (kgdb_break[i].bpt_addr == addr))
-                       return -EEXIST;
-       }
-       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-               if (kgdb_break[i].state == BP_REMOVED &&
-                                       kgdb_break[i].bpt_addr == addr) {
-                       breakno = i;
-                       break;
-               }
-       }
-
-       if (breakno == -1) {
-               for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-                       if (kgdb_break[i].state == BP_UNDEFINED) {
-                               breakno = i;
-                               break;
-                       }
-               }
-       }
-
-       if (breakno == -1)
-               return -E2BIG;
-
-       kgdb_break[breakno].state = BP_SET;
-       kgdb_break[breakno].type = BP_BREAKPOINT;
-       kgdb_break[breakno].bpt_addr = addr;
-
-       return 0;
-}
-
-static int kgdb_deactivate_sw_breakpoints(void)
-{
-       unsigned long addr;
-       int error;
-       int ret = 0;
-       int i;
-
-       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-               if (kgdb_break[i].state != BP_ACTIVE)
-                       continue;
-               addr = kgdb_break[i].bpt_addr;
-               error = kgdb_arch_remove_breakpoint(addr,
-                                       kgdb_break[i].saved_instr);
-               if (error) {
-                       printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
-                       ret = error;
-               }
-
-               kgdb_flush_swbreak_addr(addr);
-               kgdb_break[i].state = BP_SET;
-       }
-       return ret;
-}
-
-static int kgdb_remove_sw_break(unsigned long addr)
-{
-       int i;
-
-       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-               if ((kgdb_break[i].state == BP_SET) &&
-                               (kgdb_break[i].bpt_addr == addr)) {
-                       kgdb_break[i].state = BP_REMOVED;
-                       return 0;
-               }
-       }
-       return -ENOENT;
-}
-
-int kgdb_isremovedbreak(unsigned long addr)
-{
-       int i;
-
-       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-               if ((kgdb_break[i].state == BP_REMOVED) &&
-                                       (kgdb_break[i].bpt_addr == addr))
-                       return 1;
-       }
-       return 0;
-}
-
-static int remove_all_break(void)
-{
-       unsigned long addr;
-       int error;
-       int i;
-
-       /* Clear memory breakpoints. */
-       for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
-               if (kgdb_break[i].state != BP_ACTIVE)
-                       goto setundefined;
-               addr = kgdb_break[i].bpt_addr;
-               error = kgdb_arch_remove_breakpoint(addr,
-                               kgdb_break[i].saved_instr);
-               if (error)
-                       printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
-                          addr);
-setundefined:
-               kgdb_break[i].state = BP_UNDEFINED;
-       }
-
-       /* Clear hardware breakpoints. */
-       if (arch_kgdb_ops.remove_all_hw_break)
-               arch_kgdb_ops.remove_all_hw_break();
-
-       return 0;
-}
-
-/*
- * Remap normal tasks to their real PID,
- * CPU shadow threads are mapped to -CPU - 2
- */
-static inline int shadow_pid(int realpid)
-{
-       if (realpid)
-               return realpid;
-
-       return -raw_smp_processor_id() - 2;
-}
-
-static char gdbmsgbuf[BUFMAX + 1];
-
-static void kgdb_msg_write(const char *s, int len)
-{
-       char *bufptr;
-       int wcount;
-       int i;
-
-       /* 'O'utput */
-       gdbmsgbuf[0] = 'O';
-
-       /* Fill and send buffers... */
-       while (len > 0) {
-               bufptr = gdbmsgbuf + 1;
-
-               /* Calculate how many this time */
-               if ((len << 1) > (BUFMAX - 2))
-                       wcount = (BUFMAX - 2) >> 1;
-               else
-                       wcount = len;
-
-               /* Pack in hex chars */
-               for (i = 0; i < wcount; i++)
-                       bufptr = pack_hex_byte(bufptr, s[i]);
-               *bufptr = '\0';
-
-               /* Move up */
-               s += wcount;
-               len -= wcount;
-
-               /* Write packet */
-               put_packet(gdbmsgbuf);
-       }
-}
-
-/*
- * Return true if there is a valid kgdb I/O module.  Also, if no
- * debugger is attached, a message can be printed to the console about
- * waiting for the debugger to attach.
- *
- * The print_wait argument should only be true when called from inside
- * the core kgdb_handle_exception, because it will wait for the
- * debugger to attach.
- */
-static int kgdb_io_ready(int print_wait)
-{
-       if (!kgdb_io_ops)
-               return 0;
-       if (kgdb_connected)
-               return 1;
-       if (atomic_read(&kgdb_setting_breakpoint))
-               return 1;
-       if (print_wait)
-               printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
-       return 1;
-}
-
-/*
- * All the functions that start with gdb_cmd are the various
- * operations to implement the handlers for the gdbserial protocol
- * where KGDB is communicating with an external debugger
- */
-
-/* Handle the '?' status packets */
-static void gdb_cmd_status(struct kgdb_state *ks)
-{
-       /*
-        * We know that this packet is only sent
-        * during initial connect.  So to be safe,
-        * we clear out our breakpoints now in case
-        * GDB is reconnecting.
-        */
-       remove_all_break();
-
-       remcom_out_buffer[0] = 'S';
-       pack_hex_byte(&remcom_out_buffer[1], ks->signo);
-}
-
-/* Handle the 'g' get registers request */
-static void gdb_cmd_getregs(struct kgdb_state *ks)
-{
-       struct task_struct *thread;
-       void *local_debuggerinfo;
-       int i;
-
-       thread = kgdb_usethread;
-       if (!thread) {
-               thread = kgdb_info[ks->cpu].task;
-               local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
-       } else {
-               local_debuggerinfo = NULL;
-               for_each_online_cpu(i) {
-                       /*
-                        * Try to find the task on some other
-                        * or possibly this node.  If we do not
-                        * find the matching task then we try
-                        * to approximate the results.
-                        */
-                       if (thread == kgdb_info[i].task)
-                               local_debuggerinfo = kgdb_info[i].debuggerinfo;
-               }
-       }
-
-       /*
-        * All threads that don't have debuggerinfo should be
-        * in schedule() sleeping, since all other CPUs
-        * are in kgdb_wait, and thus have debuggerinfo.
-        */
-       if (local_debuggerinfo) {
-               pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo);
-       } else {
-               /*
-                * Pull stuff saved during switch_to; nothing
-                * else is accessible (or even particularly
-                * relevant).
-                *
-                * This should be enough for a stack trace.
-                */
-               sleeping_thread_to_gdb_regs(gdb_regs, thread);
-       }
-       kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES);
-}
-
-/* Handle the 'G' set registers request */
-static void gdb_cmd_setregs(struct kgdb_state *ks)
-{
-       kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES);
-
-       if (kgdb_usethread && kgdb_usethread != current) {
-               error_packet(remcom_out_buffer, -EINVAL);
-       } else {
-               gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs);
-               strcpy(remcom_out_buffer, "OK");
-       }
-}
-
-/* Handle the 'm' memory read bytes */
-static void gdb_cmd_memread(struct kgdb_state *ks)
-{
-       char *ptr = &remcom_in_buffer[1];
-       unsigned long length;
-       unsigned long addr;
-       int err;
-
-       if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' &&
-                                       kgdb_hex2long(&ptr, &length) > 0) {
-               err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length);
-               if (err)
-                       error_packet(remcom_out_buffer, err);
-       } else {
-               error_packet(remcom_out_buffer, -EINVAL);
-       }
-}
-
-/* Handle the 'M' memory write bytes */
-static void gdb_cmd_memwrite(struct kgdb_state *ks)
-{
-       int err = write_mem_msg(0);
-
-       if (err)
-               error_packet(remcom_out_buffer, err);
-       else
-               strcpy(remcom_out_buffer, "OK");
-}
-
-/* Handle the 'X' memory binary write bytes */
-static void gdb_cmd_binwrite(struct kgdb_state *ks)
-{
-       int err = write_mem_msg(1);
-
-       if (err)
-               error_packet(remcom_out_buffer, err);
-       else
-               strcpy(remcom_out_buffer, "OK");
-}
-
-/* Handle the 'D' or 'k', detach or kill packets */
-static void gdb_cmd_detachkill(struct kgdb_state *ks)
-{
-       int error;
-
-       /* The detach case */
-       if (remcom_in_buffer[0] == 'D') {
-               error = remove_all_break();
-               if (error < 0) {
-                       error_packet(remcom_out_buffer, error);
-               } else {
-                       strcpy(remcom_out_buffer, "OK");
-                       kgdb_connected = 0;
-               }
-               put_packet(remcom_out_buffer);
-       } else {
-               /*
-                * Assume the kill case, with no exit code checking,
-                * trying to force detach the debugger:
-                */
-               remove_all_break();
-               kgdb_connected = 0;
-       }
-}
-
-/* Handle the 'R' reboot packets */
-static int gdb_cmd_reboot(struct kgdb_state *ks)
-{
-       /* For now, only honor R0 */
-       if (strcmp(remcom_in_buffer, "R0") == 0) {
-               printk(KERN_CRIT "Executing emergency reboot\n");
-               strcpy(remcom_out_buffer, "OK");
-               put_packet(remcom_out_buffer);
-
-               /*
-                * Execution should not return from
-                * machine_emergency_restart()
-                */
-               machine_emergency_restart();
-               kgdb_connected = 0;
-
-               return 1;
-       }
-       return 0;
-}
-
-/* Handle the 'q' query packets */
-static void gdb_cmd_query(struct kgdb_state *ks)
-{
-       struct task_struct *g;
-       struct task_struct *p;
-       unsigned char thref[8];
-       char *ptr;
-       int i;
-       int cpu;
-       int finished = 0;
-
-       switch (remcom_in_buffer[1]) {
-       case 's':
-       case 'f':
-               if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
-                       error_packet(remcom_out_buffer, -EINVAL);
-                       break;
-               }
-
-               i = 0;
-               remcom_out_buffer[0] = 'm';
-               ptr = remcom_out_buffer + 1;
-               if (remcom_in_buffer[1] == 'f') {
-                       /* Each cpu is a shadow thread */
-                       for_each_online_cpu(cpu) {
-                               ks->thr_query = 0;
-                               int_to_threadref(thref, -cpu - 2);
-                               pack_threadid(ptr, thref);
-                               ptr += BUF_THREAD_ID_SIZE;
-                               *(ptr++) = ',';
-                               i++;
-                       }
-               }
-
-               do_each_thread(g, p) {
-                       if (i >= ks->thr_query && !finished) {
-                               int_to_threadref(thref, p->pid);
-                               pack_threadid(ptr, thref);
-                               ptr += BUF_THREAD_ID_SIZE;
-                               *(ptr++) = ',';
-                               ks->thr_query++;
-                               if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
-                                       finished = 1;
-                       }
-                       i++;
-               } while_each_thread(g, p);
-
-               *(--ptr) = '\0';
-               break;
-
-       case 'C':
-               /* Current thread id */
-               strcpy(remcom_out_buffer, "QC");
-               ks->threadid = shadow_pid(current->pid);
-               int_to_threadref(thref, ks->threadid);
-               pack_threadid(remcom_out_buffer + 2, thref);
-               break;
-       case 'T':
-               if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
-                       error_packet(remcom_out_buffer, -EINVAL);
-                       break;
-               }
-               ks->threadid = 0;
-               ptr = remcom_in_buffer + 17;
-               kgdb_hex2long(&ptr, &ks->threadid);
-               if (!getthread(ks->linux_regs, ks->threadid)) {
-                       error_packet(remcom_out_buffer, -EINVAL);
-                       break;
-               }
-               if ((int)ks->threadid > 0) {
-                       kgdb_mem2hex(getthread(ks->linux_regs,
-                                       ks->threadid)->comm,
-                                       remcom_out_buffer, 16);
-               } else {
-                       static char tmpstr[23 + BUF_THREAD_ID_SIZE];
-
-                       sprintf(tmpstr, "shadowCPU%d",
-                                       (int)(-ks->threadid - 2));
-                       kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
-               }
-               break;
-       }
-}
-
-/* Handle the 'H' task query packets */
-static void gdb_cmd_task(struct kgdb_state *ks)
-{
-       struct task_struct *thread;
-       char *ptr;
-
-       switch (remcom_in_buffer[1]) {
-       case 'g':
-               ptr = &remcom_in_buffer[2];
-               kgdb_hex2long(&ptr, &ks->threadid);
-               thread = getthread(ks->linux_regs, ks->threadid);
-               if (!thread && ks->threadid > 0) {
-                       error_packet(remcom_out_buffer, -EINVAL);
-                       break;
-               }
-               kgdb_usethread = thread;
-               ks->kgdb_usethreadid = ks->threadid;
-               strcpy(remcom_out_buffer, "OK");
-               break;
-       case 'c':
-               ptr = &remcom_in_buffer[2];
-               kgdb_hex2long(&ptr, &ks->threadid);
-               if (!ks->threadid) {
-                       kgdb_contthread = NULL;
-               } else {
-                       thread = getthread(ks->linux_regs, ks->threadid);
-                       if (!thread && ks->threadid > 0) {
-                               error_packet(remcom_out_buffer, -EINVAL);
-                               break;
-                       }
-                       kgdb_contthread = thread;
-               }
-               strcpy(remcom_out_buffer, "OK");
-               break;
-       }
-}
-
-/* Handle the 'T' thread query packets */
-static void gdb_cmd_thread(struct kgdb_state *ks)
-{
-       char *ptr = &remcom_in_buffer[1];
-       struct task_struct *thread;
-
-       kgdb_hex2long(&ptr, &ks->threadid);
-       thread = getthread(ks->linux_regs, ks->threadid);
-       if (thread)
-               strcpy(remcom_out_buffer, "OK");
-       else
-               error_packet(remcom_out_buffer, -EINVAL);
-}
-
-/* Handle the 'z' or 'Z' breakpoint remove or set packets */
-static void gdb_cmd_break(struct kgdb_state *ks)
-{
-       /*
-        * Since GDB-5.3, it's been drafted that '0' is a software
-        * breakpoint, '1' is a hardware breakpoint, so let's do that.
-        */
-       char *bpt_type = &remcom_in_buffer[1];
-       char *ptr = &remcom_in_buffer[2];
-       unsigned long addr;
-       unsigned long length;
-       int error = 0;
-
-       if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') {
-               /* Unsupported */
-               if (*bpt_type > '4')
-                       return;
-       } else {
-               if (*bpt_type != '0' && *bpt_type != '1')
-                       /* Unsupported. */
-                       return;
-       }
-
-       /*
-        * Test if this is a hardware breakpoint, and
-        * if we support it:
-        */
-       if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT))
-               /* Unsupported. */
-               return;
-
-       if (*(ptr++) != ',') {
-               error_packet(remcom_out_buffer, -EINVAL);
-               return;
-       }
-       if (!kgdb_hex2long(&ptr, &addr)) {
-               error_packet(remcom_out_buffer, -EINVAL);
-               return;
-       }
-       if (*(ptr++) != ',' ||
-               !kgdb_hex2long(&ptr, &length)) {
-               error_packet(remcom_out_buffer, -EINVAL);
-               return;
-       }
-
-       if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0')
-               error = kgdb_set_sw_break(addr);
-       else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0')
-               error = kgdb_remove_sw_break(addr);
-       else if (remcom_in_buffer[0] == 'Z')
-               error = arch_kgdb_ops.set_hw_breakpoint(addr,
-                       (int)length, *bpt_type - '0');
-       else if (remcom_in_buffer[0] == 'z')
-               error = arch_kgdb_ops.remove_hw_breakpoint(addr,
-                       (int) length, *bpt_type - '0');
-
-       if (error == 0)
-               strcpy(remcom_out_buffer, "OK");
-       else
-               error_packet(remcom_out_buffer, error);
-}
-
-/* Handle the 'C' signal / exception passing packets */
-static int gdb_cmd_exception_pass(struct kgdb_state *ks)
-{
-       /* C09 == pass exception
-        * C15 == detach kgdb, pass exception
-        */
-       if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') {
-
-               ks->pass_exception = 1;
-               remcom_in_buffer[0] = 'c';
-
-       } else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') {
-
-               ks->pass_exception = 1;
-               remcom_in_buffer[0] = 'D';
-               remove_all_break();
-               kgdb_connected = 0;
-               return 1;
-
-       } else {
-               kgdb_msg_write("KGDB only knows signal 9 (pass)"
-                       " and 15 (pass and disconnect)\n"
-                       "Executing a continue without signal passing\n", 0);
-               remcom_in_buffer[0] = 'c';
-       }
-
-       /* Indicate fall through */
-       return -1;
-}
-
-/*
- * This function performs all gdbserial command processing
- */
-static int gdb_serial_stub(struct kgdb_state *ks)
-{
-       int error = 0;
-       int tmp;
-
-       /* Clear the out buffer. */
-       memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
-
-       if (kgdb_connected) {
-               unsigned char thref[8];
-               char *ptr;
-
-               /* Reply to host that an exception has occurred */
-               ptr = remcom_out_buffer;
-               *ptr++ = 'T';
-               ptr = pack_hex_byte(ptr, ks->signo);
-               ptr += strlen(strcpy(ptr, "thread:"));
-               int_to_threadref(thref, shadow_pid(current->pid));
-               ptr = pack_threadid(ptr, thref);
-               *ptr++ = ';';
-               put_packet(remcom_out_buffer);
-       }
-
-       kgdb_usethread = kgdb_info[ks->cpu].task;
-       ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
-       ks->pass_exception = 0;
-
-       while (1) {
-               error = 0;
-
-               /* Clear the out buffer. */
-               memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
-
-               get_packet(remcom_in_buffer);
-
-               switch (remcom_in_buffer[0]) {
-               case '?': /* gdbserial status */
-                       gdb_cmd_status(ks);
-                       break;
-               case 'g': /* return the value of the CPU registers */
-                       gdb_cmd_getregs(ks);
-                       break;
-               case 'G': /* set the value of the CPU registers - return OK */
-                       gdb_cmd_setregs(ks);
-                       break;
-               case 'm': /* mAA..AA,LLLL  Read LLLL bytes at address AA..AA */
-                       gdb_cmd_memread(ks);
-                       break;
-               case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */
-                       gdb_cmd_memwrite(ks);
-                       break;
-               case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */
-                       gdb_cmd_binwrite(ks);
-                       break;
-                       /* kill or detach. KGDB should treat this like a
-                        * continue.
-                        */
-               case 'D': /* Debugger detach */
-               case 'k': /* Debugger detach via kill */
-                       gdb_cmd_detachkill(ks);
-                       goto default_handle;
-               case 'R': /* Reboot */
-                       if (gdb_cmd_reboot(ks))
-                               goto default_handle;
-                       break;
-               case 'q': /* query command */
-                       gdb_cmd_query(ks);
-                       break;
-               case 'H': /* task related */
-                       gdb_cmd_task(ks);
-                       break;
-               case 'T': /* Query thread status */
-                       gdb_cmd_thread(ks);
-                       break;
-               case 'z': /* Break point remove */
-               case 'Z': /* Break point set */
-                       gdb_cmd_break(ks);
-                       break;
-               case 'C': /* Exception passing */
-                       tmp = gdb_cmd_exception_pass(ks);
-                       if (tmp > 0)
-                               goto default_handle;
-                       if (tmp == 0)
-                               break;
-                       /* Fall through on tmp < 0 */
-               case 'c': /* Continue packet */
-               case 's': /* Single step packet */
-                       if (kgdb_contthread && kgdb_contthread != current) {
-                               /* Can't switch threads in kgdb */
-                               error_packet(remcom_out_buffer, -EINVAL);
-                               break;
-                       }
-                       kgdb_activate_sw_breakpoints();
-                       /* Fall through to default processing */
-               default:
-default_handle:
-                       error = kgdb_arch_handle_exception(ks->ex_vector,
-                                               ks->signo,
-                                               ks->err_code,
-                                               remcom_in_buffer,
-                                               remcom_out_buffer,
-                                               ks->linux_regs);
-                       /*
-                        * Leave cmd processing on error, detach,
-                        * kill, continue, or single step.
-                        */
-                       if (error >= 0 || remcom_in_buffer[0] == 'D' ||
-                           remcom_in_buffer[0] == 'k') {
-                               error = 0;
-                               goto kgdb_exit;
-                       }
-
-               }
-
-               /* reply to the request */
-               put_packet(remcom_out_buffer);
-       }
-
-kgdb_exit:
-       if (ks->pass_exception)
-               error = 1;
-       return error;
-}
-
-static int kgdb_reenter_check(struct kgdb_state *ks)
-{
-       unsigned long addr;
-
-       if (atomic_read(&kgdb_active) != raw_smp_processor_id())
-               return 0;
-
-       /* Panic on recursive debugger calls: */
-       exception_level++;
-       addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
-       kgdb_deactivate_sw_breakpoints();
-
-       /*
-        * If the breakpoint was removed OK at the place where the
-        * exception occurred, try to recover and print a warning to the end
-        * user because the user planted a breakpoint in a place that
-        * KGDB needs in order to function.
-        */
-       if (kgdb_remove_sw_break(addr) == 0) {
-               exception_level = 0;
-               kgdb_skipexception(ks->ex_vector, ks->linux_regs);
-               kgdb_activate_sw_breakpoints();
-               printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
-                       addr);
-               WARN_ON_ONCE(1);
-
-               return 1;
-       }
-       remove_all_break();
-       kgdb_skipexception(ks->ex_vector, ks->linux_regs);
-
-       if (exception_level > 1) {
-               dump_stack();
-               panic("Recursive entry to debugger");
-       }
-
-       printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
-       dump_stack();
-       panic("Recursive entry to debugger");
-
-       return 1;
-}
-
-static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
-{
-       unsigned long flags;
-       int sstep_tries = 100;
-       int error = 0;
-       int i, cpu;
-       int trace_on = 0;
-acquirelock:
-       /*
-        * Interrupts will be restored by the 'trap return' code, except when
-        * single stepping.
-        */
-       local_irq_save(flags);
-
-       cpu = ks->cpu;
-       kgdb_info[cpu].debuggerinfo = regs;
-       kgdb_info[cpu].task = current;
-       /*
-        * Make sure the above info reaches the primary CPU before
-        * our cpu_in_kgdb[] flag setting does:
-        */
-       atomic_inc(&cpu_in_kgdb[cpu]);
-
-       /*
-        * CPU will loop if it is a slave or request to become a kgdb
-        * master cpu and acquire the kgdb_active lock:
-        */
-       while (1) {
-               if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
-                       if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
-                               break;
-               } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
-                       if (!atomic_read(&passive_cpu_wait[cpu]))
-                               goto return_normal;
-               } else {
-return_normal:
-                       /* Return to normal operation by executing any
-                        * hw breakpoint fixup.
-                        */
-                       if (arch_kgdb_ops.correct_hw_break)
-                               arch_kgdb_ops.correct_hw_break();
-                       if (trace_on)
-                               tracing_on();
-                       atomic_dec(&cpu_in_kgdb[cpu]);
-                       touch_softlockup_watchdog_sync();
-                       clocksource_touch_watchdog();
-                       local_irq_restore(flags);
-                       return 0;
-               }
-               cpu_relax();
-       }
-
-       /*
-        * For single stepping, try to only enter on the processor
-        * that was single stepping.  To guard against a deadlock, the
-        * kernel will only try for the value of sstep_tries before
-        * giving up and continuing on.
-        */
-       if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
-           (kgdb_info[cpu].task &&
-            kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
-               atomic_set(&kgdb_active, -1);
-               touch_softlockup_watchdog_sync();
-               clocksource_touch_watchdog();
-               local_irq_restore(flags);
-
-               goto acquirelock;
-       }
-
-       if (!kgdb_io_ready(1)) {
-               error = 1;
-               goto kgdb_restore; /* No I/O connection, so resume the system */
-       }
-
-       /*
-        * Don't enter if we have hit a removed breakpoint.
-        */
-       if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
-               goto kgdb_restore;
-
-       /* Call the I/O driver's pre_exception routine */
-       if (kgdb_io_ops->pre_exception)
-               kgdb_io_ops->pre_exception();
-
-       kgdb_disable_hw_debug(ks->linux_regs);
-
-       /*
-        * Get the passive CPU lock which will hold all the non-primary
-        * CPU in a spin state while the debugger is active
-        */
-       if (!kgdb_single_step) {
-               for (i = 0; i < NR_CPUS; i++)
-                       atomic_inc(&passive_cpu_wait[i]);
-       }
-
-#ifdef CONFIG_SMP
-       /* Signal the other CPUs to enter kgdb_wait() */
-       if ((!kgdb_single_step) && kgdb_do_roundup)
-               kgdb_roundup_cpus(flags);
-#endif
-
-       /*
-        * Wait for the other CPUs to be notified and be waiting for us:
-        */
-       for_each_online_cpu(i) {
-               while (!atomic_read(&cpu_in_kgdb[i]))
-                       cpu_relax();
-       }
-
-       /*
-        * At this point the primary processor is completely
-        * in the debugger and all secondary CPUs are quiescent
-        */
-       kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code);
-       kgdb_deactivate_sw_breakpoints();
-       kgdb_single_step = 0;
-       kgdb_contthread = current;
-       exception_level = 0;
-       trace_on = tracing_is_on();
-       if (trace_on)
-               tracing_off();
-
-       /* Talk to debugger with gdbserial protocol */
-       error = gdb_serial_stub(ks);
-
-       /* Call the I/O driver's post_exception routine */
-       if (kgdb_io_ops->post_exception)
-               kgdb_io_ops->post_exception();
-
-       atomic_dec(&cpu_in_kgdb[ks->cpu]);
-
-       if (!kgdb_single_step) {
-               for (i = NR_CPUS-1; i >= 0; i--)
-                       atomic_dec(&passive_cpu_wait[i]);
-               /*
-                * Wait till all the CPUs have quit
-                * from the debugger.
-                */
-               for_each_online_cpu(i) {
-                       while (atomic_read(&cpu_in_kgdb[i]))
-                               cpu_relax();
-               }
-       }
-
-kgdb_restore:
-       if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
-               int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
-               if (kgdb_info[sstep_cpu].task)
-                       kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
-               else
-                       kgdb_sstep_pid = 0;
-       }
-       if (trace_on)
-               tracing_on();
-       /* Free kgdb_active */
-       atomic_set(&kgdb_active, -1);
-       touch_softlockup_watchdog_sync();
-       clocksource_touch_watchdog();
-       local_irq_restore(flags);
-
-       return error;
-}
-
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- *     interface locks, if any (begin_session)
- *     kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
-{
-       struct kgdb_state kgdb_var;
-       struct kgdb_state *ks = &kgdb_var;
-       int ret;
-
-       ks->cpu                 = raw_smp_processor_id();
-       ks->ex_vector           = evector;
-       ks->signo               = signo;
-       ks->err_code            = ecode;
-       ks->kgdb_usethreadid    = 0;
-       ks->linux_regs          = regs;
-
-       if (kgdb_reenter_check(ks))
-               return 0; /* Ouch, double exception ! */
-       kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
-       ret = kgdb_cpu_enter(ks, regs);
-       kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
-       return ret;
-}
-
-int kgdb_nmicallback(int cpu, void *regs)
-{
-#ifdef CONFIG_SMP
-       struct kgdb_state kgdb_var;
-       struct kgdb_state *ks = &kgdb_var;
-
-       memset(ks, 0, sizeof(struct kgdb_state));
-       ks->cpu                 = cpu;
-       ks->linux_regs          = regs;
-
-       if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-           atomic_read(&kgdb_active) != -1 &&
-           atomic_read(&kgdb_active) != cpu) {
-               kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
-               kgdb_cpu_enter(ks, regs);
-               kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
-               return 0;
-       }
-#endif
-       return 1;
-}
-
-static void kgdb_console_write(struct console *co, const char *s,
-   unsigned count)
-{
-       unsigned long flags;
-
-       /* If we're debugging, or KGDB has not connected, don't try
-        * to print. */
-       if (!kgdb_connected || atomic_read(&kgdb_active) != -1)
-               return;
-
-       local_irq_save(flags);
-       kgdb_msg_write(s, count);
-       local_irq_restore(flags);
-}
-
-static struct console kgdbcons = {
-       .name           = "kgdb",
-       .write          = kgdb_console_write,
-       .flags          = CON_PRINTBUFFER | CON_ENABLED,
-       .index          = -1,
-};
-
-#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_gdb(int key, struct tty_struct *tty)
-{
-       if (!kgdb_io_ops) {
-               printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
-               return;
-       }
-       if (!kgdb_connected)
-               printk(KERN_CRIT "Entering KGDB\n");
-
-       kgdb_breakpoint();
-}
-
-static struct sysrq_key_op sysrq_gdb_op = {
-       .handler        = sysrq_handle_gdb,
-       .help_msg       = "debug(G)",
-       .action_msg     = "DEBUG",
-};
-#endif
-
-static void kgdb_register_callbacks(void)
-{
-       if (!kgdb_io_module_registered) {
-               kgdb_io_module_registered = 1;
-               kgdb_arch_init();
-#ifdef CONFIG_MAGIC_SYSRQ
-               register_sysrq_key('g', &sysrq_gdb_op);
-#endif
-               if (kgdb_use_con && !kgdb_con_registered) {
-                       register_console(&kgdbcons);
-                       kgdb_con_registered = 1;
-               }
-       }
-}
-
-static void kgdb_unregister_callbacks(void)
-{
-       /*
-        * When this routine is called KGDB should unregister from the
-        * panic handler and clean up, making sure it is not handling any
-        * break exceptions at the time.
-        */
-       if (kgdb_io_module_registered) {
-               kgdb_io_module_registered = 0;
-               kgdb_arch_exit();
-#ifdef CONFIG_MAGIC_SYSRQ
-               unregister_sysrq_key('g', &sysrq_gdb_op);
-#endif
-               if (kgdb_con_registered) {
-                       unregister_console(&kgdbcons);
-                       kgdb_con_registered = 0;
-               }
-       }
-}
-
-static void kgdb_initial_breakpoint(void)
-{
-       kgdb_break_asap = 0;
-
-       printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
-       kgdb_breakpoint();
-}
-
-/**
- *     kgdb_register_io_module - register KGDB IO module
- *     @new_kgdb_io_ops: the io ops vector
- *
- *     Register it with the KGDB core.
- */
-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
-{
-       int err;
-
-       spin_lock(&kgdb_registration_lock);
-
-       if (kgdb_io_ops) {
-               spin_unlock(&kgdb_registration_lock);
-
-               printk(KERN_ERR "kgdb: Another I/O driver is already "
-                               "registered with KGDB.\n");
-               return -EBUSY;
-       }
-
-       if (new_kgdb_io_ops->init) {
-               err = new_kgdb_io_ops->init();
-               if (err) {
-                       spin_unlock(&kgdb_registration_lock);
-                       return err;
-               }
-       }
-
-       kgdb_io_ops = new_kgdb_io_ops;
-
-       spin_unlock(&kgdb_registration_lock);
-
-       printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
-              new_kgdb_io_ops->name);
-
-       /* Arm KGDB now. */
-       kgdb_register_callbacks();
-
-       if (kgdb_break_asap)
-               kgdb_initial_breakpoint();
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(kgdb_register_io_module);
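
The registration interface above is small enough that a minimal polled I/O
driver is only a sketch away (hypothetical my_uart_* helpers, not part of
this tree):

    #include <linux/kgdb.h>
    #include <linux/module.h>

    /* Hypothetical polled-mode helpers; any polled character device works. */
    extern int  my_uart_poll_get_char(void);
    extern void my_uart_poll_put_char(u8 c);

    static struct kgdb_io my_kgdb_io = {
            .name           = "my_kgdb_io",
            .read_char      = my_uart_poll_get_char,
            .write_char     = my_uart_poll_put_char,
    };

    static int __init my_kgdb_io_init(void)
    {
            /* Returns -EBUSY if another I/O driver is already registered. */
            return kgdb_register_io_module(&my_kgdb_io);
    }
    module_init(my_kgdb_io_init);
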
-
-/**
- *     kgdb_unregister_io_module - unregister KGDB IO module
- *     @old_kgdb_io_ops: the io ops vector
- *
- *     Unregister it with the KGDB core.
- */
-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
-{
-       BUG_ON(kgdb_connected);
-
-       /*
-        * KGDB is no longer able to communicate out, so
-        * unregister our callbacks and reset state.
-        */
-       kgdb_unregister_callbacks();
-
-       spin_lock(&kgdb_registration_lock);
-
-       WARN_ON_ONCE(kgdb_io_ops != old_kgdb_io_ops);
-       kgdb_io_ops = NULL;
-
-       spin_unlock(&kgdb_registration_lock);
-
-       printk(KERN_INFO
-               "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
-               old_kgdb_io_ops->name);
-}
-EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
-
-/**
- * kgdb_breakpoint - generate breakpoint exception
- *
- * This function will generate a breakpoint exception.  It is used at the
- * beginning of a program to sync up with a debugger and can be used
- * otherwise as a quick means to stop program execution and "break" into
- * the debugger.
- */
-void kgdb_breakpoint(void)
-{
-       atomic_inc(&kgdb_setting_breakpoint);
-       wmb(); /* Sync point before breakpoint */
-       arch_kgdb_breakpoint();
-       wmb(); /* Sync point after breakpoint */
-       atomic_dec(&kgdb_setting_breakpoint);
-}
-EXPORT_SYMBOL_GPL(kgdb_breakpoint);
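
As the kernel-doc above notes, kgdb_breakpoint() also doubles as a
programmatic "break here" for code under study; a minimal sketch:

    #include <linux/kgdb.h>

    static void my_suspect_path(void)
    {
            /* Hand control to the attached debugger, if one is connected. */
            if (kgdb_connected)
                    kgdb_breakpoint();
    }
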
-
-static int __init opt_kgdb_wait(char *str)
-{
-       kgdb_break_asap = 1;
-
-       if (kgdb_io_module_registered)
-               kgdb_initial_breakpoint();
-
-       return 0;
-}
-
-early_param("kgdbwait", opt_kgdb_wait);
index 5e14483..3c4fc4b 100644 (file)
 DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
+#ifdef CONFIG_KGDB_KDB
+struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
+#endif /* CONFIG_KGDB_KDB */
+
 
 /* Block module loading/unloading? */
 int modules_disabled = 0;
index fd03513..b1c9857 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/rcupdate.h>
 
 #define MAX_SEQ_NR INT_MAX - NR_CPUS
-#define MAX_OBJ_NUM 10000 * NR_CPUS
+#define MAX_OBJ_NUM 1000
 
 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
 {
@@ -88,7 +88,7 @@ static void padata_parallel_worker(struct work_struct *work)
        local_bh_enable();
 }
 
-/*
+/**
  * padata_do_parallel - padata parallelization function
  *
  * @pinst: padata instance
@@ -152,6 +152,23 @@ out:
 }
 EXPORT_SYMBOL(padata_do_parallel);
 
+/*
+ * padata_get_next - Get the next object that needs serialization.
+ *
+ * Return values are:
+ *
+ * A pointer to the control struct of the next object that needs
+ * serialization, if present in one of the percpu reorder queues.
+ *
+ * NULL, if all percpu reorder queues are empty.
+ *
+ * -EINPROGRESS, if the next object that needs serialization will
+ *  be parallel processed by another cpu and is not yet present in
+ *  the cpu's reorder queue.
+ *
+ * -ENODATA, if this cpu has to do the parallel processing for
+ *  the next object.
+ */
 static struct padata_priv *padata_get_next(struct parallel_data *pd)
 {
        int cpu, num_cpus, empty, calc_seq_nr;
@@ -173,7 +190,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
 
                /*
                 * Calculate the seq_nr of the object that should be
-                * next in this queue.
+                * next in this reorder queue.
                 */
                overrun = 0;
                calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
@@ -231,7 +248,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
                goto out;
        }
 
-       if (next_nr % num_cpus == next_queue->cpu_index) {
+       queue = per_cpu_ptr(pd->queue, smp_processor_id());
+       if (queue->cpu_index == next_queue->cpu_index) {
                padata = ERR_PTR(-ENODATA);
                goto out;
        }
@@ -247,19 +265,40 @@ static void padata_reorder(struct parallel_data *pd)
        struct padata_queue *queue;
        struct padata_instance *pinst = pd->pinst;
 
-try_again:
+       /*
+        * We need to ensure that only one cpu can work on dequeueing of
+        * the reorder queue at a time. Calculating in which percpu reorder
+        * queue the next object will arrive takes some time. A spinlock
+        * would be highly contended. Also it is not clear in which order
+        * the objects arrive at the reorder queues. So a cpu could wait to
+        * get the lock just to notice that there is nothing to do at the
+        * moment. Therefore we use a trylock and let the holder of the lock
+        * care for all the objects enqueued during the hold time of the lock.
+        */
        if (!spin_trylock_bh(&pd->lock))
-               goto out;
+               return;
 
        while (1) {
                padata = padata_get_next(pd);
 
+               /*
+                * All reorder queues are empty, or the next object that needs
+                * serialization is parallel processed by another cpu and is
+                * still on its way to the cpu's reorder queue, nothing to
+                * do for now.
+                */
                if (!padata || PTR_ERR(padata) == -EINPROGRESS)
                        break;
 
+               /*
+                * This cpu has to do the parallel processing of the next
+                * object. It's waiting in the cpu's parallelization queue,
+                * so exit immediately.
+                */
                if (PTR_ERR(padata) == -ENODATA) {
+                       del_timer(&pd->timer);
                        spin_unlock_bh(&pd->lock);
-                       goto out;
+                       return;
                }
 
                queue = per_cpu_ptr(pd->queue, padata->cb_cpu);
@@ -273,13 +312,27 @@ try_again:
 
        spin_unlock_bh(&pd->lock);
 
-       if (atomic_read(&pd->reorder_objects))
-               goto try_again;
+       /*
+        * The next object that needs serialization might have arrived in
+        * the reorder queues in the meantime; we will be called again
+        * from the timer function if no one else takes care of it.
+        */
+       if (atomic_read(&pd->reorder_objects)
+                       && !(pinst->flags & PADATA_RESET))
+               mod_timer(&pd->timer, jiffies + HZ);
+       else
+               del_timer(&pd->timer);
 
-out:
        return;
 }
 
+static void padata_reorder_timer(unsigned long arg)
+{
+       struct parallel_data *pd = (struct parallel_data *)arg;
+
+       padata_reorder(pd);
+}
+
 static void padata_serial_worker(struct work_struct *work)
 {
        struct padata_queue *queue;
@@ -308,7 +361,7 @@ static void padata_serial_worker(struct work_struct *work)
        local_bh_enable();
 }
 
-/*
+/**
  * padata_do_serial - padata serialization function
  *
  * @padata: object to be serialized.
@@ -338,6 +391,7 @@ void padata_do_serial(struct padata_priv *padata)
 }
 EXPORT_SYMBOL(padata_do_serial);
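
The two exported entry points bracket a user's parallel stage: objects go in
through padata_do_parallel() and must be handed back with padata_do_serial()
once the parallel work is done. A rough usage sketch (hypothetical my_*
callbacks; struct padata_priv is embedded and recovered via container_of()):

    #include <linux/padata.h>

    struct my_request {
            struct padata_priv padata;
            /* per-object payload ... */
    };

    static void my_parallel(struct padata_priv *padata)
    {
            struct my_request *req =
                    container_of(padata, struct my_request, padata);
            /* heavy per-object work, possibly running on another cpu */
            padata_do_serial(padata);       /* hand back for reordering */
    }

    static void my_serial(struct padata_priv *padata)
    {
            /* objects arrive here in submission order */
    }

    /* submission side, pinst from padata_alloc(): */
    req->padata.parallel = my_parallel;
    req->padata.serial   = my_serial;
    err = padata_do_parallel(pinst, &req->padata, cb_cpu);
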
 
+/* Allocate and initialize the internal cpumask-dependent resources. */
 static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
                                             const struct cpumask *cpumask)
 {
@@ -358,17 +412,15 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
        if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
                goto err_free_queue;
 
-       for_each_possible_cpu(cpu) {
+       cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
+
+       for_each_cpu(cpu, pd->cpumask) {
                queue = per_cpu_ptr(pd->queue, cpu);
 
                queue->pd = pd;
 
-               if (cpumask_test_cpu(cpu, cpumask)
-                   && cpumask_test_cpu(cpu, cpu_active_mask)) {
-                       queue->cpu_index = cpu_index;
-                       cpu_index++;
-               } else
-                       queue->cpu_index = -1;
+               queue->cpu_index = cpu_index;
+               cpu_index++;
 
                INIT_LIST_HEAD(&queue->reorder.list);
                INIT_LIST_HEAD(&queue->parallel.list);
@@ -382,11 +434,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
                atomic_set(&queue->num_obj, 0);
        }
 
-       cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
-
        num_cpus = cpumask_weight(pd->cpumask);
        pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
 
+       setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
        atomic_set(&pd->seq_nr, -1);
        atomic_set(&pd->reorder_objects, 0);
        atomic_set(&pd->refcnt, 0);
@@ -410,6 +461,31 @@ static void padata_free_pd(struct parallel_data *pd)
        kfree(pd);
 }
 
+/* Flush all objects out of the padata queues. */
+static void padata_flush_queues(struct parallel_data *pd)
+{
+       int cpu;
+       struct padata_queue *queue;
+
+       for_each_cpu(cpu, pd->cpumask) {
+               queue = per_cpu_ptr(pd->queue, cpu);
+               flush_work(&queue->pwork);
+       }
+
+       del_timer_sync(&pd->timer);
+
+       if (atomic_read(&pd->reorder_objects))
+               padata_reorder(pd);
+
+       for_each_cpu(cpu, pd->cpumask) {
+               queue = per_cpu_ptr(pd->queue, cpu);
+               flush_work(&queue->swork);
+       }
+
+       BUG_ON(atomic_read(&pd->refcnt) != 0);
+}
+
+/* Replace the internal control structure with a new one. */
 static void padata_replace(struct padata_instance *pinst,
                           struct parallel_data *pd_new)
 {
@@ -421,17 +497,13 @@ static void padata_replace(struct padata_instance *pinst,
 
        synchronize_rcu();
 
-       while (atomic_read(&pd_old->refcnt) != 0)
-               yield();
-
-       flush_workqueue(pinst->wq);
-
+       padata_flush_queues(pd_old);
        padata_free_pd(pd_old);
 
        pinst->flags &= ~PADATA_RESET;
 }
 
-/*
+/**
  * padata_set_cpumask - set the cpumask that padata should use
  *
  * @pinst: padata instance
@@ -443,10 +515,10 @@ int padata_set_cpumask(struct padata_instance *pinst,
        struct parallel_data *pd;
        int err = 0;
 
-       might_sleep();
-
        mutex_lock(&pinst->lock);
 
+       get_online_cpus();
+
        pd = padata_alloc_pd(pinst, cpumask);
        if (!pd) {
                err = -ENOMEM;
@@ -458,6 +530,8 @@ int padata_set_cpumask(struct padata_instance *pinst,
        padata_replace(pinst, pd);
 
 out:
+       put_online_cpus();
+
        mutex_unlock(&pinst->lock);
 
        return err;
@@ -479,7 +553,7 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
        return 0;
 }
 
-/*
+/**
  * padata_add_cpu - add a cpu to the padata cpumask
  *
  * @pinst: padata instance
@@ -489,12 +563,12 @@ int padata_add_cpu(struct padata_instance *pinst, int cpu)
 {
        int err;
 
-       might_sleep();
-
        mutex_lock(&pinst->lock);
 
+       get_online_cpus();
        cpumask_set_cpu(cpu, pinst->cpumask);
        err = __padata_add_cpu(pinst, cpu);
+       put_online_cpus();
 
        mutex_unlock(&pinst->lock);
 
@@ -517,7 +591,7 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
        return 0;
 }
 
-/*
+/**
  * padata_remove_cpu - remove a cpu from the padata cpumask
  *
  * @pinst: padata instance
@@ -527,12 +601,12 @@ int padata_remove_cpu(struct padata_instance *pinst, int cpu)
 {
        int err;
 
-       might_sleep();
-
        mutex_lock(&pinst->lock);
 
+       get_online_cpus();
        cpumask_clear_cpu(cpu, pinst->cpumask);
        err = __padata_remove_cpu(pinst, cpu);
+       put_online_cpus();
 
        mutex_unlock(&pinst->lock);
 
@@ -540,38 +614,35 @@ int padata_remove_cpu(struct padata_instance *pinst, int cpu)
 }
 EXPORT_SYMBOL(padata_remove_cpu);
 
-/*
+/**
  * padata_start - start the parallel processing
  *
  * @pinst: padata instance to start
  */
 void padata_start(struct padata_instance *pinst)
 {
-       might_sleep();
-
        mutex_lock(&pinst->lock);
        pinst->flags |= PADATA_INIT;
        mutex_unlock(&pinst->lock);
 }
 EXPORT_SYMBOL(padata_start);
 
-/*
+/**
  * padata_stop - stop the parallel processing
  *
  * @pinst: padata instance to stop
  */
 void padata_stop(struct padata_instance *pinst)
 {
-       might_sleep();
-
        mutex_lock(&pinst->lock);
        pinst->flags &= ~PADATA_INIT;
        mutex_unlock(&pinst->lock);
 }
 EXPORT_SYMBOL(padata_stop);
 
-static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
-                                        unsigned long action, void *hcpu)
+#ifdef CONFIG_HOTPLUG_CPU
+static int padata_cpu_callback(struct notifier_block *nfb,
+                              unsigned long action, void *hcpu)
 {
        int err;
        struct padata_instance *pinst;
@@ -621,8 +692,9 @@ static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
 
        return NOTIFY_OK;
 }
+#endif
 
-/*
+/**
  * padata_alloc - allocate and initialize a padata instance
  *
  * @cpumask: cpumask that padata uses for parallelization
@@ -631,7 +703,6 @@ static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
 struct padata_instance *padata_alloc(const struct cpumask *cpumask,
                                     struct workqueue_struct *wq)
 {
-       int err;
        struct padata_instance *pinst;
        struct parallel_data *pd;
 
@@ -639,6 +710,8 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
        if (!pinst)
                goto err;
 
+       get_online_cpus();
+
        pd = padata_alloc_pd(pinst, cpumask);
        if (!pd)
                goto err_free_inst;
@@ -654,31 +727,32 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
 
        pinst->flags = 0;
 
+#ifdef CONFIG_HOTPLUG_CPU
        pinst->cpu_notifier.notifier_call = padata_cpu_callback;
        pinst->cpu_notifier.priority = 0;
-       err = register_hotcpu_notifier(&pinst->cpu_notifier);
-       if (err)
-               goto err_free_cpumask;
+       register_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
+
+       put_online_cpus();
 
        mutex_init(&pinst->lock);
 
        return pinst;
 
-err_free_cpumask:
-       free_cpumask_var(pinst->cpumask);
 err_free_pd:
        padata_free_pd(pd);
 err_free_inst:
        kfree(pinst);
+       put_online_cpus();
 err:
        return NULL;
 }
 EXPORT_SYMBOL(padata_alloc);
 
-/*
+/**
  * padata_free - free a padata instance
  *
- * @ padata_inst: padata instance to free
+ * @padata_inst: padata instance to free
  */
 void padata_free(struct padata_instance *pinst)
 {
@@ -686,10 +760,13 @@ void padata_free(struct padata_instance *pinst)
 
        synchronize_rcu();
 
-       while (atomic_read(&pinst->pd->refcnt) != 0)
-               yield();
-
+#ifdef CONFIG_HOTPLUG_CPU
        unregister_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
+       get_online_cpus();
+       padata_flush_queues(pinst->pd);
+       put_online_cpus();
+
        padata_free_pd(pinst->pd);
        free_cpumask_var(pinst->cpumask);
        kfree(pinst);
index 75077ad..444b770 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
 #include <linux/kexec.h>
+#include <linux/kdb.h>
 #include <linux/ratelimit.h>
 #include <linux/kmsg_dump.h>
 #include <linux/syslog.h>
@@ -413,6 +414,22 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
        return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
 }
 
+#ifdef CONFIG_KGDB_KDB
+/* The kdb dmesg command needs access to the syslog buffer.  do_syslog()
+ * uses locks so it cannot be used during debugging.  Just tell kdb
+ * where the start and end of the physical and logical logs are.  This
+ * is equivalent to do_syslog(3).
+ */
+void kdb_syslog_data(char *syslog_data[4])
+{
+       syslog_data[0] = log_buf;
+       syslog_data[1] = log_buf + log_buf_len;
+       syslog_data[2] = log_buf + log_end -
+               (logged_chars < log_buf_len ? logged_chars : log_buf_len);
+       syslog_data[3] = log_buf + log_end;
+}
+#endif /* CONFIG_KGDB_KDB */
+
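
A hedged sketch of how a consumer walks the four pointers (the logical
positions in [2] and [3] can run past the physical buffer end, so reads wrap
modulo the buffer size; hypothetical dump helper):

    #include <linux/kdb.h>

    static void my_dump_log(void)
    {
            char *sd[4];
            char *p;

            kdb_syslog_data(sd);
            /* sd[0]/sd[1]: physical bounds, sd[2]/sd[3]: logical start/end */
            for (p = sd[2]; p < sd[3]; p++)
                    kdb_printf("%c", sd[0][(p - sd[0]) % (sd[1] - sd[0])]);
    }
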
 /*
  * Call the console drivers on a range of log_buf
  */
@@ -586,6 +603,14 @@ asmlinkage int printk(const char *fmt, ...)
        va_list args;
        int r;
 
+#ifdef CONFIG_KGDB_KDB
+       if (unlikely(kdb_trap_printk)) {
+               va_start(args, fmt);
+               r = vkdb_printf(fmt, args);
+               va_end(args);
+               return r;
+       }
+#endif
        va_start(args, fmt);
        r = vprintk(fmt, args);
        va_end(args);
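
The kdb_trap_printk flag is meant to be bumped around a region whose printk()
output should land in the kdb pager rather than the log buffer; roughly:

    /* inside a kdb command handler */
    kdb_trap_printk++;
    dump_stack();           /* its printk()s now go through vkdb_printf() */
    kdb_trap_printk--;
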
index d9c0368..054a601 100644 (file)
@@ -7759,9 +7759,9 @@ void normalize_rt_tasks(void)
 
 #endif /* CONFIG_MAGIC_SYSRQ */
 
-#ifdef CONFIG_IA64
+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
 /*
- * These functions are only useful for the IA64 MCA handling.
+ * These functions are only useful for the IA64 MCA handling, or kdb.
  *
  * They can only be called when the whole system has been
  * stopped - every CPU needs to be quiescent, and no scheduling
@@ -7781,6 +7781,9 @@ struct task_struct *curr_task(int cpu)
        return cpu_curr(cpu);
 }
 
+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
+
+#ifdef CONFIG_IA64
 /**
  * set_curr_task - set the current task for a given cpu.
  * @cpu: the processor in question.
index dbd7fe0..825a3f2 100644 (file)
@@ -2735,3 +2735,43 @@ void __init signals_init(void)
 {
        sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
 }
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/kdb.h>
+/*
+ * kdb_send_sig_info - Allows kdb to send signals without exposing
+ * signal internals.  This function checks if the required locks are
+ * available before calling the main signal code, to avoid kdb
+ * deadlocks.
+ */
+void
+kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
+{
+       static struct task_struct *kdb_prev_t;
+       int sig, new_t;
+       if (!spin_trylock(&t->sighand->siglock)) {
+               kdb_printf("Can't do kill command now.\n"
+                          "The sigmask lock is held somewhere else in "
+                          "kernel, try again later\n");
+               return;
+       }
+       spin_unlock(&t->sighand->siglock);
+       new_t = kdb_prev_t != t;
+       kdb_prev_t = t;
+       if (t->state != TASK_RUNNING && new_t) {
+               kdb_printf("Process is not RUNNING, sending a signal from "
+                          "kdb risks deadlock\n"
+                          "on the run queue locks. "
+                          "The signal has _not_ been sent.\n"
+                          "Reissue the kill command if you want to risk "
+                          "the deadlock.\n");
+               return;
+       }
+       sig = info->si_signo;
+       if (send_sig_info(sig, info, t))
+               kdb_printf("Fail to deliver Signal %d to process %d.\n",
+                          sig, t->pid);
+       else
+               kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
+}
+#endif /* CONFIG_KGDB_KDB */
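
The expected caller is kdb's kill command, which builds the siginfo by hand
before delegating; approximately (hypothetical sig and p values):

    struct siginfo info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code  = SI_USER;
    info.si_pid   = current->pid;   /* the debugger's context */
    info.si_uid   = 0;              /* kdb runs in the kernel */
    kdb_send_sig_info(p, &info);
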
index b125830..30acc6c 100644 (file)
@@ -2083,20 +2083,20 @@ static void proc_skip_char(char **buf, size_t *size, const char v)
 
 #define TMPBUFLEN 22
 /**
- * proc_get_long - reads an ASCII formated integer from a user buffer
+ * proc_get_long - reads an ASCII formatted integer from a user buffer
  *
- * @buf - a kernel buffer
- * @size - size of the kernel buffer
- * @val - this is where the number will be stored
- * @neg - set to %TRUE if number is negative
- * @perm_tr - a vector which contains the allowed trailers
- * @perm_tr_len - size of the perm_tr vector
- * @tr - pointer to store the trailer character
+ * @buf: a kernel buffer
+ * @size: size of the kernel buffer
+ * @val: this is where the number will be stored
+ * @neg: set to %TRUE if number is negative
+ * @perm_tr: a vector which contains the allowed trailers
+ * @perm_tr_len: size of the perm_tr vector
+ * @tr: pointer to store the trailer character
  *
- * In case of success 0 is returned and buf and size are updated with
- * the amount of bytes read. If tr is non NULL and a trailing
- * character exist (size is non zero after returning from this
- * function) tr is updated with the trailing character.
+ * In case of success %0 is returned and @buf and @size are updated with
+ * the amount of bytes read. If @tr is non-NULL and a trailing
+ * character exists (size is non-zero after returning from this
+ * function), @tr is updated with the trailing character.
  */
 static int proc_get_long(char **buf, size_t *size,
                          unsigned long *val, bool *neg,
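
A hedged sketch of the calling convention the comment describes (the helper
is static to sysctl.c, so callers live in the same file; proc_wspace_sep is
assumed to be the whitespace-trailer vector defined there):

    unsigned long val;
    bool neg;
    char tr;
    int err;

    /* kbuf/left: kernel copy of the user data and its remaining length */
    err = proc_get_long(&kbuf, &left, &val, &neg, proc_wspace_sep,
                        sizeof(proc_wspace_sep), &tr);
    if (!err && neg)
            val = -val;     /* the caller applies the sign */
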
@@ -2147,15 +2147,15 @@ static int proc_get_long(char **buf, size_t *size,
 }
 
 /**
- * proc_put_long - coverts an integer to a decimal ASCII formated string
+ * proc_put_long - converts an integer to a decimal ASCII formatted string
  *
- * @buf - the user buffer
- * @size - the size of the user buffer
- * @val - the integer to be converted
- * @neg - sign of the number, %TRUE for negative
+ * @buf: the user buffer
+ * @size: the size of the user buffer
+ * @val: the integer to be converted
+ * @neg: sign of the number, %TRUE for negative
  *
- * In case of success 0 is returned and buf and size are updated with
- * the amount of bytes read.
+ * In case of success %0 is returned and @buf and @size are updated with
+ * the amount of bytes written.
  */
 static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
                          bool neg)
index 9b5d1d7..43cb93f 100644 (file)
@@ -3,7 +3,7 @@ config HAVE_ARCH_KGDB
        bool
 
 menuconfig KGDB
-       bool "KGDB: kernel debugging with remote gdb"
+       bool "KGDB: kernel debugger"
        depends on HAVE_ARCH_KGDB
        depends on DEBUG_KERNEL && EXPERIMENTAL
        help
@@ -57,4 +57,26 @@ config KGDB_TESTS_BOOT_STRING
          information about other strings you could use beyond the
          default of V1F100.
 
+config KGDB_LOW_LEVEL_TRAP
+       bool "KGDB: Allow debugging with traps in notifiers"
+       depends on X86 || MIPS
+       default n
+       help
+         This will add an extra callback to kgdb for the breakpoint
+         exception handler, which will allow kgdb to step through a
+         notify handler.
+
+config KGDB_KDB
+       bool "KGDB_KDB: include kdb frontend for kgdb"
+       default n
+       help
+         Include the KDB frontend for the kernel debugger.
+
+config KDB_KEYBOARD
+       bool "KGDB_KDB: keyboard as input device"
+       depends on VT && KGDB_KDB
+       default n
+       help
+         KDB can use a PS/2 type keyboard as an input device.
+
 endif # KGDB
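
So on a tree where HAVE_ARCH_KGDB, VT, and the existing KGDB dependencies
(DEBUG_KERNEL, EXPERIMENTAL) are already satisfied, enabling the new frontend
is a three-line .config fragment:

    CONFIG_KGDB=y
    CONFIG_KGDB_KDB=y
    CONFIG_KDB_KEYBOARD=y
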
index 6776d1c..7e267c9 100644 (file)
@@ -116,12 +116,9 @@ static void pmf_gpio_exit(struct gpio_runtime *rt)
        mutex_destroy(&rt->line_in_notify.mutex);
        mutex_destroy(&rt->line_out_notify.mutex);
 
-       if (rt->headphone_notify.gpio_private)
-               kfree(rt->headphone_notify.gpio_private);
-       if (rt->line_in_notify.gpio_private)
-               kfree(rt->line_in_notify.gpio_private);
-       if (rt->line_out_notify.gpio_private)
-               kfree(rt->line_out_notify.gpio_private);
+       kfree(rt->headphone_notify.gpio_private);
+       kfree(rt->line_in_notify.gpio_private);
+       kfree(rt->line_out_notify.gpio_private);
 }
 
 static void pmf_handle_notify_irq(void *data)