Merge branches 'davinci/cleanup', 'imx/cleanup', 'omap/cleanup' and 'pxa/cleanup...
author Arnd Bergmann <arnd@arndb.de>
Sun, 17 Jul 2011 19:28:52 +0000 (21:28 +0200)
committer Arnd Bergmann <arnd@arndb.de>
Sun, 17 Jul 2011 19:28:52 +0000 (21:28 +0200)
500 files changed:
CREDITS
Documentation/hwmon/f71882fg
Documentation/hwmon/k10temp
Documentation/power/devices.txt
Documentation/power/runtime_pm.txt
Documentation/usb/error-codes.txt
MAINTAINERS
Makefile
README
arch/alpha/include/asm/mmzone.h
arch/arm/Kconfig
arch/arm/boot/compressed/head.S
arch/arm/configs/cm_x300_defconfig
arch/arm/include/asm/assembler.h
arch/arm/include/asm/entry-macro-multi.S
arch/arm/include/asm/hardware/scoop.h
arch/arm/kernel/module.c
arch/arm/kernel/smp.c
arch/arm/mach-at91/at91cap9.c
arch/arm/mach-at91/at91cap9_devices.c
arch/arm/mach-at91/at91rm9200.c
arch/arm/mach-at91/at91rm9200_devices.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/at91sam9rl.c
arch/arm/mach-at91/at91sam9rl_devices.c
arch/arm/mach-at91/board-cap9adk.c
arch/arm/mach-at91/board-sam9260ek.c
arch/arm/mach-at91/board-sam9261ek.c
arch/arm/mach-at91/board-sam9263ek.c
arch/arm/mach-at91/board-sam9g20ek.c
arch/arm/mach-at91/board-sam9m10g45ek.c
arch/arm/mach-at91/include/mach/system_rev.h
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/clock.c
arch/arm/mach-davinci/clock.h
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-davinci/include/mach/dm646x.h
arch/arm/mach-davinci/include/mach/psc.h
arch/arm/mach-exynos4/init.c
arch/arm/mach-h720x/Kconfig
arch/arm/mach-integrator/include/mach/bits.h [deleted file]
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/board-fsample.c
arch/arm/mach-omap1/board-generic.c
arch/arm/mach-omap1/board-h2.c
arch/arm/mach-omap1/board-h3.c
arch/arm/mach-omap1/board-htcherald.c
arch/arm/mach-omap1/board-innovator.c
arch/arm/mach-omap1/board-nokia770.c
arch/arm/mach-omap1/board-osk.c
arch/arm/mach-omap1/board-palmte.c
arch/arm/mach-omap1/board-palmtt.c
arch/arm/mach-omap1/board-palmz71.c
arch/arm/mach-omap1/board-perseus2.c
arch/arm/mach-omap1/board-sx1.c
arch/arm/mach-omap1/board-voiceblue.c
arch/arm/mach-omap1/irq.c
arch/arm/mach-omap1/mcbsp.c
arch/arm/mach-omap1/time.c
arch/arm/mach-omap1/timer32k.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/board-2430sdp.c
arch/arm/mach-omap2/board-3430sdp.c
arch/arm/mach-omap2/board-3630sdp.c
arch/arm/mach-omap2/board-4430sdp.c
arch/arm/mach-omap2/board-am3517crane.c
arch/arm/mach-omap2/board-am3517evm.c
arch/arm/mach-omap2/board-apollon.c
arch/arm/mach-omap2/board-cm-t35.c
arch/arm/mach-omap2/board-cm-t3517.c
arch/arm/mach-omap2/board-devkit8000.c
arch/arm/mach-omap2/board-flash.c
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/board-h4.c
arch/arm/mach-omap2/board-igep0020.c
arch/arm/mach-omap2/board-ldp.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-omap2/board-omap3logic.c
arch/arm/mach-omap2/board-omap3pandora.c
arch/arm/mach-omap2/board-omap3stalker.c
arch/arm/mach-omap2/board-omap3touchbook.c
arch/arm/mach-omap2/board-omap4panda.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/board-rm680.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/board-rx51.c
arch/arm/mach-omap2/board-ti8168evm.c
arch/arm/mach-omap2/board-zoom-peripherals.c
arch/arm/mach-omap2/board-zoom.c
arch/arm/mach-omap2/clock44xx.h
arch/arm/mach-omap2/clock44xx_data.c
arch/arm/mach-omap2/clockdomains44xx_data.c
arch/arm/mach-omap2/cm-regbits-44xx.h
arch/arm/mach-omap2/cm1_44xx.h
arch/arm/mach-omap2/cm2_44xx.h
arch/arm/mach-omap2/common-board-devices.c
arch/arm/mach-omap2/common-board-devices.h
arch/arm/mach-omap2/gpmc-nand.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/irq.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_2420_data.c
arch/arm/mach-omap2/omap_hwmod_2430_data.c
arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c [new file with mode: 0644]
arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c [new file with mode: 0644]
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c [new file with mode: 0644]
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c [new file with mode: 0644]
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_hwmod_common_data.c
arch/arm/mach-omap2/omap_hwmod_common_data.h
arch/arm/mach-omap2/pm-debug.c
arch/arm/mach-omap2/pm.h
arch/arm/mach-omap2/pm24xx.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/powerdomains44xx_data.c
arch/arm/mach-omap2/prcm_mpu44xx.h
arch/arm/mach-omap2/prm44xx.h
arch/arm/mach-omap2/smartreflex.c
arch/arm/mach-omap2/timer-gp.c [deleted file]
arch/arm/mach-omap2/timer-gp.h [deleted file]
arch/arm/mach-omap2/timer.c [new file with mode: 0644]
arch/arm/mach-omap2/twl-common.c [new file with mode: 0644]
arch/arm/mach-omap2/twl-common.h [new file with mode: 0644]
arch/arm/mach-pxa/cm-x300.c
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-pxa/include/mach/magician.h
arch/arm/mach-pxa/magician.c
arch/arm/mach-pxa/mioa701.c
arch/arm/mach-pxa/saarb.c
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-ux500/board-mop500-pins.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mm/proc-v7.S
arch/arm/plat-iop/cp6.c
arch/arm/plat-omap/Kconfig
arch/arm/plat-omap/counter_32k.c
arch/arm/plat-omap/dmtimer.c
arch/arm/plat-omap/include/plat/clock.h
arch/arm/plat-omap/include/plat/common.h
arch/arm/plat-omap/include/plat/dmtimer.h
arch/arm/plat-omap/include/plat/irqs.h
arch/arm/plat-omap/include/plat/mcbsp.h
arch/arm/plat-omap/include/plat/nand.h
arch/arm/plat-omap/include/plat/omap-pm.h
arch/arm/plat-omap/include/plat/omap_hwmod.h
arch/arm/plat-omap/mcbsp.c
arch/arm/plat-omap/omap_device.c
arch/arm/plat-samsung/include/plat/regs-serial.h
arch/m32r/include/asm/mmzone.h
arch/mn10300/include/asm/uaccess.h
arch/parisc/include/asm/mmzone.h
arch/powerpc/boot/dts/p1022ds.dts
arch/powerpc/configs/pseries_defconfig
arch/powerpc/include/asm/mmzone.h
arch/powerpc/kernel/rtas-rtc.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/traps.c
arch/powerpc/mm/fault.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/sysdev/mpic.c
arch/s390/Kconfig
arch/s390/kernel/smp.c
arch/s390/oprofile/init.c
arch/sh/Kconfig
arch/sh/configs/sh7757lcr_defconfig
arch/sh/include/asm/mmzone.h
arch/sh/kernel/cpu/sh4a/setup-sh7757.c
arch/sh/kernel/irq.c
arch/sh/mm/alignment.c
arch/sparc/include/asm/mmzone.h
arch/tile/include/asm/mmzone.h
arch/um/include/asm/percpu.h [new file with mode: 0644]
arch/x86/include/asm/apb_timer.h
arch/x86/include/asm/mmzone_32.h
arch/x86/include/asm/mmzone_64.h
arch/x86/kvm/emulate.c
arch/x86/pci/acpi.c
arch/x86/pci/xen.c
arch/x86/xen/mmu.c
block/blk-throttle.c
block/cfq-iosched.c
block/genhd.c
crypto/deflate.c
crypto/zlib.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/pata_marvell.c
drivers/ata/sata_dwc_460ex.c
drivers/base/platform.c
drivers/base/power/clock_ops.c
drivers/base/power/main.c
drivers/connector/connector.c
drivers/crypto/caam/caamalg.c
drivers/firmware/google/Kconfig
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-multitouch.c
drivers/hwmon/Kconfig
drivers/hwmon/adm1275.c
drivers/hwmon/emc6w201.c
drivers/hwmon/f71882fg.c
drivers/hwmon/hwmon-vid.c
drivers/hwmon/pmbus.c
drivers/hwmon/pmbus_core.c
drivers/hwmon/sch5627.c
drivers/i2c/busses/i2c-taos-evm.c
drivers/i2c/muxes/pca954x.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_intr.c
drivers/leds/leds-lp5521.c
drivers/leds/leds-lp5523.c
drivers/md/md.c
drivers/misc/cb710/sgbuf2.c
drivers/misc/ioc4.c
drivers/misc/lkdtm.c
drivers/misc/pti.c
drivers/misc/ti-st/st_core.c
drivers/misc/ti-st/st_kim.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
drivers/mmc/card/queue.h
drivers/mmc/core/core.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_bus.c
drivers/mmc/host/of_mmc_spi.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mmc/host/vub300.c
drivers/mtd/nand/fsl_elbc_nand.c
drivers/net/Kconfig
drivers/net/bnx2x/bnx2x_main.c
drivers/net/can/Kconfig
drivers/net/cxgb3/sge.c
drivers/net/ppp_deflate.c
drivers/net/r8169.c
drivers/net/rionet.c
drivers/net/usb/kalmia.c
drivers/net/usb/zaurus.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/pci/pci-driver.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pcmcia/pxa2xx_sharpsl.c
drivers/pcmcia/pxa2xx_trizeps4.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-vt8500.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/isci/Makefile [new file with mode: 0644]
drivers/scsi/isci/firmware/Makefile [new file with mode: 0644]
drivers/scsi/isci/firmware/README [new file with mode: 0644]
drivers/scsi/isci/firmware/create_fw.c [new file with mode: 0644]
drivers/scsi/isci/firmware/create_fw.h [new file with mode: 0644]
drivers/scsi/isci/host.c [new file with mode: 0644]
drivers/scsi/isci/host.h [new file with mode: 0644]
drivers/scsi/isci/init.c [new file with mode: 0644]
drivers/scsi/isci/isci.h [new file with mode: 0644]
drivers/scsi/isci/phy.c [new file with mode: 0644]
drivers/scsi/isci/phy.h [new file with mode: 0644]
drivers/scsi/isci/port.c [new file with mode: 0644]
drivers/scsi/isci/port.h [new file with mode: 0644]
drivers/scsi/isci/port_config.c [new file with mode: 0644]
drivers/scsi/isci/probe_roms.c [new file with mode: 0644]
drivers/scsi/isci/probe_roms.h [new file with mode: 0644]
drivers/scsi/isci/registers.h [new file with mode: 0644]
drivers/scsi/isci/remote_device.c [new file with mode: 0644]
drivers/scsi/isci/remote_device.h [new file with mode: 0644]
drivers/scsi/isci/remote_node_context.c [new file with mode: 0644]
drivers/scsi/isci/remote_node_context.h [new file with mode: 0644]
drivers/scsi/isci/remote_node_table.c [new file with mode: 0644]
drivers/scsi/isci/remote_node_table.h [new file with mode: 0644]
drivers/scsi/isci/request.c [new file with mode: 0644]
drivers/scsi/isci/request.h [new file with mode: 0644]
drivers/scsi/isci/sas.h [new file with mode: 0644]
drivers/scsi/isci/scu_completion_codes.h [new file with mode: 0644]
drivers/scsi/isci/scu_event_codes.h [new file with mode: 0644]
drivers/scsi/isci/scu_remote_node_context.h [new file with mode: 0644]
drivers/scsi/isci/scu_task_context.h [new file with mode: 0644]
drivers/scsi/isci/task.c [new file with mode: 0644]
drivers/scsi/isci/task.h [new file with mode: 0644]
drivers/scsi/isci/unsolicited_frame_control.c [new file with mode: 0644]
drivers/scsi/isci/unsolicited_frame_control.h [new file with mode: 0644]
drivers/staging/brcm80211/Kconfig
drivers/staging/comedi/Kconfig
drivers/staging/iio/Kconfig
drivers/staging/iio/accel/adis16204.h
drivers/staging/iio/accel/adis16209.h
drivers/staging/iio/gyro/adis16260.h
drivers/staging/iio/imu/adis16400.h
drivers/staging/mei/init.c
drivers/staging/mei/wd.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_pr.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tcm_fc.h
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_io.c
drivers/target/tcm_fc/tfc_sess.c
drivers/tty/n_gsm.c
drivers/tty/n_tty.c
drivers/tty/serial/8250.c
drivers/tty/serial/8250_pci.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/jsm/jsm_driver.c
drivers/tty/serial/mrst_max3110.c
drivers/tty/serial/s5pv210.c
drivers/tty/tty_ldisc.c
drivers/usb/core/driver.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/host/ehci-ath79.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio.h
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/ti_usb_3410_5052.c
drivers/video/amba-clcd.c
drivers/video/fsl-diu-fb.c
drivers/video/geode/gx1fb_core.c
drivers/video/hecubafb.c
drivers/video/sh_mobile_meram.c
drivers/video/sm501fb.c
drivers/video/udlfb.c
drivers/video/vesafb.c
drivers/watchdog/Kconfig
drivers/watchdog/at32ap700x_wdt.c
drivers/watchdog/gef_wdt.c
drivers/watchdog/intel_scu_watchdog.c
drivers/watchdog/mtx-1_wdt.c
drivers/watchdog/wm831x_wdt.c
firmware/Makefile
firmware/isci/isci_firmware.bin.ihex [new file with mode: 0644]
fs/block_dev.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/cifs/Kconfig
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsfs.c
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/smbencrypt.c
fs/ext4/ext4_extents.h
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/move_extent.c
fs/ext4/super.c
fs/inode.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/jfs/file.c
fs/jfs/jfs_imap.c
fs/jfs/jfs_incore.h
fs/jfs/resize.c
fs/lockd/clntproc.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/objlayout/objlayout.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/pnfs_dev.c
fs/omfs/file.c
fs/proc/base.c
fs/romfs/mmap-nommu.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_iget.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_vnodeops.c
include/linux/amba/serial.h
include/linux/blk_types.h
include/linux/blktrace_api.h
include/linux/compat.h
include/linux/connector.h
include/linux/device.h
include/linux/fs.h
include/linux/fsl-diu-fb.h
include/linux/hrtimer.h
include/linux/jbd2.h
include/linux/mmzone.h
include/linux/nfs_page.h
include/linux/nfs_xdr.h
include/linux/pci_ids.h
include/linux/pm.h
include/linux/shmem_fs.h
include/linux/sunrpc/sched.h
include/linux/swap.h
include/net/sock.h
include/sound/sb16_csp.h
include/sound/soc.h
include/trace/events/ext4.h
init/calibrate.c
kernel/power/user.c
kernel/taskstats.c
kernel/time/alarmtimer.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/rmap.c
mm/shmem.c
mm/swapfile.c
mm/truncate.c
mm/vmscan.c
net/bluetooth/hci_conn.c
net/bluetooth/l2cap_core.c
net/bridge/br_multicast.c
net/ipv4/ip_output.c
net/ipv4/netfilter.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/udp.c
net/ipv6/udp.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/clnt.c
net/sunrpc/sched.c
security/keys/request_key.c
sound/atmel/abdac.c
sound/atmel/ac97c.c
sound/pci/asihpi/asihpi.c
sound/pci/cs5535audio/cs5535audio_pcm.c
sound/pci/hda/hda_eld.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/pci/rme9652/hdspm.c
sound/soc/codecs/wm8991.c
sound/soc/imx/Kconfig
sound/soc/imx/imx-pcm-dma-mx2.c
sound/soc/imx/imx-ssi.c
sound/soc/pxa/pxa2xx-pcm.c
sound/soc/soc-cache.c
sound/spi/at73c213.c

diff --git a/CREDITS b/CREDITS
index d78359f..1deb331 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -518,7 +518,7 @@ N: Zach Brown
 E: zab@zabbo.net
 D: maestro pci sound
 
-M: David Brownell
+N: David Brownell
 D: Kernel engineer, mentor, and friend.  Maintained USB EHCI and
 D: gadget layers, SPI subsystem, GPIO subsystem, and more than a few
 D: device drivers.  His encouragement also helped many engineers get
index 84d2623..de91c0d 100644 (file)
@@ -22,6 +22,10 @@ Supported chips:
     Prefix: 'f71869'
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: Available from the Fintek website
+  * Fintek F71869A
+    Prefix: 'f71869a'
+    Addresses scanned: none, address read from Super I/O config space
+    Datasheet: Not public
   * Fintek F71882FG and F71883FG
     Prefix: 'f71882fg'
     Addresses scanned: none, address read from Super I/O config space
index 0393c89..a10f736 100644 (file)
@@ -9,8 +9,8 @@ Supported chips:
   Socket S1G3: Athlon II, Sempron, Turion II
 * AMD Family 11h processors:
   Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
-* AMD Family 12h processors: "Llano"
-* AMD Family 14h processors: "Brazos" (C/E/G-Series)
+* AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
+* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
 * AMD Family 15h processors: "Bulldozer"
 
   Prefix: 'k10temp'
@@ -20,12 +20,16 @@ Supported chips:
     http://support.amd.com/us/Processor_TechDocs/31116.pdf
   BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41256.pdf
+  BIOS and Kernel Developer's Guide (BKDG) for AMD Family 12h Processors:
+    http://support.amd.com/us/Processor_TechDocs/41131.pdf
   BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
     http://support.amd.com/us/Processor_TechDocs/43170.pdf
   Revision Guide for AMD Family 10h Processors:
     http://support.amd.com/us/Processor_TechDocs/41322.pdf
   Revision Guide for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41788.pdf
+  Revision Guide for AMD Family 12h Processors:
+    http://support.amd.com/us/Processor_TechDocs/44739.pdf
   Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
     http://support.amd.com/us/Processor_TechDocs/47534.pdf
   AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
index 8888083..64565aa 100644 (file)
@@ -520,59 +520,20 @@ Support for power domains is provided through the pwr_domain field of struct
 device.  This field is a pointer to an object of type struct dev_power_domain,
 defined in include/linux/pm.h, providing a set of power management callbacks
 analogous to the subsystem-level and device driver callbacks that are executed
-for the given device during all power transitions, in addition to the respective
-subsystem-level callbacks.  Specifically, the power domain "suspend" callbacks
-(i.e. ->runtime_suspend(), ->suspend(), ->freeze(), ->poweroff(), etc.) are
-executed after the analogous subsystem-level callbacks, while the power domain
-"resume" callbacks (i.e. ->runtime_resume(), ->resume(), ->thaw(), ->restore,
-etc.) are executed before the analogous subsystem-level callbacks.  Error codes
-returned by the "suspend" and "resume" power domain callbacks are ignored.
-
-Power domain ->runtime_idle() callback is executed before the subsystem-level
-->runtime_idle() callback and the result returned by it is not ignored.  Namely,
-if it returns error code, the subsystem-level ->runtime_idle() callback will not
-be called and the helper function rpm_idle() executing it will return error
-code.  This mechanism is intended to help platforms where saving device state
-is a time consuming operation and should only be carried out if all devices
-in the power domain are idle, before turning off the shared power resource(s).
-Namely, the power domain ->runtime_idle() callback may return error code until
-the pm_runtime_idle() helper (or its asychronous version) has been called for
-all devices in the power domain (it is recommended that the returned error code
-be -EBUSY in those cases), preventing the subsystem-level ->runtime_idle()
-callback from being run prematurely.
-
-The support for device power domains is only relevant to platforms needing to
-use the same subsystem-level (e.g. platform bus type) and device driver power
-management callbacks in many different power domain configurations and wanting
-to avoid incorporating the support for power domains into the subsystem-level
-callbacks.  The other platforms need not implement it or take it into account
-in any way.
-
-
-System Devices
---------------
-System devices (sysdevs) follow a slightly different API, which can be found in
-
-       include/linux/sysdev.h
-       drivers/base/sys.c
-
-System devices will be suspended with interrupts disabled, and after all other
-devices have been suspended.  On resume, they will be resumed before any other
-devices, and also with interrupts disabled.  These things occur in special
-"sysdev_driver" phases, which affect only system devices.
-
-Thus, after the suspend_noirq (or freeze_noirq or poweroff_noirq) phase, when
-the non-boot CPUs are all offline and IRQs are disabled on the remaining online
-CPU, then a sysdev_driver.suspend phase is carried out, and the system enters a
-sleep state (or a system image is created).  During resume (or after the image
-has been created or loaded) a sysdev_driver.resume phase is carried out, IRQs
-are enabled on the only online CPU, the non-boot CPUs are enabled, and the
-resume_noirq (or thaw_noirq or restore_noirq) phase begins.
-
-Code to actually enter and exit the system-wide low power state sometimes
-involves hardware details that are only known to the boot firmware, and
-may leave a CPU running software (from SRAM or flash memory) that monitors
-the system and manages its wakeup sequence.
+for the given device during all power transitions, instead of the respective
+subsystem-level callbacks.  Specifically, if a device's pm_domain pointer is
+not NULL, the ->suspend() callback from the object pointed to by it will be
+executed instead of its subsystem's (e.g. bus type's) ->suspend() callback and
+analogously for all of the remaining callbacks.  In other words, power management
+domain callbacks, if defined for the given device, always take precedence over
+the callbacks provided by the device's subsystem (e.g. bus type).
+
+The support for device power management domains is only relevant to platforms
+needing to use the same device driver power management callbacks in many
+different power domain configurations and wanting to avoid incorporating the
+support for power domains into subsystem-level callbacks, for example by
+modifying the platform bus type.  Other platforms need not implement it or take
+it into account in any way.
 
 
 Device Low Power (suspend) States
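
The devices.txt text added above describes the dispatch rule: when a device has
a power domain attached (the pwr_domain field of struct device pointing to a
struct dev_power_domain, per the section intro), the domain's callbacks run
instead of the subsystem-level ones.  The sketch below illustrates that
precedence for the suspend case only; the helper name is made up and this is
not the PM core's actual code.

	#include <linux/device.h>
	#include <linux/pm.h>

	/*
	 * Illustrative sketch of the callback precedence described above:
	 * a non-NULL power domain pointer wins over the bus type's callback.
	 */
	static int example_suspend_one_device(struct device *dev)
	{
		if (dev->pwr_domain && dev->pwr_domain->ops.suspend)
			return dev->pwr_domain->ops.suspend(dev);

		if (dev->bus && dev->bus->pm && dev->bus->pm->suspend)
			return dev->bus->pm->suspend(dev);

		return 0;	/* nothing to do for this device */
	}
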
index 654097b..b24875b 100644 (file)
@@ -501,13 +501,29 @@ helper functions described in Section 4.  In that case, pm_runtime_resume()
 should be used.  Of course, for this purpose the device's run-time PM has to be
 enabled earlier by calling pm_runtime_enable().
 
-If the device bus type's or driver's ->probe() or ->remove() callback runs
+If the device bus type's or driver's ->probe() callback runs
 pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts,
 they will fail returning -EAGAIN, because the device's usage counter is
-incremented by the core before executing ->probe() and ->remove().  Still, it
-may be desirable to suspend the device as soon as ->probe() or ->remove() has
-finished, so the PM core uses pm_runtime_idle_sync() to invoke the
-subsystem-level idle callback for the device at that time.
+incremented by the driver core before executing ->probe().  Still, it may be
+desirable to suspend the device as soon as ->probe() has finished, so the driver
+core uses pm_runtime_put_sync() to invoke the subsystem-level idle callback for
+the device at that time.
+
+Moreover, the driver core prevents runtime PM callbacks from racing with the bus
+notifier callback in __device_release_driver(), which is necessary, because the
+notifier is used by some subsystems to carry out operations affecting the
+runtime PM functionality.  It does so by calling pm_runtime_get_sync() before
+driver_sysfs_remove() and the BUS_NOTIFY_UNBIND_DRIVER notifications.  This
+resumes the device if it's in the suspended state and prevents it from
+being suspended again while those routines are being executed.
+
+To allow bus types and drivers to put devices into the suspended state by
+calling pm_runtime_suspend() from their ->remove() routines, the driver core
+executes pm_runtime_put_sync() after running the BUS_NOTIFY_UNBIND_DRIVER
+notifications in __device_release_driver().  This requires bus types and
+drivers to make their ->remove() callbacks avoid races with runtime PM directly,
+but it also allows for more flexibility in the handling of devices during the
+removal of their drivers.
 
 The user space can effectively disallow the driver of the device to power manage
 it at run time by changing the value of its /sys/devices/.../power/control
@@ -566,11 +582,6 @@ to do this is:
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
-The PM core always increments the run-time usage counter before calling the
-->prepare() callback and decrements it after calling the ->complete() callback.
-Hence disabling run-time PM temporarily like this will not cause any run-time
-suspend callbacks to be lost.
-
 7. Generic subsystem callbacks
 
 Subsystems may wish to conserve code space by using the set of generic power
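
The runtime_pm.txt text added above documents the ordering around driver
removal: the driver core takes a runtime PM reference before
driver_sysfs_remove() and the BUS_NOTIFY_UNBIND_DRIVER notification, and drops
it afterwards so a ->remove() routine may suspend the device itself.  A
condensed sketch of that ordering follows, with locking, error paths and the
notifier plumbing left out; it is not the actual __device_release_driver()
body.

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	/* Condensed sketch of the removal ordering described above. */
	static void example_release_driver(struct device *dev)
	{
		struct device_driver *drv = dev->driver;

		pm_runtime_get_sync(dev);	/* resume, block runtime suspend */

		driver_sysfs_remove(dev);
		/* BUS_NOTIFY_UNBIND_DRIVER is sent to the bus notifier here */

		pm_runtime_put_sync(dev);	/* ->remove() may now suspend it */

		if (drv && drv->remove)
			drv->remove(dev);
	}
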
index d83703e..b3f606b 100644 (file)
@@ -76,6 +76,13 @@ A transfer's actual_length may be positive even when an error has been
 reported.  That's because transfers often involve several packets, so that
 one or more packets could finish before an error stops further endpoint I/O.
 
+For isochronous URBs, the urb status value is non-zero only if the URB is
+unlinked, the device is removed, the host controller is disabled, or the total
+transferred length is less than the requested length and the URB_SHORT_NOT_OK
+flag is set.  Completion handlers for isochronous URBs should only see
+urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
+Individual frame descriptor status fields may report more status codes.
+
 
 0                      Transfer completed successfully
 
@@ -132,7 +139,7 @@ one or more packets could finish before an error stops further endpoint I/O.
                        device removal events immediately.
 
 -EXDEV                 ISO transfer only partially completed
-                       look at individual frame status for details
+                       (only set in iso_frame_desc[n].status, not urb->status)
 
 -EINVAL                        ISO madness, if this happens: Log off and go home
 
index f0358cd..ae563fa 100644 (file)
@@ -1345,16 +1345,18 @@ F:      drivers/auxdisplay/
 F:     include/linux/cfag12864b.h
 
 AVR32 ARCHITECTURE
-M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
+M:     Haavard Skinnemoen <hskinnemoen@gmail.com>
+M:     Hans-Christian Egtvedt <egtvedt@samfundet.no>
 W:     http://www.atmel.com/products/AVR32/
 W:     http://avr32linux.org/
 W:     http://avrfreaks.net/
-S:     Supported
+S:     Maintained
 F:     arch/avr32/
 
 AVR32/AT32AP MACHINE SUPPORT
-M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
-S:     Supported
+M:     Haavard Skinnemoen <hskinnemoen@gmail.com>
+M:     Hans-Christian Egtvedt <egtvedt@samfundet.no>
+S:     Maintained
 F:     arch/avr32/mach-at32ap/
 
 AX.25 NETWORK LAYER
@@ -1390,7 +1392,6 @@ F:        include/linux/backlight.h
 BATMAN ADVANCED
 M:     Marek Lindner <lindner_marek@yahoo.de>
 M:     Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-M:     Sven Eckelmann <sven@narfation.org>
 L:     b.a.t.m.a.n@lists.open-mesh.org
 W:     http://www.open-mesh.org/
 S:     Maintained
@@ -1423,7 +1424,6 @@ S:        Supported
 F:     arch/blackfin/
 
 BLACKFIN EMAC DRIVER
-M:     Michael Hennerich <michael.hennerich@analog.com>
 L:     uclinux-dist-devel@blackfin.uclinux.org
 W:     http://blackfin.uclinux.org
 S:     Supported
@@ -1639,7 +1639,7 @@ CAN NETWORK LAYER
 M:     Oliver Hartkopp <socketcan@hartkopp.net>
 M:     Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
 M:     Urs Thuermann <urs.thuermann@volkswagen.de>
-L:     socketcan-core@lists.berlios.de
+L:     socketcan-core@lists.berlios.de (subscribers-only)
 L:     netdev@vger.kernel.org
 W:     http://developer.berlios.de/projects/socketcan/
 S:     Maintained
@@ -1651,7 +1651,7 @@ F:        include/linux/can/raw.h
 
 CAN NETWORK DRIVERS
 M:     Wolfgang Grandegger <wg@grandegger.com>
-L:     socketcan-core@lists.berlios.de
+L:     socketcan-core@lists.berlios.de (subscribers-only)
 L:     netdev@vger.kernel.org
 W:     http://developer.berlios.de/projects/socketcan/
 S:     Maintained
@@ -5181,6 +5181,7 @@ S:        Supported
 F:     drivers/net/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
+M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Ron Mercer <ron.mercer@qlogic.com>
 M:     linux-driver@qlogic.com
 L:     netdev@vger.kernel.org
@@ -6434,8 +6435,9 @@ S:        Maintained
 F:     drivers/usb/misc/rio500*
 
 USB EHCI DRIVER
+M:     Alan Stern <stern@rowland.harvard.edu>
 L:     linux-usb@vger.kernel.org
-S:     Orphan
+S:     Maintained
 F:     Documentation/usb/ehci.txt
 F:     drivers/usb/host/ehci*
 
@@ -6465,6 +6467,12 @@ S:       Maintained
 F:     Documentation/hid/hiddev.txt
 F:     drivers/hid/usbhid/
 
+USB/IP DRIVERS
+M:     Matt Mooney <mfm@muteddisk.com>
+L:     linux-usb@vger.kernel.org
+S:     Maintained
+F:     drivers/staging/usbip/
+
 USB ISP116X DRIVER
 M:     Olav Kongas <ok@artecdesign.ee>
 L:     linux-usb@vger.kernel.org
@@ -6494,8 +6502,9 @@ S:        Maintained
 F:     sound/usb/midi.*
 
 USB OHCI DRIVER
+M:     Alan Stern <stern@rowland.harvard.edu>
 L:     linux-usb@vger.kernel.org
-S:     Orphan
+S:     Maintained
 F:     Documentation/usb/ohci.txt
 F:     drivers/usb/host/ohci*
 
index 41330a0..86f47a0 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Sneaky Weasel
 
 # *DOCUMENTATION*
diff --git a/README b/README
index 8510017..0d5a7dd 100644 (file)
--- a/README
+++ b/README
@@ -1,6 +1,6 @@
-       Linux kernel release 2.6.xx <http://kernel.org/>
+       Linux kernel release 3.x <http://kernel.org/>
 
-These are the release notes for Linux version 2.6.  Read them carefully,
+These are the release notes for Linux version 3.  Read them carefully,
 as they tell you what this is all about, explain how to install the
 kernel, and what to do if something goes wrong. 
 
@@ -62,10 +62,10 @@ INSTALLING the kernel source:
    directory where you have permissions (eg. your home directory) and
    unpack it:
 
-               gzip -cd linux-2.6.XX.tar.gz | tar xvf -
+               gzip -cd linux-3.X.tar.gz | tar xvf -
 
    or
-               bzip2 -dc linux-2.6.XX.tar.bz2 | tar xvf -
+               bzip2 -dc linux-3.X.tar.bz2 | tar xvf -
 
 
    Replace "XX" with the version number of the latest kernel.
@@ -75,15 +75,15 @@ INSTALLING the kernel source:
    files.  They should match the library, and not get messed up by
    whatever the kernel-du-jour happens to be.
 
- - You can also upgrade between 2.6.xx releases by patching.  Patches are
+ - You can also upgrade between 3.x releases by patching.  Patches are
    distributed in the traditional gzip and the newer bzip2 format.  To
    install by patching, get all the newer patch files, enter the
-   top level directory of the kernel source (linux-2.6.xx) and execute:
+   top level directory of the kernel source (linux-3.x) and execute:
 
-               gzip -cd ../patch-2.6.xx.gz | patch -p1
+               gzip -cd ../patch-3.x.gz | patch -p1
 
    or
-               bzip2 -dc ../patch-2.6.xx.bz2 | patch -p1
+               bzip2 -dc ../patch-3.x.bz2 | patch -p1
 
    (repeat xx for all versions bigger than the version of your current
    source tree, _in_order_) and you should be ok.  You may want to remove
@@ -91,9 +91,9 @@ INSTALLING the kernel source:
    failed patches (xxx# or xxx.rej). If there are, either you or me has
    made a mistake.
 
-   Unlike patches for the 2.6.x kernels, patches for the 2.6.x.y kernels
+   Unlike patches for the 3.x kernels, patches for the 3.x.y kernels
    (also known as the -stable kernels) are not incremental but instead apply
-   directly to the base 2.6.x kernel.  Please read
+   directly to the base 3.x kernel.  Please read
    Documentation/applying-patches.txt for more information.
 
    Alternatively, the script patch-kernel can be used to automate this
@@ -107,14 +107,14 @@ INSTALLING the kernel source:
    an alternative directory can be specified as the second argument.
 
  - If you are upgrading between releases using the stable series patches
-   (for example, patch-2.6.xx.y), note that these "dot-releases" are
-   not incremental and must be applied to the 2.6.xx base tree. For
-   example, if your base kernel is 2.6.12 and you want to apply the
-   2.6.12.3 patch, you do not and indeed must not first apply the
-   2.6.12.1 and 2.6.12.2 patches. Similarly, if you are running kernel
-   version 2.6.12.2 and want to jump to 2.6.12.3, you must first
-   reverse the 2.6.12.2 patch (that is, patch -R) _before_ applying
-   the 2.6.12.3 patch.
+   (for example, patch-3.x.y), note that these "dot-releases" are
+   not incremental and must be applied to the 3.x base tree. For
+   example, if your base kernel is 3.0 and you want to apply the
+   3.0.3 patch, you do not and indeed must not first apply the
+   3.0.1 and 3.0.2 patches. Similarly, if you are running kernel
+   version 3.0.2 and want to jump to 3.0.3, you must first
+   reverse the 3.0.2 patch (that is, patch -R) _before_ applying
+   the 3.0.3 patch.
    You can read more on this in Documentation/applying-patches.txt
 
  - Make sure you have no stale .o files and dependencies lying around:
@@ -126,7 +126,7 @@ INSTALLING the kernel source:
 
 SOFTWARE REQUIREMENTS
 
-   Compiling and running the 2.6.xx kernels requires up-to-date
+   Compiling and running the 3.x kernels requires up-to-date
    versions of various software packages.  Consult
    Documentation/Changes for the minimum version numbers required
    and how to get updates for these packages.  Beware that using
@@ -142,11 +142,11 @@ BUILD directory for the kernel:
    Using the option "make O=output/dir" allow you to specify an alternate
    place for the output files (including .config).
    Example:
-     kernel source code:       /usr/src/linux-2.6.N
+     kernel source code:       /usr/src/linux-3.N
      build directory:          /home/name/build/kernel
 
    To configure and build the kernel use:
-   cd /usr/src/linux-2.6.N
+   cd /usr/src/linux-3.N
    make O=/home/name/build/kernel menuconfig
    make O=/home/name/build/kernel
    sudo make O=/home/name/build/kernel modules_install install
index 8af56ce..445dc42 100644 (file)
@@ -56,7 +56,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
  * Given a kernel address, find the home node of the underlying memory.
  */
 #define kvaddr_to_nid(kaddr)   pa_to_nid(__pa(kaddr))
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
 
 /*
  * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
index 9adc278..c1795c9 100644 (file)
@@ -852,6 +852,7 @@ config ARCH_OMAP
        select HAVE_CLK
        select ARCH_REQUIRE_GPIOLIB
        select ARCH_HAS_CPUFREQ
+       select CLKSRC_MMIO
        select GENERIC_CLOCKEVENTS
        select HAVE_SCHED_CLOCK
        select ARCH_HAS_HOLES_MEMORYMODEL
index 942fad9..940b201 100644 (file)
@@ -597,6 +597,8 @@ __common_mmu_cache_on:
                sub     pc, lr, r0, lsr #32     @ properly flush pipeline
 #endif
 
+#define PROC_ENTRY_SIZE (4*5)
+
 /*
  * Here follow the relocatable cache support functions for the
  * various processors.  This is a generic hook for locating an
@@ -624,7 +626,7 @@ call_cache_fn:      adr     r12, proc_types
  ARM(          addeq   pc, r12, r3             ) @ call cache function
  THUMB(                addeq   r12, r3                 )
  THUMB(                moveq   pc, r12                 ) @ call cache function
-               add     r12, r12, #4*5
+               add     r12, r12, #PROC_ENTRY_SIZE
                b       1b
 
 /*
@@ -794,6 +796,16 @@ proc_types:
 
                .size   proc_types, . - proc_types
 
+               /*
+                * If you get a "non-constant expression in ".if" statement"
+                * error from the assembler on this line, check that you have
+                * not accidentally written a "b" instruction where you should
+                * have written W(b).
+                */
+               .if (. - proc_types) % PROC_ENTRY_SIZE != 0
+               .error "The size of one or more proc_types entries is wrong."
+               .endif
+
 /*
  * Turn off the Cache and MMU.  ARMv3 does not support
  * reading the control register, but ARMv4 does.
index 921e56a..f4b7672 100644 (file)
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=18
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_MODULES=y
@@ -13,6 +12,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_PXA=y
+CONFIG_GPIO_PCA953X=y
 CONFIG_MACH_CM_X300=y
 CONFIG_NO_HZ=y
 CONFIG_AEABI=y
@@ -23,7 +23,6 @@ CONFIG_CMDLINE="root=/dev/mtdblock5 rootfstype=ubifs console=ttyS2,38400"
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
 CONFIG_APM_EMULATION=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -40,8 +39,8 @@ CONFIG_IP_PNP_RARP=y
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -60,7 +59,6 @@ CONFIG_MTD_NAND_PXA3xx=y
 CONFIG_MTD_UBI=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
@@ -81,16 +79,15 @@ CONFIG_TOUCHSCREEN_WM97XX=m
 # CONFIG_TOUCHSCREEN_WM9705 is not set
 # CONFIG_TOUCHSCREEN_WM9713 is not set
 # CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_PXA=y
 CONFIG_SERIAL_PXA_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 CONFIG_I2C_PXA=y
 CONFIG_SPI=y
 CONFIG_SPI_GPIO=y
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_PCA953X=y
 # CONFIG_HWMON is not set
 CONFIG_PMIC_DA903X=y
 CONFIG_REGULATOR=y
@@ -102,7 +99,6 @@ CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_TDO24M=y
 # CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_BACKLIGHT_DA903X=m
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_FONTS=y
@@ -131,7 +127,6 @@ CONFIG_HID_GREENASIA=y
 CONFIG_HID_SMARTJOYPLUS=y
 CONFIG_HID_TOPSEED=y
 CONFIG_HID_THRUSTMASTER=y
-CONFIG_HID_WACOM=m
 CONFIG_HID_ZEROPLUS=y
 CONFIG_USB=y
 CONFIG_USB_DEVICEFS=y
@@ -152,7 +147,6 @@ CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_TMPFS=y
@@ -164,7 +158,6 @@ CONFIG_NFS_V3=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_PARTITION_ADVANCED=y
@@ -172,9 +165,7 @@ CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_ISO8859_1=m
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
 # CONFIG_SCHED_DEBUG is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_USER=y
@@ -182,7 +173,6 @@ CONFIG_DEBUG_LL=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ARC4=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC_T10DIF=y
index bc2d2d7..65c3f24 100644 (file)
@@ -13,6 +13,9 @@
  *  Do not include any C declarations in this file - it is included by
  *  assembler source.
  */
+#ifndef __ASM_ASSEMBLER_H__
+#define __ASM_ASSEMBLER_H__
+
 #ifndef __ASSEMBLY__
 #error "Only include this from assembly code"
 #endif
        .macro  ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  ldr, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm
+#endif /* __ASM_ASSEMBLER_H__ */
index ec0bbf7..2da8547 100644 (file)
@@ -1,3 +1,5 @@
+#include <asm/assembler.h>
+
 /*
  * Interrupt handling.  Preserves r7, r8, r9
  */
index ebb3cea..58cdf5d 100644 (file)
@@ -61,7 +61,6 @@ struct scoop_pcmcia_dev {
 struct scoop_pcmcia_config {
        struct scoop_pcmcia_dev *devs;
        int num_devs;
-       void (*pcmcia_init)(void);
        void (*power_ctrl)(struct device *scoop, unsigned short cpr, int nr);
 };
 
index fee7c36..016d6a0 100644 (file)
@@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
                                offset -= 0x02000000;
                        offset += sym->st_value - loc;
 
-                       /* only Thumb addresses allowed (no interworking) */
-                       if (!(offset & 1) ||
+                       /*
+                        * For function symbols, only Thumb addresses are
+                        * allowed (no interworking).
+                        *
+                        * For non-function symbols, the destination
+                        * has no specific ARM/Thumb disposition, so
+                        * the branch is resolved under the assumption
+                        * that interworking is not required.
+                        */
+                       if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+                               !(offset & 1)) ||
                            offset <= (s32)0xff000000 ||
                            offset >= (s32)0x01000000) {
                                pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
index 344e52b..e7f92a4 100644 (file)
@@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        smp_store_cpu_info(cpu);
 
        /*
-        * OK, now it's safe to let the boot CPU continue
+        * OK, now it's safe to let the boot CPU continue.  Wait for
+        * the CPU migration code to notice that the CPU is online
+        * before we continue.
         */
        set_cpu_online(cpu, true);
+       while (!cpu_active(cpu))
+               cpu_relax();
 
        /*
         * OK, it's off to the idle thread for us
index 17fae4a..f1013d0 100644 (file)
@@ -223,15 +223,15 @@ static struct clk *periph_clocks[] __initdata = {
 };
 
 static struct clk_lookup periph_clocks_lookups[] = {
-       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
-       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
        CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
        CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
 };
 
 static struct clk_lookup usart_clocks_lookups[] = {
index cd850ed..dba0d8d 100644 (file)
@@ -1220,7 +1220,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91cap9_set_console_clock(portnr);
+               at91cap9_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index b228ce9..83a1a3f 100644 (file)
@@ -199,9 +199,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
 };
 
 static struct clk_lookup usart_clocks_lookups[] = {
index a0ba475..7227755 100644 (file)
@@ -1135,7 +1135,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91rm9200_set_console_clock(portnr);
+               at91rm9200_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 1fdeb90..39f81f4 100644 (file)
@@ -1173,7 +1173,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9260_set_console_clock(portnr);
+               at91sam9260_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 3eb4538..5004bf0 100644 (file)
@@ -1013,7 +1013,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9261_set_console_clock(portnr);
+               at91sam9261_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index ffe081b..a050f41 100644 (file)
@@ -1395,7 +1395,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9263_set_console_clock(portnr);
+               at91sam9263_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 2bb6ff9..11e2141 100644 (file)
@@ -217,11 +217,11 @@ static struct clk *periph_clocks[] __initdata = {
 static struct clk_lookup periph_clocks_lookups[] = {
        /* One additional fake clock for ohci */
        CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
-       CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk),
-       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
-       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
-       CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
-       CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+       CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk),
+       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
+       CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.0", &mmc0_clk),
+       CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.1", &mmc1_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk),
index 0567486..600bffb 100644 (file)
@@ -1550,7 +1550,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9g45_set_console_clock(portnr);
+               at91sam9g45_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 1a40f16..29dff18 100644 (file)
@@ -191,8 +191,8 @@ static struct clk *periph_clocks[] __initdata = {
 };
 
 static struct clk_lookup periph_clocks_lookups[] = {
-       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
-       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
index c296045..aacb19d 100644 (file)
@@ -1168,7 +1168,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9rl_set_console_clock(portnr);
+               at91sam9rl_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 1904fdf..cdb65d4 100644 (file)
@@ -215,7 +215,7 @@ static void __init cap9adk_add_device_nand(void)
        csa = at91_sys_read(AT91_MATRIX_EBICSA);
        at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V);
 
-       cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit();
+       cap9adk_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (cap9adk_nand_data.bus_width_16)
                cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16;
index d600dc1..5c24074 100644 (file)
@@ -214,7 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index f897f84..b60c22b 100644 (file)
@@ -220,7 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index 605b26f..9bbdc92 100644 (file)
@@ -221,7 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index 7624cf0..1325a50 100644 (file)
@@ -198,7 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index 063c95d..33eaa13 100644 (file)
@@ -178,7 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index b855ee7..8f48660 100644 (file)
  * the 16-31 bit are reserved for at91 generic information
  *
  * bit 31:
- *     0 => nand 16 bit
- *     1 => nand 8 bit
+ *     0 => nand 8 bit
+ *     1 => nand 16 bit
  */
-#define BOARD_HAVE_NAND_8BIT   (1 << 31)
-static int inline board_have_nand_8bit(void)
+#define BOARD_HAVE_NAND_16BIT  (1 << 31)
+static inline int board_have_nand_16bit(void)
 {
-       return system_rev & BOARD_HAVE_NAND_8BIT;
+       return system_rev & BOARD_HAVE_NAND_16BIT;
 }
 
 #endif /* __ARCH_SYSTEM_REV_H__ */
index f6ac9ba..9db9838 100644 (file)
@@ -719,9 +719,15 @@ static void __init cdce_clk_init(void)
        }
 }
 
+#define DM6467T_EVM_REF_FREQ           33000000
+
 static void __init davinci_map_io(void)
 {
        dm646x_init();
+
+       if (machine_is_davinci_dm6467tevm())
+               davinci_set_refclk_rate(DM6467T_EVM_REF_FREQ);
+
        cdce_clk_init();
 }
 
@@ -785,17 +791,6 @@ static __init void evm_init(void)
        soc_info->emac_pdata->phy_id = DM646X_EVM_PHY_ID;
 }
 
-#define DM646X_EVM_REF_FREQ            27000000
-#define DM6467T_EVM_REF_FREQ           33000000
-
-void __init dm646x_board_setup_refclk(struct clk *clk)
-{
-       if (machine_is_davinci_dm6467tevm())
-               clk->rate = DM6467T_EVM_REF_FREQ;
-       else
-               clk->rate = DM646X_EVM_REF_FREQ;
-}
-
 MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
        .boot_params  = (0x80000100),
        .map_io       = davinci_map_io,
index e4e3af1..ae65319 100644 (file)
@@ -368,6 +368,12 @@ static unsigned long clk_leafclk_recalc(struct clk *clk)
        return clk->parent->rate;
 }
 
+int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
+{
+       clk->rate = rate;
+       return 0;
+}
+
 static unsigned long clk_pllclk_recalc(struct clk *clk)
 {
        u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
@@ -506,6 +512,38 @@ int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
 }
 EXPORT_SYMBOL(davinci_set_pllrate);
 
+/**
+ * davinci_set_refclk_rate() - Set the reference clock rate
+ * @rate:      The new rate.
+ *
+ * Sets the reference clock rate to a given value. This will most likely
+ * result in the entire clock tree getting updated.
+ *
+ * This is used to support boards which use a reference clock different
+ * than that used by default in <soc>.c file. The reference clock rate
+ * should be updated early in the boot process; ideally soon after the
+ * clock tree has been initialized once with the default reference clock
+ * rate (davinci_common_init()).
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int davinci_set_refclk_rate(unsigned long rate)
+{
+       struct clk *refclk;
+
+       refclk = clk_get(NULL, "ref");
+       if (IS_ERR(refclk)) {
+               pr_err("%s: failed to get reference clock.\n", __func__);
+               return PTR_ERR(refclk);
+       }
+
+       clk_set_rate(refclk, rate);
+
+       clk_put(refclk);
+
+       return 0;
+}
+
 int __init davinci_clk_init(struct clk_lookup *clocks)
   {
        struct clk_lookup *c;
index 0dd2203..50b2482 100644 (file)
@@ -123,6 +123,8 @@ int davinci_clk_init(struct clk_lookup *clocks);
 int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
                                unsigned int mult, unsigned int postdiv);
 int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate);
+int davinci_set_refclk_rate(unsigned long rate);
+int davinci_simple_set_rate(struct clk *clk, unsigned long rate);
 
 extern struct platform_device davinci_wdt_device;
 extern void davinci_watchdog_reset(struct platform_device *);
index 1e0f809..46739c9 100644 (file)
@@ -42,6 +42,7 @@
 /*
  * Device specific clocks
  */
+#define DM646X_REF_FREQ                27000000
 #define DM646X_AUX_FREQ                24000000
 
 static struct pll_data pll1_data = {
@@ -56,6 +57,8 @@ static struct pll_data pll2_data = {
 
 static struct clk ref_clk = {
        .name = "ref_clk",
+       .rate = DM646X_REF_FREQ,
+       .set_rate = davinci_simple_set_rate,
 };
 
 static struct clk aux_clkin = {
@@ -901,7 +904,6 @@ int __init dm646x_init_edma(struct edma_rsv_info *rsv)
 
 void __init dm646x_init(void)
 {
-       dm646x_board_setup_refclk(&ref_clk);
        davinci_common_init(&davinci_soc_info_dm646x);
 }
 
index 7a27f3f..2a00fe5 100644 (file)
@@ -15,7 +15,6 @@
 #include <mach/asp.h>
 #include <linux/i2c.h>
 #include <linux/videodev2.h>
-#include <linux/clk.h>
 #include <linux/davinci_emac.h>
 
 #define DM646X_EMAC_BASE               (0x01C80000)
@@ -31,7 +30,6 @@
 void __init dm646x_init(void);
 void __init dm646x_init_mcasp0(struct snd_platform_data *pdata);
 void __init dm646x_init_mcasp1(struct snd_platform_data *pdata);
-void __init dm646x_board_setup_refclk(struct clk *clk);
 int __init dm646x_init_edma(struct edma_rsv_info *rsv);
 
 void dm646x_video_init(void);
index a47e6f2..1110fdd 100644 (file)
 #define        DAVINCI_PWR_SLEEP_CNTRL_BASE    0x01C41000
 
 /* Power and Sleep Controller (PSC) Domains */
-#define DAVINCI_GPSC_ARMDOMAIN      0
-#define DAVINCI_GPSC_DSPDOMAIN      1
+#define DAVINCI_GPSC_ARMDOMAIN         0
+#define DAVINCI_GPSC_DSPDOMAIN         1
 
-#define DAVINCI_LPSC_VPSSMSTR       0
-#define DAVINCI_LPSC_VPSSSLV        1
-#define DAVINCI_LPSC_TPCC           2
-#define DAVINCI_LPSC_TPTC0          3
-#define DAVINCI_LPSC_TPTC1          4
-#define DAVINCI_LPSC_EMAC           5
-#define DAVINCI_LPSC_EMAC_WRAPPER   6
-#define DAVINCI_LPSC_USB            9
-#define DAVINCI_LPSC_ATA            10
-#define DAVINCI_LPSC_VLYNQ          11
-#define DAVINCI_LPSC_UHPI           12
-#define DAVINCI_LPSC_DDR_EMIF       13
-#define DAVINCI_LPSC_AEMIF          14
-#define DAVINCI_LPSC_MMC_SD         15
-#define DAVINCI_LPSC_McBSP          17
-#define DAVINCI_LPSC_I2C            18
-#define DAVINCI_LPSC_UART0          19
-#define DAVINCI_LPSC_UART1          20
-#define DAVINCI_LPSC_UART2          21
-#define DAVINCI_LPSC_SPI            22
-#define DAVINCI_LPSC_PWM0           23
-#define DAVINCI_LPSC_PWM1           24
-#define DAVINCI_LPSC_PWM2           25
-#define DAVINCI_LPSC_GPIO           26
-#define DAVINCI_LPSC_TIMER0         27
-#define DAVINCI_LPSC_TIMER1         28
-#define DAVINCI_LPSC_TIMER2         29
-#define DAVINCI_LPSC_SYSTEM_SUBSYS  30
-#define DAVINCI_LPSC_ARM            31
-#define DAVINCI_LPSC_SCR2           32
-#define DAVINCI_LPSC_SCR3           33
-#define DAVINCI_LPSC_SCR4           34
-#define DAVINCI_LPSC_CROSSBAR       35
-#define DAVINCI_LPSC_CFG27          36
-#define DAVINCI_LPSC_CFG3           37
-#define DAVINCI_LPSC_CFG5           38
-#define DAVINCI_LPSC_GEM            39
-#define DAVINCI_LPSC_IMCOP          40
+#define DAVINCI_LPSC_VPSSMSTR          0
+#define DAVINCI_LPSC_VPSSSLV           1
+#define DAVINCI_LPSC_TPCC              2
+#define DAVINCI_LPSC_TPTC0             3
+#define DAVINCI_LPSC_TPTC1             4
+#define DAVINCI_LPSC_EMAC              5
+#define DAVINCI_LPSC_EMAC_WRAPPER      6
+#define DAVINCI_LPSC_USB               9
+#define DAVINCI_LPSC_ATA               10
+#define DAVINCI_LPSC_VLYNQ             11
+#define DAVINCI_LPSC_UHPI              12
+#define DAVINCI_LPSC_DDR_EMIF          13
+#define DAVINCI_LPSC_AEMIF             14
+#define DAVINCI_LPSC_MMC_SD            15
+#define DAVINCI_LPSC_McBSP             17
+#define DAVINCI_LPSC_I2C               18
+#define DAVINCI_LPSC_UART0             19
+#define DAVINCI_LPSC_UART1             20
+#define DAVINCI_LPSC_UART2             21
+#define DAVINCI_LPSC_SPI               22
+#define DAVINCI_LPSC_PWM0              23
+#define DAVINCI_LPSC_PWM1              24
+#define DAVINCI_LPSC_PWM2              25
+#define DAVINCI_LPSC_GPIO              26
+#define DAVINCI_LPSC_TIMER0            27
+#define DAVINCI_LPSC_TIMER1            28
+#define DAVINCI_LPSC_TIMER2            29
+#define DAVINCI_LPSC_SYSTEM_SUBSYS     30
+#define DAVINCI_LPSC_ARM               31
+#define DAVINCI_LPSC_SCR2              32
+#define DAVINCI_LPSC_SCR3              33
+#define DAVINCI_LPSC_SCR4              34
+#define DAVINCI_LPSC_CROSSBAR          35
+#define DAVINCI_LPSC_CFG27             36
+#define DAVINCI_LPSC_CFG3              37
+#define DAVINCI_LPSC_CFG5              38
+#define DAVINCI_LPSC_GEM               39
+#define DAVINCI_LPSC_IMCOP             40
 
 #define DM355_LPSC_TIMER3              5
 #define DM355_LPSC_SPI1                        6
 /*
  * LPSC Assignments
  */
-#define DM646X_LPSC_ARM            0
-#define DM646X_LPSC_C64X_CPU       1
-#define DM646X_LPSC_HDVICP0        2
-#define DM646X_LPSC_HDVICP1        3
-#define DM646X_LPSC_TPCC           4
-#define DM646X_LPSC_TPTC0          5
-#define DM646X_LPSC_TPTC1          6
-#define DM646X_LPSC_TPTC2          7
-#define DM646X_LPSC_TPTC3          8
-#define DM646X_LPSC_PCI            13
-#define DM646X_LPSC_EMAC           14
-#define DM646X_LPSC_VDCE           15
-#define DM646X_LPSC_VPSSMSTR       16
-#define DM646X_LPSC_VPSSSLV        17
-#define DM646X_LPSC_TSIF0          18
-#define DM646X_LPSC_TSIF1          19
-#define DM646X_LPSC_DDR_EMIF       20
-#define DM646X_LPSC_AEMIF          21
-#define DM646X_LPSC_McASP0         22
-#define DM646X_LPSC_McASP1         23
-#define DM646X_LPSC_CRGEN0         24
-#define DM646X_LPSC_CRGEN1         25
-#define DM646X_LPSC_UART0          26
-#define DM646X_LPSC_UART1          27
-#define DM646X_LPSC_UART2          28
-#define DM646X_LPSC_PWM0           29
-#define DM646X_LPSC_PWM1           30
-#define DM646X_LPSC_I2C            31
-#define DM646X_LPSC_SPI            32
-#define DM646X_LPSC_GPIO           33
-#define DM646X_LPSC_TIMER0         34
-#define DM646X_LPSC_TIMER1         35
-#define DM646X_LPSC_ARM_INTC       45
+#define DM646X_LPSC_ARM                0
+#define DM646X_LPSC_C64X_CPU           1
+#define DM646X_LPSC_HDVICP0            2
+#define DM646X_LPSC_HDVICP1            3
+#define DM646X_LPSC_TPCC               4
+#define DM646X_LPSC_TPTC0              5
+#define DM646X_LPSC_TPTC1              6
+#define DM646X_LPSC_TPTC2              7
+#define DM646X_LPSC_TPTC3              8
+#define DM646X_LPSC_PCI                13
+#define DM646X_LPSC_EMAC               14
+#define DM646X_LPSC_VDCE               15
+#define DM646X_LPSC_VPSSMSTR           16
+#define DM646X_LPSC_VPSSSLV            17
+#define DM646X_LPSC_TSIF0              18
+#define DM646X_LPSC_TSIF1              19
+#define DM646X_LPSC_DDR_EMIF           20
+#define DM646X_LPSC_AEMIF              21
+#define DM646X_LPSC_McASP0             22
+#define DM646X_LPSC_McASP1             23
+#define DM646X_LPSC_CRGEN0             24
+#define DM646X_LPSC_CRGEN1             25
+#define DM646X_LPSC_UART0              26
+#define DM646X_LPSC_UART1              27
+#define DM646X_LPSC_UART2              28
+#define DM646X_LPSC_PWM0               29
+#define DM646X_LPSC_PWM1               30
+#define DM646X_LPSC_I2C                31
+#define DM646X_LPSC_SPI                32
+#define DM646X_LPSC_GPIO               33
+#define DM646X_LPSC_TIMER0             34
+#define DM646X_LPSC_TIMER1             35
+#define DM646X_LPSC_ARM_INTC           45
 
 /* PSC0 defines */
 #define DA8XX_LPSC0_TPCC               0
 #define PSC_STATE_DISABLE      2
 #define PSC_STATE_ENABLE       3
 
-#define MDSTAT_STATE_MASK 0x1f
+#define MDSTAT_STATE_MASK      0x1f
 
 #ifndef __ASSEMBLER__
 
index cf91f50..a8a83e3 100644 (file)
@@ -35,6 +35,7 @@ void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
                        tcfg->clocks = exynos4_serial_clocks;
                        tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
                }
+               tcfg->flags |= NO_NEED_CHECK_CLKSRC;
        }
 
        s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
index 9b6982e..abf356c 100644 (file)
@@ -6,12 +6,14 @@ config ARCH_H7201
        bool "gms30c7201"
        depends on ARCH_H720X
        select CPU_H7201
+       select ZONE_DMA
        help
          Say Y here if you are using the Hynix GMS30C7201 Reference Board
 
 config ARCH_H7202
        bool "hms30c7202"
        select CPU_H7202
+       select ZONE_DMA
        depends on ARCH_H720X
        help
          Say Y here if you are using the Hynix HMS30C7202 Reference Board
diff --git a/arch/arm/mach-integrator/include/mach/bits.h b/arch/arm/mach-integrator/include/mach/bits.h
deleted file mode 100644 (file)
index 09b024e..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-/* DO NOT EDIT!! - this file automatically generated
- *                 from .s file by awk -f s2h.awk
- */
-/*  Bit field definitions
- *  Copyright (C) ARM Limited 1998. All rights reserved.
- */
-
-#ifndef __bits_h
-#define __bits_h                        1
-
-#define BIT0                            0x00000001
-#define BIT1                            0x00000002
-#define BIT2                            0x00000004
-#define BIT3                            0x00000008
-#define BIT4                            0x00000010
-#define BIT5                            0x00000020
-#define BIT6                            0x00000040
-#define BIT7                            0x00000080
-#define BIT8                            0x00000100
-#define BIT9                            0x00000200
-#define BIT10                           0x00000400
-#define BIT11                           0x00000800
-#define BIT12                           0x00001000
-#define BIT13                           0x00002000
-#define BIT14                           0x00004000
-#define BIT15                           0x00008000
-#define BIT16                           0x00010000
-#define BIT17                           0x00020000
-#define BIT18                           0x00040000
-#define BIT19                           0x00080000
-#define BIT20                           0x00100000
-#define BIT21                           0x00200000
-#define BIT22                           0x00400000
-#define BIT23                           0x00800000
-#define BIT24                           0x01000000
-#define BIT25                           0x02000000
-#define BIT26                           0x04000000
-#define BIT27                           0x08000000
-#define BIT28                           0x10000000
-#define BIT29                           0x20000000
-#define BIT30                           0x40000000
-#define BIT31                           0x80000000
-
-#endif
-
-/*         END */
index de88c92..f1ac7fb 100644 (file)
@@ -138,7 +138,7 @@ void ams_delta_latch2_write(u16 mask, u16 value)
 static void __init ams_delta_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 static struct map_desc ams_delta_io_desc[] __initdata = {
@@ -391,7 +391,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
        .reserve        = omap_reserve,
        .init_irq       = ams_delta_init_irq,
        .init_machine   = ams_delta_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
 
 EXPORT_SYMBOL(ams_delta_latch1_write);
index 87f173d..a6b1bea 100644 (file)
@@ -329,7 +329,7 @@ static void __init omap_fsample_init(void)
 static void __init omap_fsample_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 /* Only FPGA needs to be mapped here. All others are done with ioremap */
@@ -394,5 +394,5 @@ MACHINE_START(OMAP_FSAMPLE, "OMAP730 F-Sample")
        .reserve        = omap_reserve,
        .init_irq       = omap_fsample_init_irq,
        .init_machine   = omap_fsample_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index 23f4ab9..04fc356 100644 (file)
@@ -31,7 +31,7 @@
 static void __init omap_generic_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 /* assume no Mini-AB port */
@@ -99,5 +99,5 @@ MACHINE_START(OMAP_GENERIC, "Generic OMAP1510/1610/1710")
        .reserve        = omap_reserve,
        .init_irq       = omap_generic_init_irq,
        .init_machine   = omap_generic_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index ba3bd09..cb7fb1a 100644 (file)
@@ -376,7 +376,7 @@ static struct i2c_board_info __initdata h2_i2c_board_info[] = {
 static void __init h2_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 static struct omap_usb_config h2_usb_config __initdata = {
@@ -466,5 +466,5 @@ MACHINE_START(OMAP_H2, "TI-H2")
        .reserve        = omap_reserve,
        .init_irq       = h2_init_irq,
        .init_machine   = h2_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index ac48677..31f3487 100644 (file)
@@ -439,7 +439,7 @@ static void __init h3_init(void)
 static void __init h3_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 static void __init h3_map_io(void)
@@ -454,5 +454,5 @@ MACHINE_START(OMAP_H3, "TI OMAP1710 H3 board")
        .reserve        = omap_reserve,
        .init_irq       = h3_init_irq,
        .init_machine   = h3_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index ba05a51..36e06ea 100644 (file)
@@ -605,7 +605,7 @@ static void __init htcherald_init_irq(void)
 {
        printk(KERN_INFO "htcherald_init_irq.\n");
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 MACHINE_START(HERALD, "HTC Herald")
@@ -616,5 +616,5 @@ MACHINE_START(HERALD, "HTC Herald")
        .reserve        = omap_reserve,
        .init_irq       = htcherald_init_irq,
        .init_machine   = htcherald_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index 2d9b8cb..0b1ba46 100644 (file)
@@ -292,7 +292,7 @@ static void __init innovator_init_smc91x(void)
 static void __init innovator_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 #ifdef CONFIG_ARCH_OMAP15XX
@@ -464,5 +464,5 @@ MACHINE_START(OMAP_INNOVATOR, "TI-Innovator")
        .reserve        = omap_reserve,
        .init_irq       = innovator_init_irq,
        .init_machine   = innovator_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index cfd0849..5469ce2 100644 (file)
@@ -51,7 +51,7 @@ static void __init omap_nokia770_init_irq(void)
        omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004);
 
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 static const unsigned int nokia770_keymap[] = {
@@ -269,5 +269,5 @@ MACHINE_START(NOKIA770, "Nokia 770")
        .reserve        = omap_reserve,
        .init_irq       = omap_nokia770_init_irq,
        .init_machine   = omap_nokia770_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index e68dfde..b08a213 100644 (file)
@@ -282,7 +282,7 @@ static void __init osk_init_cf(void)
 static void __init osk_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 static struct omap_usb_config osk_usb_config __initdata = {
@@ -588,5 +588,5 @@ MACHINE_START(OMAP_OSK, "TI-OSK")
        .reserve        = omap_reserve,
        .init_irq       = osk_init_irq,
        .init_machine   = osk_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index c9d38f4..459cb6b 100644 (file)
@@ -62,7 +62,7 @@
 static void __init omap_palmte_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 static const unsigned int palmte_keymap[] = {
@@ -280,5 +280,5 @@ MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
        .reserve        = omap_reserve,
        .init_irq       = omap_palmte_init_irq,
        .init_machine   = omap_palmte_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index f04f2d3..b214f45 100644 (file)
@@ -266,7 +266,7 @@ static struct spi_board_info __initdata palmtt_boardinfo[] = {
 static void __init omap_palmtt_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 static struct omap_usb_config palmtt_usb_config __initdata = {
@@ -326,5 +326,5 @@ MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T")
        .reserve        = omap_reserve,
        .init_irq       = omap_palmtt_init_irq,
        .init_machine   = omap_palmtt_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index 45f01d2..9b0ea48 100644 (file)
@@ -61,7 +61,7 @@ static void __init
 omap_palmz71_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 static const unsigned int palmz71_keymap[] = {
@@ -346,5 +346,5 @@ MACHINE_START(OMAP_PALMZ71, "OMAP310 based Palm Zire71")
        .reserve        = omap_reserve,
        .init_irq       = omap_palmz71_init_irq,
        .init_machine   = omap_palmz71_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index 3c8ee84..67acd41 100644 (file)
@@ -297,7 +297,7 @@ static void __init omap_perseus2_init(void)
 static void __init omap_perseus2_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 /* Only FPGA needs to be mapped here. All others are done with ioremap */
 static struct map_desc omap_perseus2_io_desc[] __initdata = {
@@ -355,5 +355,5 @@ MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2")
        .reserve        = omap_reserve,
        .init_irq       = omap_perseus2_init_irq,
        .init_machine   = omap_perseus2_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index 0ad781d..9c3b7c5 100644 (file)
@@ -411,7 +411,7 @@ static void __init omap_sx1_init(void)
 static void __init omap_sx1_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 /*----------------------------------------*/
 
@@ -426,5 +426,5 @@ MACHINE_START(SX1, "OMAP310 based Siemens SX1")
        .reserve        = omap_reserve,
        .init_irq       = omap_sx1_init_irq,
        .init_machine   = omap_sx1_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index 65d2420..036edc0 100644 (file)
@@ -162,7 +162,7 @@ static struct omap_board_config_kernel voiceblue_config[] = {
 static void __init voiceblue_init_irq(void)
 {
        omap1_init_common_hw();
-       omap_init_irq();
+       omap1_init_irq();
 }
 
 static void __init voiceblue_map_io(void)
@@ -306,5 +306,5 @@ MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
        .reserve        = omap_reserve,
        .init_irq       = voiceblue_init_irq,
        .init_machine   = voiceblue_init,
-       .timer          = &omap_timer,
+       .timer          = &omap1_timer,
 MACHINE_END
index 5d3da7a..e2b9c90 100644 (file)
@@ -175,7 +175,7 @@ static struct irq_chip omap_irq_chip = {
        .irq_set_wake   = omap_wake_irq,
 };
 
-void __init omap_init_irq(void)
+void __init omap1_init_irq(void)
 {
        int i, j;
 
index d9af981..ab7395d 100644 (file)
@@ -38,7 +38,7 @@ static void omap1_mcbsp_request(unsigned int id)
         * On 1510, 1610 and 1710, McBSP1 and McBSP3
         * are DSP public peripherals.
         */
-       if (id == OMAP_MCBSP1 || id == OMAP_MCBSP3) {
+       if (id == 0 || id == 2) {
                if (dsp_use++ == 0) {
                        api_clk = clk_get(NULL, "api_ck");
                        dsp_clk = clk_get(NULL, "dsp_ck");
@@ -59,7 +59,7 @@ static void omap1_mcbsp_request(unsigned int id)
 
 static void omap1_mcbsp_free(unsigned int id)
 {
-       if (id == OMAP_MCBSP1 || id == OMAP_MCBSP3) {
+       if (id == 0 || id == 2) {
                if (--dsp_use == 0) {
                        if (!IS_ERR(api_clk)) {
                                clk_disable(api_clk);
index 03e1e10..a183777 100644 (file)
@@ -297,7 +297,7 @@ static inline int omap_32k_timer_usable(void)
  * Timer initialization
  * ---------------------------------------------------------------------------
  */
-static void __init omap_timer_init(void)
+static void __init omap1_timer_init(void)
 {
        if (omap_32k_timer_usable()) {
                preferred_sched_clock_init(1);
@@ -307,6 +307,6 @@ static void __init omap_timer_init(void)
        }
 }
 
-struct sys_timer omap_timer = {
-       .init           = omap_timer_init,
+struct sys_timer omap1_timer = {
+       .init           = omap1_timer_init,
 };
index 13d7b8f..96604a5 100644 (file)
@@ -183,10 +183,6 @@ static __init void omap_init_32k_timer(void)
 bool __init omap_32k_timer_init(void)
 {
        omap_init_clocksource_32k();
-
-#ifdef CONFIG_OMAP_DM_TIMER
-       omap_dm_timer_init();
-#endif
        omap_init_32k_timer();
 
        return true;
index b148077..f343365 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common support
-obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer-gp.o pm.o \
+obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
         common.o gpio.o dma.o wd_timer.o
 
 omap-2-3-common                                = irq.o sdrc.o
@@ -145,9 +145,19 @@ obj-$(CONFIG_SOC_OMAP2420)         += opp2420_data.o
 obj-$(CONFIG_SOC_OMAP2430)             += opp2430_data.o
 
 # hwmod data
-obj-$(CONFIG_SOC_OMAP2420)             += omap_hwmod_2420_data.o
-obj-$(CONFIG_SOC_OMAP2430)             += omap_hwmod_2430_data.o
-obj-$(CONFIG_ARCH_OMAP3)               += omap_hwmod_3xxx_data.o
+obj-$(CONFIG_SOC_OMAP2420)             += omap_hwmod_2xxx_ipblock_data.o \
+                                          omap_hwmod_2xxx_3xxx_ipblock_data.o \
+                                          omap_hwmod_2xxx_interconnect_data.o \
+                                          omap_hwmod_2xxx_3xxx_interconnect_data.o \
+                                          omap_hwmod_2420_data.o
+obj-$(CONFIG_SOC_OMAP2430)             += omap_hwmod_2xxx_ipblock_data.o \
+                                          omap_hwmod_2xxx_3xxx_ipblock_data.o \
+                                          omap_hwmod_2xxx_interconnect_data.o \
+                                          omap_hwmod_2xxx_3xxx_interconnect_data.o \
+                                          omap_hwmod_2430_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += omap_hwmod_2xxx_3xxx_ipblock_data.o \
+                                          omap_hwmod_2xxx_3xxx_interconnect_data.o \
+                                          omap_hwmod_3xxx_data.o
 obj-$(CONFIG_ARCH_OMAP4)               += omap_hwmod_44xx_data.o
 
 # EMU peripherals
@@ -269,4 +279,4 @@ obj-$(CONFIG_ARCH_OMAP4)            += hwspinlock.o
 disp-$(CONFIG_OMAP2_DSS)               := display.o
 obj-y                                  += $(disp-m) $(disp-y)
 
-obj-y                                  += common-board-devices.o
+obj-y                                  += common-board-devices.o twl-common.o
index 5de6eac..2028464 100644 (file)
@@ -260,7 +260,7 @@ MACHINE_START(OMAP_2430SDP, "OMAP2430 sdp2430 board")
        .reserve        = omap_reserve,
        .map_io         = omap_2430sdp_map_io,
        .init_early     = omap_2430sdp_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap2_init_irq,
        .init_machine   = omap_2430sdp_init,
-       .timer          = &omap_timer,
+       .timer          = &omap2_timer,
 MACHINE_END
index 5dac974..bd600cf 100644 (file)
@@ -231,22 +231,6 @@ static void __init omap_3430sdp_init_early(void)
        omap2_init_common_devices(hyb18m512160af6_sdrc_params, NULL);
 }
 
-static int sdp3430_batt_table[] = {
-/* 0 C*/
-30800, 29500, 28300, 27100,
-26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
-17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
-11600, 11200, 10800, 10400, 10000, 9630,   9280,   8950,   8620,   8310,
-8020,   7730,   7460,   7200,   6950,   6710,   6470,   6250,   6040,   5830,
-5640,   5450,   5260,   5090,   4920,   4760,   4600,   4450,   4310,   4170,
-4040,   3910,   3790,   3670,   3550
-};
-
-static struct twl4030_bci_platform_data sdp3430_bci_data = {
-       .battery_tmp_tbl        = sdp3430_batt_table,
-       .tblsize                = ARRAY_SIZE(sdp3430_batt_table),
-};
-
 static struct omap2_hsmmc_info mmc[] = {
        {
                .mmc            = 1,
@@ -292,14 +276,6 @@ static struct twl4030_gpio_platform_data sdp3430_gpio_data = {
        .setup          = sdp3430_twl_gpio_setup,
 };
 
-static struct twl4030_usb_data sdp3430_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_madc_platform_data sdp3430_madc_data = {
-       .irq_line       = 1,
-};
-
 /* regulator consumer mappings */
 
 /* ads7846 on SPI */
@@ -307,16 +283,6 @@ static struct regulator_consumer_supply sdp3430_vaux3_supplies[] = {
        REGULATOR_SUPPLY("vcc", "spi1.0"),
 };
 
-static struct regulator_consumer_supply sdp3430_vdda_dac_supplies[] = {
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"),
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = {
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
 static struct regulator_consumer_supply sdp3430_vmmc1_supplies[] = {
        REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
@@ -433,54 +399,10 @@ static struct regulator_init_data sdp3430_vsim = {
        .consumer_supplies      = sdp3430_vsim_supplies,
 };
 
-/* VDAC for DSS driving S-Video */
-static struct regulator_init_data sdp3430_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = ARRAY_SIZE(sdp3430_vdda_dac_supplies),
-       .consumer_supplies      = sdp3430_vdda_dac_supplies,
-};
-
-static struct regulator_init_data sdp3430_vpll2 = {
-       .constraints = {
-               .name                   = "VDVI",
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = ARRAY_SIZE(sdp3430_vpll2_supplies),
-       .consumer_supplies      = sdp3430_vpll2_supplies,
-};
-
-static struct twl4030_codec_audio_data sdp3430_audio;
-
-static struct twl4030_codec_data sdp3430_codec = {
-       .audio_mclk = 26000000,
-       .audio = &sdp3430_audio,
-};
-
 static struct twl4030_platform_data sdp3430_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
-       .bci            = &sdp3430_bci_data,
        .gpio           = &sdp3430_gpio_data,
-       .madc           = &sdp3430_madc_data,
        .keypad         = &sdp3430_kp_data,
-       .usb            = &sdp3430_usb_data,
-       .codec          = &sdp3430_codec,
 
        .vaux1          = &sdp3430_vaux1,
        .vaux2          = &sdp3430_vaux2,
@@ -489,14 +411,21 @@ static struct twl4030_platform_data sdp3430_twldata = {
        .vmmc1          = &sdp3430_vmmc1,
        .vmmc2          = &sdp3430_vmmc2,
        .vsim           = &sdp3430_vsim,
-       .vdac           = &sdp3430_vdac,
-       .vpll2          = &sdp3430_vpll2,
 };
 
 static int __init omap3430_i2c_init(void)
 {
        /* i2c1 for PMIC only */
+       omap3_pmic_get_config(&sdp3430_twldata,
+                       TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_BCI |
+                       TWL_COMMON_PDATA_MADC | TWL_COMMON_PDATA_AUDIO,
+                       TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+       sdp3430_twldata.vdac->constraints.apply_uV = true;
+       sdp3430_twldata.vpll2->constraints.apply_uV = true;
+       sdp3430_twldata.vpll2->constraints.name = "VDVI";
+
        omap3_pmic_init("twl4030", &sdp3430_twldata);
+
        /* i2c2 on camera connector (for sensor control) and optional isp1301 */
        omap_register_i2c_bus(2, 400, NULL, 0);
        /* i2c3 on display connector (for DVI, tfp410) */
@@ -804,7 +733,7 @@ MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = omap_3430sdp_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = omap_3430sdp_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
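
The same conversion repeats in the OMAP2+ board files that follow: each board's twl4030_platform_data keeps only board-specific children, and the shared USB/MADC/BCI/audio platform data plus the stock regulator definitions are requested from the new twl-common helpers before the PMIC is registered. A minimal sketch of the pattern for a hypothetical TWL4030 board (the myboard_ names are illustrative; the helper and flag names are the ones used in the hunks above and below):

	static struct twl4030_platform_data myboard_twldata = {
		/* only board-specific platform_data stays in the board file */
		.gpio	= &myboard_gpio_data,	/* hypothetical, defined elsewhere */
		.vmmc1	= &myboard_vmmc1,	/* hypothetical, defined elsewhere */
	};

	static int __init myboard_i2c_init(void)
	{
		/* fill in the common USB/audio pdata and the stock VDAC regulator */
		omap3_pmic_get_config(&myboard_twldata,
				TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
				TWL_COMMON_REGULATOR_VDAC);

		omap3_pmic_init("twl4030", &myboard_twldata);
		return 0;
	}

Any board-specific tweak to a shared regulator (a constraint name, apply_uV, extra consumer supplies) is applied through the pointers filled in by omap3_pmic_get_config(), as the 3430SDP hunk above does for vdac and vpll2.
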
index a5933cc..e4f37b5 100644 (file)
@@ -219,7 +219,7 @@ MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = omap_sdp_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = omap_sdp_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
index 63de2d3..933b25b 100644 (file)
@@ -40,7 +40,6 @@
 
 #include "mux.h"
 #include "hsmmc.h"
-#include "timer-gp.h"
 #include "control.h"
 #include "common-board-devices.h"
 
@@ -295,9 +294,6 @@ static void __init omap_4430sdp_init_early(void)
 {
        omap2_init_common_infrastructure();
        omap2_init_common_devices(NULL, NULL);
-#ifdef CONFIG_OMAP_32K_TIMER
-       omap2_gp_clockevent_set_gptimer(1);
-#endif
 }
 
 static struct omap_musb_board_data musb_board_data = {
@@ -306,14 +302,6 @@ static struct omap_musb_board_data musb_board_data = {
        .power                  = 100,
 };
 
-static struct twl4030_usb_data omap4_usbphy_data = {
-       .phy_init       = omap4430_phy_init,
-       .phy_exit       = omap4430_phy_exit,
-       .phy_power      = omap4430_phy_power,
-       .phy_set_clock  = omap4430_phy_set_clk,
-       .phy_suspend    = omap4430_phy_suspend,
-};
-
 static struct omap2_hsmmc_info mmc[] = {
        {
                .mmc            = 2,
@@ -333,16 +321,7 @@ static struct omap2_hsmmc_info mmc[] = {
 };
 
 static struct regulator_consumer_supply sdp4430_vaux_supply[] = {
-       {
-               .supply = "vmmc",
-               .dev_name = "omap_hsmmc.1",
-       },
-};
-static struct regulator_consumer_supply sdp4430_vmmc_supply[] = {
-       {
-               .supply = "vmmc",
-               .dev_name = "omap_hsmmc.0",
-       },
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
 };
 
 static int omap4_twl6030_hsmmc_late_init(struct device *dev)
@@ -399,65 +378,10 @@ static struct regulator_init_data sdp4430_vaux1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
+       .num_consumer_supplies  = ARRAY_SIZE(sdp4430_vaux_supply),
        .consumer_supplies      = sdp4430_vaux_supply,
 };
 
-static struct regulator_init_data sdp4430_vaux2 = {
-       .constraints = {
-               .min_uV                 = 1200000,
-               .max_uV                 = 2800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data sdp4430_vaux3 = {
-       .constraints = {
-               .min_uV                 = 1000000,
-               .max_uV                 = 3000000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-/* VMMC1 for MMC1 card */
-static struct regulator_init_data sdp4430_vmmc = {
-       .constraints = {
-               .min_uV                 = 1200000,
-               .max_uV                 = 3000000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = sdp4430_vmmc_supply,
-};
-
-static struct regulator_init_data sdp4430_vpp = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 2500000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
 static struct regulator_init_data sdp4430_vusim = {
        .constraints = {
                .min_uV                 = 1200000,
@@ -471,74 +395,10 @@ static struct regulator_init_data sdp4430_vusim = {
        },
 };
 
-static struct regulator_init_data sdp4430_vana = {
-       .constraints = {
-               .min_uV                 = 2100000,
-               .max_uV                 = 2100000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data sdp4430_vcxio = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data sdp4430_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data sdp4430_vusb = {
-       .constraints = {
-               .min_uV                 = 3300000,
-               .max_uV                 = 3300000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  =      REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data sdp4430_clk32kg = {
-       .constraints = {
-               .valid_ops_mask         = REGULATOR_CHANGE_STATUS,
-       },
-};
-
 static struct twl4030_platform_data sdp4430_twldata = {
-       .irq_base       = TWL6030_IRQ_BASE,
-       .irq_end        = TWL6030_IRQ_END,
-
        /* Regulators */
-       .vmmc           = &sdp4430_vmmc,
-       .vpp            = &sdp4430_vpp,
        .vusim          = &sdp4430_vusim,
-       .vana           = &sdp4430_vana,
-       .vcxio          = &sdp4430_vcxio,
-       .vdac           = &sdp4430_vdac,
-       .vusb           = &sdp4430_vusb,
        .vaux1          = &sdp4430_vaux1,
-       .vaux2          = &sdp4430_vaux2,
-       .vaux3          = &sdp4430_vaux3,
-       .clk32kg        = &sdp4430_clk32kg,
-       .usb            = &omap4_usbphy_data
 };
 
 static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = {
@@ -556,6 +416,16 @@ static struct i2c_board_info __initdata sdp4430_i2c_4_boardinfo[] = {
 };
 static int __init omap4_i2c_init(void)
 {
+       omap4_pmic_get_config(&sdp4430_twldata, TWL_COMMON_PDATA_USB,
+                       TWL_COMMON_REGULATOR_VDAC |
+                       TWL_COMMON_REGULATOR_VAUX2 |
+                       TWL_COMMON_REGULATOR_VAUX3 |
+                       TWL_COMMON_REGULATOR_VMMC |
+                       TWL_COMMON_REGULATOR_VPP |
+                       TWL_COMMON_REGULATOR_VANA |
+                       TWL_COMMON_REGULATOR_VCXIO |
+                       TWL_COMMON_REGULATOR_VUSB |
+                       TWL_COMMON_REGULATOR_CLK32KG);
        omap4_pmic_init("twl6030", &sdp4430_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo,
@@ -773,5 +643,5 @@ MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
        .init_early     = omap_4430sdp_init_early,
        .init_irq       = gic_init_irq,
        .init_machine   = omap_4430sdp_init,
-       .timer          = &omap_timer,
+       .timer          = &omap4_timer,
 MACHINE_END
index 5e438a7..5f2b55f 100644 (file)
@@ -104,7 +104,7 @@ MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = am3517_crane_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = am3517_crane_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
index 63af417..f3006c3 100644 (file)
@@ -494,7 +494,7 @@ MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = am3517_evm_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = am3517_evm_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
index b124bdf..7021170 100644 (file)
@@ -354,7 +354,7 @@ MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
        .reserve        = omap_reserve,
        .map_io         = omap_apollon_map_io,
        .init_early     = omap_apollon_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap2_init_irq,
        .init_machine   = omap_apollon_init,
-       .timer          = &omap_timer,
+       .timer          = &omap2_timer,
 MACHINE_END
index 77456de..35891d4 100644 (file)
@@ -162,9 +162,7 @@ static struct mtd_partition cm_t35_nand_partitions[] = {
 static struct omap_nand_platform_data cm_t35_nand_data = {
        .parts                  = cm_t35_nand_partitions,
        .nr_parts               = ARRAY_SIZE(cm_t35_nand_partitions),
-       .dma_channel            = -1,   /* disable DMA in OMAP NAND driver */
        .cs                     = 0,
-
 };
 
 static void __init cm_t35_init_nand(void)
@@ -337,19 +335,17 @@ static void __init cm_t35_init_display(void)
        }
 }
 
-static struct regulator_consumer_supply cm_t35_vmmc1_supply = {
-       .supply                 = "vmmc",
+static struct regulator_consumer_supply cm_t35_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
 
-static struct regulator_consumer_supply cm_t35_vsim_supply = {
-       .supply                 = "vmmc_aux",
+static struct regulator_consumer_supply cm_t35_vsim_supply[] = {
+       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
 };
 
-static struct regulator_consumer_supply cm_t35_vdac_supply =
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-static struct regulator_consumer_supply cm_t35_vdvi_supply =
-       REGULATOR_SUPPLY("vdvi", "omapdss");
+static struct regulator_consumer_supply cm_t35_vdvi_supply[] = {
+       REGULATOR_SUPPLY("vdvi", "omapdss"),
+};
 
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
 static struct regulator_init_data cm_t35_vmmc1 = {
@@ -362,8 +358,8 @@ static struct regulator_init_data cm_t35_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &cm_t35_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(cm_t35_vmmc1_supply),
+       .consumer_supplies      = cm_t35_vmmc1_supply,
 };
 
 /* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -377,41 +373,8 @@ static struct regulator_init_data cm_t35_vsim = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &cm_t35_vsim_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data cm_t35_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &cm_t35_vdac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data cm_t35_vpll2 = {
-       .constraints = {
-               .name                   = "VDVI",
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &cm_t35_vdvi_supply,
-};
-
-static struct twl4030_usb_data cm_t35_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
+       .num_consumer_supplies  = ARRAY_SIZE(cm_t35_vsim_supply),
+       .consumer_supplies      = cm_t35_vsim_supply,
 };
 
 static uint32_t cm_t35_keymap[] = {
@@ -481,10 +444,6 @@ static int cm_t35_twl_gpio_setup(struct device *dev, unsigned gpio,
        mmc[0].gpio_cd = gpio + 0;
        omap2_hsmmc_init(mmc);
 
-       /* link regulators to MMC adapters */
-       cm_t35_vmmc1_supply.dev = mmc[0].dev;
-       cm_t35_vsim_supply.dev = mmc[0].dev;
-
        return 0;
 }
 
@@ -496,21 +455,23 @@ static struct twl4030_gpio_platform_data cm_t35_gpio_data = {
 };
 
 static struct twl4030_platform_data cm_t35_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
        .keypad         = &cm_t35_kp_data,
-       .usb            = &cm_t35_usb_data,
        .gpio           = &cm_t35_gpio_data,
        .vmmc1          = &cm_t35_vmmc1,
        .vsim           = &cm_t35_vsim,
-       .vdac           = &cm_t35_vdac,
-       .vpll2          = &cm_t35_vpll2,
 };
 
 static void __init cm_t35_init_i2c(void)
 {
+       omap3_pmic_get_config(&cm_t35_twldata, TWL_COMMON_PDATA_USB,
+                       TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+       cm_t35_twldata.vpll2->constraints.name = "VDVI";
+       cm_t35_twldata.vpll2->num_consumer_supplies =
+                                               ARRAY_SIZE(cm_t35_vdvi_supply);
+       cm_t35_twldata.vpll2->consumer_supplies = cm_t35_vdvi_supply;
+
        omap3_pmic_init("tps65930", &cm_t35_twldata);
 }
 
@@ -646,7 +607,7 @@ MACHINE_START(CM_T35, "Compulab CM-T35")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = cm_t35_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = cm_t35_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
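
A second recurring conversion in these board files: regulator consumer supplies become arrays initialized with REGULATOR_SUPPLY(), which names the consumer device up front instead of patching .dev at runtime (note the dropped cm_t35_vmmc1_supply.dev = mmc[0].dev lines above), and num_consumer_supplies is derived with ARRAY_SIZE(). A minimal sketch of the new form (the myboard_ names and voltage limits are illustrative):

	static struct regulator_consumer_supply myboard_vmmc1_supply[] = {
		/* bound to the MMC host by device name, no runtime .dev fixup */
		REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
	};

	static struct regulator_init_data myboard_vmmc1 = {
		.constraints = {
			.min_uV			= 1850000,	/* illustrative */
			.max_uV			= 3150000,	/* illustrative */
			.valid_modes_mask	= REGULATOR_MODE_NORMAL
						| REGULATOR_MODE_STANDBY,
			.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
						| REGULATOR_CHANGE_MODE
						| REGULATOR_CHANGE_STATUS,
		},
		.num_consumer_supplies	= ARRAY_SIZE(myboard_vmmc1_supply),
		.consumer_supplies	= myboard_vmmc1_supply,
	};
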
index c3a9fd3..05c72f4 100644 (file)
@@ -236,7 +236,6 @@ static struct mtd_partition cm_t3517_nand_partitions[] = {
 static struct omap_nand_platform_data cm_t3517_nand_data = {
        .parts                  = cm_t3517_nand_partitions,
        .nr_parts               = ARRAY_SIZE(cm_t3517_nand_partitions),
-       .dma_channel            = -1,   /* disable DMA in OMAP NAND driver */
        .cs                     = 0,
 };
 
@@ -304,7 +303,7 @@ MACHINE_START(CM_T3517, "Compulab CM-T3517")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = cm_t3517_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = cm_t3517_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
index 34956ec..b6002ec 100644 (file)
@@ -58,7 +58,6 @@
 
 #include "mux.h"
 #include "hsmmc.h"
-#include "timer-gp.h"
 #include "common-board-devices.h"
 
 #define OMAP_DM9000_GPIO_IRQ   25
@@ -130,13 +129,14 @@ static void devkit8000_panel_disable_dvi(struct omap_dss_device *dssdev)
                gpio_set_value_cansleep(dssdev->reset_gpio, 0);
 }
 
-static struct regulator_consumer_supply devkit8000_vmmc1_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
-
+static struct regulator_consumer_supply devkit8000_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
 
 /* ads7846 on SPI */
-static struct regulator_consumer_supply devkit8000_vio_supply =
-       REGULATOR_SUPPLY("vcc", "spi2.0");
+static struct regulator_consumer_supply devkit8000_vio_supply[] = {
+       REGULATOR_SUPPLY("vcc", "spi2.0"),
+};
 
 static struct panel_generic_dpi_data lcd_panel = {
        .name                   = "generic",
@@ -186,9 +186,6 @@ static struct omap_dss_board_info devkit8000_dss_data = {
        .default_device = &devkit8000_lcd_device,
 };
 
-static struct regulator_consumer_supply devkit8000_vdda_dac_supply =
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
 static uint32_t board_keymap[] = {
        KEY(0, 0, KEY_1),
        KEY(1, 0, KEY_2),
@@ -284,22 +281,8 @@ static struct regulator_init_data devkit8000_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &devkit8000_vmmc1_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data devkit8000_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &devkit8000_vdda_dac_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(devkit8000_vmmc1_supply),
+       .consumer_supplies      = devkit8000_vmmc1_supply,
 };
 
 /* VPLL1 for digital video outputs */
@@ -327,31 +310,14 @@ static struct regulator_init_data devkit8000_vio = {
                .valid_ops_mask         = REGULATOR_CHANGE_MODE
                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &devkit8000_vio_supply,
-};
-
-static struct twl4030_usb_data devkit8000_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_codec_audio_data devkit8000_audio_data;
-
-static struct twl4030_codec_data devkit8000_codec_data = {
-       .audio_mclk = 26000000,
-       .audio = &devkit8000_audio_data,
+       .num_consumer_supplies  = ARRAY_SIZE(devkit8000_vio_supply),
+       .consumer_supplies      = devkit8000_vio_supply,
 };
 
 static struct twl4030_platform_data devkit8000_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
-       .usb            = &devkit8000_usb_data,
        .gpio           = &devkit8000_gpio_data,
-       .codec          = &devkit8000_codec_data,
        .vmmc1          = &devkit8000_vmmc1,
-       .vdac           = &devkit8000_vdac,
        .vpll1          = &devkit8000_vpll1,
        .vio            = &devkit8000_vio,
        .keypad         = &devkit8000_kp_data,
@@ -359,6 +325,9 @@ static struct twl4030_platform_data devkit8000_twldata = {
 
 static int __init devkit8000_i2c_init(void)
 {
+       omap3_pmic_get_config(&devkit8000_twldata,
+                         TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+                         TWL_COMMON_REGULATOR_VDAC);
        omap3_pmic_init("tps65930", &devkit8000_twldata);
        /* Bus 3 is attached to the DVI port where devices like the pico DLP
         * projector don't work reliably with 400kHz */
@@ -438,10 +407,7 @@ static void __init devkit8000_init_early(void)
 
 static void __init devkit8000_init_irq(void)
 {
-       omap_init_irq();
-#ifdef CONFIG_OMAP_32K_TIMER
-       omap2_gp_clockevent_set_gptimer(12);
-#endif
+       omap3_init_irq();
 }
 
 #define OMAP_DM9000_BASE       0x2c000000
@@ -707,5 +673,5 @@ MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
        .init_early     = devkit8000_init_early,
        .init_irq       = devkit8000_init_irq,
        .init_machine   = devkit8000_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_secure_timer,
 MACHINE_END
index 729892f..aa1b0cb 100644 (file)
@@ -132,11 +132,7 @@ static struct gpmc_timings nand_timings = {
 };
 
 static struct omap_nand_platform_data board_nand_data = {
-       .nand_setup     = NULL,
        .gpmc_t         = &nand_timings,
-       .dma_channel    = -1,           /* disable DMA in OMAP NAND driver */
-       .dev_ready      = NULL,
-       .devsize        = 0,    /* '0' for 8-bit, '1' for 16-bit device */
 };
 
 void
index 73e3c31..54db41a 100644 (file)
@@ -70,7 +70,7 @@ MACHINE_START(OMAP_GENERIC, "Generic OMAP24xx")
        .reserve        = omap_reserve,
        .map_io         = omap_generic_map_io,
        .init_early     = omap_generic_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap2_init_irq,
        .init_machine   = omap_generic_init,
-       .timer          = &omap_timer,
+       .timer          = &omap2_timer,
 MACHINE_END
index bac7933..45de2b3 100644 (file)
@@ -298,7 +298,7 @@ static void __init omap_h4_init_early(void)
 
 static void __init omap_h4_init_irq(void)
 {
-       omap_init_irq();
+       omap2_init_irq();
 }
 
 static struct at24_platform_data m24c01 = {
@@ -388,5 +388,5 @@ MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
        .init_early     = omap_h4_init_early,
        .init_irq       = omap_h4_init_irq,
        .init_machine   = omap_h4_init,
-       .timer          = &omap_timer,
+       .timer          = &omap2_timer,
 MACHINE_END
index 0c1bfca..35be778 100644 (file)
@@ -222,8 +222,9 @@ static inline void __init igep2_init_smsc911x(void)
 static inline void __init igep2_init_smsc911x(void) { }
 #endif
 
-static struct regulator_consumer_supply igep_vmmc1_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
+static struct regulator_consumer_supply igep_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
 
 /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
 static struct regulator_init_data igep_vmmc1 = {
@@ -236,12 +237,13 @@ static struct regulator_init_data igep_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &igep_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(igep_vmmc1_supply),
+       .consumer_supplies      = igep_vmmc1_supply,
 };
 
-static struct regulator_consumer_supply igep_vio_supply =
-       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
+static struct regulator_consumer_supply igep_vio_supply[] = {
+       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"),
+};
 
 static struct regulator_init_data igep_vio = {
        .constraints = {
@@ -254,20 +256,21 @@ static struct regulator_init_data igep_vio = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &igep_vio_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(igep_vio_supply),
+       .consumer_supplies      = igep_vio_supply,
 };
 
-static struct regulator_consumer_supply igep_vmmc2_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
+static struct regulator_consumer_supply igep_vmmc2_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
+};
 
 static struct regulator_init_data igep_vmmc2 = {
        .constraints            = {
                .valid_modes_mask       = REGULATOR_MODE_NORMAL,
                .always_on              = 1,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &igep_vmmc2_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(igep_vmmc2_supply),
+       .consumer_supplies      = igep_vmmc2_supply,
 };
 
 static struct fixed_voltage_config igep_vwlan = {
@@ -440,10 +443,6 @@ static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = {
        .setup          = igep_twl_gpio_setup,
 };
 
-static struct twl4030_usb_data igep_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
 static int igep2_enable_dvi(struct omap_dss_device *dssdev)
 {
        gpio_direction_output(IGEP2_GPIO_DVI_PUP, 1);
@@ -480,26 +479,6 @@ static struct omap_dss_board_info igep2_dss_data = {
        .default_device = &igep2_dvi_device,
 };
 
-static struct regulator_consumer_supply igep2_vpll2_supplies[] = {
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
-static struct regulator_init_data igep2_vpll2 = {
-       .constraints = {
-               .name                   = "VDVI",
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = ARRAY_SIZE(igep2_vpll2_supplies),
-       .consumer_supplies      = igep2_vpll2_supplies,
-};
-
 static void __init igep2_display_init(void)
 {
        int err = gpio_request_one(IGEP2_GPIO_DVI_PUP, GPIOF_OUT_INIT_HIGH,
@@ -519,13 +498,6 @@ static void __init igep_init_early(void)
                                  m65kxxxxam_sdrc_params);
 }
 
-static struct twl4030_codec_audio_data igep2_audio_data;
-
-static struct twl4030_codec_data igep2_codec_data = {
-       .audio_mclk = 26000000,
-       .audio = &igep2_audio_data,
-};
-
 static int igep2_keymap[] = {
        KEY(0, 0, KEY_LEFT),
        KEY(0, 1, KEY_RIGHT),
@@ -558,11 +530,7 @@ static struct twl4030_keypad_data igep2_keypad_pdata = {
 };
 
 static struct twl4030_platform_data igep_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
-       .usb            = &igep_usb_data,
        .gpio           = &igep_twl4030_gpio_pdata,
        .vmmc1          = &igep_vmmc1,
        .vio            = &igep_vio,
@@ -578,6 +546,8 @@ static void __init igep_i2c_init(void)
 {
        int ret;
 
+       omap3_pmic_get_config(&igep_twldata, TWL_COMMON_PDATA_USB, 0);
+
        if (machine_is_igep0020()) {
                /*
                 * Bus 3 is attached to the DVI port where devices like the
@@ -588,9 +558,12 @@ static void __init igep_i2c_init(void)
                if (ret)
                        pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret);
 
-               igep_twldata.codec      = &igep2_codec_data;
                igep_twldata.keypad     = &igep2_keypad_pdata;
-               igep_twldata.vpll2      = &igep2_vpll2;
+               /* Get common pmic data */
+               omap3_pmic_get_config(&igep_twldata, TWL_COMMON_PDATA_AUDIO,
+                                     TWL_COMMON_REGULATOR_VPLL2);
+               igep_twldata.vpll2->constraints.apply_uV = true;
+               igep_twldata.vpll2->constraints.name = "VDVI";
        }
 
        omap3_pmic_init("twl4030", &igep_twldata);
@@ -703,9 +676,9 @@ MACHINE_START(IGEP0020, "IGEP v2 board")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = igep_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = igep_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
 
 MACHINE_START(IGEP0030, "IGEP OMAP3 module")
@@ -713,7 +686,7 @@ MACHINE_START(IGEP0030, "IGEP OMAP3 module")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = igep_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = igep_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
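
The consumer-supply churn repeated across these board files is mechanical: a single regulator_consumer_supply that used to be completed at runtime by assigning .dev from the MMC platform device becomes a one-entry array built with REGULATOR_SUPPLY(), matched by device name, which is why the "link regulators to MMC adapters" fixups in the twl_gpio_setup() callbacks can go away. A minimal sketch of the resulting shape, assuming only <linux/regulator/machine.h>; the board_vmmc1 names are placeholders, not identifiers from the patch:

#include <linux/kernel.h>               /* ARRAY_SIZE() */
#include <linux/regulator/machine.h>    /* REGULATOR_SUPPLY(), struct regulator_init_data */

/*
 * Before this series each board declared a single supply and patched in the
 * struct device at runtime (board_vmmc1_supply.dev = mmc[0].dev).  The
 * replacement binds by dev_name (platform device name plus id) instead:
 */
static struct regulator_consumer_supply board_vmmc1_supply[] = {
        REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};

static struct regulator_init_data board_vmmc1 = {
        .num_consumer_supplies  = ARRAY_SIZE(board_vmmc1_supply),
        .consumer_supplies      = board_vmmc1_supply,
};
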
index f7d6038..218764c 100644
@@ -199,22 +199,14 @@ static void __init omap_ldp_init_early(void)
        omap2_init_common_devices(NULL, NULL);
 }
 
-static struct twl4030_usb_data ldp_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
 static struct twl4030_gpio_platform_data ldp_gpio_data = {
        .gpio_base      = OMAP_MAX_GPIO_LINES,
        .irq_base       = TWL4030_GPIO_IRQ_BASE,
        .irq_end        = TWL4030_GPIO_IRQ_END,
 };
 
-static struct twl4030_madc_platform_data ldp_madc_data = {
-       .irq_line       = 1,
-};
-
-static struct regulator_consumer_supply ldp_vmmc1_supply = {
-       .supply                 = "vmmc",
+static struct regulator_consumer_supply ldp_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
 
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -228,8 +220,8 @@ static struct regulator_init_data ldp_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &ldp_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(ldp_vmmc1_supply),
+       .consumer_supplies      = ldp_vmmc1_supply,
 };
 
 /* ads7846 on SPI */
@@ -253,12 +245,7 @@ static struct regulator_init_data ldp_vaux1 = {
 };
 
 static struct twl4030_platform_data ldp_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
-       .madc           = &ldp_madc_data,
-       .usb            = &ldp_usb_data,
        .vmmc1          = &ldp_vmmc1,
        .vaux1          = &ldp_vaux1,
        .gpio           = &ldp_gpio_data,
@@ -267,6 +254,8 @@ static struct twl4030_platform_data ldp_twldata = {
 
 static int __init omap_i2c_init(void)
 {
+       omap3_pmic_get_config(&ldp_twldata,
+                         TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC, 0);
        omap3_pmic_init("twl4030", &ldp_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, NULL, 0);
@@ -341,8 +330,6 @@ static void __init omap_ldp_init(void)
                ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0);
 
        omap2_hsmmc_init(mmc);
-       /* link regulators to MMC adapters */
-       ldp_vmmc1_supply.dev = mmc[0].dev;
 }
 
 MACHINE_START(OMAP_LDP, "OMAP LDP board")
@@ -350,7 +337,7 @@ MACHINE_START(OMAP_LDP, "OMAP LDP board")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = omap_ldp_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = omap_ldp_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
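
With the TWL4030 USB and MADC child data now coming from the shared mach-omap2 twl-common code, a board names only the pieces it wants before registering the chip; the LDP hunk above requests USB and MADC and keeps its own regulators (third argument 0). A sketch of the calling convention, with the prototypes assumed from twl-common.h and board_twldata as a placeholder:

#include <linux/i2c/twl.h>      /* struct twl4030_platform_data */
#include "twl-common.h"         /* omap3_pmic_get_config(), omap3_pmic_init() */

static struct twl4030_platform_data board_twldata = {
        /* board-specific children (gpio, keypad, board regulators) stay here */
};

static int __init board_i2c_init(void)
{
        /* fill in the common usb/madc platform_data ... */
        omap3_pmic_get_config(&board_twldata,
                              TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC, 0);

        /* ... then register the PMIC on I2C bus 1 */
        omap3_pmic_init("twl4030", &board_twldata);
        return 0;
}
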
index 8d74318..e11f0c5 100644
@@ -699,9 +699,9 @@ MACHINE_START(NOKIA_N800, "Nokia N800")
        .reserve        = omap_reserve,
        .map_io         = n8x0_map_io,
        .init_early     = n8x0_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap2_init_irq,
        .init_machine   = n8x0_init_machine,
-       .timer          = &omap_timer,
+       .timer          = &omap2_timer,
 MACHINE_END
 
 MACHINE_START(NOKIA_N810, "Nokia N810")
@@ -709,9 +709,9 @@ MACHINE_START(NOKIA_N810, "Nokia N810")
        .reserve        = omap_reserve,
        .map_io         = n8x0_map_io,
        .init_early     = n8x0_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap2_init_irq,
        .init_machine   = n8x0_init_machine,
-       .timer          = &omap_timer,
+       .timer          = &omap2_timer,
 MACHINE_END
 
 MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
@@ -719,7 +719,7 @@ MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
        .reserve        = omap_reserve,
        .map_io         = n8x0_map_io,
        .init_early     = n8x0_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap2_init_irq,
        .init_machine   = n8x0_init_machine,
-       .timer          = &omap_timer,
+       .timer          = &omap2_timer,
 MACHINE_END
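
The init_irq/timer churn in every MACHINE_START record comes from splitting the old omap_init_irq()/omap_timer catch-alls into per-SoC hooks, so each board states its interrupt and clockevent setup explicitly. The entries referenced in these hunks amount to roughly the following declarations (a hedged sketch; the header they live in belongs to the common-code rework and is not part of this excerpt):

extern void omap2_init_irq(void);
extern void omap3_init_irq(void);

extern struct sys_timer omap2_timer;            /* OMAP2 boards such as the n8x0 family */
extern struct sys_timer omap3_timer;            /* generic OMAP3 boards */
extern struct sys_timer omap3_secure_timer;     /* boards that previously forced GPTIMER12 */
extern struct sys_timer omap4_timer;            /* OMAP4, still paired with gic_init_irq */

In particular, omap3_secure_timer stands in for the open-coded omap2_gp_clockevent_set_gptimer(12) calls removed from the Beagle, Stalker and Touchbook init_irq functions below.
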
index 7f21d24..34f8411 100644
@@ -50,7 +50,6 @@
 
 #include "mux.h"
 #include "hsmmc.h"
-#include "timer-gp.h"
 #include "pm.h"
 #include "common-board-devices.h"
 
@@ -210,14 +209,6 @@ static struct omap_dss_board_info beagle_dss_data = {
        .default_device = &beagle_dvi_device,
 };
 
-static struct regulator_consumer_supply beagle_vdac_supply =
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-static struct regulator_consumer_supply beagle_vdvi_supplies[] = {
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
 static void __init beagle_display_init(void)
 {
        int r;
@@ -239,12 +230,12 @@ static struct omap2_hsmmc_info mmc[] = {
        {}      /* Terminator */
 };
 
-static struct regulator_consumer_supply beagle_vmmc1_supply = {
-       .supply                 = "vmmc",
+static struct regulator_consumer_supply beagle_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
 
-static struct regulator_consumer_supply beagle_vsim_supply = {
-       .supply                 = "vmmc_aux",
+static struct regulator_consumer_supply beagle_vsim_supply[] = {
+       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
 };
 
 static struct gpio_led gpio_leds[];
@@ -267,10 +258,6 @@ static int beagle_twl_gpio_setup(struct device *dev,
        mmc[0].gpio_cd = gpio + 0;
        omap2_hsmmc_init(mmc);
 
-       /* link regulators to MMC adapters */
-       beagle_vmmc1_supply.dev = mmc[0].dev;
-       beagle_vsim_supply.dev = mmc[0].dev;
-
        /*
         * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active
         * high / others active low)
@@ -336,8 +323,8 @@ static struct regulator_init_data beagle_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &beagle_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(beagle_vmmc1_supply),
+       .consumer_supplies      = beagle_vmmc1_supply,
 };
 
 /* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -351,62 +338,15 @@ static struct regulator_init_data beagle_vsim = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &beagle_vsim_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data beagle_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &beagle_vdac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data beagle_vpll2 = {
-       .constraints = {
-               .name                   = "VDVI",
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = ARRAY_SIZE(beagle_vdvi_supplies),
-       .consumer_supplies      = beagle_vdvi_supplies,
-};
-
-static struct twl4030_usb_data beagle_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_codec_audio_data beagle_audio_data;
-
-static struct twl4030_codec_data beagle_codec_data = {
-       .audio_mclk = 26000000,
-       .audio = &beagle_audio_data,
+       .num_consumer_supplies  = ARRAY_SIZE(beagle_vsim_supply),
+       .consumer_supplies      = beagle_vsim_supply,
 };
 
 static struct twl4030_platform_data beagle_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
-       .usb            = &beagle_usb_data,
        .gpio           = &beagle_gpio_data,
-       .codec          = &beagle_codec_data,
        .vmmc1          = &beagle_vmmc1,
        .vsim           = &beagle_vsim,
-       .vdac           = &beagle_vdac,
-       .vpll2          = &beagle_vpll2,
 };
 
 static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
@@ -417,6 +357,12 @@ static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
 
 static int __init omap3_beagle_i2c_init(void)
 {
+       omap3_pmic_get_config(&beagle_twldata,
+                       TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+                       TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+       beagle_twldata.vpll2->constraints.name = "VDVI";
+
        omap3_pmic_init("twl4030", &beagle_twldata);
        /* Bus 3 is attached to the DVI port where devices like the pico DLP
         * projector don't work reliably with 400kHz */
@@ -486,10 +432,7 @@ static void __init omap3_beagle_init_early(void)
 
 static void __init omap3_beagle_init_irq(void)
 {
-       omap_init_irq();
-#ifdef CONFIG_OMAP_32K_TIMER
-       omap2_gp_clockevent_set_gptimer(12);
-#endif
+       omap3_init_irq();
 }
 
 static struct platform_device *omap3_beagle_devices[] __initdata = {
@@ -599,5 +542,5 @@ MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
        .init_early     = omap3_beagle_init_early,
        .init_irq       = omap3_beagle_init_irq,
        .init_machine   = omap3_beagle_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_secure_timer,
 MACHINE_END
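
For the display rails the helper can also hand back ready-made VDAC and VPLL2 init data, selected by the regulator flags in the third argument; the board then patches only the constraint fields that differ, as the Beagle hunk above does with the VPLL2 name. Sketch of that idiom, continuing the placeholder board_twldata from the earlier sketch:

static int __init board_display_pmic_init(void)
{
        omap3_pmic_get_config(&board_twldata,
                              TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
                              TWL_COMMON_REGULATOR_VDAC |
                              TWL_COMMON_REGULATOR_VPLL2);

        /* the helper pointed .vdac/.vpll2 at common data; adjust what differs */
        board_twldata.vpll2->constraints.name = "VDVI";
        board_twldata.vpll2->constraints.apply_uV = true;       /* only where the old data set it */

        omap3_pmic_init("twl4030", &board_twldata);
        return 0;
}
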
index b4d4346..c452b3f 100644
@@ -273,12 +273,12 @@ static struct omap_dss_board_info omap3_evm_dss_data = {
        .default_device = &omap3_evm_lcd_device,
 };
 
-static struct regulator_consumer_supply omap3evm_vmmc1_supply = {
-       .supply                 = "vmmc",
+static struct regulator_consumer_supply omap3evm_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
 
-static struct regulator_consumer_supply omap3evm_vsim_supply = {
-       .supply                 = "vmmc_aux",
+static struct regulator_consumer_supply omap3evm_vsim_supply[] = {
+       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
 };
 
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -292,8 +292,8 @@ static struct regulator_init_data omap3evm_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &omap3evm_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(omap3evm_vmmc1_supply),
+       .consumer_supplies      = omap3evm_vmmc1_supply,
 };
 
 /* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -307,8 +307,8 @@ static struct regulator_init_data omap3evm_vsim = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &omap3evm_vsim_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(omap3evm_vsim_supply),
+       .consumer_supplies      = omap3evm_vsim_supply,
 };
 
 static struct omap2_hsmmc_info mmc[] = {
@@ -365,10 +365,6 @@ static int omap3evm_twl_gpio_setup(struct device *dev,
        mmc[0].gpio_cd = gpio + 0;
        omap2_hsmmc_init(mmc);
 
-       /* link regulators to MMC adapters */
-       omap3evm_vmmc1_supply.dev = mmc[0].dev;
-       omap3evm_vsim_supply.dev = mmc[0].dev;
-
        /*
         * Most GPIOs are for USB OTG.  Some are mostly sent to
         * the P2 connector; notably LEDA for the LCD backlight.
@@ -400,10 +396,6 @@ static struct twl4030_gpio_platform_data omap3evm_gpio_data = {
        .setup          = omap3evm_twl_gpio_setup,
 };
 
-static struct twl4030_usb_data omap3evm_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
 static uint32_t board_keymap[] = {
        KEY(0, 0, KEY_LEFT),
        KEY(0, 1, KEY_DOWN),
@@ -438,58 +430,10 @@ static struct twl4030_keypad_data omap3evm_kp_data = {
        .rep            = 1,
 };
 
-static struct twl4030_madc_platform_data omap3evm_madc_data = {
-       .irq_line       = 1,
-};
-
-static struct twl4030_codec_audio_data omap3evm_audio_data;
-
-static struct twl4030_codec_data omap3evm_codec_data = {
-       .audio_mclk = 26000000,
-       .audio = &omap3evm_audio_data,
-};
-
-static struct regulator_consumer_supply omap3_evm_vdda_dac_supply =
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-/* VDAC for DSS driving S-Video */
-static struct regulator_init_data omap3_evm_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &omap3_evm_vdda_dac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_consumer_supply omap3_evm_vpll2_supplies[] = {
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
-static struct regulator_init_data omap3_evm_vpll2 = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = ARRAY_SIZE(omap3_evm_vpll2_supplies),
-       .consumer_supplies      = omap3_evm_vpll2_supplies,
-};
-
 /* ads7846 on SPI */
-static struct regulator_consumer_supply omap3evm_vio_supply =
-       REGULATOR_SUPPLY("vcc", "spi1.0");
+static struct regulator_consumer_supply omap3evm_vio_supply[] = {
+       REGULATOR_SUPPLY("vcc", "spi1.0"),
+};
 
 /* VIO for ads7846 */
 static struct regulator_init_data omap3evm_vio = {
@@ -502,8 +446,8 @@ static struct regulator_init_data omap3evm_vio = {
                .valid_ops_mask         = REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &omap3evm_vio_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(omap3evm_vio_supply),
+       .consumer_supplies      = omap3evm_vio_supply,
 };
 
 #ifdef CONFIG_WL12XX_PLATFORM_DATA
@@ -511,16 +455,17 @@ static struct regulator_init_data omap3evm_vio = {
 #define OMAP3EVM_WLAN_PMENA_GPIO       (150)
 #define OMAP3EVM_WLAN_IRQ_GPIO         (149)
 
-static struct regulator_consumer_supply omap3evm_vmmc2_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
+static struct regulator_consumer_supply omap3evm_vmmc2_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
+};
 
 /* VMMC2 for driving the WL12xx module */
 static struct regulator_init_data omap3evm_vmmc2 = {
        .constraints = {
                .valid_ops_mask = REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies = &omap3evm_vmmc2_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(omap3evm_vmmc2_supply),
+       .consumer_supplies      = omap3evm_vmmc2_supply,
 };
 
 static struct fixed_voltage_config omap3evm_vwlan = {
@@ -548,17 +493,9 @@ struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
 #endif
 
 static struct twl4030_platform_data omap3evm_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
        .keypad         = &omap3evm_kp_data,
-       .madc           = &omap3evm_madc_data,
-       .usb            = &omap3evm_usb_data,
        .gpio           = &omap3evm_gpio_data,
-       .codec          = &omap3evm_codec_data,
-       .vdac           = &omap3_evm_vdac,
-       .vpll2          = &omap3_evm_vpll2,
        .vio            = &omap3evm_vio,
        .vmmc1          = &omap3evm_vmmc1,
        .vsim           = &omap3evm_vsim,
@@ -566,6 +503,14 @@ static struct twl4030_platform_data omap3evm_twldata = {
 
 static int __init omap3_evm_i2c_init(void)
 {
+       omap3_pmic_get_config(&omap3evm_twldata,
+                       TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC |
+                       TWL_COMMON_PDATA_AUDIO,
+                       TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+       omap3evm_twldata.vdac->constraints.apply_uV = true;
+       omap3evm_twldata.vpll2->constraints.apply_uV = true;
+
        omap3_pmic_init("twl4030", &omap3evm_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, NULL, 0);
@@ -740,7 +685,7 @@ MACHINE_START(OMAP3EVM, "OMAP3 EVM")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = omap3_evm_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = omap3_evm_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
index 60d9be4..703aeb5 100644
@@ -35,7 +35,6 @@
 
 #include "mux.h"
 #include "hsmmc.h"
-#include "timer-gp.h"
 #include "control.h"
 #include "common-board-devices.h"
 
@@ -55,8 +54,8 @@
 #define OMAP3_TORPEDO_MMC_GPIO_CD              127
 #define OMAP3_TORPEDO_SMSC911X_GPIO_IRQ                129
 
-static struct regulator_consumer_supply omap3logic_vmmc1_supply = {
-       .supply                 = "vmmc",
+static struct regulator_consumer_supply omap3logic_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
 
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -71,8 +70,8 @@ static struct regulator_init_data omap3logic_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &omap3logic_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(omap3logic_vmmc1_supply),
+       .consumer_supplies      = omap3logic_vmmc1_supply,
 };
 
 static struct twl4030_gpio_platform_data omap3logic_gpio_data = {
@@ -130,8 +129,6 @@ static void __init board_mmc_init(void)
        }
 
        omap2_hsmmc_init(board_mmc_info);
-       /* link regulators to MMC adapters */
-       omap3logic_vmmc1_supply.dev = board_mmc_info[0].dev;
 }
 
 static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = {
@@ -215,16 +212,16 @@ MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
        .boot_params    = 0x80000100,
        .map_io         = omap3_map_io,
        .init_early     = omap3logic_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = omap3logic_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
 
 MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
        .boot_params    = 0x80000100,
        .map_io         = omap3_map_io,
        .init_early     = omap3logic_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = omap3logic_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
index 23f71d4..080d7bd 100644
@@ -320,17 +320,17 @@ static struct twl4030_gpio_platform_data omap3pandora_gpio_data = {
        .setup          = omap3pandora_twl_gpio_setup,
 };
 
-static struct regulator_consumer_supply pandora_vmmc1_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
-
-static struct regulator_consumer_supply pandora_vmmc2_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
+static struct regulator_consumer_supply pandora_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
 
-static struct regulator_consumer_supply pandora_vmmc3_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2");
+static struct regulator_consumer_supply pandora_vmmc2_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1")
+};
 
-static struct regulator_consumer_supply pandora_vdda_dac_supply =
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
+static struct regulator_consumer_supply pandora_vmmc3_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
+};
 
 static struct regulator_consumer_supply pandora_vdds_supplies[] = {
        REGULATOR_SUPPLY("vdds_sdi", "omapdss"),
@@ -338,11 +338,13 @@ static struct regulator_consumer_supply pandora_vdds_supplies[] = {
        REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
 };
 
-static struct regulator_consumer_supply pandora_vcc_lcd_supply =
-       REGULATOR_SUPPLY("vcc", "display0");
+static struct regulator_consumer_supply pandora_vcc_lcd_supply[] = {
+       REGULATOR_SUPPLY("vcc", "display0"),
+};
 
-static struct regulator_consumer_supply pandora_usb_phy_supply =
-       REGULATOR_SUPPLY("hsusb0", "ehci-omap.0");
+static struct regulator_consumer_supply pandora_usb_phy_supply[] = {
+       REGULATOR_SUPPLY("hsusb0", "ehci-omap.0"),
+};
 
 /* ads7846 on SPI and 2 nub controllers on I2C */
 static struct regulator_consumer_supply pandora_vaux4_supplies[] = {
@@ -351,8 +353,9 @@ static struct regulator_consumer_supply pandora_vaux4_supplies[] = {
        REGULATOR_SUPPLY("vcc", "3-0067"),
 };
 
-static struct regulator_consumer_supply pandora_adac_supply =
-       REGULATOR_SUPPLY("vcc", "soc-audio");
+static struct regulator_consumer_supply pandora_adac_supply[] = {
+       REGULATOR_SUPPLY("vcc", "soc-audio"),
+};
 
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
 static struct regulator_init_data pandora_vmmc1 = {
@@ -365,8 +368,8 @@ static struct regulator_init_data pandora_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &pandora_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(pandora_vmmc1_supply),
+       .consumer_supplies      = pandora_vmmc1_supply,
 };
 
 /* VMMC2 for MMC2 pins CMD, CLK, DAT0..DAT3 (max 100 mA) */
@@ -380,38 +383,8 @@ static struct regulator_init_data pandora_vmmc2 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &pandora_vmmc2_supply,
-};
-
-/* VDAC for DSS driving S-Video */
-static struct regulator_init_data pandora_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &pandora_vdda_dac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data pandora_vpll2 = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = ARRAY_SIZE(pandora_vdds_supplies),
-       .consumer_supplies      = pandora_vdds_supplies,
+       .num_consumer_supplies  = ARRAY_SIZE(pandora_vmmc2_supply),
+       .consumer_supplies      = pandora_vmmc2_supply,
 };
 
 /* VAUX1 for LCD */
@@ -425,8 +398,8 @@ static struct regulator_init_data pandora_vaux1 = {
                .valid_ops_mask         = REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &pandora_vcc_lcd_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(pandora_vcc_lcd_supply),
+       .consumer_supplies      = pandora_vcc_lcd_supply,
 };
 
 /* VAUX2 for USB host PHY */
@@ -440,8 +413,8 @@ static struct regulator_init_data pandora_vaux2 = {
                .valid_ops_mask         = REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &pandora_usb_phy_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(pandora_usb_phy_supply),
+       .consumer_supplies      = pandora_usb_phy_supply,
 };
 
 /* VAUX4 for ads7846 and nubs */
@@ -470,8 +443,8 @@ static struct regulator_init_data pandora_vsim = {
                .valid_ops_mask         = REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &pandora_adac_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(pandora_adac_supply),
+       .consumer_supplies      = pandora_adac_supply,
 };
 
 /* Fixed regulator internal to Wifi module */
@@ -479,8 +452,8 @@ static struct regulator_init_data pandora_vmmc3 = {
        .constraints = {
                .valid_ops_mask         = REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &pandora_vmmc3_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(pandora_vmmc3_supply),
+       .consumer_supplies      = pandora_vmmc3_supply,
 };
 
 static struct fixed_voltage_config pandora_vwlan = {
@@ -501,29 +474,12 @@ static struct platform_device pandora_vwlan_device = {
        },
 };
 
-static struct twl4030_usb_data omap3pandora_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_codec_audio_data omap3pandora_audio_data;
-
-static struct twl4030_codec_data omap3pandora_codec_data = {
-       .audio_mclk = 26000000,
-       .audio = &omap3pandora_audio_data,
-};
-
 static struct twl4030_bci_platform_data pandora_bci_data;
 
 static struct twl4030_platform_data omap3pandora_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
        .gpio           = &omap3pandora_gpio_data,
-       .usb            = &omap3pandora_usb_data,
-       .codec          = &omap3pandora_codec_data,
        .vmmc1          = &pandora_vmmc1,
        .vmmc2          = &pandora_vmmc2,
-       .vdac           = &pandora_vdac,
-       .vpll2          = &pandora_vpll2,
        .vaux1          = &pandora_vaux1,
        .vaux2          = &pandora_vaux2,
        .vaux4          = &pandora_vaux4,
@@ -541,6 +497,17 @@ static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = {
 
 static int __init omap3pandora_i2c_init(void)
 {
+       omap3_pmic_get_config(&omap3pandora_twldata,
+                       TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+                       TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+       omap3pandora_twldata.vdac->constraints.apply_uV = true;
+
+       omap3pandora_twldata.vpll2->constraints.apply_uV = true;
+       omap3pandora_twldata.vpll2->num_consumer_supplies =
+                                       ARRAY_SIZE(pandora_vdds_supplies);
+       omap3pandora_twldata.vpll2->consumer_supplies = pandora_vdds_supplies;
+
        omap3_pmic_init("tps65950", &omap3pandora_twldata);
        /* i2c2 pins are not connected */
        omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo,
@@ -643,7 +610,7 @@ MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = omap3pandora_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = omap3pandora_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
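
Where the common regulator data is close but not identical, a board keeps the returned init data and only swaps the consumer list: Pandora points the common VPLL2 at its own pandora_vdds_supplies above, and the Touchbook hunk below does the same for both VDAC and VPLL2. A generic sketch of that override (board_* names are placeholders and the supply strings are illustrative):

static struct regulator_consumer_supply board_vpll2_supplies[] = {
        REGULATOR_SUPPLY("vdds_sdi", "omapdss"),
        REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
};

static void __init board_override_vpll2(void)
{
        board_twldata.vpll2->num_consumer_supplies =
                                        ARRAY_SIZE(board_vpll2_supplies);
        board_twldata.vpll2->consumer_supplies = board_vpll2_supplies;
}
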
index 0c108a2..8e10498 100644
@@ -52,7 +52,6 @@
 #include "sdram-micron-mt46h32m32lf-6.h"
 #include "mux.h"
 #include "hsmmc.h"
-#include "timer-gp.h"
 #include "common-board-devices.h"
 
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
@@ -206,12 +205,12 @@ static struct omap_dss_board_info omap3_stalker_dss_data = {
        .default_device = &omap3_stalker_dvi_device,
 };
 
-static struct regulator_consumer_supply omap3stalker_vmmc1_supply = {
-       .supply         = "vmmc",
+static struct regulator_consumer_supply omap3stalker_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
 
-static struct regulator_consumer_supply omap3stalker_vsim_supply = {
-       .supply         = "vmmc_aux",
+static struct regulator_consumer_supply omap3stalker_vsim_supply[] = {
+       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
 };
 
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -224,8 +223,8 @@ static struct regulator_init_data omap3stalker_vmmc1 = {
                .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE
                | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &omap3stalker_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(omap3stalker_vmmc1_supply),
+       .consumer_supplies      = omap3stalker_vmmc1_supply,
 };
 
 /* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -238,8 +237,8 @@ static struct regulator_init_data omap3stalker_vsim = {
                .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE
                | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &omap3stalker_vsim_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(omap3stalker_vsim_supply),
+       .consumer_supplies      = omap3stalker_vsim_supply,
 };
 
 static struct omap2_hsmmc_info mmc[] = {
@@ -321,10 +320,6 @@ omap3stalker_twl_gpio_setup(struct device *dev,
        mmc[0].gpio_cd = gpio + 0;
        omap2_hsmmc_init(mmc);
 
-       /* link regulators to MMC adapters */
-       omap3stalker_vmmc1_supply.dev = mmc[0].dev;
-       omap3stalker_vsim_supply.dev = mmc[0].dev;
-
        /*
         * Most GPIOs are for USB OTG.  Some are mostly sent to
         * the P2 connector; notably LEDA for the LCD backlight.
@@ -354,10 +349,6 @@ static struct twl4030_gpio_platform_data omap3stalker_gpio_data = {
        .setup          = omap3stalker_twl_gpio_setup,
 };
 
-static struct twl4030_usb_data omap3stalker_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
 static uint32_t board_keymap[] = {
        KEY(0, 0, KEY_LEFT),
        KEY(0, 1, KEY_DOWN),
@@ -392,68 +383,10 @@ static struct twl4030_keypad_data omap3stalker_kp_data = {
        .rep            = 1,
 };
 
-static struct twl4030_madc_platform_data omap3stalker_madc_data = {
-       .irq_line       = 1,
-};
-
-static struct twl4030_codec_audio_data omap3stalker_audio_data;
-
-static struct twl4030_codec_data omap3stalker_codec_data = {
-       .audio_mclk     = 26000000,
-       .audio          = &omap3stalker_audio_data,
-};
-
-static struct regulator_consumer_supply omap3_stalker_vdda_dac_supply =
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-/* VDAC for DSS driving S-Video */
-static struct regulator_init_data omap3_stalker_vdac = {
-       .constraints            = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-               | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-               | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &omap3_stalker_vdda_dac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_consumer_supply omap3_stalker_vpll2_supplies[] = {
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
-static struct regulator_init_data omap3_stalker_vpll2 = {
-       .constraints            = {
-               .name                   = "VDVI",
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-               | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-               | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = ARRAY_SIZE(omap3_stalker_vpll2_supplies),
-       .consumer_supplies      = omap3_stalker_vpll2_supplies,
-};
-
 static struct twl4030_platform_data omap3stalker_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
        .keypad         = &omap3stalker_kp_data,
-       .madc           = &omap3stalker_madc_data,
-       .usb            = &omap3stalker_usb_data,
        .gpio           = &omap3stalker_gpio_data,
-       .codec          = &omap3stalker_codec_data,
-       .vdac           = &omap3_stalker_vdac,
-       .vpll2          = &omap3_stalker_vpll2,
        .vmmc1          = &omap3stalker_vmmc1,
        .vsim           = &omap3stalker_vsim,
 };
@@ -474,6 +407,15 @@ static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = {
 
 static int __init omap3_stalker_i2c_init(void)
 {
+       omap3_pmic_get_config(&omap3stalker_twldata,
+                       TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC |
+                       TWL_COMMON_PDATA_AUDIO,
+                       TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+       omap3stalker_twldata.vdac->constraints.apply_uV = true;
+       omap3stalker_twldata.vpll2->constraints.apply_uV = true;
+       omap3stalker_twldata.vpll2->constraints.name = "VDVI";
+
        omap3_pmic_init("twl4030", &omap3stalker_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3,
@@ -494,10 +436,7 @@ static void __init omap3_stalker_init_early(void)
 
 static void __init omap3_stalker_init_irq(void)
 {
-       omap_init_irq();
-#ifdef CONFIG_OMAP_32K_TIMER
-       omap2_gp_clockevent_set_gptimer(12);
-#endif
+       omap3_init_irq();
 }
 
 static struct platform_device *omap3_stalker_devices[] __initdata = {
@@ -560,5 +499,5 @@ MACHINE_START(SBC3530, "OMAP3 STALKER")
        .init_early             = omap3_stalker_init_early,
        .init_irq               = omap3_stalker_init_irq,
        .init_machine           = omap3_stalker_init,
-       .timer                  = &omap_timer,
+       .timer                  = &omap3_secure_timer,
 MACHINE_END
index 5f649fa..852ea04 100644
@@ -51,7 +51,6 @@
 
 #include "mux.h"
 #include "hsmmc.h"
-#include "timer-gp.h"
 #include "common-board-devices.h"
 
 #include <asm/setup.h>
@@ -114,12 +113,12 @@ static struct omap_lcd_config omap3_touchbook_lcd_config __initdata = {
        .ctrl_name      = "internal",
 };
 
-static struct regulator_consumer_supply touchbook_vmmc1_supply = {
-       .supply                 = "vmmc",
+static struct regulator_consumer_supply touchbook_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
 
-static struct regulator_consumer_supply touchbook_vsim_supply = {
-       .supply                 = "vmmc_aux",
+static struct regulator_consumer_supply touchbook_vsim_supply[] = {
+       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
 };
 
 static struct gpio_led gpio_leds[];
@@ -137,10 +136,6 @@ static int touchbook_twl_gpio_setup(struct device *dev,
        mmc[0].gpio_cd = gpio + 0;
        omap2_hsmmc_init(mmc);
 
-       /* link regulators to MMC adapters */
-       touchbook_vmmc1_supply.dev = mmc[0].dev;
-       touchbook_vsim_supply.dev = mmc[0].dev;
-
        /* REVISIT: need ehci-omap hooks for external VBUS
         * power switch and overcurrent detect
         */
@@ -167,14 +162,18 @@ static struct twl4030_gpio_platform_data touchbook_gpio_data = {
        .setup          = touchbook_twl_gpio_setup,
 };
 
-static struct regulator_consumer_supply touchbook_vdac_supply = {
+static struct regulator_consumer_supply touchbook_vdac_supply[] = {
+{
        .supply         = "vdac",
        .dev            = &omap3_touchbook_lcd_device.dev,
+},
 };
 
-static struct regulator_consumer_supply touchbook_vdvi_supply = {
+static struct regulator_consumer_supply touchbook_vdvi_supply[] = {
+{
        .supply         = "vdvi",
        .dev            = &omap3_touchbook_lcd_device.dev,
+},
 };
 
 /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -188,8 +187,8 @@ static struct regulator_init_data touchbook_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &touchbook_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(touchbook_vmmc1_supply),
+       .consumer_supplies      = touchbook_vmmc1_supply,
 };
 
 /* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -203,62 +202,15 @@ static struct regulator_init_data touchbook_vsim = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &touchbook_vsim_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data touchbook_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &touchbook_vdac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data touchbook_vpll2 = {
-       .constraints = {
-               .name                   = "VDVI",
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &touchbook_vdvi_supply,
-};
-
-static struct twl4030_usb_data touchbook_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_codec_audio_data touchbook_audio_data;
-
-static struct twl4030_codec_data touchbook_codec_data = {
-       .audio_mclk = 26000000,
-       .audio = &touchbook_audio_data,
+       .num_consumer_supplies  = ARRAY_SIZE(touchbook_vsim_supply),
+       .consumer_supplies      = touchbook_vsim_supply,
 };
 
 static struct twl4030_platform_data touchbook_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
-       .usb            = &touchbook_usb_data,
        .gpio           = &touchbook_gpio_data,
-       .codec          = &touchbook_codec_data,
        .vmmc1          = &touchbook_vmmc1,
        .vsim           = &touchbook_vsim,
-       .vdac           = &touchbook_vdac,
-       .vpll2          = &touchbook_vpll2,
 };
 
 static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
@@ -270,8 +222,20 @@ static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
 static int __init omap3_touchbook_i2c_init(void)
 {
        /* Standard TouchBook bus */
-       omap3_pmic_init("twl4030", &touchbook_twldata);
+       omap3_pmic_get_config(&touchbook_twldata,
+                       TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+                       TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+       touchbook_twldata.vdac->num_consumer_supplies =
+                                       ARRAY_SIZE(touchbook_vdac_supply);
+       touchbook_twldata.vdac->consumer_supplies = touchbook_vdac_supply;
 
+       touchbook_twldata.vpll2->constraints.name = "VDVI";
+       touchbook_twldata.vpll2->num_consumer_supplies =
+                                       ARRAY_SIZE(touchbook_vdvi_supply);
+       touchbook_twldata.vpll2->consumer_supplies = touchbook_vdvi_supply;
+
+       omap3_pmic_init("twl4030", &touchbook_twldata);
        /* Additional TouchBook bus */
        omap_register_i2c_bus(3, 100, touchBook_i2c_boardinfo,
                        ARRAY_SIZE(touchBook_i2c_boardinfo));
@@ -371,10 +335,7 @@ static void __init omap3_touchbook_init_early(void)
 
 static void __init omap3_touchbook_init_irq(void)
 {
-       omap_init_irq();
-#ifdef CONFIG_OMAP_32K_TIMER
-       omap2_gp_clockevent_set_gptimer(12);
-#endif
+       omap3_init_irq();
 }
 
 static struct platform_device *omap3_touchbook_devices[] __initdata = {
@@ -449,5 +410,5 @@ MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
        .init_early     = omap3_touchbook_init_early,
        .init_irq       = omap3_touchbook_init_irq,
        .init_machine   = omap3_touchbook_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_secure_timer,
 MACHINE_END
index 0cfe200..9aaa960 100644
@@ -41,7 +41,6 @@
 #include <plat/usb.h>
 #include <plat/mmc.h>
 #include <video/omap-panel-generic-dpi.h>
-#include "timer-gp.h"
 
 #include "hsmmc.h"
 #include "control.h"
@@ -155,14 +154,6 @@ static struct omap_musb_board_data musb_board_data = {
        .power                  = 100,
 };
 
-static struct twl4030_usb_data omap4_usbphy_data = {
-       .phy_init       = omap4430_phy_init,
-       .phy_exit       = omap4430_phy_exit,
-       .phy_power      = omap4430_phy_power,
-       .phy_set_clock  = omap4430_phy_set_clk,
-       .phy_suspend    = omap4430_phy_suspend,
-};
-
 static struct omap2_hsmmc_info mmc[] = {
        {
                .mmc            = 1,
@@ -182,24 +173,16 @@ static struct omap2_hsmmc_info mmc[] = {
        {}      /* Terminator */
 };
 
-static struct regulator_consumer_supply omap4_panda_vmmc_supply[] = {
-       {
-               .supply = "vmmc",
-               .dev_name = "omap_hsmmc.0",
-       },
-};
-
-static struct regulator_consumer_supply omap4_panda_vmmc5_supply = {
-       .supply = "vmmc",
-       .dev_name = "omap_hsmmc.4",
+static struct regulator_consumer_supply omap4_panda_vmmc5_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.4"),
 };
 
 static struct regulator_init_data panda_vmmc5 = {
        .constraints = {
                .valid_ops_mask = REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies = 1,
-       .consumer_supplies = &omap4_panda_vmmc5_supply,
+       .num_consumer_supplies = ARRAY_SIZE(omap4_panda_vmmc5_supply),
+       .consumer_supplies = omap4_panda_vmmc5_supply,
 };
 
 static struct fixed_voltage_config panda_vwlan = {
@@ -274,128 +257,8 @@ static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
        return 0;
 }
 
-static struct regulator_init_data omap4_panda_vaux2 = {
-       .constraints = {
-               .min_uV                 = 1200000,
-               .max_uV                 = 2800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data omap4_panda_vaux3 = {
-       .constraints = {
-               .min_uV                 = 1000000,
-               .max_uV                 = 3000000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-/* VMMC1 for MMC1 card */
-static struct regulator_init_data omap4_panda_vmmc = {
-       .constraints = {
-               .min_uV                 = 1200000,
-               .max_uV                 = 3000000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = omap4_panda_vmmc_supply,
-};
-
-static struct regulator_init_data omap4_panda_vpp = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 2500000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data omap4_panda_vana = {
-       .constraints = {
-               .min_uV                 = 2100000,
-               .max_uV                 = 2100000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data omap4_panda_vcxio = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data omap4_panda_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data omap4_panda_vusb = {
-       .constraints = {
-               .min_uV                 = 3300000,
-               .max_uV                 = 3300000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask  =      REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct regulator_init_data omap4_panda_clk32kg = {
-       .constraints = {
-               .valid_ops_mask         = REGULATOR_CHANGE_STATUS,
-       },
-};
-
-static struct twl4030_platform_data omap4_panda_twldata = {
-       .irq_base       = TWL6030_IRQ_BASE,
-       .irq_end        = TWL6030_IRQ_END,
-
-       /* Regulators */
-       .vmmc           = &omap4_panda_vmmc,
-       .vpp            = &omap4_panda_vpp,
-       .vana           = &omap4_panda_vana,
-       .vcxio          = &omap4_panda_vcxio,
-       .vdac           = &omap4_panda_vdac,
-       .vusb           = &omap4_panda_vusb,
-       .vaux2          = &omap4_panda_vaux2,
-       .vaux3          = &omap4_panda_vaux3,
-       .clk32kg        = &omap4_panda_clk32kg,
-       .usb            = &omap4_usbphy_data,
-};
+/* Panda board uses the common PMIC configuration */
+static struct twl4030_platform_data omap4_panda_twldata;
 
 /*
  * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM
@@ -409,6 +272,16 @@ static struct i2c_board_info __initdata panda_i2c_eeprom[] = {
 
 static int __init omap4_panda_i2c_init(void)
 {
+       omap4_pmic_get_config(&omap4_panda_twldata, TWL_COMMON_PDATA_USB,
+                       TWL_COMMON_REGULATOR_VDAC |
+                       TWL_COMMON_REGULATOR_VAUX2 |
+                       TWL_COMMON_REGULATOR_VAUX3 |
+                       TWL_COMMON_REGULATOR_VMMC |
+                       TWL_COMMON_REGULATOR_VPP |
+                       TWL_COMMON_REGULATOR_VANA |
+                       TWL_COMMON_REGULATOR_VCXIO |
+                       TWL_COMMON_REGULATOR_VUSB |
+                       TWL_COMMON_REGULATOR_CLK32KG);
        omap4_pmic_init("twl6030", &omap4_panda_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        /*
@@ -716,5 +589,5 @@ MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
        .init_early     = omap4_panda_init_early,
        .init_irq       = gic_init_irq,
        .init_machine   = omap4_panda_init,
-       .timer          = &omap_timer,
+       .timer          = &omap4_timer,
 MACHINE_END
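
On OMAP4 the entire TWL6030 regulator table was boilerplate, so the Panda file shrinks to an empty twl4030_platform_data that the helper fills in; the machine record keeps gic_init_irq but moves from omap_timer to omap4_timer. Calling sketch with the omap4_ variants, flag list trimmed from the hunk above (prototypes assumed from twl-common.h; board_twldata is a placeholder):

static struct twl4030_platform_data board_twldata;      /* populated entirely by the helper */

static int __init board_i2c_init(void)
{
        omap4_pmic_get_config(&board_twldata, TWL_COMMON_PDATA_USB,
                              TWL_COMMON_REGULATOR_VDAC |
                              TWL_COMMON_REGULATOR_VMMC |
                              TWL_COMMON_REGULATOR_VUSB |
                              TWL_COMMON_REGULATOR_CLK32KG);

        /* registers the twl6030 on I2C bus 1 with the OMAP4 PMIC interrupt */
        omap4_pmic_init("twl6030", &board_twldata);
        return 0;
}
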
index 175e1ab..f1f18d0 100644
        defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
 
 /* fixed regulator for ads7846 */
-static struct regulator_consumer_supply ads7846_supply =
-       REGULATOR_SUPPLY("vcc", "spi1.0");
+static struct regulator_consumer_supply ads7846_supply[] = {
+       REGULATOR_SUPPLY("vcc", "spi1.0"),
+};
 
 static struct regulator_init_data vads7846_regulator = {
        .constraints = {
                .valid_ops_mask         = REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &ads7846_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(ads7846_supply),
+       .consumer_supplies      = ads7846_supply,
 };
 
 static struct fixed_voltage_config vads7846 = {
@@ -264,14 +265,6 @@ static struct omap_dss_board_info overo_dss_data = {
        .default_device = &overo_dvi_device,
 };
 
-static struct regulator_consumer_supply overo_vdda_dac_supply =
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-static struct regulator_consumer_supply overo_vdds_dsi_supply[] = {
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
 static struct mtd_partition overo_nand_partitions[] = {
        {
                .name           = "xloader",
@@ -319,8 +312,8 @@ static struct omap2_hsmmc_info mmc[] = {
        {}      /* Terminator */
 };
 
-static struct regulator_consumer_supply overo_vmmc1_supply = {
-       .supply                 = "vmmc",
+static struct regulator_consumer_supply overo_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
 
 #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
@@ -415,8 +408,6 @@ static int overo_twl_gpio_setup(struct device *dev,
 {
        omap2_hsmmc_init(mmc);
 
-       overo_vmmc1_supply.dev = mmc[0].dev;
-
 #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
        /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
        gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
@@ -433,10 +424,6 @@ static struct twl4030_gpio_platform_data overo_gpio_data = {
        .setup          = overo_twl_gpio_setup,
 };
 
-static struct twl4030_usb_data overo_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
 static struct regulator_init_data overo_vmmc1 = {
        .constraints = {
                .min_uV                 = 1850000,
@@ -447,59 +434,23 @@ static struct regulator_init_data overo_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &overo_vmmc1_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data overo_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &overo_vdda_dac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data overo_vpll2 = {
-       .constraints = {
-               .name                   = "VDVI",
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = ARRAY_SIZE(overo_vdds_dsi_supply),
-       .consumer_supplies      = overo_vdds_dsi_supply,
-};
-
-static struct twl4030_codec_audio_data overo_audio_data;
-
-static struct twl4030_codec_data overo_codec_data = {
-       .audio_mclk = 26000000,
-       .audio = &overo_audio_data,
+       .num_consumer_supplies  = ARRAY_SIZE(overo_vmmc1_supply),
+       .consumer_supplies      = overo_vmmc1_supply,
 };
 
 static struct twl4030_platform_data overo_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
        .gpio           = &overo_gpio_data,
-       .usb            = &overo_usb_data,
-       .codec          = &overo_codec_data,
        .vmmc1          = &overo_vmmc1,
-       .vdac           = &overo_vdac,
-       .vpll2          = &overo_vpll2,
 };
 
 static int __init overo_i2c_init(void)
 {
+       omap3_pmic_get_config(&overo_twldata,
+                       TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+                       TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+       overo_twldata.vpll2->constraints.name = "VDVI";
+
        omap3_pmic_init("tps65950", &overo_twldata);
        /* i2c2 pins are used for gpio */
        omap_register_i2c_bus(3, 400, NULL, 0);
@@ -615,7 +566,7 @@ MACHINE_START(OVERO, "Gumstix Overo")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = overo_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = overo_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
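
The board hunks above all apply the same conversion: each consumer supply gains an explicit device name via REGULATOR_SUPPLY(), so the regulator core can match the MMC host at registration time and the runtime "supply.dev = mmc[0].dev" wiring in the TWL GPIO setup hook can be dropped. A minimal sketch of the resulting shape, assuming the usual "vmmc"/"omap_hsmmc.0" pairing seen in the hunks (illustrative only, not part of the patch):

/*
 * Sketch of the static supply/init-data pattern used above.
 * REGULATOR_SUPPLY(name, dev_name) binds the supply to the named
 * consumer device, so no .dev pointer has to be patched at runtime.
 */
#include <linux/kernel.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_vmmc1_supply[] = {
	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),	/* supply name, consumer dev_name */
};

static struct regulator_init_data example_vmmc1 = {
	.constraints = {
		.min_uV			= 1850000,
		.max_uV			= 3150000,
		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
					| REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(example_vmmc1_supply),
	.consumer_supplies	= example_vmmc1_supply,
};
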
index 42d10b1..7dfed24 100644 (file)
@@ -79,20 +79,14 @@ static struct twl4030_gpio_platform_data rm680_gpio_data = {
        .pulldowns              = BIT(1) | BIT(2) | BIT(8) | BIT(15),
 };
 
-static struct twl4030_usb_data rm680_usb_data = {
-       .usb_mode               = T2_USB_MODE_ULPI,
-};
-
 static struct twl4030_platform_data rm680_twl_data = {
-       .irq_base               = TWL4030_IRQ_BASE,
-       .irq_end                = TWL4030_IRQ_END,
        .gpio                   = &rm680_gpio_data,
-       .usb                    = &rm680_usb_data,
        /* add rest of the children here */
 };
 
 static void __init rm680_i2c_init(void)
 {
+       omap3_pmic_get_config(&rm680_twl_data, TWL_COMMON_PDATA_USB, 0);
        omap_pmic_init(1, 2900, "twl5031", INT_34XX_SYS_NIRQ, &rm680_twl_data);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, NULL, 0);
@@ -163,7 +157,7 @@ MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
        .reserve        = omap_reserve,
        .map_io         = rm680_map_io,
        .init_early     = rm680_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = rm680_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
index 9903667..6140290 100644 (file)
@@ -288,10 +288,6 @@ static struct twl4030_keypad_data rx51_kp_data = {
        .rep            = 1,
 };
 
-static struct twl4030_madc_platform_data rx51_madc_data = {
-       .irq_line               = 1,
-};
-
 /* Enable input logic and pull all lines up when eMMC is on. */
 static struct omap_board_mux rx51_mmc2_on_mux[] = {
        OMAP3_MUX(SDMMC2_CMD, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
@@ -358,14 +354,17 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
        {}      /* Terminator */
 };
 
-static struct regulator_consumer_supply rx51_vmmc1_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
+static struct regulator_consumer_supply rx51_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
 
-static struct regulator_consumer_supply rx51_vaux3_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
+static struct regulator_consumer_supply rx51_vaux3_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
+};
 
-static struct regulator_consumer_supply rx51_vsim_supply =
-       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
+static struct regulator_consumer_supply rx51_vsim_supply[] = {
+       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"),
+};
 
 static struct regulator_consumer_supply rx51_vmmc2_supplies[] = {
        /* tlv320aic3x analog supplies */
@@ -395,10 +394,6 @@ static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
        REGULATOR_SUPPLY("vdd", "2-0063"),
 };
 
-static struct regulator_consumer_supply rx51_vdac_supply[] = {
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"),
-};
-
 static struct regulator_init_data rx51_vaux1 = {
        .constraints = {
                .name                   = "V28",
@@ -452,8 +447,8 @@ static struct regulator_init_data rx51_vaux3_mmc = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &rx51_vaux3_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(rx51_vaux3_supply),
+       .consumer_supplies      = rx51_vaux3_supply,
 };
 
 static struct regulator_init_data rx51_vaux4 = {
@@ -479,8 +474,8 @@ static struct regulator_init_data rx51_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &rx51_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(rx51_vmmc1_supply),
+       .consumer_supplies      = rx51_vmmc1_supply,
 };
 
 static struct regulator_init_data rx51_vmmc2 = {
@@ -511,23 +506,8 @@ static struct regulator_init_data rx51_vsim = {
                .valid_ops_mask         = REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &rx51_vsim_supply,
-};
-
-static struct regulator_init_data rx51_vdac = {
-       .constraints = {
-               .name                   = "VDAC",
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = true,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = rx51_vdac_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(rx51_vsim_supply),
+       .consumer_supplies      = rx51_vsim_supply,
 };
 
 static struct regulator_init_data rx51_vio = {
@@ -600,10 +580,6 @@ static struct twl4030_gpio_platform_data rx51_gpio_data = {
        .setup                  = rx51_twlgpio_setup,
 };
 
-static struct twl4030_usb_data rx51_usb_data = {
-       .usb_mode               = T2_USB_MODE_ULPI,
-};
-
 static struct twl4030_ins sleep_on_seq[] __initdata = {
 /*
  * Turn off everything
@@ -775,14 +751,9 @@ struct twl4030_codec_data rx51_codec_data __initdata = {
 };
 
 static struct twl4030_platform_data rx51_twldata __initdata = {
-       .irq_base               = TWL4030_IRQ_BASE,
-       .irq_end                = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
        .gpio                   = &rx51_gpio_data,
        .keypad                 = &rx51_kp_data,
-       .madc                   = &rx51_madc_data,
-       .usb                    = &rx51_usb_data,
        .power                  = &rx51_t2scripts_data,
        .codec                  = &rx51_codec_data,
 
@@ -791,7 +762,6 @@ static struct twl4030_platform_data rx51_twldata __initdata = {
        .vaux4                  = &rx51_vaux4,
        .vmmc1                  = &rx51_vmmc1,
        .vsim                   = &rx51_vsim,
-       .vdac                   = &rx51_vdac,
        .vio                    = &rx51_vio,
 };
 
@@ -847,6 +817,13 @@ static int __init rx51_i2c_init(void)
                rx51_twldata.vaux3 = &rx51_vaux3_cam;
        }
        rx51_twldata.vmmc2 = &rx51_vmmc2;
+       omap3_pmic_get_config(&rx51_twldata,
+                       TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC,
+                       TWL_COMMON_REGULATOR_VDAC);
+
+       rx51_twldata.vdac->constraints.apply_uV = true;
+       rx51_twldata.vdac->constraints.name = "VDAC";
+
        omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
        omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
                              ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
index fec4cac..5ea142f 100644 (file)
@@ -160,7 +160,7 @@ MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
        .reserve        = rx51_reserve,
        .map_io         = rx51_map_io,
        .init_early     = rx51_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = rx51_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
index 09fa7bf..a85d5b0 100644 (file)
@@ -33,11 +33,6 @@ static void __init ti8168_init_early(void)
        omap2_init_common_devices(NULL, NULL);
 }
 
-static void __init ti8168_evm_init_irq(void)
-{
-       omap_init_irq();
-}
-
 static void __init ti8168_evm_init(void)
 {
        omap_serial_init();
@@ -56,7 +51,7 @@ MACHINE_START(TI8168EVM, "ti8168evm")
        .boot_params    = 0x80000100,
        .map_io         = ti8168_evm_map_io,
        .init_early     = ti8168_init_early,
-       .init_irq       = ti8168_evm_init_irq,
-       .timer          = &omap_timer,
+       .init_irq       = ti816x_init_irq,
+       .timer          = &omap3_timer,
        .init_machine   = ti8168_evm_init,
 MACHINE_END
index 118c6f5..13a6442 100644 (file)
@@ -105,21 +105,20 @@ static struct twl4030_keypad_data zoom_kp_twl4030_data = {
        .rep            = 1,
 };
 
-static struct regulator_consumer_supply zoom_vmmc1_supply = {
-       .supply         = "vmmc",
+static struct regulator_consumer_supply zoom_vmmc1_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
 };
 
-static struct regulator_consumer_supply zoom_vsim_supply = {
-       .supply         = "vmmc_aux",
+static struct regulator_consumer_supply zoom_vsim_supply[] = {
+       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
 };
 
-static struct regulator_consumer_supply zoom_vmmc2_supply = {
-       .supply         = "vmmc",
+static struct regulator_consumer_supply zoom_vmmc2_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
 };
 
-static struct regulator_consumer_supply zoom_vmmc3_supply = {
-       .supply         = "vmmc",
-       .dev_name       = "omap_hsmmc.2",
+static struct regulator_consumer_supply zoom_vmmc3_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
 };
 
 /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
@@ -133,8 +132,8 @@ static struct regulator_init_data zoom_vmmc1 = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &zoom_vmmc1_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(zoom_vmmc1_supply),
+       .consumer_supplies      = zoom_vmmc1_supply,
 };
 
 /* VMMC2 for MMC2 card */
@@ -148,8 +147,8 @@ static struct regulator_init_data zoom_vmmc2 = {
                .valid_ops_mask         = REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &zoom_vmmc2_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(zoom_vmmc2_supply),
+       .consumer_supplies      = zoom_vmmc2_supply,
 };
 
 /* VSIM for OMAP VDD_MMC1A (i/o for DAT4..DAT7) */
@@ -163,16 +162,16 @@ static struct regulator_init_data zoom_vsim = {
                                        | REGULATOR_CHANGE_MODE
                                        | REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &zoom_vsim_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(zoom_vsim_supply),
+       .consumer_supplies      = zoom_vsim_supply,
 };
 
 static struct regulator_init_data zoom_vmmc3 = {
        .constraints = {
                .valid_ops_mask = REGULATOR_CHANGE_STATUS,
        },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies = &zoom_vmmc3_supply,
+       .num_consumer_supplies  = ARRAY_SIZE(zoom_vmmc3_supply),
+       .consumer_supplies      = zoom_vmmc3_supply,
 };
 
 static struct fixed_voltage_config zoom_vwlan = {
@@ -227,40 +226,6 @@ static struct omap2_hsmmc_info mmc[] = {
        {}      /* Terminator */
 };
 
-static struct regulator_consumer_supply zoom_vpll2_supplies[] = {
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
-       REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
-static struct regulator_consumer_supply zoom_vdda_dac_supply =
-       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-static struct regulator_init_data zoom_vpll2 = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies          = ARRAY_SIZE(zoom_vpll2_supplies),
-       .consumer_supplies              = zoom_vpll2_supplies,
-};
-
-static struct regulator_init_data zoom_vdac = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies          = 1,
-       .consumer_supplies              = &zoom_vdda_dac_supply,
-};
-
 static int zoom_twl_gpio_setup(struct device *dev,
                unsigned gpio, unsigned ngpio)
 {
@@ -270,13 +235,6 @@ static int zoom_twl_gpio_setup(struct device *dev,
        mmc[0].gpio_cd = gpio + 0;
        omap2_hsmmc_init(mmc);
 
-       /* link regulators to MMC adapters ... we "know" the
-        * regulators will be set up only *after* we return.
-       */
-       zoom_vmmc1_supply.dev = mmc[0].dev;
-       zoom_vsim_supply.dev = mmc[0].dev;
-       zoom_vmmc2_supply.dev = mmc[1].dev;
-
        ret = gpio_request_one(LCD_PANEL_ENABLE_GPIO, GPIOF_OUT_INIT_LOW,
                               "lcd enable");
        if (ret)
@@ -292,26 +250,6 @@ static void zoom2_set_hs_extmute(int mute)
        gpio_set_value(ZOOM2_HEADSET_EXTMUTE_GPIO, mute);
 }
 
-static int zoom_batt_table[] = {
-/* 0 C*/
-30800, 29500, 28300, 27100,
-26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
-17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
-11600, 11200, 10800, 10400, 10000, 9630,  9280,  8950,  8620,  8310,
-8020,  7730,  7460,  7200,  6950,  6710,  6470,  6250,  6040,  5830,
-5640,  5450,  5260,  5090,  4920,  4760,  4600,  4450,  4310,  4170,
-4040,  3910,  3790,  3670,  3550
-};
-
-static struct twl4030_bci_platform_data zoom_bci_data = {
-       .battery_tmp_tbl        = zoom_batt_table,
-       .tblsize                = ARRAY_SIZE(zoom_batt_table),
-};
-
-static struct twl4030_usb_data zoom_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
 static struct twl4030_gpio_platform_data zoom_gpio_data = {
        .gpio_base      = OMAP_MAX_GPIO_LINES,
        .irq_base       = TWL4030_GPIO_IRQ_BASE,
@@ -319,41 +257,29 @@ static struct twl4030_gpio_platform_data zoom_gpio_data = {
        .setup          = zoom_twl_gpio_setup,
 };
 
-static struct twl4030_madc_platform_data zoom_madc_data = {
-       .irq_line       = 1,
-};
-
-static struct twl4030_codec_audio_data zoom_audio_data;
-
-static struct twl4030_codec_data zoom_codec_data = {
-       .audio_mclk = 26000000,
-       .audio = &zoom_audio_data,
-};
-
 static struct twl4030_platform_data zoom_twldata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
        /* platform_data for children goes here */
-       .bci            = &zoom_bci_data,
-       .madc           = &zoom_madc_data,
-       .usb            = &zoom_usb_data,
        .gpio           = &zoom_gpio_data,
        .keypad         = &zoom_kp_twl4030_data,
-       .codec          = &zoom_codec_data,
        .vmmc1          = &zoom_vmmc1,
        .vmmc2          = &zoom_vmmc2,
        .vsim           = &zoom_vsim,
-       .vpll2          = &zoom_vpll2,
-       .vdac           = &zoom_vdac,
 };
 
 static int __init omap_i2c_init(void)
 {
+       omap3_pmic_get_config(&zoom_twldata,
+                       TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_BCI |
+                       TWL_COMMON_PDATA_MADC | TWL_COMMON_PDATA_AUDIO,
+                       TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
        if (machine_is_omap_zoom2()) {
-               zoom_audio_data.ramp_delay_value = 3;   /* 161 ms */
-               zoom_audio_data.hs_extmute = 1;
-               zoom_audio_data.set_hs_extmute = zoom2_set_hs_extmute;
+               struct twl4030_codec_audio_data *audio_data;
+               audio_data = zoom_twldata.codec->audio;
+
+               audio_data->ramp_delay_value = 3;       /* 161 ms */
+               audio_data->hs_extmute = 1;
+               audio_data->set_hs_extmute = zoom2_set_hs_extmute;
        }
        omap_pmic_init(1, 2400, "twl5030", INT_34XX_SYS_NIRQ, &zoom_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
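
The PMIC hunks follow the companion pattern: per-board twl4030_usb_data, MADC, codec and VDAC/VPLL2 regulator tables are deleted, and omap3_pmic_get_config() fills those slots from shared defaults before the PMIC is registered, with any board-specific tweaks applied afterwards. A minimal sketch of how a board file uses the helper, mirroring only the calls visible in the hunks above; the include location is an assumption, not taken from the patch:

/*
 * Sketch of the common-PMIC consolidation used above.
 * The helper declarations are assumed to live in the mach-omap2
 * twl-common header; the flags select which common child data
 * and common regulator init data get plugged into the pdata.
 */
#include <linux/i2c/twl.h>	/* struct twl4030_platform_data */
#include "twl-common.h"		/* assumed: omap3_pmic_get_config(), omap3_pmic_init() */

/* Only board-specific children stay in the board file. */
static struct twl4030_platform_data example_twldata;

static int __init example_i2c_init(void)
{
	omap3_pmic_get_config(&example_twldata,
			TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
			TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);

	/* Board-specific adjustments to the shared data remain possible. */
	example_twldata.vpll2->constraints.name = "VDVI";

	omap3_pmic_init("tps65950", &example_twldata);
	return 0;
}
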
index 4b133d7..8a98c3c 100644 (file)
@@ -137,9 +137,9 @@ MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = omap_zoom_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = omap_zoom_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
 
 MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
@@ -147,7 +147,7 @@ MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
        .init_early     = omap_zoom_init_early,
-       .init_irq       = omap_init_irq,
+       .init_irq       = omap3_init_irq,
        .init_machine   = omap_zoom_init,
-       .timer          = &omap_timer,
+       .timer          = &omap3_timer,
 MACHINE_END
index 6be1095..7ceb870 100644 (file)
@@ -8,13 +8,6 @@
 #ifndef __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H
 #define __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H
 
-/*
- * XXX Missing values for the OMAP4 DPLL_USB
- * XXX Missing min_multiplier values for all OMAP4 DPLLs
- */
-#define OMAP4430_MAX_DPLL_MULT 2047
-#define OMAP4430_MAX_DPLL_DIV  128
-
 int omap4xxx_clk_init(void);
 
 #endif
index 8c96567..044df38 100644 (file)
@@ -53,9 +53,9 @@ static struct clk extalt_clkin_ck = {
 static struct clk pad_clks_ck = {
        .name           = "pad_clks_ck",
        .rate           = 12000000,
-       .ops            = &clkops_omap2_dflt,
-       .enable_reg     = OMAP4430_CM_CLKSEL_ABE,
-       .enable_bit     = OMAP4430_PAD_CLKS_GATE_SHIFT,
+       .ops            = &clkops_omap2_dflt,
+       .enable_reg     = OMAP4430_CM_CLKSEL_ABE,
+       .enable_bit     = OMAP4430_PAD_CLKS_GATE_SHIFT,
 };
 
 static struct clk pad_slimbus_core_clks_ck = {
@@ -73,9 +73,9 @@ static struct clk secure_32k_clk_src_ck = {
 static struct clk slimbus_clk = {
        .name           = "slimbus_clk",
        .rate           = 12000000,
-       .ops            = &clkops_omap2_dflt,
-       .enable_reg     = OMAP4430_CM_CLKSEL_ABE,
-       .enable_bit     = OMAP4430_SLIMBUS_CLK_GATE_SHIFT,
+       .ops            = &clkops_omap2_dflt,
+       .enable_reg     = OMAP4430_CM_CLKSEL_ABE,
+       .enable_bit     = OMAP4430_SLIMBUS_CLK_GATE_SHIFT,
 };
 
 static struct clk sys_32k_ck = {
@@ -258,8 +258,8 @@ static struct dpll_data dpll_abe_dd = {
        .enable_mask    = OMAP4430_DPLL_EN_MASK,
        .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
        .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = OMAP4430_MAX_DPLL_MULT,
-       .max_divider    = OMAP4430_MAX_DPLL_DIV,
+       .max_multiplier = 2047,
+       .max_divider    = 128,
        .min_divider    = 1,
 };
 
@@ -278,10 +278,10 @@ static struct clk dpll_abe_ck = {
 static struct clk dpll_abe_x2_ck = {
        .name           = "dpll_abe_x2_ck",
        .parent         = &dpll_abe_ck,
+       .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_ABE,
        .flags          = CLOCK_CLKOUTX2,
        .ops            = &clkops_omap4_dpllmx_ops,
        .recalc         = &omap3_clkoutx2_recalc,
-       .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_ABE,
 };
 
 static const struct clksel_rate div31_1to31_rates[] = {
@@ -434,8 +434,8 @@ static struct dpll_data dpll_core_dd = {
        .enable_mask    = OMAP4430_DPLL_EN_MASK,
        .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
        .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = OMAP4430_MAX_DPLL_MULT,
-       .max_divider    = OMAP4430_MAX_DPLL_DIV,
+       .max_multiplier = 2047,
+       .max_divider    = 128,
        .min_divider    = 1,
 };
 
@@ -622,11 +622,11 @@ static struct clk dpll_core_m3x2_ck = {
        .clksel_reg     = OMAP4430_CM_DIV_M3_DPLL_CORE,
        .clksel_mask    = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
        .ops            = &clkops_omap2_dflt,
-       .enable_reg     = OMAP4430_CM_DIV_M3_DPLL_CORE,
-       .enable_bit     = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
        .recalc         = &omap2_clksel_recalc,
        .round_rate     = &omap2_clksel_round_rate,
        .set_rate       = &omap2_clksel_set_rate,
+       .enable_reg     = OMAP4430_CM_DIV_M3_DPLL_CORE,
+       .enable_bit     = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
 };
 
 static struct clk dpll_core_m7x2_ck = {
@@ -672,8 +672,8 @@ static struct dpll_data dpll_iva_dd = {
        .enable_mask    = OMAP4430_DPLL_EN_MASK,
        .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
        .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = OMAP4430_MAX_DPLL_MULT,
-       .max_divider    = OMAP4430_MAX_DPLL_DIV,
+       .max_multiplier = 2047,
+       .max_divider    = 128,
        .min_divider    = 1,
 };
 
@@ -740,8 +740,8 @@ static struct dpll_data dpll_mpu_dd = {
        .enable_mask    = OMAP4430_DPLL_EN_MASK,
        .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
        .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = OMAP4430_MAX_DPLL_MULT,
-       .max_divider    = OMAP4430_MAX_DPLL_DIV,
+       .max_multiplier = 2047,
+       .max_divider    = 128,
        .min_divider    = 1,
 };
 
@@ -813,8 +813,8 @@ static struct dpll_data dpll_per_dd = {
        .enable_mask    = OMAP4430_DPLL_EN_MASK,
        .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
        .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = OMAP4430_MAX_DPLL_MULT,
-       .max_divider    = OMAP4430_MAX_DPLL_DIV,
+       .max_multiplier = 2047,
+       .max_divider    = 128,
        .min_divider    = 1,
 };
 
@@ -850,10 +850,10 @@ static struct clk dpll_per_m2_ck = {
 static struct clk dpll_per_x2_ck = {
        .name           = "dpll_per_x2_ck",
        .parent         = &dpll_per_ck,
+       .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_PER,
        .flags          = CLOCK_CLKOUTX2,
        .ops            = &clkops_omap4_dpllmx_ops,
        .recalc         = &omap3_clkoutx2_recalc,
-       .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_PER,
 };
 
 static const struct clksel dpll_per_m2x2_div[] = {
@@ -880,11 +880,11 @@ static struct clk dpll_per_m3x2_ck = {
        .clksel_reg     = OMAP4430_CM_DIV_M3_DPLL_PER,
        .clksel_mask    = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
        .ops            = &clkops_omap2_dflt,
-       .enable_reg     = OMAP4430_CM_DIV_M3_DPLL_PER,
-       .enable_bit     = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
        .recalc         = &omap2_clksel_recalc,
        .round_rate     = &omap2_clksel_round_rate,
        .set_rate       = &omap2_clksel_set_rate,
+       .enable_reg     = OMAP4430_CM_DIV_M3_DPLL_PER,
+       .enable_bit     = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
 };
 
 static struct clk dpll_per_m4x2_ck = {
@@ -935,63 +935,6 @@ static struct clk dpll_per_m7x2_ck = {
        .set_rate       = &omap2_clksel_set_rate,
 };
 
-/* DPLL_UNIPRO */
-static struct dpll_data dpll_unipro_dd = {
-       .mult_div1_reg  = OMAP4430_CM_CLKSEL_DPLL_UNIPRO,
-       .clk_bypass     = &sys_clkin_ck,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = OMAP4430_CM_CLKMODE_DPLL_UNIPRO,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .autoidle_reg   = OMAP4430_CM_AUTOIDLE_DPLL_UNIPRO,
-       .idlest_reg     = OMAP4430_CM_IDLEST_DPLL_UNIPRO,
-       .mult_mask      = OMAP4430_DPLL_MULT_MASK,
-       .div1_mask      = OMAP4430_DPLL_DIV_MASK,
-       .enable_mask    = OMAP4430_DPLL_EN_MASK,
-       .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
-       .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .sddiv_mask     = OMAP4430_DPLL_SD_DIV_MASK,
-       .max_multiplier = OMAP4430_MAX_DPLL_MULT,
-       .max_divider    = OMAP4430_MAX_DPLL_DIV,
-       .min_divider    = 1,
-};
-
-
-static struct clk dpll_unipro_ck = {
-       .name           = "dpll_unipro_ck",
-       .parent         = &sys_clkin_ck,
-       .dpll_data      = &dpll_unipro_dd,
-       .init           = &omap2_init_dpll_parent,
-       .ops            = &clkops_omap3_noncore_dpll_ops,
-       .recalc         = &omap3_dpll_recalc,
-       .round_rate     = &omap2_dpll_round_rate,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
-};
-
-static struct clk dpll_unipro_x2_ck = {
-       .name           = "dpll_unipro_x2_ck",
-       .parent         = &dpll_unipro_ck,
-       .flags          = CLOCK_CLKOUTX2,
-       .ops            = &clkops_null,
-       .recalc         = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel dpll_unipro_m2x2_div[] = {
-       { .parent = &dpll_unipro_x2_ck, .rates = div31_1to31_rates },
-       { .parent = NULL },
-};
-
-static struct clk dpll_unipro_m2x2_ck = {
-       .name           = "dpll_unipro_m2x2_ck",
-       .parent         = &dpll_unipro_x2_ck,
-       .clksel         = dpll_unipro_m2x2_div,
-       .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_UNIPRO,
-       .clksel_mask    = OMAP4430_DPLL_CLKOUT_DIV_MASK,
-       .ops            = &clkops_omap4_dpllmx_ops,
-       .recalc         = &omap2_clksel_recalc,
-       .round_rate     = &omap2_clksel_round_rate,
-       .set_rate       = &omap2_clksel_set_rate,
-};
-
 static struct clk usb_hs_clk_div_ck = {
        .name           = "usb_hs_clk_div_ck",
        .parent         = &dpll_abe_m3x2_ck,
@@ -1015,8 +958,9 @@ static struct dpll_data dpll_usb_dd = {
        .enable_mask    = OMAP4430_DPLL_EN_MASK,
        .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
        .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = OMAP4430_MAX_DPLL_MULT,
-       .max_divider    = OMAP4430_MAX_DPLL_DIV,
+       .sddiv_mask     = OMAP4430_DPLL_SD_DIV_MASK,
+       .max_multiplier = 4095,
+       .max_divider    = 256,
        .min_divider    = 1,
 };
 
@@ -1035,8 +979,8 @@ static struct clk dpll_usb_ck = {
 static struct clk dpll_usb_clkdcoldo_ck = {
        .name           = "dpll_usb_clkdcoldo_ck",
        .parent         = &dpll_usb_ck,
-       .ops            = &clkops_omap4_dpllmx_ops,
        .clksel_reg     = OMAP4430_CM_CLKDCOLDO_DPLL_USB,
+       .ops            = &clkops_omap4_dpllmx_ops,
        .recalc         = &followparent_recalc,
 };
 
@@ -1169,19 +1113,6 @@ static struct clk func_96m_fclk = {
        .set_rate       = &omap2_clksel_set_rate,
 };
 
-static const struct clksel hsmmc6_fclk_sel[] = {
-       { .parent = &func_64m_fclk, .rates = div_1_0_rates },
-       { .parent = &func_96m_fclk, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static struct clk hsmmc6_fclk = {
-       .name           = "hsmmc6_fclk",
-       .parent         = &func_64m_fclk,
-       .ops            = &clkops_null,
-       .recalc         = &followparent_recalc,
-};
-
 static const struct clksel_rate div2_1to8_rates[] = {
        { .div = 1, .val = 0, .flags = RATE_IN_4430 },
        { .div = 8, .val = 1, .flags = RATE_IN_4430 },
@@ -1264,6 +1195,21 @@ static struct clk l4_wkup_clk_mux_ck = {
        .recalc         = &omap2_clksel_recalc,
 };
 
+static struct clk ocp_abe_iclk = {
+       .name           = "ocp_abe_iclk",
+       .parent         = &aess_fclk,
+       .ops            = &clkops_null,
+       .recalc         = &followparent_recalc,
+};
+
+static struct clk per_abe_24m_fclk = {
+       .name           = "per_abe_24m_fclk",
+       .parent         = &dpll_abe_m2_ck,
+       .ops            = &clkops_null,
+       .fixed_div      = 4,
+       .recalc         = &omap_fixed_divisor_recalc,
+};
+
 static const struct clksel per_abe_nc_fclk_div[] = {
        { .parent = &dpll_abe_m2_ck, .rates = div2_1to2_rates },
        { .parent = NULL },
@@ -1281,41 +1227,6 @@ static struct clk per_abe_nc_fclk = {
        .set_rate       = &omap2_clksel_set_rate,
 };
 
-static const struct clksel mcasp2_fclk_sel[] = {
-       { .parent = &func_96m_fclk, .rates = div_1_0_rates },
-       { .parent = &per_abe_nc_fclk, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static struct clk mcasp2_fclk = {
-       .name           = "mcasp2_fclk",
-       .parent         = &func_96m_fclk,
-       .ops            = &clkops_null,
-       .recalc         = &followparent_recalc,
-};
-
-static struct clk mcasp3_fclk = {
-       .name           = "mcasp3_fclk",
-       .parent         = &func_96m_fclk,
-       .ops            = &clkops_null,
-       .recalc         = &followparent_recalc,
-};
-
-static struct clk ocp_abe_iclk = {
-       .name           = "ocp_abe_iclk",
-       .parent         = &aess_fclk,
-       .ops            = &clkops_null,
-       .recalc         = &followparent_recalc,
-};
-
-static struct clk per_abe_24m_fclk = {
-       .name           = "per_abe_24m_fclk",
-       .parent         = &dpll_abe_m2_ck,
-       .ops            = &clkops_null,
-       .fixed_div      = 4,
-       .recalc         = &omap_fixed_divisor_recalc,
-};
-
 static const struct clksel pmd_stm_clock_mux_sel[] = {
        { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
        { .parent = &dpll_core_m6x2_ck, .rates = div_1_1_rates },
@@ -1846,8 +1757,8 @@ static struct clk l3_instr_ick = {
        .ops            = &clkops_omap2_dflt,
        .enable_reg     = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL,
        .enable_bit     = OMAP4430_MODULEMODE_HWCTRL,
-       .clkdm_name     = "l3_instr_clkdm",
        .flags          = ENABLE_ON_INIT,
+       .clkdm_name     = "l3_instr_clkdm",
        .parent         = &l3_div_ck,
        .recalc         = &followparent_recalc,
 };
@@ -1857,8 +1768,8 @@ static struct clk l3_main_3_ick = {
        .ops            = &clkops_omap2_dflt,
        .enable_reg     = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL,
        .enable_bit     = OMAP4430_MODULEMODE_HWCTRL,
-       .clkdm_name     = "l3_instr_clkdm",
        .flags          = ENABLE_ON_INIT,
+       .clkdm_name     = "l3_instr_clkdm",
        .parent         = &l3_div_ck,
        .recalc         = &followparent_recalc,
 };
@@ -1995,10 +1906,16 @@ static struct clk mcbsp3_fck = {
        .clkdm_name     = "abe_clkdm",
 };
 
+static const struct clksel mcbsp4_sync_mux_sel[] = {
+       { .parent = &func_96m_fclk, .rates = div_1_0_rates },
+       { .parent = &per_abe_nc_fclk, .rates = div_1_1_rates },
+       { .parent = NULL },
+};
+
 static struct clk mcbsp4_sync_mux_ck = {
        .name           = "mcbsp4_sync_mux_ck",
        .parent         = &func_96m_fclk,
-       .clksel         = mcasp2_fclk_sel,
+       .clksel         = mcbsp4_sync_mux_sel,
        .init           = &omap2_init_clksel_parent,
        .clksel_reg     = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
        .clksel_mask    = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
@@ -2077,11 +1994,17 @@ static struct clk mcspi4_fck = {
        .recalc         = &followparent_recalc,
 };
 
+static const struct clksel hsmmc1_fclk_sel[] = {
+       { .parent = &func_64m_fclk, .rates = div_1_0_rates },
+       { .parent = &func_96m_fclk, .rates = div_1_1_rates },
+       { .parent = NULL },
+};
+
 /* Merged hsmmc1_fclk into mmc1 */
 static struct clk mmc1_fck = {
        .name           = "mmc1_fck",
        .parent         = &func_64m_fclk,
-       .clksel         = hsmmc6_fclk_sel,
+       .clksel         = hsmmc1_fclk_sel,
        .init           = &omap2_init_clksel_parent,
        .clksel_reg     = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
        .clksel_mask    = OMAP4430_CLKSEL_MASK,
@@ -2096,7 +2019,7 @@ static struct clk mmc1_fck = {
 static struct clk mmc2_fck = {
        .name           = "mmc2_fck",
        .parent         = &func_64m_fclk,
-       .clksel         = hsmmc6_fclk_sel,
+       .clksel         = hsmmc1_fclk_sel,
        .init           = &omap2_init_clksel_parent,
        .clksel_reg     = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
        .clksel_mask    = OMAP4430_CLKSEL_MASK,
@@ -2162,8 +2085,8 @@ static struct clk ocp_wp_noc_ick = {
        .ops            = &clkops_omap2_dflt,
        .enable_reg     = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
        .enable_bit     = OMAP4430_MODULEMODE_HWCTRL,
-       .clkdm_name     = "l3_instr_clkdm",
        .flags          = ENABLE_ON_INIT,
+       .clkdm_name     = "l3_instr_clkdm",
        .parent         = &l3_div_ck,
        .recalc         = &followparent_recalc,
 };
@@ -2895,6 +2818,7 @@ static struct clk auxclk2_ck = {
        .enable_reg     = OMAP4_SCRM_AUXCLK2,
        .enable_bit     = OMAP4_ENABLE_SHIFT,
 };
+
 static struct clk auxclk3_ck = {
        .name           = "auxclk3_ck",
        .parent         = &sys_clkin_ck,
@@ -3077,9 +3001,6 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "dpll_per_m5x2_ck",             &dpll_per_m5x2_ck,      CK_443X),
        CLK(NULL,       "dpll_per_m6x2_ck",             &dpll_per_m6x2_ck,      CK_443X),
        CLK(NULL,       "dpll_per_m7x2_ck",             &dpll_per_m7x2_ck,      CK_443X),
-       CLK(NULL,       "dpll_unipro_ck",               &dpll_unipro_ck,        CK_443X),
-       CLK(NULL,       "dpll_unipro_x2_ck",            &dpll_unipro_x2_ck,     CK_443X),
-       CLK(NULL,       "dpll_unipro_m2x2_ck",          &dpll_unipro_m2x2_ck,   CK_443X),
        CLK(NULL,       "usb_hs_clk_div_ck",            &usb_hs_clk_div_ck,     CK_443X),
        CLK(NULL,       "dpll_usb_ck",                  &dpll_usb_ck,   CK_443X),
        CLK(NULL,       "dpll_usb_clkdcoldo_ck",        &dpll_usb_clkdcoldo_ck, CK_443X),
@@ -3092,17 +3013,14 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "func_48mc_fclk",               &func_48mc_fclk,        CK_443X),
        CLK(NULL,       "func_64m_fclk",                &func_64m_fclk, CK_443X),
        CLK(NULL,       "func_96m_fclk",                &func_96m_fclk, CK_443X),
-       CLK(NULL,       "hsmmc6_fclk",                  &hsmmc6_fclk,   CK_443X),
        CLK(NULL,       "init_60m_fclk",                &init_60m_fclk, CK_443X),
        CLK(NULL,       "l3_div_ck",                    &l3_div_ck,     CK_443X),
        CLK(NULL,       "l4_div_ck",                    &l4_div_ck,     CK_443X),
        CLK(NULL,       "lp_clk_div_ck",                &lp_clk_div_ck, CK_443X),
        CLK(NULL,       "l4_wkup_clk_mux_ck",           &l4_wkup_clk_mux_ck,    CK_443X),
-       CLK(NULL,       "per_abe_nc_fclk",              &per_abe_nc_fclk,       CK_443X),
-       CLK(NULL,       "mcasp2_fclk",                  &mcasp2_fclk,   CK_443X),
-       CLK(NULL,       "mcasp3_fclk",                  &mcasp3_fclk,   CK_443X),
        CLK(NULL,       "ocp_abe_iclk",                 &ocp_abe_iclk,  CK_443X),
        CLK(NULL,       "per_abe_24m_fclk",             &per_abe_24m_fclk,      CK_443X),
+       CLK(NULL,       "per_abe_nc_fclk",              &per_abe_nc_fclk,       CK_443X),
        CLK(NULL,       "pmd_stm_clock_mux_ck",         &pmd_stm_clock_mux_ck,  CK_443X),
        CLK(NULL,       "pmd_trace_clk_mux_ck",         &pmd_trace_clk_mux_ck,  CK_443X),
        CLK(NULL,       "syc_clk_div_ck",               &syc_clk_div_ck,        CK_443X),
@@ -3204,7 +3122,6 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "uart2_fck",                    &uart2_fck,     CK_443X),
        CLK(NULL,       "uart3_fck",                    &uart3_fck,     CK_443X),
        CLK(NULL,       "uart4_fck",                    &uart4_fck,     CK_443X),
-       CLK(NULL,       "usb_host_fs_fck",              &usb_host_fs_fck,       CK_443X),
        CLK("usbhs-omap.0",     "fs_fck",               &usb_host_fs_fck,       CK_443X),
        CLK(NULL,       "utmi_p1_gfclk",                &utmi_p1_gfclk, CK_443X),
        CLK(NULL,       "usb_host_hs_utmi_p1_clk",      &usb_host_hs_utmi_p1_clk,       CK_443X),
@@ -3216,9 +3133,7 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "usb_host_hs_hsic60m_p2_clk",   &usb_host_hs_hsic60m_p2_clk,    CK_443X),
        CLK(NULL,       "usb_host_hs_hsic480m_p2_clk",  &usb_host_hs_hsic480m_p2_clk,   CK_443X),
        CLK(NULL,       "usb_host_hs_func48mclk",       &usb_host_hs_func48mclk,        CK_443X),
-       CLK(NULL,       "usb_host_hs_fck",              &usb_host_hs_fck,       CK_443X),
        CLK("usbhs-omap.0",     "hs_fck",               &usb_host_hs_fck,       CK_443X),
-       CLK("usbhs-omap.0",     "usbhost_ick",          &dummy_ck,              CK_443X),
        CLK(NULL,       "otg_60m_gfclk",                &otg_60m_gfclk, CK_443X),
        CLK(NULL,       "usb_otg_hs_xclk",              &usb_otg_hs_xclk,       CK_443X),
        CLK("musb-omap2430",    "ick",                          &usb_otg_hs_ick,        CK_443X),
@@ -3226,17 +3141,26 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "usb_tll_hs_usb_ch2_clk",       &usb_tll_hs_usb_ch2_clk,        CK_443X),
        CLK(NULL,       "usb_tll_hs_usb_ch0_clk",       &usb_tll_hs_usb_ch0_clk,        CK_443X),
        CLK(NULL,       "usb_tll_hs_usb_ch1_clk",       &usb_tll_hs_usb_ch1_clk,        CK_443X),
-       CLK(NULL,       "usb_tll_hs_ick",               &usb_tll_hs_ick,        CK_443X),
        CLK("usbhs-omap.0",     "usbtll_ick",           &usb_tll_hs_ick,        CK_443X),
-       CLK("usbhs-omap.0",     "usbtll_fck",           &dummy_ck,      CK_443X),
        CLK(NULL,       "usim_ck",                      &usim_ck,       CK_443X),
        CLK(NULL,       "usim_fclk",                    &usim_fclk,     CK_443X),
        CLK(NULL,       "usim_fck",                     &usim_fck,      CK_443X),
        CLK("omap_wdt", "fck",                          &wd_timer2_fck, CK_443X),
-       CLK(NULL,       "mailboxes_ick",                &dummy_ck,      CK_443X),
        CLK(NULL,       "wd_timer3_fck",                &wd_timer3_fck, CK_443X),
        CLK(NULL,       "stm_clk_div_ck",               &stm_clk_div_ck,        CK_443X),
        CLK(NULL,       "trace_clk_div_ck",             &trace_clk_div_ck,      CK_443X),
+       CLK(NULL,       "auxclk0_ck",                   &auxclk0_ck,    CK_443X),
+       CLK(NULL,       "auxclk1_ck",                   &auxclk1_ck,    CK_443X),
+       CLK(NULL,       "auxclk2_ck",                   &auxclk2_ck,    CK_443X),
+       CLK(NULL,       "auxclk3_ck",                   &auxclk3_ck,    CK_443X),
+       CLK(NULL,       "auxclk4_ck",                   &auxclk4_ck,    CK_443X),
+       CLK(NULL,       "auxclk5_ck",                   &auxclk5_ck,    CK_443X),
+       CLK(NULL,       "auxclkreq0_ck",                &auxclkreq0_ck, CK_443X),
+       CLK(NULL,       "auxclkreq1_ck",                &auxclkreq1_ck, CK_443X),
+       CLK(NULL,       "auxclkreq2_ck",                &auxclkreq2_ck, CK_443X),
+       CLK(NULL,       "auxclkreq3_ck",                &auxclkreq3_ck, CK_443X),
+       CLK(NULL,       "auxclkreq4_ck",                &auxclkreq4_ck, CK_443X),
+       CLK(NULL,       "auxclkreq5_ck",                &auxclkreq5_ck, CK_443X),
        CLK(NULL,       "gpmc_ck",                      &dummy_ck,      CK_443X),
        CLK(NULL,       "gpt1_ick",                     &dummy_ck,      CK_443X),
        CLK(NULL,       "gpt2_ick",                     &dummy_ck,      CK_443X),
@@ -3253,6 +3177,7 @@ static struct omap_clk omap44xx_clks[] = {
        CLK("omap_i2c.2",       "ick",                          &dummy_ck,      CK_443X),
        CLK("omap_i2c.3",       "ick",                          &dummy_ck,      CK_443X),
        CLK("omap_i2c.4",       "ick",                          &dummy_ck,      CK_443X),
+       CLK(NULL,       "mailboxes_ick",                &dummy_ck,      CK_443X),
        CLK("omap_hsmmc.0",     "ick",                          &dummy_ck,      CK_443X),
        CLK("omap_hsmmc.1",     "ick",                          &dummy_ck,      CK_443X),
        CLK("omap_hsmmc.2",     "ick",                          &dummy_ck,      CK_443X),
@@ -3270,19 +3195,9 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "uart2_ick",                    &dummy_ck,      CK_443X),
        CLK(NULL,       "uart3_ick",                    &dummy_ck,      CK_443X),
        CLK(NULL,       "uart4_ick",                    &dummy_ck,      CK_443X),
+       CLK("usbhs-omap.0",     "usbhost_ick",          &dummy_ck,              CK_443X),
+       CLK("usbhs-omap.0",     "usbtll_fck",           &dummy_ck,      CK_443X),
        CLK("omap_wdt", "ick",                          &dummy_ck,      CK_443X),
-       CLK(NULL,       "auxclk0_ck",                   &auxclk0_ck,    CK_443X),
-       CLK(NULL,       "auxclk1_ck",                   &auxclk1_ck,    CK_443X),
-       CLK(NULL,       "auxclk2_ck",                   &auxclk2_ck,    CK_443X),
-       CLK(NULL,       "auxclk3_ck",                   &auxclk3_ck,    CK_443X),
-       CLK(NULL,       "auxclk4_ck",                   &auxclk4_ck,    CK_443X),
-       CLK(NULL,       "auxclk5_ck",                   &auxclk5_ck,    CK_443X),
-       CLK(NULL,       "auxclkreq0_ck",                &auxclkreq0_ck, CK_443X),
-       CLK(NULL,       "auxclkreq1_ck",                &auxclkreq1_ck, CK_443X),
-       CLK(NULL,       "auxclkreq2_ck",                &auxclkreq2_ck, CK_443X),
-       CLK(NULL,       "auxclkreq3_ck",                &auxclkreq3_ck, CK_443X),
-       CLK(NULL,       "auxclkreq4_ck",                &auxclkreq4_ck, CK_443X),
-       CLK(NULL,       "auxclkreq5_ck",                &auxclkreq5_ck, CK_443X),
 };
 
 int __init omap4xxx_clk_init(void)
index a607ec1..66090f2 100644 (file)
@@ -1,11 +1,12 @@
 /*
  * OMAP4 Clock domains framework
  *
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Nokia Corporation
  *
  * Abhijit Pagare (abhijitpagare@ti.com)
  * Benoit Cousson (b-cousson@ti.com)
+ * Paul Walmsley (paul@pwsan.com)
  *
  * This file is automatically generated from the OMAP hardware databases.
  * We respectfully ask that any modifications to this file be coordinated
@@ -32,7 +33,7 @@
 
 /* Static Dependencies for OMAP4 Clock Domains */
 
-static struct clkdm_dep ducati_wkup_sleep_deps[] = {
+static struct clkdm_dep d2d_wkup_sleep_deps[] = {
        {
                .clkdm_name      = "abe_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
@@ -50,103 +51,103 @@ static struct clkdm_dep ducati_wkup_sleep_deps[] = {
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_dss_clkdm",
+               .clkdm_name      = "l3_emif_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_emif_clkdm",
+               .clkdm_name      = "l3_init_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_gfx_clkdm",
+               .clkdm_name      = "l4_cfg_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_init_clkdm",
+               .clkdm_name      = "l4_per_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
+       { NULL },
+};
+
+static struct clkdm_dep ducati_wkup_sleep_deps[] = {
        {
-               .clkdm_name      = "l4_cfg_clkdm",
+               .clkdm_name      = "abe_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l4_per_clkdm",
+               .clkdm_name      = "ivahd_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l4_secure_clkdm",
+               .clkdm_name      = "l3_1_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l4_wkup_clkdm",
+               .clkdm_name      = "l3_2_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "tesla_clkdm",
+               .clkdm_name      = "l3_dss_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
-       { NULL },
-};
-
-static struct clkdm_dep iss_wkup_sleep_deps[] = {
        {
-               .clkdm_name      = "ivahd_clkdm",
+               .clkdm_name      = "l3_emif_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_1_clkdm",
+               .clkdm_name      = "l3_gfx_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_emif_clkdm",
+               .clkdm_name      = "l3_init_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
-       { NULL },
-};
-
-static struct clkdm_dep ivahd_wkup_sleep_deps[] = {
        {
-               .clkdm_name      = "l3_1_clkdm",
+               .clkdm_name      = "l4_cfg_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_emif_clkdm",
+               .clkdm_name      = "l4_per_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
-       { NULL },
-};
-
-static struct clkdm_dep l3_d2d_wkup_sleep_deps[] = {
        {
-               .clkdm_name      = "abe_clkdm",
+               .clkdm_name      = "l4_secure_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "ivahd_clkdm",
+               .clkdm_name      = "l4_wkup_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_1_clkdm",
+               .clkdm_name      = "tesla_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
+       { NULL },
+};
+
+static struct clkdm_dep iss_wkup_sleep_deps[] = {
        {
-               .clkdm_name      = "l3_2_clkdm",
+               .clkdm_name      = "ivahd_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_emif_clkdm",
+               .clkdm_name      = "l3_1_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l3_init_clkdm",
+               .clkdm_name      = "l3_emif_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
+       { NULL },
+};
+
+static struct clkdm_dep ivahd_wkup_sleep_deps[] = {
        {
-               .clkdm_name      = "l4_cfg_clkdm",
+               .clkdm_name      = "l3_1_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        {
-               .clkdm_name      = "l4_per_clkdm",
+               .clkdm_name      = "l3_emif_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
        },
        { NULL },
@@ -280,7 +281,7 @@ static struct clkdm_dep l4_secure_wkup_sleep_deps[] = {
        { NULL },
 };
 
-static struct clkdm_dep mpuss_wkup_sleep_deps[] = {
+static struct clkdm_dep mpu_wkup_sleep_deps[] = {
        {
                .clkdm_name      = "abe_clkdm",
                .omap_chip       = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
@@ -497,14 +498,14 @@ static struct clockdomain l3_init_44xx_clkdm = {
        .omap_chip        = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
-static struct clockdomain mpuss_44xx_clkdm = {
-       .name             = "mpuss_clkdm",
-       .pwrdm            = { .name = "mpu_pwrdm" },
-       .prcm_partition   = OMAP4430_CM1_PARTITION,
-       .cm_inst          = OMAP4430_CM1_MPU_INST,
-       .clkdm_offs       = OMAP4430_CM1_MPU_MPU_CDOFFS,
-       .wkdep_srcs       = mpuss_wkup_sleep_deps,
-       .sleepdep_srcs    = mpuss_wkup_sleep_deps,
+static struct clockdomain d2d_44xx_clkdm = {
+       .name             = "d2d_clkdm",
+       .pwrdm            = { .name = "core_pwrdm" },
+       .prcm_partition   = OMAP4430_CM2_PARTITION,
+       .cm_inst          = OMAP4430_CM2_CORE_INST,
+       .clkdm_offs       = OMAP4430_CM2_CORE_D2D_CDOFFS,
+       .wkdep_srcs       = d2d_wkup_sleep_deps,
+       .sleepdep_srcs    = d2d_wkup_sleep_deps,
        .flags            = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
        .omap_chip        = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
@@ -563,6 +564,18 @@ static struct clockdomain ducati_44xx_clkdm = {
        .omap_chip        = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
+static struct clockdomain mpu_44xx_clkdm = {
+       .name             = "mpu_clkdm",
+       .pwrdm            = { .name = "mpu_pwrdm" },
+       .prcm_partition   = OMAP4430_CM1_PARTITION,
+       .cm_inst          = OMAP4430_CM1_MPU_INST,
+       .clkdm_offs       = OMAP4430_CM1_MPU_MPU_CDOFFS,
+       .wkdep_srcs       = mpu_wkup_sleep_deps,
+       .sleepdep_srcs    = mpu_wkup_sleep_deps,
+       .flags            = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+       .omap_chip        = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
 static struct clockdomain l3_2_44xx_clkdm = {
        .name             = "l3_2_clkdm",
        .pwrdm            = { .name = "core_pwrdm" },
@@ -585,18 +598,6 @@ static struct clockdomain l3_1_44xx_clkdm = {
        .omap_chip        = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
-static struct clockdomain l3_d2d_44xx_clkdm = {
-       .name             = "l3_d2d_clkdm",
-       .pwrdm            = { .name = "core_pwrdm" },
-       .prcm_partition   = OMAP4430_CM2_PARTITION,
-       .cm_inst          = OMAP4430_CM2_CORE_INST,
-       .clkdm_offs       = OMAP4430_CM2_CORE_D2D_CDOFFS,
-       .wkdep_srcs       = l3_d2d_wkup_sleep_deps,
-       .sleepdep_srcs    = l3_d2d_wkup_sleep_deps,
-       .flags            = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
-       .omap_chip        = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
 static struct clockdomain iss_44xx_clkdm = {
        .name             = "iss_clkdm",
        .pwrdm            = { .name = "cam_pwrdm" },
@@ -655,6 +656,7 @@ static struct clockdomain l3_dma_44xx_clkdm = {
        .omap_chip        = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
+/* As clockdomains are added or removed above, this list must also be changed */
 static struct clockdomain *clockdomains_omap44xx[] __initdata = {
        &l4_cefuse_44xx_clkdm,
        &l4_cfg_44xx_clkdm,
@@ -666,21 +668,21 @@ static struct clockdomain *clockdomains_omap44xx[] __initdata = {
        &abe_44xx_clkdm,
        &l3_instr_44xx_clkdm,
        &l3_init_44xx_clkdm,
-       &mpuss_44xx_clkdm,
+       &d2d_44xx_clkdm,
        &mpu0_44xx_clkdm,
        &mpu1_44xx_clkdm,
        &l3_emif_44xx_clkdm,
        &l4_ao_44xx_clkdm,
        &ducati_44xx_clkdm,
+       &mpu_44xx_clkdm,
        &l3_2_44xx_clkdm,
        &l3_1_44xx_clkdm,
-       &l3_d2d_44xx_clkdm,
        &iss_44xx_clkdm,
        &l3_dss_44xx_clkdm,
        &l4_wkup_44xx_clkdm,
        &emu_sys_44xx_clkdm,
        &l3_dma_44xx_clkdm,
-       NULL,
+       NULL
 };
 
 void __init omap44xx_clockdomains_init(void)
index 9d47a05..0e77945 100644 (file)
 #ifndef __ARCH_ARM_MACH_OMAP2_CM_REGBITS_44XX_H
 #define __ARCH_ARM_MACH_OMAP2_CM_REGBITS_44XX_H
 
-/*
- * Used by CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP,
- * CM_TESLA_DYNAMICDEP
- */
+/* Used by CM_L3_1_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP */
 #define OMAP4430_ABE_DYNDEP_SHIFT                              3
 #define OMAP4430_ABE_DYNDEP_MASK                               (1 << 3)
 
 /*
- * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
  */
 #define OMAP4430_ABE_STATDEP_SHIFT                             3
 #define OMAP4430_ABE_STATDEP_MASK                              (1 << 3)
 
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
 #define OMAP4430_ALWONCORE_DYNDEP_SHIFT                                16
 #define OMAP4430_ALWONCORE_DYNDEP_MASK                         (1 << 16)
 
 
 /*
  * Used by CM_AUTOIDLE_DPLL_ABE, CM_AUTOIDLE_DPLL_CORE,
- * CM_AUTOIDLE_DPLL_CORE_RESTORE, CM_AUTOIDLE_DPLL_DDRPHY,
- * CM_AUTOIDLE_DPLL_IVA, CM_AUTOIDLE_DPLL_MPU, CM_AUTOIDLE_DPLL_PER,
- * CM_AUTOIDLE_DPLL_UNIPRO, CM_AUTOIDLE_DPLL_USB
+ * CM_AUTOIDLE_DPLL_DDRPHY, CM_AUTOIDLE_DPLL_IVA, CM_AUTOIDLE_DPLL_MPU,
+ * CM_AUTOIDLE_DPLL_PER, CM_AUTOIDLE_DPLL_UNIPRO, CM_AUTOIDLE_DPLL_USB
  */
 #define OMAP4430_AUTO_DPLL_MODE_SHIFT                          0
 #define OMAP4430_AUTO_DPLL_MODE_MASK                           (0x7 << 0)
 
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
 #define OMAP4430_CEFUSE_DYNDEP_SHIFT                           17
 #define OMAP4430_CEFUSE_DYNDEP_MASK                            (1 << 17)
 
 #define OMAP4430_CLKACTIVITY_ABE_X2_CLK_SHIFT                  8
 #define OMAP4430_CLKACTIVITY_ABE_X2_CLK_MASK                   (1 << 8)
 
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_ASYNC_DLL_CLK_SHIFT               11
 #define OMAP4430_CLKACTIVITY_ASYNC_DLL_CLK_MASK                        (1 << 11)
 
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_ASYNC_PHY1_CLK_SHIFT              12
 #define OMAP4430_CLKACTIVITY_ASYNC_PHY1_CLK_MASK               (1 << 12)
 
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_ASYNC_PHY2_CLK_SHIFT              13
 #define OMAP4430_CLKACTIVITY_ASYNC_PHY2_CLK_MASK               (1 << 13)
 
 #define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_SHIFT          9
 #define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_MASK           (1 << 9)
 
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_DLL_CLK_SHIFT                     9
 #define OMAP4430_CLKACTIVITY_DLL_CLK_MASK                      (1 << 9)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_DMT10_GFCLK_SHIFT                 9
 #define OMAP4430_CLKACTIVITY_DMT10_GFCLK_MASK                  (1 << 9)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_DMT11_GFCLK_SHIFT                 10
 #define OMAP4430_CLKACTIVITY_DMT11_GFCLK_MASK                  (1 << 10)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_DMT2_GFCLK_SHIFT                  11
 #define OMAP4430_CLKACTIVITY_DMT2_GFCLK_MASK                   (1 << 11)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_DMT3_GFCLK_SHIFT                  12
 #define OMAP4430_CLKACTIVITY_DMT3_GFCLK_MASK                   (1 << 12)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_DMT4_GFCLK_SHIFT                  13
 #define OMAP4430_CLKACTIVITY_DMT4_GFCLK_MASK                   (1 << 13)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_DMT9_GFCLK_SHIFT                  14
 #define OMAP4430_CLKACTIVITY_DMT9_GFCLK_MASK                   (1 << 14)
 
 #define OMAP4430_CLKACTIVITY_FDIF_GFCLK_SHIFT                  10
 #define OMAP4430_CLKACTIVITY_FDIF_GFCLK_MASK                   (1 << 10)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_FUNC_12M_GFCLK_SHIFT              15
 #define OMAP4430_CLKACTIVITY_FUNC_12M_GFCLK_MASK               (1 << 15)
 
 #define OMAP4430_CLKACTIVITY_HDMI_PHY_48MHZ_GFCLK_SHIFT                11
 #define OMAP4430_CLKACTIVITY_HDMI_PHY_48MHZ_GFCLK_MASK         (1 << 11)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_HSIC_P1_480M_GFCLK_SHIFT          20
 #define OMAP4430_CLKACTIVITY_HSIC_P1_480M_GFCLK_MASK           (1 << 20)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_HSIC_P1_GFCLK_SHIFT               26
 #define OMAP4430_CLKACTIVITY_HSIC_P1_GFCLK_MASK                        (1 << 26)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_HSIC_P2_480M_GFCLK_SHIFT          21
 #define OMAP4430_CLKACTIVITY_HSIC_P2_480M_GFCLK_MASK           (1 << 21)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_HSIC_P2_GFCLK_SHIFT               27
 #define OMAP4430_CLKACTIVITY_HSIC_P2_GFCLK_MASK                        (1 << 27)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_INIT_48MC_GFCLK_SHIFT             13
 #define OMAP4430_CLKACTIVITY_INIT_48MC_GFCLK_MASK              (1 << 13)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_INIT_48M_GFCLK_SHIFT              12
 #define OMAP4430_CLKACTIVITY_INIT_48M_GFCLK_MASK               (1 << 12)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_INIT_60M_P1_GFCLK_SHIFT           28
 #define OMAP4430_CLKACTIVITY_INIT_60M_P1_GFCLK_MASK            (1 << 28)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_INIT_60M_P2_GFCLK_SHIFT           29
 #define OMAP4430_CLKACTIVITY_INIT_60M_P2_GFCLK_MASK            (1 << 29)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_INIT_96M_GFCLK_SHIFT              11
 #define OMAP4430_CLKACTIVITY_INIT_96M_GFCLK_MASK               (1 << 11)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_INIT_HSI_GFCLK_SHIFT              16
 #define OMAP4430_CLKACTIVITY_INIT_HSI_GFCLK_MASK               (1 << 16)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_INIT_HSMMC1_GFCLK_SHIFT           17
 #define OMAP4430_CLKACTIVITY_INIT_HSMMC1_GFCLK_MASK            (1 << 17)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_INIT_HSMMC2_GFCLK_SHIFT           18
 #define OMAP4430_CLKACTIVITY_INIT_HSMMC2_GFCLK_MASK            (1 << 18)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_INIT_HSMMC6_GFCLK_SHIFT           19
 #define OMAP4430_CLKACTIVITY_INIT_HSMMC6_GFCLK_MASK            (1 << 19)
 
 #define OMAP4430_CLKACTIVITY_L3X2_D2D_GICLK_SHIFT              10
 #define OMAP4430_CLKACTIVITY_L3X2_D2D_GICLK_MASK               (1 << 10)
 
-/* Used by CM_L3_1_CLKSTCTRL, CM_L3_1_CLKSTCTRL_RESTORE */
+/* Used by CM_L3_1_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_L3_1_GICLK_SHIFT                  8
 #define OMAP4430_CLKACTIVITY_L3_1_GICLK_MASK                   (1 << 8)
 
-/* Used by CM_L3_2_CLKSTCTRL, CM_L3_2_CLKSTCTRL_RESTORE */
+/* Used by CM_L3_2_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_L3_2_GICLK_SHIFT                  8
 #define OMAP4430_CLKACTIVITY_L3_2_GICLK_MASK                   (1 << 8)
 
 #define OMAP4430_CLKACTIVITY_L3_DSS_GICLK_SHIFT                        8
 #define OMAP4430_CLKACTIVITY_L3_DSS_GICLK_MASK                 (1 << 8)
 
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_L3_EMIF_GICLK_SHIFT               8
 #define OMAP4430_CLKACTIVITY_L3_EMIF_GICLK_MASK                        (1 << 8)
 
 #define OMAP4430_CLKACTIVITY_L3_GFX_GICLK_SHIFT                        8
 #define OMAP4430_CLKACTIVITY_L3_GFX_GICLK_MASK                 (1 << 8)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_L3_INIT_GICLK_SHIFT               8
 #define OMAP4430_CLKACTIVITY_L3_INIT_GICLK_MASK                        (1 << 8)
 
 #define OMAP4430_CLKACTIVITY_L4_CEFUSE_GICLK_SHIFT             8
 #define OMAP4430_CLKACTIVITY_L4_CEFUSE_GICLK_MASK              (1 << 8)
 
-/* Used by CM_L4CFG_CLKSTCTRL, CM_L4CFG_CLKSTCTRL_RESTORE */
+/* Used by CM_L4CFG_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_L4_CFG_GICLK_SHIFT                        8
 #define OMAP4430_CLKACTIVITY_L4_CFG_GICLK_MASK                 (1 << 8)
 
 #define OMAP4430_CLKACTIVITY_L4_D2D_GICLK_SHIFT                        9
 #define OMAP4430_CLKACTIVITY_L4_D2D_GICLK_MASK                 (1 << 9)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_L4_INIT_GICLK_SHIFT               9
 #define OMAP4430_CLKACTIVITY_L4_INIT_GICLK_MASK                        (1 << 9)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_L4_PER_GICLK_SHIFT                        8
 #define OMAP4430_CLKACTIVITY_L4_PER_GICLK_MASK                 (1 << 8)
 
 #define OMAP4430_CLKACTIVITY_L4_WKUP_GICLK_SHIFT               12
 #define OMAP4430_CLKACTIVITY_L4_WKUP_GICLK_MASK                        (1 << 12)
 
-/* Used by CM_MPU_CLKSTCTRL, CM_MPU_CLKSTCTRL_RESTORE */
+/* Used by CM_MPU_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_MPU_DPLL_CLK_SHIFT                        8
 #define OMAP4430_CLKACTIVITY_MPU_DPLL_CLK_MASK                 (1 << 8)
 
 #define OMAP4430_CLKACTIVITY_OCP_ABE_GICLK_SHIFT               9
 #define OMAP4430_CLKACTIVITY_OCP_ABE_GICLK_MASK                        (1 << 9)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PER_24MC_GFCLK_SHIFT              16
 #define OMAP4430_CLKACTIVITY_PER_24MC_GFCLK_MASK               (1 << 16)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PER_32K_GFCLK_SHIFT               17
 #define OMAP4430_CLKACTIVITY_PER_32K_GFCLK_MASK                        (1 << 17)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PER_48M_GFCLK_SHIFT               18
 #define OMAP4430_CLKACTIVITY_PER_48M_GFCLK_MASK                        (1 << 18)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PER_96M_GFCLK_SHIFT               19
 #define OMAP4430_CLKACTIVITY_PER_96M_GFCLK_MASK                        (1 << 19)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PER_ABE_24M_GFCLK_SHIFT           25
 #define OMAP4430_CLKACTIVITY_PER_ABE_24M_GFCLK_MASK            (1 << 25)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PER_MCASP2_GFCLK_SHIFT            20
 #define OMAP4430_CLKACTIVITY_PER_MCASP2_GFCLK_MASK             (1 << 20)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PER_MCASP3_GFCLK_SHIFT            21
 #define OMAP4430_CLKACTIVITY_PER_MCASP3_GFCLK_MASK             (1 << 21)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PER_MCBSP4_GFCLK_SHIFT            22
 #define OMAP4430_CLKACTIVITY_PER_MCBSP4_GFCLK_MASK             (1 << 22)
 
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PER_SYS_GFCLK_SHIFT               24
 #define OMAP4430_CLKACTIVITY_PER_SYS_GFCLK_MASK                        (1 << 24)
 
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_PHY_ROOT_CLK_SHIFT                        10
 #define OMAP4430_CLKACTIVITY_PHY_ROOT_CLK_MASK                 (1 << 10)
 
 #define OMAP4430_CLKACTIVITY_TESLA_ROOT_CLK_SHIFT              8
 #define OMAP4430_CLKACTIVITY_TESLA_ROOT_CLK_MASK               (1 << 8)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_TLL_CH0_GFCLK_SHIFT               22
 #define OMAP4430_CLKACTIVITY_TLL_CH0_GFCLK_MASK                        (1 << 22)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_TLL_CH1_GFCLK_SHIFT               23
 #define OMAP4430_CLKACTIVITY_TLL_CH1_GFCLK_MASK                        (1 << 23)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_TLL_CH2_GFCLK_SHIFT               24
 #define OMAP4430_CLKACTIVITY_TLL_CH2_GFCLK_MASK                        (1 << 24)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_UNIPRO_DPLL_CLK_SHIFT             10
 #define OMAP4430_CLKACTIVITY_UNIPRO_DPLL_CLK_MASK              (1 << 10)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_USB_DPLL_CLK_SHIFT                        14
 #define OMAP4430_CLKACTIVITY_USB_DPLL_CLK_MASK                 (1 << 14)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_USB_DPLL_HS_CLK_SHIFT             15
 #define OMAP4430_CLKACTIVITY_USB_DPLL_HS_CLK_MASK              (1 << 15)
 
 #define OMAP4430_CLKACTIVITY_USIM_GFCLK_SHIFT                  10
 #define OMAP4430_CLKACTIVITY_USIM_GFCLK_MASK                   (1 << 10)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_UTMI_P3_GFCLK_SHIFT               30
 #define OMAP4430_CLKACTIVITY_UTMI_P3_GFCLK_MASK                        (1 << 30)
 
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
 #define OMAP4430_CLKACTIVITY_UTMI_ROOT_GFCLK_SHIFT             25
 #define OMAP4430_CLKACTIVITY_UTMI_ROOT_GFCLK_MASK              (1 << 25)
 
 
 /*
  * Renamed from CLKSEL Used by CM_ABE_DSS_SYS_CLKSEL, CM_ABE_PLL_REF_CLKSEL,
- * CM_L4_WKUP_CLKSEL, CM_CLKSEL_DUCATI_ISS_ROOT, CM_CLKSEL_USB_60MHZ
+ * CM_CLKSEL_DUCATI_ISS_ROOT, CM_CLKSEL_USB_60MHZ, CM_L4_WKUP_CLKSEL
  */
 #define OMAP4430_CLKSEL_0_0_SHIFT                              0
 #define OMAP4430_CLKSEL_0_0_MASK                               (1 << 0)
 #define OMAP4430_CLKSEL_AESS_FCLK_SHIFT                                24
 #define OMAP4430_CLKSEL_AESS_FCLK_MASK                         (1 << 24)
 
-/* Used by CM_CLKSEL_CORE, CM_CLKSEL_CORE_RESTORE */
+/* Used by CM_CLKSEL_CORE */
 #define OMAP4430_CLKSEL_CORE_SHIFT                             0
 #define OMAP4430_CLKSEL_CORE_MASK                              (1 << 0)
 
-/*
- * Renamed from CLKSEL_CORE Used by CM_SHADOW_FREQ_CONFIG2_RESTORE,
- * CM_SHADOW_FREQ_CONFIG2
- */
+/* Renamed from CLKSEL_CORE Used by CM_SHADOW_FREQ_CONFIG2 */
 #define OMAP4430_CLKSEL_CORE_1_1_SHIFT                         1
 #define OMAP4430_CLKSEL_CORE_1_1_MASK                          (1 << 1)
 
 #define OMAP4430_CLKSEL_INTERNAL_SOURCE_CM1_ABE_DMIC_SHIFT     26
 #define OMAP4430_CLKSEL_INTERNAL_SOURCE_CM1_ABE_DMIC_MASK      (0x3 << 26)
 
-/* Used by CM_CLKSEL_CORE, CM_CLKSEL_CORE_RESTORE */
+/* Used by CM_CLKSEL_CORE */
 #define OMAP4430_CLKSEL_L3_SHIFT                               4
 #define OMAP4430_CLKSEL_L3_MASK                                        (1 << 4)
 
-/*
- * Renamed from CLKSEL_L3 Used by CM_SHADOW_FREQ_CONFIG2_RESTORE,
- * CM_SHADOW_FREQ_CONFIG2
- */
+/* Renamed from CLKSEL_L3 Used by CM_SHADOW_FREQ_CONFIG2 */
 #define OMAP4430_CLKSEL_L3_SHADOW_SHIFT                                2
 #define OMAP4430_CLKSEL_L3_SHADOW_MASK                         (1 << 2)
 
-/* Used by CM_CLKSEL_CORE, CM_CLKSEL_CORE_RESTORE */
+/* Used by CM_CLKSEL_CORE */
 #define OMAP4430_CLKSEL_L4_SHIFT                               8
 #define OMAP4430_CLKSEL_L4_MASK                                        (1 << 8)
 
 #define OMAP4430_CLKSEL_SOURCE_24_24_SHIFT                     24
 #define OMAP4430_CLKSEL_SOURCE_24_24_MASK                      (1 << 24)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_CLKSEL_UTMI_P1_SHIFT                          24
 #define OMAP4430_CLKSEL_UTMI_P1_MASK                           (1 << 24)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_CLKSEL_UTMI_P2_SHIFT                          25
 #define OMAP4430_CLKSEL_UTMI_P2_MASK                           (1 << 25)
 
  * Used by CM1_ABE_CLKSTCTRL, CM_ALWON_CLKSTCTRL, CM_CAM_CLKSTCTRL,
  * CM_CEFUSE_CLKSTCTRL, CM_D2D_CLKSTCTRL, CM_DSS_CLKSTCTRL,
  * CM_DUCATI_CLKSTCTRL, CM_EMU_CLKSTCTRL, CM_GFX_CLKSTCTRL, CM_IVAHD_CLKSTCTRL,
- * CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE, CM_L3INSTR_CLKSTCTRL,
- * CM_L3_1_CLKSTCTRL, CM_L3_1_CLKSTCTRL_RESTORE, CM_L3_2_CLKSTCTRL,
- * CM_L3_2_CLKSTCTRL_RESTORE, CM_L4CFG_CLKSTCTRL, CM_L4CFG_CLKSTCTRL_RESTORE,
- * CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE, CM_L4SEC_CLKSTCTRL,
- * CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE, CM_MPU_CLKSTCTRL,
- * CM_MPU_CLKSTCTRL_RESTORE, CM_SDMA_CLKSTCTRL, CM_TESLA_CLKSTCTRL,
- * CM_WKUP_CLKSTCTRL
+ * CM_L3INIT_CLKSTCTRL, CM_L3INSTR_CLKSTCTRL, CM_L3_1_CLKSTCTRL,
+ * CM_L3_2_CLKSTCTRL, CM_L4CFG_CLKSTCTRL, CM_L4PER_CLKSTCTRL,
+ * CM_L4SEC_CLKSTCTRL, CM_MEMIF_CLKSTCTRL, CM_MPU_CLKSTCTRL, CM_SDMA_CLKSTCTRL,
+ * CM_TESLA_CLKSTCTRL, CM_WKUP_CLKSTCTRL
  */
 #define OMAP4430_CLKTRCTRL_SHIFT                               0
 #define OMAP4430_CLKTRCTRL_MASK                                        (0x3 << 0)
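
The *_SHIFT/*_MASK pairs kept by this patch follow the usual field-access pattern. As a minimal standalone sketch (not kernel code, and not the PRCM accessors the kernel actually uses), extracting and rewriting the 2-bit CLKTRCTRL field of a CLKSTCTRL register image looks like this; the register value below is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_CLKTRCTRL_SHIFT	0
#define SKETCH_CLKTRCTRL_MASK	(0x3 << 0)	/* same value as OMAP4430_CLKTRCTRL_MASK above */

/* Extract the CLKTRCTRL field from a register image. */
static uint32_t clktrctrl_get(uint32_t v)
{
	return (v & SKETCH_CLKTRCTRL_MASK) >> SKETCH_CLKTRCTRL_SHIFT;
}

/* Read-modify-write a new CLKTRCTRL value into a register image. */
static uint32_t clktrctrl_set(uint32_t v, uint32_t mode)
{
	v &= ~SKETCH_CLKTRCTRL_MASK;
	v |= (mode << SKETCH_CLKTRCTRL_SHIFT) & SKETCH_CLKTRCTRL_MASK;
	return v;
}

int main(void)
{
	uint32_t reg = 0x00000102;	/* made-up CLKSTCTRL contents for illustration */

	printf("CLKTRCTRL = %u\n", clktrctrl_get(reg));
	reg = clktrctrl_set(reg, 0x3);
	printf("CLKTRCTRL = %u\n", clktrctrl_get(reg));
	return 0;
}

Real code would read and write the CM_*_CLKSTCTRL registers through the OMAP PRCM helpers rather than operating on a bare value like this.
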
 #define OMAP4430_CUSTOM_SHIFT                                  6
 #define OMAP4430_CUSTOM_MASK                                   (0x3 << 6)
 
-/*
- * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE
- */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP */
 #define OMAP4430_D2D_DYNDEP_SHIFT                              18
 #define OMAP4430_D2D_DYNDEP_MASK                               (1 << 18)
 
 
 /*
  * Used by CM_SSC_DELTAMSTEP_DPLL_ABE, CM_SSC_DELTAMSTEP_DPLL_CORE,
- * CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE, CM_SSC_DELTAMSTEP_DPLL_DDRPHY,
- * CM_SSC_DELTAMSTEP_DPLL_IVA, CM_SSC_DELTAMSTEP_DPLL_MPU,
- * CM_SSC_DELTAMSTEP_DPLL_PER, CM_SSC_DELTAMSTEP_DPLL_UNIPRO,
- * CM_SSC_DELTAMSTEP_DPLL_USB
+ * CM_SSC_DELTAMSTEP_DPLL_DDRPHY, CM_SSC_DELTAMSTEP_DPLL_IVA,
+ * CM_SSC_DELTAMSTEP_DPLL_MPU, CM_SSC_DELTAMSTEP_DPLL_PER,
+ * CM_SSC_DELTAMSTEP_DPLL_UNIPRO, CM_SSC_DELTAMSTEP_DPLL_USB
  */
 #define OMAP4430_DELTAMSTEP_SHIFT                              0
 #define OMAP4430_DELTAMSTEP_MASK                               (0xfffff << 0)
 
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
-#define OMAP4430_DLL_OVERRIDE_SHIFT                            2
-#define OMAP4430_DLL_OVERRIDE_MASK                             (1 << 2)
+/* Used by CM_DLL_CTRL */
+#define OMAP4430_DLL_OVERRIDE_SHIFT                            0
+#define OMAP4430_DLL_OVERRIDE_MASK                             (1 << 0)
 
-/* Renamed from DLL_OVERRIDE Used by CM_DLL_CTRL */
-#define OMAP4430_DLL_OVERRIDE_0_0_SHIFT                                0
-#define OMAP4430_DLL_OVERRIDE_0_0_MASK                         (1 << 0)
+/* Renamed from DLL_OVERRIDE Used by CM_SHADOW_FREQ_CONFIG1 */
+#define OMAP4430_DLL_OVERRIDE_2_2_SHIFT                                2
+#define OMAP4430_DLL_OVERRIDE_2_2_MASK                         (1 << 2)
 
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG1 */
 #define OMAP4430_DLL_RESET_SHIFT                               3
 #define OMAP4430_DLL_RESET_MASK                                        (1 << 3)
 
 /*
- * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
- * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA,
- * CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO,
- * CM_CLKSEL_DPLL_USB
+ * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDRPHY,
+ * CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER,
+ * CM_CLKSEL_DPLL_UNIPRO, CM_CLKSEL_DPLL_USB
  */
 #define OMAP4430_DPLL_BYP_CLKSEL_SHIFT                         23
 #define OMAP4430_DPLL_BYP_CLKSEL_MASK                          (1 << 23)
 #define OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_SHIFT                        8
 #define OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_MASK                 (1 << 8)
 
-/* Used by CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_CORE_RESTORE */
+/* Used by CM_CLKSEL_DPLL_CORE */
 #define OMAP4430_DPLL_CLKOUTHIF_CLKSEL_SHIFT                   20
 #define OMAP4430_DPLL_CLKOUTHIF_CLKSEL_MASK                    (1 << 20)
 
-/*
- * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
- * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
- */
+/* Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE, CM_DIV_M3_DPLL_PER */
 #define OMAP4430_DPLL_CLKOUTHIF_DIV_SHIFT                      0
 #define OMAP4430_DPLL_CLKOUTHIF_DIV_MASK                       (0x1f << 0)
 
-/*
- * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
- * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
- */
+/* Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE, CM_DIV_M3_DPLL_PER */
 #define OMAP4430_DPLL_CLKOUTHIF_DIVCHACK_SHIFT                 5
 #define OMAP4430_DPLL_CLKOUTHIF_DIVCHACK_MASK                  (1 << 5)
 
-/*
- * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
- * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
- */
+/* Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE, CM_DIV_M3_DPLL_PER */
 #define OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT                        8
 #define OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_MASK                 (1 << 8)
 
 #define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK                  (1 << 10)
 
 /*
- * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
- * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
  */
 #define OMAP4430_DPLL_CLKOUT_DIV_SHIFT                         0
 #define OMAP4430_DPLL_CLKOUT_DIV_MASK                          (0x1f << 0)
 #define OMAP4430_DPLL_CLKOUT_DIV_0_6_MASK                      (0x7f << 0)
 
 /*
- * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
- * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
  */
 #define OMAP4430_DPLL_CLKOUT_DIVCHACK_SHIFT                    5
 #define OMAP4430_DPLL_CLKOUT_DIVCHACK_MASK                     (1 << 5)
 #define OMAP4430_DPLL_CLKOUT_DIVCHACK_M2_USB_MASK              (1 << 7)
 
 /*
- * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
- * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
  */
 #define OMAP4430_DPLL_CLKOUT_GATE_CTRL_SHIFT                   8
 #define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK                    (1 << 8)
 
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG1 */
 #define OMAP4430_DPLL_CORE_DPLL_EN_SHIFT                       8
 #define OMAP4430_DPLL_CORE_DPLL_EN_MASK                                (0x7 << 8)
 
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG1 */
 #define OMAP4430_DPLL_CORE_M2_DIV_SHIFT                                11
 #define OMAP4430_DPLL_CORE_M2_DIV_MASK                         (0x1f << 11)
 
-/* Used by CM_SHADOW_FREQ_CONFIG2, CM_SHADOW_FREQ_CONFIG2_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG2 */
 #define OMAP4430_DPLL_CORE_M5_DIV_SHIFT                                3
 #define OMAP4430_DPLL_CORE_M5_DIV_MASK                         (0x1f << 3)
 
 /*
- * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
- * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA,
- * CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO
+ * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDRPHY,
+ * CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER,
+ * CM_CLKSEL_DPLL_UNIPRO
  */
 #define OMAP4430_DPLL_DIV_SHIFT                                        0
 #define OMAP4430_DPLL_DIV_MASK                                 (0x7f << 0)
 #define OMAP4430_DPLL_DIV_0_7_MASK                             (0xff << 0)
 
 /*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
  */
 #define OMAP4430_DPLL_DRIFTGUARD_EN_SHIFT                      8
 #define OMAP4430_DPLL_DRIFTGUARD_EN_MASK                       (1 << 8)
 #define OMAP4430_DPLL_DRIFTGUARD_EN_3_3_MASK                   (1 << 3)
 
 /*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_USB
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB
  */
 #define OMAP4430_DPLL_EN_SHIFT                                 0
 #define OMAP4430_DPLL_EN_MASK                                  (0x7 << 0)
 
 /*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO
  */
 #define OMAP4430_DPLL_LPMODE_EN_SHIFT                          10
 #define OMAP4430_DPLL_LPMODE_EN_MASK                           (1 << 10)
 
 /*
- * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
- * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA,
- * CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO
+ * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDRPHY,
+ * CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER,
+ * CM_CLKSEL_DPLL_UNIPRO
  */
 #define OMAP4430_DPLL_MULT_SHIFT                               8
 #define OMAP4430_DPLL_MULT_MASK                                        (0x7ff << 8)
 #define OMAP4430_DPLL_MULT_USB_MASK                            (0xfff << 8)
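
For illustration only: the DPLL_MULT and DPLL_DIV fields above are what a CM_CLKSEL_DPLL_* value carries for the multiplier/divider pair. A small sketch of packing hypothetical M/N values with these masks follows; the helper name and values are invented, real code goes through the clock framework, and the exact per-DPLL frequency factors should be taken from the TI TRM rather than from this example:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_DPLL_DIV_SHIFT	0
#define SKETCH_DPLL_DIV_MASK	(0x7f << 0)	/* as OMAP4430_DPLL_DIV_MASK */
#define SKETCH_DPLL_MULT_SHIFT	8
#define SKETCH_DPLL_MULT_MASK	(0x7ff << 8)	/* as OMAP4430_DPLL_MULT_MASK */

/* Pack multiplier M and divider N into a CM_CLKSEL_DPLL-style image. */
static uint32_t dpll_clksel_pack(uint32_t v, uint32_t m, uint32_t n)
{
	v &= ~(SKETCH_DPLL_MULT_MASK | SKETCH_DPLL_DIV_MASK);
	v |= (m << SKETCH_DPLL_MULT_SHIFT) & SKETCH_DPLL_MULT_MASK;
	v |= (n << SKETCH_DPLL_DIV_SHIFT) & SKETCH_DPLL_DIV_MASK;
	return v;
}

int main(void)
{
	uint32_t clksel = dpll_clksel_pack(0, 500, 12);	/* hypothetical M/N pair */

	printf("CM_CLKSEL_DPLL image: 0x%08x\n", clksel);
	return 0;
}
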
 
 /*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO
  */
 #define OMAP4430_DPLL_REGM4XEN_SHIFT                           11
 #define OMAP4430_DPLL_REGM4XEN_MASK                            (1 << 11)
 #define OMAP4430_DPLL_SD_DIV_MASK                              (0xff << 24)
 
 /*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_USB
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB
  */
 #define OMAP4430_DPLL_SSC_ACK_SHIFT                            13
 #define OMAP4430_DPLL_SSC_ACK_MASK                             (1 << 13)
 
 /*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_USB
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB
  */
 #define OMAP4430_DPLL_SSC_DOWNSPREAD_SHIFT                     14
 #define OMAP4430_DPLL_SSC_DOWNSPREAD_MASK                      (1 << 14)
 
 /*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_USB
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB
  */
 #define OMAP4430_DPLL_SSC_EN_SHIFT                             12
 #define OMAP4430_DPLL_SSC_EN_MASK                              (1 << 12)
 
-/*
- * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP, CM_L4PER_DYNAMICDEP_RESTORE
- */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
 #define OMAP4430_DSS_DYNDEP_SHIFT                              8
 #define OMAP4430_DSS_DYNDEP_MASK                               (1 << 8)
 
-/*
- * Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE
- */
+/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP */
 #define OMAP4430_DSS_STATDEP_SHIFT                             8
 #define OMAP4430_DSS_STATDEP_MASK                              (1 << 8)
 
-/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE */
+/* Used by CM_L3_2_DYNAMICDEP */
 #define OMAP4430_DUCATI_DYNDEP_SHIFT                           0
 #define OMAP4430_DUCATI_DYNDEP_MASK                            (1 << 0)
 
-/* Used by CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE */
+/* Used by CM_MPU_STATICDEP, CM_SDMA_STATICDEP */
 #define OMAP4430_DUCATI_STATDEP_SHIFT                          0
 #define OMAP4430_DUCATI_STATDEP_MASK                           (1 << 0)
 
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG1 */
 #define OMAP4430_FREQ_UPDATE_SHIFT                             0
 #define OMAP4430_FREQ_UPDATE_MASK                              (1 << 0)
 
 #define OMAP4430_FUNC_SHIFT                                    16
 #define OMAP4430_FUNC_MASK                                     (0xfff << 16)
 
-/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE */
+/* Used by CM_L3_2_DYNAMICDEP */
 #define OMAP4430_GFX_DYNDEP_SHIFT                              10
 #define OMAP4430_GFX_DYNDEP_MASK                               (1 << 10)
 
 #define OMAP4430_GFX_STATDEP_SHIFT                             10
 #define OMAP4430_GFX_STATDEP_MASK                              (1 << 10)
 
-/* Used by CM_SHADOW_FREQ_CONFIG2, CM_SHADOW_FREQ_CONFIG2_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG2 */
 #define OMAP4430_GPMC_FREQ_UPDATE_SHIFT                                0
 #define OMAP4430_GPMC_FREQ_UPDATE_MASK                         (1 << 0)
 
 /*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
  */
 #define OMAP4430_HSDIVIDER_CLKOUT1_DIV_SHIFT                   0
 #define OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK                    (0x1f << 0)
 
 /*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
  */
 #define OMAP4430_HSDIVIDER_CLKOUT1_DIVCHACK_SHIFT              5
 #define OMAP4430_HSDIVIDER_CLKOUT1_DIVCHACK_MASK               (1 << 5)
 
 /*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
  */
 #define OMAP4430_HSDIVIDER_CLKOUT1_GATE_CTRL_SHIFT             8
 #define OMAP4430_HSDIVIDER_CLKOUT1_GATE_CTRL_MASK              (1 << 8)
 
 /*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
  */
 #define OMAP4430_HSDIVIDER_CLKOUT1_PWDN_SHIFT                  12
 #define OMAP4430_HSDIVIDER_CLKOUT1_PWDN_MASK                   (1 << 12)
 
 /*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
  */
 #define OMAP4430_HSDIVIDER_CLKOUT2_DIV_SHIFT                   0
 #define OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK                    (0x1f << 0)
 
 /*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
  */
 #define OMAP4430_HSDIVIDER_CLKOUT2_DIVCHACK_SHIFT              5
 #define OMAP4430_HSDIVIDER_CLKOUT2_DIVCHACK_MASK               (1 << 5)
 
 /*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
  */
 #define OMAP4430_HSDIVIDER_CLKOUT2_GATE_CTRL_SHIFT             8
 #define OMAP4430_HSDIVIDER_CLKOUT2_GATE_CTRL_MASK              (1 << 8)
 
 /*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
  */
 #define OMAP4430_HSDIVIDER_CLKOUT2_PWDN_SHIFT                  12
 #define OMAP4430_HSDIVIDER_CLKOUT2_PWDN_MASK                   (1 << 12)
 
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
 #define OMAP4430_HSDIVIDER_CLKOUT3_DIV_SHIFT                   0
 #define OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK                    (0x1f << 0)
 
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
 #define OMAP4430_HSDIVIDER_CLKOUT3_DIVCHACK_SHIFT              5
 #define OMAP4430_HSDIVIDER_CLKOUT3_DIVCHACK_MASK               (1 << 5)
 
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
 #define OMAP4430_HSDIVIDER_CLKOUT3_GATE_CTRL_SHIFT             8
 #define OMAP4430_HSDIVIDER_CLKOUT3_GATE_CTRL_MASK              (1 << 8)
 
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
 #define OMAP4430_HSDIVIDER_CLKOUT3_PWDN_SHIFT                  12
 #define OMAP4430_HSDIVIDER_CLKOUT3_PWDN_MASK                   (1 << 12)
 
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
 #define OMAP4430_HSDIVIDER_CLKOUT4_DIV_SHIFT                   0
 #define OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK                    (0x1f << 0)
 
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
 #define OMAP4430_HSDIVIDER_CLKOUT4_DIVCHACK_SHIFT              5
 #define OMAP4430_HSDIVIDER_CLKOUT4_DIVCHACK_MASK               (1 << 5)
 
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
 #define OMAP4430_HSDIVIDER_CLKOUT4_GATE_CTRL_SHIFT             8
 #define OMAP4430_HSDIVIDER_CLKOUT4_GATE_CTRL_MASK              (1 << 8)
 
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
 #define OMAP4430_HSDIVIDER_CLKOUT4_PWDN_SHIFT                  12
 #define OMAP4430_HSDIVIDER_CLKOUT4_PWDN_MASK                   (1 << 12)
 
  * CM1_ABE_TIMER8_CLKCTRL, CM1_ABE_WDT3_CLKCTRL, CM_ALWON_MDMINTC_CLKCTRL,
  * CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL, CM_ALWON_SR_MPU_CLKCTRL,
  * CM_CAM_FDIF_CLKCTRL, CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
- * CM_CM1_PROFILING_CLKCTRL, CM_CM1_PROFILING_CLKCTRL_RESTORE,
- * CM_CM2_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL_RESTORE,
+ * CM_CM1_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL,
  * CM_D2D_MODEM_ICR_CLKCTRL, CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL,
  * CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
  * CM_EMU_DEBUGSS_CLKCTRL, CM_GFX_GFX_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL,
  * CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
  * CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
  * CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_HOST_FS_CLKCTRL,
- * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL,
- * CM_L3INIT_USB_TLL_CLKCTRL_RESTORE, CM_L3INIT_XHPI_CLKCTRL,
- * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL_RESTORE,
- * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
- * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE,
+ * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
+ * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL,
+ * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL,
  * CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL, CM_L3_2_L3_2_CLKCTRL,
  * CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL, CM_L4CFG_L4_CFG_CLKCTRL,
  * CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL, CM_L4PER_ADC_CLKCTRL,
  * CM_L4PER_DMTIMER10_CLKCTRL, CM_L4PER_DMTIMER11_CLKCTRL,
  * CM_L4PER_DMTIMER2_CLKCTRL, CM_L4PER_DMTIMER3_CLKCTRL,
  * CM_L4PER_DMTIMER4_CLKCTRL, CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL,
- * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
- * CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL,
- * CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL,
- * CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL,
- * CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL,
- * CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL,
- * CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL,
- * CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
+ * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL,
+ * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_HDQ1W_CLKCTRL,
+ * CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL, CM_L4PER_I2C1_CLKCTRL,
+ * CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL, CM_L4PER_I2C4_CLKCTRL,
+ * CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL,
+ * CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL, CM_L4PER_MCSPI1_CLKCTRL,
+ * CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL, CM_L4PER_MCSPI4_CLKCTRL,
+ * CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL, CM_L4PER_MMCSD4_CLKCTRL,
+ * CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
  * CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL, CM_L4PER_UART2_CLKCTRL,
  * CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL, CM_L4SEC_AES1_CLKCTRL,
  * CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL, CM_L4SEC_DES3DES_CLKCTRL,
 #define OMAP4430_IDLEST_SHIFT                                  16
 #define OMAP4430_IDLEST_MASK                                   (0x3 << 16)
 
-/*
- * Used by CM_DUCATI_DYNAMICDEP, CM_L3_2_DYNAMICDEP,
- * CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE
- */
+/* Used by CM_DUCATI_DYNAMICDEP, CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP */
 #define OMAP4430_ISS_DYNDEP_SHIFT                              9
 #define OMAP4430_ISS_DYNDEP_MASK                               (1 << 9)
 
 /*
  * Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_TESLA_STATICDEP
  */
 #define OMAP4430_ISS_STATDEP_SHIFT                             9
 #define OMAP4430_ISS_STATDEP_MASK                              (1 << 9)
 
-/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_TESLA_DYNAMICDEP */
+/* Used by CM_L3_2_DYNAMICDEP, CM_TESLA_DYNAMICDEP */
 #define OMAP4430_IVAHD_DYNDEP_SHIFT                            2
 #define OMAP4430_IVAHD_DYNDEP_MASK                             (1 << 2)
 
 /*
- * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
- * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_DSS_STATICDEP,
+ * CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
  */
 #define OMAP4430_IVAHD_STATDEP_SHIFT                           2
 #define OMAP4430_IVAHD_STATDEP_MASK                            (1 << 2)
 
-/*
- * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP, CM_L4PER_DYNAMICDEP_RESTORE
- */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
 #define OMAP4430_L3INIT_DYNDEP_SHIFT                           7
 #define OMAP4430_L3INIT_DYNDEP_MASK                            (1 << 7)
 
 /*
- * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
- * CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE,
- * CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_MPU_STATICDEP,
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
  */
 #define OMAP4430_L3INIT_STATDEP_SHIFT                          7
 #define OMAP4430_L3INIT_STATDEP_MASK                           (1 << 7)
 
 /*
  * Used by CM_DSS_DYNAMICDEP, CM_L3INIT_DYNAMICDEP, CM_L3_2_DYNAMICDEP,
- * CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
+ * CM_L4CFG_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
  */
 #define OMAP4430_L3_1_DYNDEP_SHIFT                             5
 #define OMAP4430_L3_1_DYNDEP_MASK                              (1 << 5)
 
 /*
- * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
- * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_DSS_STATICDEP,
+ * CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
  * CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
  */
 #define OMAP4430_L3_1_STATDEP_SHIFT                            5
 #define OMAP4430_L3_1_STATDEP_MASK                             (1 << 5)
 
 /*
- * Used by CM_CAM_DYNAMICDEP, CM_D2D_DYNAMICDEP, CM_D2D_DYNAMICDEP_RESTORE,
- * CM_DUCATI_DYNAMICDEP, CM_EMU_DYNAMICDEP, CM_GFX_DYNAMICDEP,
- * CM_IVAHD_DYNAMICDEP, CM_L3INIT_DYNAMICDEP, CM_L3_1_DYNAMICDEP,
- * CM_L3_1_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4SEC_DYNAMICDEP, CM_SDMA_DYNAMICDEP
+ * Used by CM_CAM_DYNAMICDEP, CM_D2D_DYNAMICDEP, CM_DUCATI_DYNAMICDEP,
+ * CM_EMU_DYNAMICDEP, CM_GFX_DYNAMICDEP, CM_IVAHD_DYNAMICDEP,
+ * CM_L3INIT_DYNAMICDEP, CM_L3_1_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
+ * CM_L4SEC_DYNAMICDEP, CM_SDMA_DYNAMICDEP
  */
 #define OMAP4430_L3_2_DYNDEP_SHIFT                             6
 #define OMAP4430_L3_2_DYNDEP_MASK                              (1 << 6)
 
 /*
- * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
- * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_DSS_STATICDEP,
+ * CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
  * CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
  */
 #define OMAP4430_L3_2_STATDEP_SHIFT                            6
 #define OMAP4430_L3_2_STATDEP_MASK                             (1 << 6)
 
-/* Used by CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE */
+/* Used by CM_L3_1_DYNAMICDEP */
 #define OMAP4430_L4CFG_DYNDEP_SHIFT                            12
 #define OMAP4430_L4CFG_DYNDEP_MASK                             (1 << 12)
 
 /*
- * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
  */
 #define OMAP4430_L4CFG_STATDEP_SHIFT                           12
 #define OMAP4430_L4CFG_STATDEP_MASK                            (1 << 12)
 
-/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE */
+/* Used by CM_L3_2_DYNAMICDEP */
 #define OMAP4430_L4PER_DYNDEP_SHIFT                            13
 #define OMAP4430_L4PER_DYNDEP_MASK                             (1 << 13)
 
 /*
- * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_L4SEC_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
  */
 #define OMAP4430_L4PER_STATDEP_SHIFT                           13
 #define OMAP4430_L4PER_STATDEP_MASK                            (1 << 13)
 
-/*
- * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP,
- * CM_L4PER_DYNAMICDEP_RESTORE
- */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
 #define OMAP4430_L4SEC_DYNDEP_SHIFT                            14
 #define OMAP4430_L4SEC_DYNDEP_MASK                             (1 << 14)
 
 /*
  * Used by CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE
+ * CM_SDMA_STATICDEP
  */
 #define OMAP4430_L4SEC_STATDEP_SHIFT                           14
 #define OMAP4430_L4SEC_STATDEP_MASK                            (1 << 14)
 
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
 #define OMAP4430_L4WKUP_DYNDEP_SHIFT                           15
 #define OMAP4430_L4WKUP_DYNDEP_MASK                            (1 << 15)
 
 /*
  * Used by CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
  */
 #define OMAP4430_L4WKUP_STATDEP_SHIFT                          15
 #define OMAP4430_L4WKUP_STATDEP_MASK                           (1 << 15)
 
 /*
- * Used by CM_D2D_DYNAMICDEP, CM_D2D_DYNAMICDEP_RESTORE, CM_L3_1_DYNAMICDEP,
- * CM_L3_1_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP
+ * Used by CM_D2D_DYNAMICDEP, CM_L3_1_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
+ * CM_MPU_DYNAMICDEP
  */
 #define OMAP4430_MEMIF_DYNDEP_SHIFT                            4
 #define OMAP4430_MEMIF_DYNDEP_MASK                             (1 << 4)
 
 /*
- * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
- * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_DSS_STATICDEP,
+ * CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
  * CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
  */
 #define OMAP4430_MEMIF_STATDEP_SHIFT                           4
 #define OMAP4430_MEMIF_STATDEP_MASK                            (1 << 4)
 
 /*
  * Used by CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
- * CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE, CM_SSC_MODFREQDIV_DPLL_DDRPHY,
- * CM_SSC_MODFREQDIV_DPLL_IVA, CM_SSC_MODFREQDIV_DPLL_MPU,
- * CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
- * CM_SSC_MODFREQDIV_DPLL_USB
+ * CM_SSC_MODFREQDIV_DPLL_DDRPHY, CM_SSC_MODFREQDIV_DPLL_IVA,
+ * CM_SSC_MODFREQDIV_DPLL_MPU, CM_SSC_MODFREQDIV_DPLL_PER,
+ * CM_SSC_MODFREQDIV_DPLL_UNIPRO, CM_SSC_MODFREQDIV_DPLL_USB
  */
 #define OMAP4430_MODFREQDIV_EXPONENT_SHIFT                     8
 #define OMAP4430_MODFREQDIV_EXPONENT_MASK                      (0x7 << 8)
 
 /*
  * Used by CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
- * CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE, CM_SSC_MODFREQDIV_DPLL_DDRPHY,
- * CM_SSC_MODFREQDIV_DPLL_IVA, CM_SSC_MODFREQDIV_DPLL_MPU,
- * CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
- * CM_SSC_MODFREQDIV_DPLL_USB
+ * CM_SSC_MODFREQDIV_DPLL_DDRPHY, CM_SSC_MODFREQDIV_DPLL_IVA,
+ * CM_SSC_MODFREQDIV_DPLL_MPU, CM_SSC_MODFREQDIV_DPLL_PER,
+ * CM_SSC_MODFREQDIV_DPLL_UNIPRO, CM_SSC_MODFREQDIV_DPLL_USB
  */
 #define OMAP4430_MODFREQDIV_MANTISSA_SHIFT                     0
 #define OMAP4430_MODFREQDIV_MANTISSA_MASK                      (0x7f << 0)
  * CM1_ABE_TIMER8_CLKCTRL, CM1_ABE_WDT3_CLKCTRL, CM_ALWON_MDMINTC_CLKCTRL,
  * CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL, CM_ALWON_SR_MPU_CLKCTRL,
  * CM_CAM_FDIF_CLKCTRL, CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
- * CM_CM1_PROFILING_CLKCTRL, CM_CM1_PROFILING_CLKCTRL_RESTORE,
- * CM_CM2_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL_RESTORE,
+ * CM_CM1_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL,
  * CM_D2D_MODEM_ICR_CLKCTRL, CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL,
  * CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
  * CM_EMU_DEBUGSS_CLKCTRL, CM_GFX_GFX_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL,
  * CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
  * CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
  * CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_HOST_FS_CLKCTRL,
- * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL,
- * CM_L3INIT_USB_TLL_CLKCTRL_RESTORE, CM_L3INIT_XHPI_CLKCTRL,
- * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL_RESTORE,
- * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
- * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE,
+ * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
+ * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL,
+ * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL,
  * CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL, CM_L3_2_L3_2_CLKCTRL,
  * CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL, CM_L4CFG_L4_CFG_CLKCTRL,
  * CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL, CM_L4PER_ADC_CLKCTRL,
  * CM_L4PER_DMTIMER10_CLKCTRL, CM_L4PER_DMTIMER11_CLKCTRL,
  * CM_L4PER_DMTIMER2_CLKCTRL, CM_L4PER_DMTIMER3_CLKCTRL,
  * CM_L4PER_DMTIMER4_CLKCTRL, CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL,
- * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
- * CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL,
- * CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL,
- * CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL,
- * CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL,
- * CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL,
- * CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL,
- * CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
+ * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL,
+ * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_HDQ1W_CLKCTRL,
+ * CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL, CM_L4PER_I2C1_CLKCTRL,
+ * CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL, CM_L4PER_I2C4_CLKCTRL,
+ * CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL,
+ * CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL, CM_L4PER_MCSPI1_CLKCTRL,
+ * CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL, CM_L4PER_MCSPI4_CLKCTRL,
+ * CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL, CM_L4PER_MMCSD4_CLKCTRL,
+ * CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
  * CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL, CM_L4PER_UART2_CLKCTRL,
  * CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL, CM_L4SEC_AES1_CLKCTRL,
  * CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL, CM_L4SEC_DES3DES_CLKCTRL,
 #define OMAP4430_OPTFCLKEN_CTRLCLK_MASK                                (1 << 8)
 
 /*
- * Used by CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL_RESTORE, CM_WKUP_GPIO1_CLKCTRL
+ * Used by CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL,
+ * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL,
+ * CM_WKUP_GPIO1_CLKCTRL
  */
 #define OMAP4430_OPTFCLKEN_DBCLK_SHIFT                         8
 #define OMAP4430_OPTFCLKEN_DBCLK_MASK                          (1 << 8)
 #define OMAP4430_OPTFCLKEN_FCLK2_SHIFT                         10
 #define OMAP4430_OPTFCLKEN_FCLK2_MASK                          (1 << 10)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_FUNC48MCLK_SHIFT                    15
 #define OMAP4430_OPTFCLKEN_FUNC48MCLK_MASK                     (1 << 15)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_SHIFT               13
 #define OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_MASK                        (1 << 13)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_SHIFT               14
 #define OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_MASK                        (1 << 14)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT                        11
 #define OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_MASK                 (1 << 11)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT                        12
 #define OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_MASK                 (1 << 12)
 
 #define OMAP4430_OPTFCLKEN_TXPHYCLK_SHIFT                      8
 #define OMAP4430_OPTFCLKEN_TXPHYCLK_MASK                       (1 << 8)
 
-/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_TLL_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_USB_CH0_CLK_SHIFT                   8
 #define OMAP4430_OPTFCLKEN_USB_CH0_CLK_MASK                    (1 << 8)
 
-/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_TLL_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_USB_CH1_CLK_SHIFT                   9
 #define OMAP4430_OPTFCLKEN_USB_CH1_CLK_MASK                    (1 << 9)
 
-/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_TLL_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_USB_CH2_CLK_SHIFT                   10
 #define OMAP4430_OPTFCLKEN_USB_CH2_CLK_MASK                    (1 << 10)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_UTMI_P1_CLK_SHIFT                   8
 #define OMAP4430_OPTFCLKEN_UTMI_P1_CLK_MASK                    (1 << 8)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_UTMI_P2_CLK_SHIFT                   9
 #define OMAP4430_OPTFCLKEN_UTMI_P2_CLK_MASK                    (1 << 9)
 
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
 #define OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT                   10
 #define OMAP4430_OPTFCLKEN_UTMI_P3_CLK_MASK                    (1 << 10)
 
 #define OMAP4430_PMD_TRACE_MUX_CTRL_SHIFT                      22
 #define OMAP4430_PMD_TRACE_MUX_CTRL_MASK                       (0x3 << 22)
 
-/* Used by CM_DYN_DEP_PRESCAL, CM_DYN_DEP_PRESCAL_RESTORE */
+/* Used by CM_DYN_DEP_PRESCAL */
 #define OMAP4430_PRESCAL_SHIFT                                 0
 #define OMAP4430_PRESCAL_MASK                                  (0x3f << 0)
 
 #define OMAP4430_R_RTL_SHIFT                                   11
 #define OMAP4430_R_RTL_MASK                                    (0x1f << 11)
 
-/*
- * Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE,
- * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE
- */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL */
 #define OMAP4430_SAR_MODE_SHIFT                                        4
 #define OMAP4430_SAR_MODE_MASK                                 (1 << 4)
 
 #define OMAP4430_SCHEME_SHIFT                                  30
 #define OMAP4430_SCHEME_MASK                                   (0x3 << 30)
 
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
 #define OMAP4430_SDMA_DYNDEP_SHIFT                             11
 #define OMAP4430_SDMA_DYNDEP_MASK                              (1 << 11)
 
  * CM_L3INIT_HSI_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL,
  * CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
  * CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE,
- * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
- * CM_L3INIT_XHPI_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL, CM_MPU_MPU_CLKCTRL,
- * CM_SDMA_SDMA_CLKCTRL, CM_TESLA_TESLA_CLKCTRL
+ * CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_FS_CLKCTRL,
+ * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL,
+ * CM_L4SEC_CRYPTODMA_CLKCTRL, CM_MPU_MPU_CLKCTRL, CM_SDMA_SDMA_CLKCTRL,
+ * CM_TESLA_TESLA_CLKCTRL
  */
 #define OMAP4430_STBYST_SHIFT                                  18
 #define OMAP4430_STBYST_MASK                                   (1 << 18)
 #define OMAP4430_ST_DPLL_CLKDCOLDO_MASK                                (1 << 9)
 
 /*
- * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
- * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
  */
 #define OMAP4430_ST_DPLL_CLKOUT_SHIFT                          9
 #define OMAP4430_ST_DPLL_CLKOUT_MASK                           (1 << 9)
 
-/*
- * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
- * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
- */
+/* Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE, CM_DIV_M3_DPLL_PER */
 #define OMAP4430_ST_DPLL_CLKOUTHIF_SHIFT                       9
 #define OMAP4430_ST_DPLL_CLKOUTHIF_MASK                                (1 << 9)
 
 #define OMAP4430_ST_DPLL_CLKOUTX2_MASK                         (1 << 11)
 
 /*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
  */
 #define OMAP4430_ST_HSDIVIDER_CLKOUT1_SHIFT                    9
 #define OMAP4430_ST_HSDIVIDER_CLKOUT1_MASK                     (1 << 9)
 
 /*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
  */
 #define OMAP4430_ST_HSDIVIDER_CLKOUT2_SHIFT                    9
 #define OMAP4430_ST_HSDIVIDER_CLKOUT2_MASK                     (1 << 9)
 
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
 #define OMAP4430_ST_HSDIVIDER_CLKOUT3_SHIFT                    9
 #define OMAP4430_ST_HSDIVIDER_CLKOUT3_MASK                     (1 << 9)
 
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
 #define OMAP4430_ST_HSDIVIDER_CLKOUT4_SHIFT                    9
 #define OMAP4430_ST_HSDIVIDER_CLKOUT4_MASK                     (1 << 9)
 
 #define OMAP4430_SYS_CLKSEL_SHIFT                              0
 #define OMAP4430_SYS_CLKSEL_MASK                               (0x7 << 0)
 
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
 #define OMAP4430_TESLA_DYNDEP_SHIFT                            1
 #define OMAP4430_TESLA_DYNDEP_MASK                             (1 << 1)
 
 #define OMAP4430_TESLA_STATDEP_MASK                            (1 << 1)
 
 /*
- * Used by CM_D2D_DYNAMICDEP, CM_D2D_DYNAMICDEP_RESTORE, CM_DUCATI_DYNAMICDEP,
- * CM_EMU_DYNAMICDEP, CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE,
- * CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP,
- * CM_L4PER_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
+ * Used by CM_D2D_DYNAMICDEP, CM_DUCATI_DYNAMICDEP, CM_EMU_DYNAMICDEP,
+ * CM_L3_1_DYNAMICDEP, CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
+ * CM_L4PER_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
  */
 #define OMAP4430_WINDOWSIZE_SHIFT                              24
 #define OMAP4430_WINDOWSIZE_MASK                               (0xf << 24)
diff --git a/arch/arm/mach-omap2/cm1_44xx.h b/arch/arm/mach-omap2/cm1_44xx.h
index e2d7a56..1bc00dc 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * OMAP44xx CM1 instance offset macros
  *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
  * Copyright (C) 2009-2010 Nokia Corporation
  *
  * Paul Walmsley (paul@pwsan.com)
@@ -41,9 +41,9 @@
 #define OMAP4430_CM1_INSTR_INST                0x0f00
 
 /* CM1 clockdomain register offsets (from instance start) */
-#define OMAP4430_CM1_ABE_ABE_CDOFFS            0x0000
-#define OMAP4430_CM1_MPU_MPU_CDOFFS            0x0000
-#define OMAP4430_CM1_TESLA_TESLA_CDOFFS                0x0000
+#define OMAP4430_CM1_MPU_MPU_CDOFFS    0x0000
+#define OMAP4430_CM1_TESLA_TESLA_CDOFFS        0x0000
+#define OMAP4430_CM1_ABE_ABE_CDOFFS    0x0000
 
 /* CM1 */
 
@@ -82,8 +82,8 @@
 #define OMAP4430_CM_DIV_M7_DPLL_CORE                   OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0044)
 #define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET       0x0048
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0048)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_CORE_OFFSET      0x004c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_CORE          OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x004c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_OFFSET       0x004c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x004c)
 #define OMAP4_CM_EMU_OVERRIDE_DPLL_CORE_OFFSET         0x0050
 #define OMAP4430_CM_EMU_OVERRIDE_DPLL_CORE             OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0050)
 #define OMAP4_CM_CLKMODE_DPLL_MPU_OFFSET               0x0060
@@ -98,8 +98,8 @@
 #define OMAP4430_CM_DIV_M2_DPLL_MPU                    OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0070)
 #define OMAP4_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET                0x0088
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_MPU            OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0088)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_MPU_OFFSET               0x008c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_MPU           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x008c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_MPU_OFFSET                0x008c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_MPU            OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x008c)
 #define OMAP4_CM_BYPCLK_DPLL_MPU_OFFSET                        0x009c
 #define OMAP4430_CM_BYPCLK_DPLL_MPU                    OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x009c)
 #define OMAP4_CM_CLKMODE_DPLL_IVA_OFFSET               0x00a0
 #define OMAP4430_CM_DIV_M5_DPLL_IVA                    OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00bc)
 #define OMAP4_CM_SSC_DELTAMSTEP_DPLL_IVA_OFFSET                0x00c8
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_IVA            OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00c8)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_IVA_OFFSET               0x00cc
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_IVA           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00cc)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_IVA_OFFSET                0x00cc
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_IVA            OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00cc)
 #define OMAP4_CM_BYPCLK_DPLL_IVA_OFFSET                        0x00dc
 #define OMAP4430_CM_BYPCLK_DPLL_IVA                    OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00dc)
 #define OMAP4_CM_CLKMODE_DPLL_ABE_OFFSET               0x00e0
 #define OMAP4430_CM_DIV_M3_DPLL_ABE                    OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00f4)
 #define OMAP4_CM_SSC_DELTAMSTEP_DPLL_ABE_OFFSET                0x0108
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_ABE            OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0108)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_ABE_OFFSET               0x010c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_ABE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x010c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_ABE_OFFSET                0x010c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_ABE            OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x010c)
 #define OMAP4_CM_CLKMODE_DPLL_DDRPHY_OFFSET            0x0120
 #define OMAP4430_CM_CLKMODE_DPLL_DDRPHY                        OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0120)
 #define OMAP4_CM_IDLEST_DPLL_DDRPHY_OFFSET             0x0124
 #define OMAP4430_CM_DIV_M6_DPLL_DDRPHY                 OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0140)
 #define OMAP4_CM_SSC_DELTAMSTEP_DPLL_DDRPHY_OFFSET     0x0148
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_DDRPHY         OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0148)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_DDRPHY_OFFSET    0x014c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_DDRPHY                OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x014c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_DDRPHY_OFFSET     0x014c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_DDRPHY         OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x014c)
 #define OMAP4_CM_SHADOW_FREQ_CONFIG1_OFFSET            0x0160
 #define OMAP4430_CM_SHADOW_FREQ_CONFIG1                        OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0160)
 #define OMAP4_CM_SHADOW_FREQ_CONFIG2_OFFSET            0x0164
 #define OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET              0x0088
 #define OMAP4430_CM1_ABE_WDT3_CLKCTRL                  OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0088)
 
-/* CM1.RESTORE_CM1 register offsets */
-#define OMAP4_CM_CLKSEL_CORE_RESTORE_OFFSET            0x0000
-#define OMAP4430_CM_CLKSEL_CORE_RESTORE                        OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0000)
-#define OMAP4_CM_DIV_M2_DPLL_CORE_RESTORE_OFFSET       0x0004
-#define OMAP4430_CM_DIV_M2_DPLL_CORE_RESTORE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0004)
-#define OMAP4_CM_DIV_M3_DPLL_CORE_RESTORE_OFFSET       0x0008
-#define OMAP4430_CM_DIV_M3_DPLL_CORE_RESTORE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0008)
-#define OMAP4_CM_DIV_M4_DPLL_CORE_RESTORE_OFFSET       0x000c
-#define OMAP4430_CM_DIV_M4_DPLL_CORE_RESTORE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x000c)
-#define OMAP4_CM_DIV_M5_DPLL_CORE_RESTORE_OFFSET       0x0010
-#define OMAP4430_CM_DIV_M5_DPLL_CORE_RESTORE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0010)
-#define OMAP4_CM_DIV_M6_DPLL_CORE_RESTORE_OFFSET       0x0014
-#define OMAP4430_CM_DIV_M6_DPLL_CORE_RESTORE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0014)
-#define OMAP4_CM_DIV_M7_DPLL_CORE_RESTORE_OFFSET       0x0018
-#define OMAP4430_CM_DIV_M7_DPLL_CORE_RESTORE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0018)
-#define OMAP4_CM_CLKSEL_DPLL_CORE_RESTORE_OFFSET       0x001c
-#define OMAP4430_CM_CLKSEL_DPLL_CORE_RESTORE           OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x001c)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE_OFFSET       0x0020
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE   OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0020)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_CORE_RESTORE_OFFSET      0x0024
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_CORE_RESTORE  OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0024)
-#define OMAP4_CM_CLKMODE_DPLL_CORE_RESTORE_OFFSET      0x0028
-#define OMAP4430_CM_CLKMODE_DPLL_CORE_RESTORE          OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0028)
-#define OMAP4_CM_SHADOW_FREQ_CONFIG2_RESTORE_OFFSET    0x002c
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG2_RESTORE                OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x002c)
-#define OMAP4_CM_SHADOW_FREQ_CONFIG1_RESTORE_OFFSET    0x0030
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG1_RESTORE                OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0030)
-#define OMAP4_CM_AUTOIDLE_DPLL_CORE_RESTORE_OFFSET     0x0034
-#define OMAP4430_CM_AUTOIDLE_DPLL_CORE_RESTORE         OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0034)
-#define OMAP4_CM_MPU_CLKSTCTRL_RESTORE_OFFSET          0x0038
-#define OMAP4430_CM_MPU_CLKSTCTRL_RESTORE              OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0038)
-#define OMAP4_CM_CM1_PROFILING_CLKCTRL_RESTORE_OFFSET  0x003c
-#define OMAP4430_CM_CM1_PROFILING_CLKCTRL_RESTORE      OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x003c)
-#define OMAP4_CM_DYN_DEP_PRESCAL_RESTORE_OFFSET                0x0040
-#define OMAP4430_CM_DYN_DEP_PRESCAL_RESTORE            OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0040)
-
 /* Function prototypes */
 extern u32 omap4_cm1_read_inst_reg(s16 inst, u16 idx);
 extern void omap4_cm1_write_inst_reg(u32 val, s16 inst, u16 idx);
diff --git a/arch/arm/mach-omap2/cm2_44xx.h b/arch/arm/mach-omap2/cm2_44xx.h
index aa47450..b9de72d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * OMAP44xx CM2 instance offset macros
  *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
  * Copyright (C) 2009-2010 Nokia Corporation
  *
  * Paul Walmsley (paul@pwsan.com)
@@ -40,9 +40,9 @@
 #define OMAP4430_CM2_CAM_INST          0x1000
 #define OMAP4430_CM2_DSS_INST          0x1100
 #define OMAP4430_CM2_GFX_INST          0x1200
-#define OMAP4430_CM2_L3INIT_INST               0x1300
+#define OMAP4430_CM2_L3INIT_INST       0x1300
 #define OMAP4430_CM2_L4PER_INST                0x1400
-#define OMAP4430_CM2_CEFUSE_INST               0x1600
+#define OMAP4430_CM2_CEFUSE_INST       0x1600
 #define OMAP4430_CM2_RESTORE_INST      0x1e00
 #define OMAP4430_CM2_INSTR_INST                0x1f00
 
@@ -65,7 +65,6 @@
 #define OMAP4430_CM2_L4PER_L4SEC_CDOFFS                0x0180
 #define OMAP4430_CM2_CEFUSE_CEFUSE_CDOFFS      0x0000
 
-
 /* CM2 */
 
 /* CM2.OCP_SOCKET_CM2 register offsets */
 #define OMAP4430_CM_DIV_M7_DPLL_PER                    OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0064)
 #define OMAP4_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET                0x0068
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_PER            OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0068)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_PER_OFFSET               0x006c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_PER           OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x006c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET                0x006c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_PER            OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x006c)
 #define OMAP4_CM_CLKMODE_DPLL_USB_OFFSET               0x0080
 #define OMAP4430_CM_CLKMODE_DPLL_USB                   OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0080)
 #define OMAP4_CM_IDLEST_DPLL_USB_OFFSET                        0x0084
 #define OMAP4430_CM_DIV_M2_DPLL_USB                    OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0090)
 #define OMAP4_CM_SSC_DELTAMSTEP_DPLL_USB_OFFSET                0x00a8
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_USB            OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00a8)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_USB_OFFSET               0x00ac
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_USB           OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ac)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_USB_OFFSET                0x00ac
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_USB            OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ac)
 #define OMAP4_CM_CLKDCOLDO_DPLL_USB_OFFSET             0x00b4
 #define OMAP4430_CM_CLKDCOLDO_DPLL_USB                 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00b4)
 #define OMAP4_CM_CLKMODE_DPLL_UNIPRO_OFFSET            0x00c0
 #define OMAP4430_CM_DIV_M2_DPLL_UNIPRO                 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00d0)
 #define OMAP4_CM_SSC_DELTAMSTEP_DPLL_UNIPRO_OFFSET     0x00e8
 #define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_UNIPRO         OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00e8)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_UNIPRO_OFFSET    0x00ec
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_UNIPRO                OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ec)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_UNIPRO_OFFSET     0x00ec
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_UNIPRO         OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ec)
 
 /* CM2.ALWAYS_ON_CM2 register offsets */
 #define OMAP4_CM_ALWON_CLKSTCTRL_OFFSET                        0x0000
 #define OMAP4430_CM_D2D_DYNAMICDEP                     OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0508)
 #define OMAP4_CM_D2D_SAD2D_CLKCTRL_OFFSET              0x0520
 #define OMAP4430_CM_D2D_SAD2D_CLKCTRL                  OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0520)
-#define OMAP4_CM_D2D_INSTEM_ICR_CLKCTRL_OFFSET         0x0528
-#define OMAP4430_CM_D2D_INSTEM_ICR_CLKCTRL             OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0528)
+#define OMAP4_CM_D2D_MODEM_ICR_CLKCTRL_OFFSET          0x0528
+#define OMAP4430_CM_D2D_MODEM_ICR_CLKCTRL              OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0528)
 #define OMAP4_CM_D2D_SAD2D_FW_CLKCTRL_OFFSET           0x0530
 #define OMAP4430_CM_D2D_SAD2D_FW_CLKCTRL               OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0530)
 #define OMAP4_CM_L4CFG_CLKSTCTRL_OFFSET                        0x0600
 #define OMAP4_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET          0x0020
 #define OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL              OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_INST, 0x0020)
 
-/* CM2.RESTORE_CM2 register offsets */
-#define OMAP4_CM_L3_1_CLKSTCTRL_RESTORE_OFFSET         0x0000
-#define OMAP4430_CM_L3_1_CLKSTCTRL_RESTORE             OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0000)
-#define OMAP4_CM_L3_2_CLKSTCTRL_RESTORE_OFFSET         0x0004
-#define OMAP4430_CM_L3_2_CLKSTCTRL_RESTORE             OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0004)
-#define OMAP4_CM_L4CFG_CLKSTCTRL_RESTORE_OFFSET                0x0008
-#define OMAP4430_CM_L4CFG_CLKSTCTRL_RESTORE            OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0008)
-#define OMAP4_CM_MEMIF_CLKSTCTRL_RESTORE_OFFSET                0x000c
-#define OMAP4430_CM_MEMIF_CLKSTCTRL_RESTORE            OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x000c)
-#define OMAP4_CM_L4PER_CLKSTCTRL_RESTORE_OFFSET                0x0010
-#define OMAP4430_CM_L4PER_CLKSTCTRL_RESTORE            OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0010)
-#define OMAP4_CM_L3INIT_CLKSTCTRL_RESTORE_OFFSET       0x0014
-#define OMAP4430_CM_L3INIT_CLKSTCTRL_RESTORE           OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0014)
-#define OMAP4_CM_L3INSTR_L3_3_CLKCTRL_RESTORE_OFFSET   0x0018
-#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL_RESTORE       OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0018)
-#define OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE_OFFSET       0x001c
-#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE   OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x001c)
-#define OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE_OFFSET        0x0020
-#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE    OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0020)
-#define OMAP4_CM_CM2_PROFILING_CLKCTRL_RESTORE_OFFSET  0x0024
-#define OMAP4430_CM_CM2_PROFILING_CLKCTRL_RESTORE      OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0024)
-#define OMAP4_CM_D2D_STATICDEP_RESTORE_OFFSET          0x0028
-#define OMAP4430_CM_D2D_STATICDEP_RESTORE              OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0028)
-#define OMAP4_CM_L3_1_DYNAMICDEP_RESTORE_OFFSET                0x002c
-#define OMAP4430_CM_L3_1_DYNAMICDEP_RESTORE            OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x002c)
-#define OMAP4_CM_L3_2_DYNAMICDEP_RESTORE_OFFSET                0x0030
-#define OMAP4430_CM_L3_2_DYNAMICDEP_RESTORE            OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0030)
-#define OMAP4_CM_D2D_DYNAMICDEP_RESTORE_OFFSET         0x0034
-#define OMAP4430_CM_D2D_DYNAMICDEP_RESTORE             OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0034)
-#define OMAP4_CM_L4CFG_DYNAMICDEP_RESTORE_OFFSET       0x0038
-#define OMAP4430_CM_L4CFG_DYNAMICDEP_RESTORE           OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0038)
-#define OMAP4_CM_L4PER_DYNAMICDEP_RESTORE_OFFSET       0x003c
-#define OMAP4430_CM_L4PER_DYNAMICDEP_RESTORE           OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x003c)
-#define OMAP4_CM_L4PER_GPIO2_CLKCTRL_RESTORE_OFFSET    0x0040
-#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL_RESTORE                OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0040)
-#define OMAP4_CM_L4PER_GPIO3_CLKCTRL_RESTORE_OFFSET    0x0044
-#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL_RESTORE                OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0044)
-#define OMAP4_CM_L4PER_GPIO4_CLKCTRL_RESTORE_OFFSET    0x0048
-#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL_RESTORE                OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0048)
-#define OMAP4_CM_L4PER_GPIO5_CLKCTRL_RESTORE_OFFSET    0x004c
-#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL_RESTORE                OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x004c)
-#define OMAP4_CM_L4PER_GPIO6_CLKCTRL_RESTORE_OFFSET    0x0050
-#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL_RESTORE                OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0050)
-#define OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE_OFFSET        0x0054
-#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE    OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0054)
-#define OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE_OFFSET 0x0058
-#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE     OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0058)
-#define OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET         0x005c
-#define OMAP4430_CM_SDMA_STATICDEP_RESTORE             OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x005c)
-
 /* Function prototypes */
 extern u32 omap4_cm2_read_inst_reg(s16 inst, u16 idx);
 extern void omap4_cm2_write_inst_reg(u32 val, s16 inst, u16 idx);
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 94ccf46..bcb0c58 100644 (file)
  *
  */
 
-#include <linux/i2c.h>
-#include <linux/i2c/twl.h>
-
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 
-#include <plat/i2c.h>
 #include <plat/mcspi.h>
 #include <plat/nand.h>
 
 #include "common-board-devices.h"
 
-static struct i2c_board_info __initdata pmic_i2c_board_info = {
-       .addr           = 0x48,
-       .flags          = I2C_CLIENT_WAKE,
-};
-
-void __init omap_pmic_init(int bus, u32 clkrate,
-                          const char *pmic_type, int pmic_irq,
-                          struct twl4030_platform_data *pmic_data)
-{
-       strncpy(pmic_i2c_board_info.type, pmic_type,
-               sizeof(pmic_i2c_board_info.type));
-       pmic_i2c_board_info.irq = pmic_irq;
-       pmic_i2c_board_info.platform_data = pmic_data;
-
-       omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
-}
-
 #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
        defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
 static struct omap2_mcspi_device_config ads7846_mcspi_config = {
@@ -115,9 +94,7 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
 #endif
 
 #if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
-static struct omap_nand_platform_data nand_data = {
-       .dma_channel    = -1,           /* disable DMA in OMAP NAND driver */
-};
+static struct omap_nand_platform_data nand_data;
 
 void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
                                 int nr_parts)
@@ -148,7 +125,7 @@ void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
                nand_data.cs = nandcs;
                nand_data.parts = parts;
                nand_data.nr_parts = nr_parts;
-               nand_data.options = options;
+               nand_data.devsize = options;
 
                printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
                if (gpmc_nand_init(&nand_data) < 0)
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index 6797190..a0b4a42 100644 (file)
@@ -1,33 +1,11 @@
 #ifndef __OMAP_COMMON_BOARD_DEVICES__
 #define __OMAP_COMMON_BOARD_DEVICES__
 
+#include "twl-common.h"
+
 #define NAND_BLOCK_SIZE        SZ_128K
 
-struct twl4030_platform_data;
 struct mtd_partition;
-
-void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
-                   struct twl4030_platform_data *pmic_data);
-
-static inline void omap2_pmic_init(const char *pmic_type,
-                                  struct twl4030_platform_data *pmic_data)
-{
-       omap_pmic_init(2, 2600, pmic_type, INT_24XX_SYS_NIRQ, pmic_data);
-}
-
-static inline void omap3_pmic_init(const char *pmic_type,
-                                  struct twl4030_platform_data *pmic_data)
-{
-       omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data);
-}
-
-static inline void omap4_pmic_init(const char *pmic_type,
-                                  struct twl4030_platform_data *pmic_data)
-{
-       /* Phoenix Audio IC needs I2C1 to start with 400 KHz or less */
-       omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data);
-}
-
 struct ads7846_platform_data;
 
 void omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index c1791d0..8ad210b 100644 (file)
@@ -20,8 +20,6 @@
 #include <plat/board.h>
 #include <plat/gpmc.h>
 
-static struct omap_nand_platform_data *gpmc_nand_data;
-
 static struct resource gpmc_nand_resource = {
        .flags          = IORESOURCE_MEM,
 };
@@ -33,7 +31,7 @@ static struct platform_device gpmc_nand_device = {
        .resource       = &gpmc_nand_resource,
 };
 
-static int omap2_nand_gpmc_retime(void)
+static int omap2_nand_gpmc_retime(struct omap_nand_platform_data *gpmc_nand_data)
 {
        struct gpmc_timings t;
        int err;
@@ -83,13 +81,11 @@ static int omap2_nand_gpmc_retime(void)
        return 0;
 }
 
-int __init gpmc_nand_init(struct omap_nand_platform_data *_nand_data)
+int __init gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data)
 {
        int err = 0;
        struct device *dev = &gpmc_nand_device.dev;
 
-       gpmc_nand_data = _nand_data;
-       gpmc_nand_data->nand_setup = omap2_nand_gpmc_retime;
        gpmc_nand_device.dev.platform_data = gpmc_nand_data;
 
        err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
@@ -100,7 +96,7 @@ int __init gpmc_nand_init(struct omap_nand_platform_data *_nand_data)
        }
 
         /* Set timings in GPMC */
-       err = omap2_nand_gpmc_retime();
+       err = omap2_nand_gpmc_retime(gpmc_nand_data);
        if (err < 0) {
                dev_err(dev, "Unable to set gpmc timings: %d\n", err);
                return err;
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 441e79d..2ce1ce6 100644 (file)
@@ -333,23 +333,9 @@ static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
        return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
 }
 
+/* See irq.c, omap4-common.c and entry-macro.S */
 void __iomem *omap_irq_base;
 
-/*
- * Initialize asm_irq_base for entry-macro.S
- */
-static inline void omap_irq_base_init(void)
-{
-       if (cpu_is_omap24xx())
-               omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
-       else if (cpu_is_omap34xx())
-               omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE);
-       else if (cpu_is_omap44xx())
-               omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
-       else
-               pr_err("Could not initialize omap_irq_base\n");
-}
-
 void __init omap2_init_common_infrastructure(void)
 {
        u8 postsetup_state;
@@ -422,7 +408,6 @@ void __init omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0,
                _omap2_init_reprogram_sdrc();
        }
 
-       omap_irq_base_init();
 }
 
 /*
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 3af2b7a..3a12f75 100644 (file)
@@ -141,25 +141,20 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
                                IRQ_NOREQUEST | IRQ_NOPROBE, 0);
 }
 
-void __init omap_init_irq(void)
+static void __init omap_init_irq(u32 base, int nr_irqs)
 {
        unsigned long nr_of_irqs = 0;
        unsigned int nr_banks = 0;
        int i, j;
 
+       omap_irq_base = ioremap(base, SZ_4K);
+       if (WARN_ON(!omap_irq_base))
+               return;
+
        for (i = 0; i < ARRAY_SIZE(irq_banks); i++) {
-               unsigned long base = 0;
                struct omap_irq_bank *bank = irq_banks + i;
 
-               if (cpu_is_omap24xx())
-                       base = OMAP24XX_IC_BASE;
-               else if (cpu_is_omap34xx())
-                       base = OMAP34XX_IC_BASE;
-
-               BUG_ON(!base);
-
-               if (cpu_is_ti816x())
-                       bank->nr_irqs = 128;
+               bank->nr_irqs = nr_irqs;
 
                /* Static mapping, never released */
                bank->base_reg = ioremap(base, SZ_4K);
@@ -181,6 +176,21 @@ void __init omap_init_irq(void)
               nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : "");
 }
 
+void __init omap2_init_irq(void)
+{
+       omap_init_irq(OMAP24XX_IC_BASE, 96);
+}
+
+void __init omap3_init_irq(void)
+{
+       omap_init_irq(OMAP34XX_IC_BASE, 96);
+}
+
+void __init ti816x_init_irq(void)
+{
+       omap_init_irq(OMAP34XX_IC_BASE, 128);
+}
+
 #ifdef CONFIG_ARCH_OMAP3
 static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)];
 
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 9ef8c29..35ac3e5 100644 (file)
@@ -19,6 +19,8 @@
 #include <asm/hardware/gic.h>
 #include <asm/hardware/cache-l2x0.h>
 
+#include <plat/irqs.h>
+
 #include <mach/hardware.h>
 #include <mach/omap4-common.h>
 
@@ -31,17 +33,15 @@ void __iomem *gic_dist_base_addr;
 
 void __init gic_init_irq(void)
 {
-       void __iomem *gic_cpu_base;
-
        /* Static mapping, never released */
        gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
        BUG_ON(!gic_dist_base_addr);
 
        /* Static mapping, never released */
-       gic_cpu_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
-       BUG_ON(!gic_cpu_base);
+       omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
+       BUG_ON(!omap_irq_base);
 
-       gic_init(0, 29, gic_dist_base_addr, gic_cpu_base);
+       gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
 }
 
 #ifdef CONFIG_CACHE_L2X0
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 293fa6c..7d242c9 100644 (file)
@@ -2,6 +2,7 @@
  * omap_hwmod implementation for OMAP2/3/4
  *
  * Copyright (C) 2009-2011 Nokia Corporation
+ * Copyright (C) 2011 Texas Instruments, Inc.
  *
  * Paul Walmsley, Benoît Cousson, Kevin Hilman
  *
@@ -387,11 +388,10 @@ static int _set_module_autoidle(struct omap_hwmod *oh, u8 autoidle,
  */
 static int _enable_wakeup(struct omap_hwmod *oh, u32 *v)
 {
-       u32 wakeup_mask;
-
        if (!oh->class->sysc ||
            !((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) ||
-             (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)))
+             (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ||
+             (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)))
                return -EINVAL;
 
        if (!oh->class->sysc->sysc_fields) {
@@ -399,12 +399,13 @@ static int _enable_wakeup(struct omap_hwmod *oh, u32 *v)
                return -EINVAL;
        }
 
-       wakeup_mask = (0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
-
-       *v |= wakeup_mask;
+       if (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)
+               *v |= 0x1 << oh->class->sysc->sysc_fields->enwkup_shift;
 
        if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
                _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
+       if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+               _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
 
        /* XXX test pwrdm_get_wken for this hwmod's subsystem */
 
@@ -422,11 +423,10 @@ static int _enable_wakeup(struct omap_hwmod *oh, u32 *v)
  */
 static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
 {
-       u32 wakeup_mask;
-
        if (!oh->class->sysc ||
            !((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) ||
-             (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)))
+             (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ||
+             (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)))
                return -EINVAL;
 
        if (!oh->class->sysc->sysc_fields) {
@@ -434,12 +434,13 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
                return -EINVAL;
        }
 
-       wakeup_mask = (0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
-
-       *v &= ~wakeup_mask;
+       if (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)
+               *v &= ~(0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
 
        if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
                _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
+       if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+               _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
 
        /* XXX test pwrdm_get_wken for this hwmod's subsystem */
 
@@ -677,6 +678,75 @@ static void _disable_optional_clocks(struct omap_hwmod *oh)
                }
 }
 
+/**
+ * _count_mpu_irqs - count the number of MPU IRQ lines associated with @oh
+ * @oh: struct omap_hwmod *oh
+ *
+ * Count and return the number of MPU IRQs associated with the hwmod
+ * @oh.  Used to allocate struct resource data.  Returns 0 if @oh is
+ * NULL.
+ */
+static int _count_mpu_irqs(struct omap_hwmod *oh)
+{
+       struct omap_hwmod_irq_info *ohii;
+       int i = 0;
+
+       if (!oh || !oh->mpu_irqs)
+               return 0;
+
+       do {
+               ohii = &oh->mpu_irqs[i++];
+       } while (ohii->irq != -1);
+
+       return i;
+}
+
+/**
+ * _count_sdma_reqs - count the number of SDMA request lines associated with @oh
+ * @oh: struct omap_hwmod *oh
+ *
+ * Count and return the number of SDMA request lines associated with
+ * the hwmod @oh.  Used to allocate struct resource data.  Returns 0
+ * if @oh is NULL.
+ */
+static int _count_sdma_reqs(struct omap_hwmod *oh)
+{
+       struct omap_hwmod_dma_info *ohdi;
+       int i = 0;
+
+       if (!oh || !oh->sdma_reqs)
+               return 0;
+
+       do {
+               ohdi = &oh->sdma_reqs[i++];
+       } while (ohdi->dma_req != -1);
+
+       return i;
+}
+
+/**
+ * _count_ocp_if_addr_spaces - count the number of address space entries for @oh
+ * @oh: struct omap_hwmod *oh
+ *
+ * Count and return the number of address space ranges associated with
+ * the hwmod @oh.  Used to allocate struct resource data.  Returns 0
+ * if @oh is NULL.
+ */
+static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
+{
+       struct omap_hwmod_addr_space *mem;
+       int i = 0;
+
+       if (!os || !os->addr)
+               return 0;
+
+       do {
+               mem = &os->addr[i++];
+       } while (mem->pa_start != mem->pa_end);
+
+       return i;
+}
+
 /**
  * _find_mpu_port_index - find hwmod OCP slave port ID intended for MPU use
  * @oh: struct omap_hwmod *
@@ -722,8 +792,7 @@ static void __iomem * __init _find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
 {
        struct omap_hwmod_ocp_if *os;
        struct omap_hwmod_addr_space *mem;
-       int i;
-       int found = 0;
+       int i = 0, found = 0;
        void __iomem *va_start;
 
        if (!oh || oh->slaves_cnt == 0)
@@ -731,12 +800,14 @@ static void __iomem * __init _find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
 
        os = oh->slaves[index];
 
-       for (i = 0, mem = os->addr; i < os->addr_cnt; i++, mem++) {
-               if (mem->flags & ADDR_TYPE_RT) {
+       if (!os->addr)
+               return NULL;
+
+       do {
+               mem = &os->addr[i++];
+               if (mem->flags & ADDR_TYPE_RT)
                        found = 1;
-                       break;
-               }
-       }
+       } while (!found && mem->pa_start != mem->pa_end);
 
        if (found) {
                va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
@@ -781,8 +852,16 @@ static void _enable_sysc(struct omap_hwmod *oh)
        }
 
        if (sf & SYSC_HAS_MIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_MSTANDBY) ?
-                       HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+               if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+                       idlemode = HWMOD_IDLEMODE_NO;
+               } else {
+                       if (sf & SYSC_HAS_ENAWAKEUP)
+                               _enable_wakeup(oh, &v);
+                       if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+                               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+                       else
+                               idlemode = HWMOD_IDLEMODE_SMART;
+               }
                _set_master_standbymode(oh, idlemode, &v);
        }
 
@@ -840,8 +919,16 @@ static void _idle_sysc(struct omap_hwmod *oh)
        }
 
        if (sf & SYSC_HAS_MIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_MSTANDBY) ?
-                       HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+               if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               } else {
+                       if (sf & SYSC_HAS_ENAWAKEUP)
+                               _enable_wakeup(oh, &v);
+                       if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+                               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+                       else
+                               idlemode = HWMOD_IDLEMODE_SMART;
+               }
                _set_master_standbymode(oh, idlemode, &v);
        }
 
@@ -928,6 +1015,8 @@ static int _init_clocks(struct omap_hwmod *oh, void *data)
 
        if (!ret)
                oh->_state = _HWMOD_STATE_CLKS_INITED;
+       else
+               pr_warning("omap_hwmod: %s: cannot _init_clocks\n", oh->name);
 
        return ret;
 }
@@ -1224,6 +1313,8 @@ static int _enable(struct omap_hwmod *oh)
 {
        int r;
 
+       pr_debug("omap_hwmod: %s: enabling\n", oh->name);
+
        if (oh->_state != _HWMOD_STATE_INITIALIZED &&
            oh->_state != _HWMOD_STATE_IDLE &&
            oh->_state != _HWMOD_STATE_DISABLED) {
@@ -1232,17 +1323,6 @@ static int _enable(struct omap_hwmod *oh)
                return -EINVAL;
        }
 
-       pr_debug("omap_hwmod: %s: enabling\n", oh->name);
-
-       /*
-        * If an IP contains only one HW reset line, then de-assert it in order
-        * to allow to enable the clocks. Otherwise the PRCM will return
-        * Intransition status, and the init will failed.
-        */
-       if ((oh->_state == _HWMOD_STATE_INITIALIZED ||
-            oh->_state == _HWMOD_STATE_DISABLED) && oh->rst_lines_cnt == 1)
-               _deassert_hardreset(oh, oh->rst_lines[0].name);
-
        /* Mux pins for device runtime if populated */
        if (oh->mux && (!oh->mux->enabled ||
                        ((oh->_state == _HWMOD_STATE_IDLE) &&
@@ -1252,20 +1332,31 @@ static int _enable(struct omap_hwmod *oh)
        _add_initiator_dep(oh, mpu_oh);
        _enable_clocks(oh);
 
-       r = _wait_target_ready(oh);
-       if (!r) {
-               oh->_state = _HWMOD_STATE_ENABLED;
+       /*
+        * If an IP contains only one HW reset line, then de-assert it in order
+        * to allow the module state transition. Otherwise the PRCM will return
+        * Intransition status, and the init will fail.
+        */
+       if ((oh->_state == _HWMOD_STATE_INITIALIZED ||
+            oh->_state == _HWMOD_STATE_DISABLED) && oh->rst_lines_cnt == 1)
+               _deassert_hardreset(oh, oh->rst_lines[0].name);
 
-               /* Access the sysconfig only if the target is ready */
-               if (oh->class->sysc) {
-                       if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
-                               _update_sysc_cache(oh);
-                       _enable_sysc(oh);
-               }
-       } else {
-               _disable_clocks(oh);
+       r = _wait_target_ready(oh);
+       if (r) {
                pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
                         oh->name, r);
+               _disable_clocks(oh);
+
+               return r;
+       }
+
+       oh->_state = _HWMOD_STATE_ENABLED;
+
+       /* Access the sysconfig only if the target is ready */
+       if (oh->class->sysc) {
+               if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
+                       _update_sysc_cache(oh);
+               _enable_sysc(oh);
        }
 
        return r;
@@ -1281,14 +1372,14 @@ static int _enable(struct omap_hwmod *oh)
  */
 static int _idle(struct omap_hwmod *oh)
 {
+       pr_debug("omap_hwmod: %s: idling\n", oh->name);
+
        if (oh->_state != _HWMOD_STATE_ENABLED) {
                WARN(1, "omap_hwmod: %s: idle state can only be entered from "
                     "enabled state\n", oh->name);
                return -EINVAL;
        }
 
-       pr_debug("omap_hwmod: %s: idling\n", oh->name);
-
        if (oh->class->sysc)
                _idle_sysc(oh);
        _del_initiator_dep(oh, mpu_oh);
@@ -1374,15 +1465,11 @@ static int _shutdown(struct omap_hwmod *oh)
                }
        }
 
-       if (oh->class->sysc)
+       if (oh->class->sysc) {
+               if (oh->_state == _HWMOD_STATE_IDLE)
+                       _enable(oh);
                _shutdown_sysc(oh);
-
-       /*
-        * If an IP contains only one HW reset line, then assert it
-        * before disabling the clocks and shutting down the IP.
-        */
-       if (oh->rst_lines_cnt == 1)
-               _assert_hardreset(oh, oh->rst_lines[0].name);
+       }
 
        /* clocks and deps are already disabled in idle */
        if (oh->_state == _HWMOD_STATE_ENABLED) {
@@ -1392,6 +1479,13 @@ static int _shutdown(struct omap_hwmod *oh)
        }
        /* XXX Should this code also force-disable the optional clocks? */
 
+       /*
+        * If an IP contains only one HW reset line, then assert it
+        * after disabling the clocks and before shutting down the IP.
+        */
+       if (oh->rst_lines_cnt == 1)
+               _assert_hardreset(oh, oh->rst_lines[0].name);
+
        /* Mux pins to safe mode or use populated off mode values */
        if (oh->mux)
                omap_hwmod_mux(oh->mux, _HWMOD_STATE_DISABLED);
@@ -1685,9 +1779,6 @@ static int __init _populate_mpu_rt_base(struct omap_hwmod *oh, void *data)
                return 0;
 
        oh->_mpu_rt_va = _find_mpu_rt_base(oh, oh->_mpu_port_index);
-       if (!oh->_mpu_rt_va)
-               pr_warning("omap_hwmod: %s found no _mpu_rt_va for %s\n",
-                               __func__, oh->name);
 
        return 0;
 }
@@ -1939,10 +2030,10 @@ int omap_hwmod_count_resources(struct omap_hwmod *oh)
 {
        int ret, i;
 
-       ret = oh->mpu_irqs_cnt + oh->sdma_reqs_cnt;
+       ret = _count_mpu_irqs(oh) + _count_sdma_reqs(oh);
 
        for (i = 0; i < oh->slaves_cnt; i++)
-               ret += oh->slaves[i]->addr_cnt;
+               ret += _count_ocp_if_addr_spaces(oh->slaves[i]);
 
        return ret;
 }
@@ -1959,12 +2050,13 @@ int omap_hwmod_count_resources(struct omap_hwmod *oh)
  */
 int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
 {
-       int i, j;
+       int i, j, mpu_irqs_cnt, sdma_reqs_cnt;
        int r = 0;
 
        /* For each IRQ, DMA, memory area, fill in array.*/
 
-       for (i = 0; i < oh->mpu_irqs_cnt; i++) {
+       mpu_irqs_cnt = _count_mpu_irqs(oh);
+       for (i = 0; i < mpu_irqs_cnt; i++) {
                (res + r)->name = (oh->mpu_irqs + i)->name;
                (res + r)->start = (oh->mpu_irqs + i)->irq;
                (res + r)->end = (oh->mpu_irqs + i)->irq;
@@ -1972,7 +2064,8 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
                r++;
        }
 
-       for (i = 0; i < oh->sdma_reqs_cnt; i++) {
+       sdma_reqs_cnt = _count_sdma_reqs(oh);
+       for (i = 0; i < sdma_reqs_cnt; i++) {
                (res + r)->name = (oh->sdma_reqs + i)->name;
                (res + r)->start = (oh->sdma_reqs + i)->dma_req;
                (res + r)->end = (oh->sdma_reqs + i)->dma_req;
@@ -1982,10 +2075,12 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
 
        for (i = 0; i < oh->slaves_cnt; i++) {
                struct omap_hwmod_ocp_if *os;
+               int addr_cnt;
 
                os = oh->slaves[i];
+               addr_cnt = _count_ocp_if_addr_spaces(os);
 
-               for (j = 0; j < os->addr_cnt; j++) {
+               for (j = 0; j < addr_cnt; j++) {
                        (res + r)->name = (os->addr + j)->name;
                        (res + r)->start = (os->addr + j)->pa_start;
                        (res + r)->end = (os->addr + j)->pa_end;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index c4d0ae8..f3901ab 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * omap_hwmod_2420_data.c - hardware modules present on the OMAP2420 chips
  *
- * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009-2011 Nokia Corporation
  * Paul Walmsley
  *
  * This program is free software; you can redistribute it and/or modify
@@ -114,38 +114,20 @@ static struct omap_hwmod omap2420_mcbsp1_hwmod;
 static struct omap_hwmod omap2420_mcbsp2_hwmod;
 
 /* l4 core -> mcspi1 interface */
-static struct omap_hwmod_addr_space omap2420_mcspi1_addr_space[] = {
-       {
-               .pa_start       = 0x48098000,
-               .pa_end         = 0x480980ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi1 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_mcspi1_hwmod,
        .clk            = "mcspi1_ick",
-       .addr           = omap2420_mcspi1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_mcspi1_addr_space),
+       .addr           = omap2_mcspi1_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4 core -> mcspi2 interface */
-static struct omap_hwmod_addr_space omap2420_mcspi2_addr_space[] = {
-       {
-               .pa_start       = 0x4809a000,
-               .pa_end         = 0x4809a0ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi2 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_mcspi2_hwmod,
        .clk            = "mcspi2_ick",
-       .addr           = omap2420_mcspi2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_mcspi2_addr_space),
+       .addr           = omap2_mcspi2_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -157,95 +139,47 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = {
 };
 
 /* L4 CORE -> UART1 interface */
-static struct omap_hwmod_addr_space omap2420_uart1_addr_space[] = {
-       {
-               .pa_start       = OMAP2_UART1_BASE,
-               .pa_end         = OMAP2_UART1_BASE + SZ_8K - 1,
-               .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2_l4_core__uart1 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_uart1_hwmod,
        .clk            = "uart1_ick",
-       .addr           = omap2420_uart1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_uart1_addr_space),
+       .addr           = omap2xxx_uart1_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 CORE -> UART2 interface */
-static struct omap_hwmod_addr_space omap2420_uart2_addr_space[] = {
-       {
-               .pa_start       = OMAP2_UART2_BASE,
-               .pa_end         = OMAP2_UART2_BASE + SZ_1K - 1,
-               .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2_l4_core__uart2 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_uart2_hwmod,
        .clk            = "uart2_ick",
-       .addr           = omap2420_uart2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_uart2_addr_space),
+       .addr           = omap2xxx_uart2_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 PER -> UART3 interface */
-static struct omap_hwmod_addr_space omap2420_uart3_addr_space[] = {
-       {
-               .pa_start       = OMAP2_UART3_BASE,
-               .pa_end         = OMAP2_UART3_BASE + SZ_1K - 1,
-               .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_uart3_hwmod,
        .clk            = "uart3_ick",
-       .addr           = omap2420_uart3_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_uart3_addr_space),
+       .addr           = omap2xxx_uart3_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* I2C IP block address space length (in bytes) */
-#define OMAP2_I2C_AS_LEN               128
-
 /* L4 CORE -> I2C1 interface */
-static struct omap_hwmod_addr_space omap2420_i2c1_addr_space[] = {
-       {
-               .pa_start       = 0x48070000,
-               .pa_end         = 0x48070000 + OMAP2_I2C_AS_LEN - 1,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2420_l4_core__i2c1 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_i2c1_hwmod,
        .clk            = "i2c1_ick",
-       .addr           = omap2420_i2c1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_i2c1_addr_space),
+       .addr           = omap2_i2c1_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 CORE -> I2C2 interface */
-static struct omap_hwmod_addr_space omap2420_i2c2_addr_space[] = {
-       {
-               .pa_start       = 0x48072000,
-               .pa_end         = 0x48072000 + OMAP2_I2C_AS_LEN - 1,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2420_l4_core__i2c2 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_i2c2_hwmod,
        .clk            = "i2c2_ick",
-       .addr           = omap2420_i2c2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_i2c2_addr_space),
+       .addr           = omap2_i2c2_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -340,29 +274,8 @@ static struct omap_hwmod omap2420_iva_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
-/* Timer Common */
-static struct omap_hwmod_class_sysconfig omap2420_timer_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
-                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_timer_hwmod_class = {
-       .name = "timer",
-       .sysc = &omap2420_timer_sysc,
-       .rev = OMAP_TIMER_IP_VERSION_1,
-};
-
 /* timer1 */
 static struct omap_hwmod omap2420_timer1_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer1_mpu_irqs[] = {
-       { .irq = 37, },
-};
 
 static struct omap_hwmod_addr_space omap2420_timer1_addrs[] = {
        {
@@ -370,6 +283,7 @@ static struct omap_hwmod_addr_space omap2420_timer1_addrs[] = {
                .pa_end         = 0x48028000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_wkup -> timer1 */
@@ -378,7 +292,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__timer1 = {
        .slave          = &omap2420_timer1_hwmod,
        .clk            = "gpt1_ick",
        .addr           = omap2420_timer1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -390,8 +303,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer1_slaves[] = {
 /* timer1 hwmod */
 static struct omap_hwmod omap2420_timer1_hwmod = {
        .name           = "timer1",
-       .mpu_irqs       = omap2420_timer1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer1_mpu_irqs),
+       .mpu_irqs       = omap2_timer1_mpu_irqs,
        .main_clk       = "gpt1_fck",
        .prcm           = {
                .omap2 = {
@@ -404,31 +316,19 @@ static struct omap_hwmod omap2420_timer1_hwmod = {
        },
        .slaves         = omap2420_timer1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer1_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer2 */
 static struct omap_hwmod omap2420_timer2_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer2_mpu_irqs[] = {
-       { .irq = 38, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer2_addrs[] = {
-       {
-               .pa_start       = 0x4802a000,
-               .pa_end         = 0x4802a000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer2 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer2 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer2_hwmod,
        .clk            = "gpt2_ick",
-       .addr           = omap2420_timer2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer2_addrs),
+       .addr           = omap2xxx_timer2_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -440,8 +340,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer2_slaves[] = {
 /* timer2 hwmod */
 static struct omap_hwmod omap2420_timer2_hwmod = {
        .name           = "timer2",
-       .mpu_irqs       = omap2420_timer2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer2_mpu_irqs),
+       .mpu_irqs       = omap2_timer2_mpu_irqs,
        .main_clk       = "gpt2_fck",
        .prcm           = {
                .omap2 = {
@@ -454,31 +353,19 @@ static struct omap_hwmod omap2420_timer2_hwmod = {
        },
        .slaves         = omap2420_timer2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer2_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer3 */
 static struct omap_hwmod omap2420_timer3_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer3_mpu_irqs[] = {
-       { .irq = 39, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer3_addrs[] = {
-       {
-               .pa_start       = 0x48078000,
-               .pa_end         = 0x48078000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer3 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer3 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer3_hwmod,
        .clk            = "gpt3_ick",
-       .addr           = omap2420_timer3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer3_addrs),
+       .addr           = omap2xxx_timer3_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -490,8 +377,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer3_slaves[] = {
 /* timer3 hwmod */
 static struct omap_hwmod omap2420_timer3_hwmod = {
        .name           = "timer3",
-       .mpu_irqs       = omap2420_timer3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer3_mpu_irqs),
+       .mpu_irqs       = omap2_timer3_mpu_irqs,
        .main_clk       = "gpt3_fck",
        .prcm           = {
                .omap2 = {
@@ -504,31 +390,19 @@ static struct omap_hwmod omap2420_timer3_hwmod = {
        },
        .slaves         = omap2420_timer3_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer3_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer4 */
 static struct omap_hwmod omap2420_timer4_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer4_mpu_irqs[] = {
-       { .irq = 40, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer4_addrs[] = {
-       {
-               .pa_start       = 0x4807a000,
-               .pa_end         = 0x4807a000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer4 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer4 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer4_hwmod,
        .clk            = "gpt4_ick",
-       .addr           = omap2420_timer4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer4_addrs),
+       .addr           = omap2xxx_timer4_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -540,8 +414,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer4_slaves[] = {
 /* timer4 hwmod */
 static struct omap_hwmod omap2420_timer4_hwmod = {
        .name           = "timer4",
-       .mpu_irqs       = omap2420_timer4_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer4_mpu_irqs),
+       .mpu_irqs       = omap2_timer4_mpu_irqs,
        .main_clk       = "gpt4_fck",
        .prcm           = {
                .omap2 = {
@@ -554,31 +427,19 @@ static struct omap_hwmod omap2420_timer4_hwmod = {
        },
        .slaves         = omap2420_timer4_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer4_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer5 */
 static struct omap_hwmod omap2420_timer5_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer5_mpu_irqs[] = {
-       { .irq = 41, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer5_addrs[] = {
-       {
-               .pa_start       = 0x4807c000,
-               .pa_end         = 0x4807c000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer5 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer5 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer5_hwmod,
        .clk            = "gpt5_ick",
-       .addr           = omap2420_timer5_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer5_addrs),
+       .addr           = omap2xxx_timer5_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -590,8 +451,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer5_slaves[] = {
 /* timer5 hwmod */
 static struct omap_hwmod omap2420_timer5_hwmod = {
        .name           = "timer5",
-       .mpu_irqs       = omap2420_timer5_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer5_mpu_irqs),
+       .mpu_irqs       = omap2_timer5_mpu_irqs,
        .main_clk       = "gpt5_fck",
        .prcm           = {
                .omap2 = {
@@ -604,32 +464,20 @@ static struct omap_hwmod omap2420_timer5_hwmod = {
        },
        .slaves         = omap2420_timer5_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer5_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 
 /* timer6 */
 static struct omap_hwmod omap2420_timer6_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer6_mpu_irqs[] = {
-       { .irq = 42, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer6_addrs[] = {
-       {
-               .pa_start       = 0x4807e000,
-               .pa_end         = 0x4807e000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer6 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer6 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer6_hwmod,
        .clk            = "gpt6_ick",
-       .addr           = omap2420_timer6_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer6_addrs),
+       .addr           = omap2xxx_timer6_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -641,8 +489,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer6_slaves[] = {
 /* timer6 hwmod */
 static struct omap_hwmod omap2420_timer6_hwmod = {
        .name           = "timer6",
-       .mpu_irqs       = omap2420_timer6_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer6_mpu_irqs),
+       .mpu_irqs       = omap2_timer6_mpu_irqs,
        .main_clk       = "gpt6_fck",
        .prcm           = {
                .omap2 = {
@@ -655,31 +502,19 @@ static struct omap_hwmod omap2420_timer6_hwmod = {
        },
        .slaves         = omap2420_timer6_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer6_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer7 */
 static struct omap_hwmod omap2420_timer7_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer7_mpu_irqs[] = {
-       { .irq = 43, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer7_addrs[] = {
-       {
-               .pa_start       = 0x48080000,
-               .pa_end         = 0x48080000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer7 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer7 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer7_hwmod,
        .clk            = "gpt7_ick",
-       .addr           = omap2420_timer7_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer7_addrs),
+       .addr           = omap2xxx_timer7_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -691,8 +526,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer7_slaves[] = {
 /* timer7 hwmod */
 static struct omap_hwmod omap2420_timer7_hwmod = {
        .name           = "timer7",
-       .mpu_irqs       = omap2420_timer7_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer7_mpu_irqs),
+       .mpu_irqs       = omap2_timer7_mpu_irqs,
        .main_clk       = "gpt7_fck",
        .prcm           = {
                .omap2 = {
@@ -705,31 +539,19 @@ static struct omap_hwmod omap2420_timer7_hwmod = {
        },
        .slaves         = omap2420_timer7_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer7_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer8 */
 static struct omap_hwmod omap2420_timer8_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer8_mpu_irqs[] = {
-       { .irq = 44, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer8_addrs[] = {
-       {
-               .pa_start       = 0x48082000,
-               .pa_end         = 0x48082000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer8 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer8 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer8_hwmod,
        .clk            = "gpt8_ick",
-       .addr           = omap2420_timer8_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer8_addrs),
+       .addr           = omap2xxx_timer8_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -741,8 +563,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer8_slaves[] = {
 /* timer8 hwmod */
 static struct omap_hwmod omap2420_timer8_hwmod = {
        .name           = "timer8",
-       .mpu_irqs       = omap2420_timer8_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer8_mpu_irqs),
+       .mpu_irqs       = omap2_timer8_mpu_irqs,
        .main_clk       = "gpt8_fck",
        .prcm           = {
                .omap2 = {
@@ -755,31 +576,19 @@ static struct omap_hwmod omap2420_timer8_hwmod = {
        },
        .slaves         = omap2420_timer8_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer8_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer9 */
 static struct omap_hwmod omap2420_timer9_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer9_mpu_irqs[] = {
-       { .irq = 45, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer9_addrs[] = {
-       {
-               .pa_start       = 0x48084000,
-               .pa_end         = 0x48084000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer9 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer9 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer9_hwmod,
        .clk            = "gpt9_ick",
-       .addr           = omap2420_timer9_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer9_addrs),
+       .addr           = omap2xxx_timer9_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -791,8 +600,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer9_slaves[] = {
 /* timer9 hwmod */
 static struct omap_hwmod omap2420_timer9_hwmod = {
        .name           = "timer9",
-       .mpu_irqs       = omap2420_timer9_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer9_mpu_irqs),
+       .mpu_irqs       = omap2_timer9_mpu_irqs,
        .main_clk       = "gpt9_fck",
        .prcm           = {
                .omap2 = {
@@ -805,31 +613,19 @@ static struct omap_hwmod omap2420_timer9_hwmod = {
        },
        .slaves         = omap2420_timer9_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer9_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer10 */
 static struct omap_hwmod omap2420_timer10_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer10_mpu_irqs[] = {
-       { .irq = 46, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer10_addrs[] = {
-       {
-               .pa_start       = 0x48086000,
-               .pa_end         = 0x48086000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer10 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer10 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer10_hwmod,
        .clk            = "gpt10_ick",
-       .addr           = omap2420_timer10_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer10_addrs),
+       .addr           = omap2_timer10_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -841,8 +637,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer10_slaves[] = {
 /* timer10 hwmod */
 static struct omap_hwmod omap2420_timer10_hwmod = {
        .name           = "timer10",
-       .mpu_irqs       = omap2420_timer10_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer10_mpu_irqs),
+       .mpu_irqs       = omap2_timer10_mpu_irqs,
        .main_clk       = "gpt10_fck",
        .prcm           = {
                .omap2 = {
@@ -855,31 +650,19 @@ static struct omap_hwmod omap2420_timer10_hwmod = {
        },
        .slaves         = omap2420_timer10_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer10_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer11 */
 static struct omap_hwmod omap2420_timer11_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer11_mpu_irqs[] = {
-       { .irq = 47, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer11_addrs[] = {
-       {
-               .pa_start       = 0x48088000,
-               .pa_end         = 0x48088000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer11 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer11 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer11_hwmod,
        .clk            = "gpt11_ick",
-       .addr           = omap2420_timer11_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer11_addrs),
+       .addr           = omap2_timer11_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -891,8 +674,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer11_slaves[] = {
 /* timer11 hwmod */
 static struct omap_hwmod omap2420_timer11_hwmod = {
        .name           = "timer11",
-       .mpu_irqs       = omap2420_timer11_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer11_mpu_irqs),
+       .mpu_irqs       = omap2_timer11_mpu_irqs,
        .main_clk       = "gpt11_fck",
        .prcm           = {
                .omap2 = {
@@ -905,31 +687,19 @@ static struct omap_hwmod omap2420_timer11_hwmod = {
        },
        .slaves         = omap2420_timer11_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer11_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
 /* timer12 */
 static struct omap_hwmod omap2420_timer12_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer12_mpu_irqs[] = {
-       { .irq = 48, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer12_addrs[] = {
-       {
-               .pa_start       = 0x4808a000,
-               .pa_end         = 0x4808a000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer12 */
 static struct omap_hwmod_ocp_if omap2420_l4_core__timer12 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_timer12_hwmod,
        .clk            = "gpt12_ick",
-       .addr           = omap2420_timer12_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_timer12_addrs),
+       .addr           = omap2xxx_timer12_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -941,8 +711,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer12_slaves[] = {
 /* timer12 hwmod */
 static struct omap_hwmod omap2420_timer12_hwmod = {
        .name           = "timer12",
-       .mpu_irqs       = omap2420_timer12_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_timer12_mpu_irqs),
+       .mpu_irqs       = omap2xxx_timer12_mpu_irqs,
        .main_clk       = "gpt12_fck",
        .prcm           = {
                .omap2 = {
@@ -955,7 +724,7 @@ static struct omap_hwmod omap2420_timer12_hwmod = {
        },
        .slaves         = omap2420_timer12_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_timer12_slaves),
-       .class          = &omap2420_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
 };
 
@@ -966,6 +735,7 @@ static struct omap_hwmod_addr_space omap2420_wd_timer2_addrs[] = {
                .pa_end         = 0x4802207f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2420_l4_wkup__wd_timer2 = {
@@ -973,31 +743,9 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__wd_timer2 = {
        .slave          = &omap2420_wd_timer2_hwmod,
        .clk            = "mpu_wdt_ick",
        .addr           = omap2420_wd_timer2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_wd_timer2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/*
- * 'wd_timer' class
- * 32-bit watchdog upward counter that generates a pulse on the reset pin on
- * overflow condition
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_wd_timer_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_wd_timer_hwmod_class = {
-       .name           = "wd_timer",
-       .sysc           = &omap2420_wd_timer_sysc,
-       .pre_shutdown   = &omap2_wd_timer_disable
-};
-
 /* wd_timer2 */
 static struct omap_hwmod_ocp_if *omap2420_wd_timer2_slaves[] = {
        &omap2420_l4_wkup__wd_timer2,
@@ -1005,7 +753,7 @@ static struct omap_hwmod_ocp_if *omap2420_wd_timer2_slaves[] = {
 
 static struct omap_hwmod omap2420_wd_timer2_hwmod = {
        .name           = "wd_timer2",
-       .class          = &omap2420_wd_timer_hwmod_class,
+       .class          = &omap2xxx_wd_timer_hwmod_class,
        .main_clk       = "mpu_wdt_fck",
        .prcm           = {
                .omap2 = {
@@ -1021,45 +769,16 @@ static struct omap_hwmod omap2420_wd_timer2_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
-/* UART */
-
-static struct omap_hwmod_class_sysconfig uart_sysc = {
-       .rev_offs       = 0x50,
-       .sysc_offs      = 0x54,
-       .syss_offs      = 0x58,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE |
-                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class uart_class = {
-       .name = "uart",
-       .sysc = &uart_sysc,
-};
-
 /* UART1 */
 
-static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
-       { .irq = INT_24XX_UART1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
-       { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
-       { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
-};
-
 static struct omap_hwmod_ocp_if *omap2420_uart1_slaves[] = {
        &omap2_l4_core__uart1,
 };
 
 static struct omap_hwmod omap2420_uart1_hwmod = {
        .name           = "uart1",
-       .mpu_irqs       = uart1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart1_mpu_irqs),
-       .sdma_reqs      = uart1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart1_sdma_reqs),
+       .mpu_irqs       = omap2_uart1_mpu_irqs,
+       .sdma_reqs      = omap2_uart1_sdma_reqs,
        .main_clk       = "uart1_fck",
        .prcm           = {
                .omap2 = {
@@ -1072,31 +791,20 @@ static struct omap_hwmod omap2420_uart1_hwmod = {
        },
        .slaves         = omap2420_uart1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_uart1_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
 /* UART2 */
 
-static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
-       { .irq = INT_24XX_UART2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
-       { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
-       { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
-};
-
 static struct omap_hwmod_ocp_if *omap2420_uart2_slaves[] = {
        &omap2_l4_core__uart2,
 };
 
 static struct omap_hwmod omap2420_uart2_hwmod = {
        .name           = "uart2",
-       .mpu_irqs       = uart2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart2_mpu_irqs),
-       .sdma_reqs      = uart2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart2_sdma_reqs),
+       .mpu_irqs       = omap2_uart2_mpu_irqs,
+       .sdma_reqs      = omap2_uart2_sdma_reqs,
        .main_clk       = "uart2_fck",
        .prcm           = {
                .omap2 = {
@@ -1109,31 +817,20 @@ static struct omap_hwmod omap2420_uart2_hwmod = {
        },
        .slaves         = omap2420_uart2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_uart2_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
 /* UART3 */
 
-static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
-       { .irq = INT_24XX_UART3_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
-       { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
-       { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
-};
-
 static struct omap_hwmod_ocp_if *omap2420_uart3_slaves[] = {
        &omap2_l4_core__uart3,
 };
 
 static struct omap_hwmod omap2420_uart3_hwmod = {
        .name           = "uart3",
-       .mpu_irqs       = uart3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart3_mpu_irqs),
-       .sdma_reqs      = uart3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart3_sdma_reqs),
+       .mpu_irqs       = omap2_uart3_mpu_irqs,
+       .sdma_reqs      = omap2_uart3_sdma_reqs,
        .main_clk       = "uart3_fck",
        .prcm           = {
                .omap2 = {
@@ -1146,53 +843,22 @@ static struct omap_hwmod omap2420_uart3_hwmod = {
        },
        .slaves         = omap2420_uart3_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_uart3_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
-/*
- * 'dss' class
- * display sub-system
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_dss_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_dss_hwmod_class = {
-       .name = "dss",
-       .sysc = &omap2420_dss_sysc,
-};
-
-static struct omap_hwmod_dma_info omap2420_dss_sdma_chs[] = {
-       { .name = "dispc", .dma_req = 5 },
-};
-
 /* dss */
 /* dss master ports */
 static struct omap_hwmod_ocp_if *omap2420_dss_masters[] = {
        &omap2420_dss__l3,
 };
 
-static struct omap_hwmod_addr_space omap2420_dss_addrs[] = {
-       {
-               .pa_start       = 0x48050000,
-               .pa_end         = 0x480503FF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss */
 static struct omap_hwmod_ocp_if omap2420_l4_core__dss = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_dss_core_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap2420_dss_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_dss_addrs),
+       .addr           = omap2_dss_addrs,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
@@ -1214,10 +880,9 @@ static struct omap_hwmod_opt_clk dss_opt_clks[] = {
 
 static struct omap_hwmod omap2420_dss_core_hwmod = {
        .name           = "dss_core",
-       .class          = &omap2420_dss_hwmod_class,
+       .class          = &omap2_dss_hwmod_class,
        .main_clk       = "dss1_fck", /* instead of dss_fck */
-       .sdma_reqs      = omap2420_dss_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2420_dss_sdma_chs),
+       .sdma_reqs      = omap2xxx_dss_sdma_chs,
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -1237,46 +902,12 @@ static struct omap_hwmod omap2420_dss_core_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_dispc_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
-                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_dispc_hwmod_class = {
-       .name = "dispc",
-       .sysc = &omap2420_dispc_sysc,
-};
-
-static struct omap_hwmod_irq_info omap2420_dispc_irqs[] = {
-       { .irq = 25 },
-};
-
-static struct omap_hwmod_addr_space omap2420_dss_dispc_addrs[] = {
-       {
-               .pa_start       = 0x48050400,
-               .pa_end         = 0x480507FF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss_dispc */
 static struct omap_hwmod_ocp_if omap2420_l4_core__dss_dispc = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_dss_dispc_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap2420_dss_dispc_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_dss_dispc_addrs),
+       .addr           = omap2_dss_dispc_addrs,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_DISPC_REGION,
@@ -1293,9 +924,8 @@ static struct omap_hwmod_ocp_if *omap2420_dss_dispc_slaves[] = {
 
 static struct omap_hwmod omap2420_dss_dispc_hwmod = {
        .name           = "dss_dispc",
-       .class          = &omap2420_dispc_hwmod_class,
-       .mpu_irqs       = omap2420_dispc_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_dispc_irqs),
+       .class          = &omap2_dispc_hwmod_class,
+       .mpu_irqs       = omap2_dispc_irqs,
        .main_clk       = "dss1_fck",
        .prcm           = {
                .omap2 = {
@@ -1312,41 +942,12 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'rfbi' class
- * remote frame buffer interface
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_rfbi_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_rfbi_hwmod_class = {
-       .name = "rfbi",
-       .sysc = &omap2420_rfbi_sysc,
-};
-
-static struct omap_hwmod_addr_space omap2420_dss_rfbi_addrs[] = {
-       {
-               .pa_start       = 0x48050800,
-               .pa_end         = 0x48050BFF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss_rfbi */
 static struct omap_hwmod_ocp_if omap2420_l4_core__dss_rfbi = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_dss_rfbi_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap2420_dss_rfbi_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_dss_rfbi_addrs),
+       .addr           = omap2_dss_rfbi_addrs,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
@@ -1363,7 +964,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
 
 static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
-       .class          = &omap2420_rfbi_hwmod_class,
+       .class          = &omap2_rfbi_hwmod_class,
        .main_clk       = "dss1_fck",
        .prcm           = {
                .omap2 = {
@@ -1378,31 +979,12 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'venc' class
- * video encoder
- */
-
-static struct omap_hwmod_class omap2420_venc_hwmod_class = {
-       .name = "venc",
-};
-
-/* dss_venc */
-static struct omap_hwmod_addr_space omap2420_dss_venc_addrs[] = {
-       {
-               .pa_start       = 0x48050C00,
-               .pa_end         = 0x48050FFF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss_venc */
 static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_dss_venc_hwmod,
        .clk            = "dss_54m_fck",
-       .addr           = omap2420_dss_venc_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_dss_venc_addrs),
+       .addr           = omap2_dss_venc_addrs,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_VENC_REGION,
@@ -1420,7 +1002,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
 
 static struct omap_hwmod omap2420_dss_venc_hwmod = {
        .name           = "dss_venc",
-       .class          = &omap2420_venc_hwmod_class,
+       .class          = &omap2_venc_hwmod_class,
        .main_clk       = "dss1_fck",
        .prcm           = {
                .omap2 = {
@@ -1453,25 +1035,14 @@ static struct omap_i2c_dev_attr i2c_dev_attr;
 
 /* I2C1 */
 
-static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
-       { .irq = INT_24XX_I2C1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
-       { .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
-       { .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
-};
-
 static struct omap_hwmod_ocp_if *omap2420_i2c1_slaves[] = {
        &omap2420_l4_core__i2c1,
 };
 
 static struct omap_hwmod omap2420_i2c1_hwmod = {
        .name           = "i2c1",
-       .mpu_irqs       = i2c1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(i2c1_mpu_irqs),
-       .sdma_reqs      = i2c1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(i2c1_sdma_reqs),
+       .mpu_irqs       = omap2_i2c1_mpu_irqs,
+       .sdma_reqs      = omap2_i2c1_sdma_reqs,
        .main_clk       = "i2c1_fck",
        .prcm           = {
                .omap2 = {
@@ -1492,25 +1063,14 @@ static struct omap_hwmod omap2420_i2c1_hwmod = {
 
 /* I2C2 */
 
-static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
-       { .irq = INT_24XX_I2C2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
-       { .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
-       { .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
-};
-
 static struct omap_hwmod_ocp_if *omap2420_i2c2_slaves[] = {
        &omap2420_l4_core__i2c2,
 };
 
 static struct omap_hwmod omap2420_i2c2_hwmod = {
        .name           = "i2c2",
-       .mpu_irqs       = i2c2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(i2c2_mpu_irqs),
-       .sdma_reqs      = i2c2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(i2c2_sdma_reqs),
+       .mpu_irqs       = omap2_i2c2_mpu_irqs,
+       .sdma_reqs      = omap2_i2c2_sdma_reqs,
        .main_clk       = "i2c2_fck",
        .prcm           = {
                .omap2 = {
@@ -1536,6 +1096,7 @@ static struct omap_hwmod_addr_space omap2420_gpio1_addr_space[] = {
                .pa_end         = 0x480181ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio1 = {
@@ -1543,7 +1104,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio1 = {
        .slave          = &omap2420_gpio1_hwmod,
        .clk            = "gpios_ick",
        .addr           = omap2420_gpio1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_gpio1_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1554,6 +1114,7 @@ static struct omap_hwmod_addr_space omap2420_gpio2_addr_space[] = {
                .pa_end         = 0x4801a1ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio2 = {
@@ -1561,7 +1122,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio2 = {
        .slave          = &omap2420_gpio2_hwmod,
        .clk            = "gpios_ick",
        .addr           = omap2420_gpio2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_gpio2_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1572,6 +1132,7 @@ static struct omap_hwmod_addr_space omap2420_gpio3_addr_space[] = {
                .pa_end         = 0x4801c1ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio3 = {
@@ -1579,7 +1140,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio3 = {
        .slave          = &omap2420_gpio3_hwmod,
        .clk            = "gpios_ick",
        .addr           = omap2420_gpio3_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_gpio3_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1590,6 +1150,7 @@ static struct omap_hwmod_addr_space omap2420_gpio4_addr_space[] = {
                .pa_end         = 0x4801e1ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio4 = {
@@ -1597,7 +1158,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio4 = {
        .slave          = &omap2420_gpio4_hwmod,
        .clk            = "gpios_ick",
        .addr           = omap2420_gpio4_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2420_gpio4_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1607,32 +1167,7 @@ static struct omap_gpio_dev_attr gpio_dev_attr = {
        .dbck_flag = false,
 };
 
-static struct omap_hwmod_class_sysconfig omap242x_gpio_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
-                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
-                          SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-/*
- * 'gpio' class
- * general purpose io module
- */
-static struct omap_hwmod_class omap242x_gpio_hwmod_class = {
-       .name = "gpio",
-       .sysc = &omap242x_gpio_sysc,
-       .rev = 0,
-};
-
 /* gpio1 */
-static struct omap_hwmod_irq_info omap242x_gpio1_irqs[] = {
-       { .irq = 29 }, /* INT_24XX_GPIO_BANK1 */
-};
-
 static struct omap_hwmod_ocp_if *omap2420_gpio1_slaves[] = {
        &omap2420_l4_wkup__gpio1,
 };
@@ -1640,8 +1175,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio1_slaves[] = {
 static struct omap_hwmod omap2420_gpio1_hwmod = {
        .name           = "gpio1",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap242x_gpio1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap242x_gpio1_irqs),
+       .mpu_irqs       = omap2_gpio1_irqs,
        .main_clk       = "gpios_fck",
        .prcm           = {
                .omap2 = {
@@ -1654,16 +1188,12 @@ static struct omap_hwmod omap2420_gpio1_hwmod = {
        },
        .slaves         = omap2420_gpio1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_gpio1_slaves),
-       .class          = &omap242x_gpio_hwmod_class,
+       .class          = &omap2xxx_gpio_hwmod_class,
        .dev_attr       = &gpio_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
 /* gpio2 */
-static struct omap_hwmod_irq_info omap242x_gpio2_irqs[] = {
-       { .irq = 30 }, /* INT_24XX_GPIO_BANK2 */
-};
-
 static struct omap_hwmod_ocp_if *omap2420_gpio2_slaves[] = {
        &omap2420_l4_wkup__gpio2,
 };
@@ -1671,8 +1201,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio2_slaves[] = {
 static struct omap_hwmod omap2420_gpio2_hwmod = {
        .name           = "gpio2",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap242x_gpio2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap242x_gpio2_irqs),
+       .mpu_irqs       = omap2_gpio2_irqs,
        .main_clk       = "gpios_fck",
        .prcm           = {
                .omap2 = {
@@ -1685,16 +1214,12 @@ static struct omap_hwmod omap2420_gpio2_hwmod = {
        },
        .slaves         = omap2420_gpio2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_gpio2_slaves),
-       .class          = &omap242x_gpio_hwmod_class,
+       .class          = &omap2xxx_gpio_hwmod_class,
        .dev_attr       = &gpio_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
 /* gpio3 */
-static struct omap_hwmod_irq_info omap242x_gpio3_irqs[] = {
-       { .irq = 31 }, /* INT_24XX_GPIO_BANK3 */
-};
-
 static struct omap_hwmod_ocp_if *omap2420_gpio3_slaves[] = {
        &omap2420_l4_wkup__gpio3,
 };
@@ -1702,8 +1227,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio3_slaves[] = {
 static struct omap_hwmod omap2420_gpio3_hwmod = {
        .name           = "gpio3",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap242x_gpio3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap242x_gpio3_irqs),
+       .mpu_irqs       = omap2_gpio3_irqs,
        .main_clk       = "gpios_fck",
        .prcm           = {
                .omap2 = {
@@ -1716,16 +1240,12 @@ static struct omap_hwmod omap2420_gpio3_hwmod = {
        },
        .slaves         = omap2420_gpio3_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_gpio3_slaves),
-       .class          = &omap242x_gpio_hwmod_class,
+       .class          = &omap2xxx_gpio_hwmod_class,
        .dev_attr       = &gpio_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
 /* gpio4 */
-static struct omap_hwmod_irq_info omap242x_gpio4_irqs[] = {
-       { .irq = 32 }, /* INT_24XX_GPIO_BANK4 */
-};
-
 static struct omap_hwmod_ocp_if *omap2420_gpio4_slaves[] = {
        &omap2420_l4_wkup__gpio4,
 };
@@ -1733,8 +1253,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio4_slaves[] = {
 static struct omap_hwmod omap2420_gpio4_hwmod = {
        .name           = "gpio4",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap242x_gpio4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap242x_gpio4_irqs),
+       .mpu_irqs       = omap2_gpio4_irqs,
        .main_clk       = "gpios_fck",
        .prcm           = {
                .omap2 = {
@@ -1747,28 +1266,11 @@ static struct omap_hwmod omap2420_gpio4_hwmod = {
        },
        .slaves         = omap2420_gpio4_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_gpio4_slaves),
-       .class          = &omap242x_gpio_hwmod_class,
+       .class          = &omap2xxx_gpio_hwmod_class,
        .dev_attr       = &gpio_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
-/* system dma */
-static struct omap_hwmod_class_sysconfig omap2420_dma_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x002c,
-       .syss_offs      = 0x0028,
-       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
-                          SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
-                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_dma_hwmod_class = {
-       .name = "dma",
-       .sysc = &omap2420_dma_sysc,
-};
-
 /* dma attributes */
 static struct omap_dma_dev_attr dma_dev_attr = {
        .dev_caps  = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
@@ -1776,21 +1278,6 @@ static struct omap_dma_dev_attr dma_dev_attr = {
        .lch_count = 32,
 };
 
-static struct omap_hwmod_irq_info omap2420_dma_system_irqs[] = {
-       { .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
-       { .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
-       { .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
-       { .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
-};
-
-static struct omap_hwmod_addr_space omap2420_dma_system_addrs[] = {
-       {
-               .pa_start       = 0x48056000,
-               .pa_end         = 0x48056fff,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* dma_system -> L3 */
 static struct omap_hwmod_ocp_if omap2420_dma_system__l3 = {
        .master         = &omap2420_dma_system_hwmod,
@@ -1809,8 +1296,7 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__dma_system = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_dma_system_hwmod,
        .clk            = "sdma_ick",
-       .addr           = omap2420_dma_system_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_dma_system_addrs),
+       .addr           = omap2_dma_system_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1821,9 +1307,8 @@ static struct omap_hwmod_ocp_if *omap2420_dma_system_slaves[] = {
 
 static struct omap_hwmod omap2420_dma_system_hwmod = {
        .name           = "dma",
-       .class          = &omap2420_dma_hwmod_class,
-       .mpu_irqs       = omap2420_dma_system_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_dma_system_irqs),
+       .class          = &omap2xxx_dma_hwmod_class,
+       .mpu_irqs       = omap2_dma_system_irqs,
        .main_clk       = "core_l3_ck",
        .slaves         = omap2420_dma_system_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_dma_system_slaves),
@@ -1834,48 +1319,19 @@ static struct omap_hwmod omap2420_dma_system_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'mailbox' class
- * mailbox module allowing communication between the on-chip processors
- * using a queued mailbox-interrupt mechanism.
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_mailbox_sysc = {
-       .rev_offs       = 0x000,
-       .sysc_offs      = 0x010,
-       .syss_offs      = 0x014,
-       .sysc_flags     = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
-                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_mailbox_hwmod_class = {
-       .name = "mailbox",
-       .sysc = &omap2420_mailbox_sysc,
-};
-
 /* mailbox */
 static struct omap_hwmod omap2420_mailbox_hwmod;
 static struct omap_hwmod_irq_info omap2420_mailbox_irqs[] = {
        { .name = "dsp", .irq = 26 },
        { .name = "iva", .irq = 34 },
-};
-
-static struct omap_hwmod_addr_space omap2420_mailbox_addrs[] = {
-       {
-               .pa_start       = 0x48094000,
-               .pa_end         = 0x480941ff,
-               .flags          = ADDR_TYPE_RT,
-       },
+       { .irq = -1 }
 };
 
 /* l4_core -> mailbox */
 static struct omap_hwmod_ocp_if omap2420_l4_core__mailbox = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_mailbox_hwmod,
-       .addr           = omap2420_mailbox_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_mailbox_addrs),
+       .addr           = omap2_mailbox_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
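The arrays that remain in this file now carry explicit sentinels instead of a separate count: address arrays end in an all-zero { } record (as added to the wd_timer2 and gpio entries above) and IRQ arrays end in { .irq = -1 }, as in the mailbox hunk just above. With that convention the hwmod core can size the arrays itself rather than taking ARRAY_SIZE() from the data files; a counting walk over such arrays could look roughly like the sketch below (the real helpers in omap_hwmod.c may differ in detail):

/* count IRQ entries up to the { .irq = -1 } sentinel */
static int count_mpu_irqs(const struct omap_hwmod_irq_info *irqs)
{
        int i = 0;

        if (!irqs)
                return 0;

        while (irqs[i].irq != -1)
                i++;

        return i;
}

/* count address ranges up to the zeroed { } sentinel */
static int count_addr_spaces(const struct omap_hwmod_addr_space *addr)
{
        int i = 0;

        if (!addr)
                return 0;

        while (addr[i].pa_start != addr[i].pa_end)
                i++;

        return i;
}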
 
@@ -1886,9 +1342,8 @@ static struct omap_hwmod_ocp_if *omap2420_mailbox_slaves[] = {
 
 static struct omap_hwmod omap2420_mailbox_hwmod = {
        .name           = "mailbox",
-       .class          = &omap2420_mailbox_hwmod_class,
+       .class          = &omap2xxx_mailbox_hwmod_class,
        .mpu_irqs       = omap2420_mailbox_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_mailbox_irqs),
        .main_clk       = "mailboxes_ick",
        .prcm           = {
                .omap2 = {
@@ -1904,45 +1359,7 @@ static struct omap_hwmod omap2420_mailbox_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
-/*
- * 'mcspi' class
- * multichannel serial port interface (mcspi) / master/slave synchronous serial
- * bus
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_mcspi_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
-                               SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
-                               SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_mcspi_class = {
-       .name = "mcspi",
-       .sysc = &omap2420_mcspi_sysc,
-       .rev = OMAP2_MCSPI_REV,
-};
-
 /* mcspi1 */
-static struct omap_hwmod_irq_info omap2420_mcspi1_mpu_irqs[] = {
-       { .irq = 65 },
-};
-
-static struct omap_hwmod_dma_info omap2420_mcspi1_sdma_reqs[] = {
-       { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
-       { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
-       { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
-       { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
-       { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
-       { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
-       { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
-       { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
-};
-
 static struct omap_hwmod_ocp_if *omap2420_mcspi1_slaves[] = {
        &omap2420_l4_core__mcspi1,
 };
@@ -1953,10 +1370,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
 
 static struct omap_hwmod omap2420_mcspi1_hwmod = {
        .name           = "mcspi1_hwmod",
-       .mpu_irqs       = omap2420_mcspi1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_mcspi1_mpu_irqs),
-       .sdma_reqs      = omap2420_mcspi1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2420_mcspi1_sdma_reqs),
+       .mpu_irqs       = omap2_mcspi1_mpu_irqs,
+       .sdma_reqs      = omap2_mcspi1_sdma_reqs,
        .main_clk       = "mcspi1_fck",
        .prcm           = {
                .omap2 = {
@@ -1969,23 +1384,12 @@ static struct omap_hwmod omap2420_mcspi1_hwmod = {
        },
        .slaves         = omap2420_mcspi1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_mcspi1_slaves),
-       .class          = &omap2420_mcspi_class,
-       .dev_attr       = &omap_mcspi1_dev_attr,
+       .class          = &omap2xxx_mcspi_class,
+       .dev_attr       = &omap_mcspi1_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
 /* mcspi2 */
-static struct omap_hwmod_irq_info omap2420_mcspi2_mpu_irqs[] = {
-       { .irq = 66 },
-};
-
-static struct omap_hwmod_dma_info omap2420_mcspi2_sdma_reqs[] = {
-       { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
-       { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
-       { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
-       { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
-};
-
 static struct omap_hwmod_ocp_if *omap2420_mcspi2_slaves[] = {
        &omap2420_l4_core__mcspi2,
 };
@@ -1996,10 +1400,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
 
 static struct omap_hwmod omap2420_mcspi2_hwmod = {
        .name           = "mcspi2_hwmod",
-       .mpu_irqs       = omap2420_mcspi2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_mcspi2_mpu_irqs),
-       .sdma_reqs      = omap2420_mcspi2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2420_mcspi2_sdma_reqs),
+       .mpu_irqs       = omap2_mcspi2_mpu_irqs,
+       .sdma_reqs      = omap2_mcspi2_sdma_reqs,
        .main_clk       = "mcspi2_fck",
        .prcm           = {
                .omap2 = {
@@ -2012,8 +1414,8 @@ static struct omap_hwmod omap2420_mcspi2_hwmod = {
        },
        .slaves         = omap2420_mcspi2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_mcspi2_slaves),
-       .class          = &omap2420_mcspi_class,
-       .dev_attr       = &omap_mcspi2_dev_attr,
+       .class          = &omap2xxx_mcspi_class,
+       .dev_attr       = &omap_mcspi2_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
@@ -2030,20 +1432,7 @@ static struct omap_hwmod_class omap2420_mcbsp_hwmod_class = {
 static struct omap_hwmod_irq_info omap2420_mcbsp1_irqs[] = {
        { .name = "tx", .irq = 59 },
        { .name = "rx", .irq = 60 },
-};
-
-static struct omap_hwmod_dma_info omap2420_mcbsp1_sdma_chs[] = {
-       { .name = "rx", .dma_req = 32 },
-       { .name = "tx", .dma_req = 31 },
-};
-
-static struct omap_hwmod_addr_space omap2420_mcbsp1_addrs[] = {
-       {
-               .name           = "mpu",
-               .pa_start       = 0x48074000,
-               .pa_end         = 0x480740ff,
-               .flags          = ADDR_TYPE_RT
-       },
+       { .irq = -1 }
 };
 
 /* l4_core -> mcbsp1 */
@@ -2051,8 +1440,7 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__mcbsp1 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_mcbsp1_hwmod,
        .clk            = "mcbsp1_ick",
-       .addr           = omap2420_mcbsp1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_mcbsp1_addrs),
+       .addr           = omap2_mcbsp1_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2065,9 +1453,7 @@ static struct omap_hwmod omap2420_mcbsp1_hwmod = {
        .name           = "mcbsp1",
        .class          = &omap2420_mcbsp_hwmod_class,
        .mpu_irqs       = omap2420_mcbsp1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_mcbsp1_irqs),
-       .sdma_reqs      = omap2420_mcbsp1_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2420_mcbsp1_sdma_chs),
+       .sdma_reqs      = omap2_mcbsp1_sdma_reqs,
        .main_clk       = "mcbsp1_fck",
        .prcm           = {
                .omap2 = {
@@ -2087,20 +1473,7 @@ static struct omap_hwmod omap2420_mcbsp1_hwmod = {
 static struct omap_hwmod_irq_info omap2420_mcbsp2_irqs[] = {
        { .name = "tx", .irq = 62 },
        { .name = "rx", .irq = 63 },
-};
-
-static struct omap_hwmod_dma_info omap2420_mcbsp2_sdma_chs[] = {
-       { .name = "rx", .dma_req = 34 },
-       { .name = "tx", .dma_req = 33 },
-};
-
-static struct omap_hwmod_addr_space omap2420_mcbsp2_addrs[] = {
-       {
-               .name           = "mpu",
-               .pa_start       = 0x48076000,
-               .pa_end         = 0x480760ff,
-               .flags          = ADDR_TYPE_RT
-       },
+       { .irq = -1 }
 };
 
 /* l4_core -> mcbsp2 */
@@ -2108,8 +1481,7 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__mcbsp2 = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_mcbsp2_hwmod,
        .clk            = "mcbsp2_ick",
-       .addr           = omap2420_mcbsp2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2420_mcbsp2_addrs),
+       .addr           = omap2xxx_mcbsp2_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2122,9 +1494,7 @@ static struct omap_hwmod omap2420_mcbsp2_hwmod = {
        .name           = "mcbsp2",
        .class          = &omap2420_mcbsp_hwmod_class,
        .mpu_irqs       = omap2420_mcbsp2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2420_mcbsp2_irqs),
-       .sdma_reqs      = omap2420_mcbsp2_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2420_mcbsp2_sdma_chs),
+       .sdma_reqs      = omap2_mcbsp2_sdma_reqs,
        .main_clk       = "mcbsp2_fck",
        .prcm           = {
                .omap2 = {
index 9682dd5..2a52f02 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * omap_hwmod_2430_data.c - hardware modules present on the OMAP2430 chips
  *
- * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009-2011 Nokia Corporation
  * Paul Walmsley
  *
  * This program is free software; you can redistribute it and/or modify
@@ -131,42 +131,21 @@ static struct omap_hwmod_ocp_if omap2430_usbhsotg__l3 = {
        .user           = OCP_USER_MPU,
 };
 
-/* I2C IP block address space length (in bytes) */
-#define OMAP2_I2C_AS_LEN               128
-
 /* L4 CORE -> I2C1 interface */
-static struct omap_hwmod_addr_space omap2430_i2c1_addr_space[] = {
-       {
-               .pa_start       = 0x48070000,
-               .pa_end         = 0x48070000 + OMAP2_I2C_AS_LEN - 1,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2430_l4_core__i2c1 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_i2c1_hwmod,
        .clk            = "i2c1_ick",
-       .addr           = omap2430_i2c1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_i2c1_addr_space),
+       .addr           = omap2_i2c1_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 CORE -> I2C2 interface */
-static struct omap_hwmod_addr_space omap2430_i2c2_addr_space[] = {
-       {
-               .pa_start       = 0x48072000,
-               .pa_end         = 0x48072000 + OMAP2_I2C_AS_LEN - 1,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2430_l4_core__i2c2 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_i2c2_hwmod,
        .clk            = "i2c2_ick",
-       .addr           = omap2430_i2c2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_i2c2_addr_space),
+       .addr           = omap2_i2c2_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -178,56 +157,29 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__l4_wkup = {
 };
 
 /* L4 CORE -> UART1 interface */
-static struct omap_hwmod_addr_space omap2430_uart1_addr_space[] = {
-       {
-               .pa_start       = OMAP2_UART1_BASE,
-               .pa_end         = OMAP2_UART1_BASE + SZ_8K - 1,
-               .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2_l4_core__uart1 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_uart1_hwmod,
        .clk            = "uart1_ick",
-       .addr           = omap2430_uart1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_uart1_addr_space),
+       .addr           = omap2xxx_uart1_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 CORE -> UART2 interface */
-static struct omap_hwmod_addr_space omap2430_uart2_addr_space[] = {
-       {
-               .pa_start       = OMAP2_UART2_BASE,
-               .pa_end         = OMAP2_UART2_BASE + SZ_1K - 1,
-               .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2_l4_core__uart2 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_uart2_hwmod,
        .clk            = "uart2_ick",
-       .addr           = omap2430_uart2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_uart2_addr_space),
+       .addr           = omap2xxx_uart2_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 PER -> UART3 interface */
-static struct omap_hwmod_addr_space omap2430_uart3_addr_space[] = {
-       {
-               .pa_start       = OMAP2_UART3_BASE,
-               .pa_end         = OMAP2_UART3_BASE + SZ_1K - 1,
-               .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_uart3_hwmod,
        .clk            = "uart3_ick",
-       .addr           = omap2430_uart3_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_uart3_addr_space),
+       .addr           = omap2xxx_uart3_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -248,7 +200,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__usbhsotg = {
        .slave          = &omap2430_usbhsotg_hwmod,
        .clk            = "usb_l4_ick",
        .addr           = omap2430_usbhsotg_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_usbhsotg_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -261,38 +212,20 @@ static struct omap_hwmod_ocp_if *omap2430_usbhsotg_slaves[] = {
 };
 
 /* L4 CORE -> MMC1 interface */
-static struct omap_hwmod_addr_space omap2430_mmc1_addr_space[] = {
-       {
-               .pa_start       = 0x4809c000,
-               .pa_end         = 0x4809c1ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2430_l4_core__mmc1 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_mmc1_hwmod,
        .clk            = "mmchs1_ick",
        .addr           = omap2430_mmc1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mmc1_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 CORE -> MMC2 interface */
-static struct omap_hwmod_addr_space omap2430_mmc2_addr_space[] = {
-       {
-               .pa_start       = 0x480b4000,
-               .pa_end         = 0x480b41ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2430_l4_core__mmc2 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_mmc2_hwmod,
-       .addr           = omap2430_mmc2_addr_space,
        .clk            = "mmchs2_ick",
-       .addr_cnt       = ARRAY_SIZE(omap2430_mmc2_addr_space),
+       .addr           = omap2430_mmc2_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -333,56 +266,29 @@ static struct omap_hwmod_ocp_if *omap2430_l4_wkup_masters[] = {
 };
 
 /* l4 core -> mcspi1 interface */
-static struct omap_hwmod_addr_space omap2430_mcspi1_addr_space[] = {
-       {
-               .pa_start       = 0x48098000,
-               .pa_end         = 0x480980ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi1 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_mcspi1_hwmod,
        .clk            = "mcspi1_ick",
-       .addr           = omap2430_mcspi1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mcspi1_addr_space),
+       .addr           = omap2_mcspi1_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4 core -> mcspi2 interface */
-static struct omap_hwmod_addr_space omap2430_mcspi2_addr_space[] = {
-       {
-               .pa_start       = 0x4809a000,
-               .pa_end         = 0x4809a0ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi2 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_mcspi2_hwmod,
        .clk            = "mcspi2_ick",
-       .addr           = omap2430_mcspi2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mcspi2_addr_space),
+       .addr           = omap2_mcspi2_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4 core -> mcspi3 interface */
-static struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[] = {
-       {
-               .pa_start       = 0x480b8000,
-               .pa_end         = 0x480b80ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi3 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_mcspi3_hwmod,
        .clk            = "mcspi3_ick",
        .addr           = omap2430_mcspi3_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mcspi3_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -441,29 +347,8 @@ static struct omap_hwmod omap2430_iva_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
-/* Timer Common */
-static struct omap_hwmod_class_sysconfig omap2430_timer_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
-                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_timer_hwmod_class = {
-       .name = "timer",
-       .sysc = &omap2430_timer_sysc,
-       .rev = OMAP_TIMER_IP_VERSION_1,
-};
-
 /* timer1 */
 static struct omap_hwmod omap2430_timer1_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer1_mpu_irqs[] = {
-       { .irq = 37, },
-};
 
 static struct omap_hwmod_addr_space omap2430_timer1_addrs[] = {
        {
@@ -471,6 +356,7 @@ static struct omap_hwmod_addr_space omap2430_timer1_addrs[] = {
                .pa_end         = 0x49018000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_wkup -> timer1 */
@@ -479,7 +365,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__timer1 = {
        .slave          = &omap2430_timer1_hwmod,
        .clk            = "gpt1_ick",
        .addr           = omap2430_timer1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
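
The per-2430 'timer' class deleted above is replaced by a shared omap2xxx class referenced from each timerN hwmod below. A sketch of its presumed shape, mirroring the fields that were removed; where it is defined is an assumption, not shown by this diff.

/* Presumed shared definition, e.g. in a common 2xxx ipblock data file. */
static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

struct omap_hwmod_class omap2xxx_timer_hwmod_class = {
	.name	= "timer",
	.sysc	= &omap2xxx_timer_sysc,
	.rev	= OMAP_TIMER_IP_VERSION_1,
};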
 
@@ -491,8 +376,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer1_slaves[] = {
 /* timer1 hwmod */
 static struct omap_hwmod omap2430_timer1_hwmod = {
        .name           = "timer1",
-       .mpu_irqs       = omap2430_timer1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer1_mpu_irqs),
+       .mpu_irqs       = omap2_timer1_mpu_irqs,
        .main_clk       = "gpt1_fck",
        .prcm           = {
                .omap2 = {
@@ -505,31 +389,19 @@ static struct omap_hwmod omap2430_timer1_hwmod = {
        },
        .slaves         = omap2430_timer1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer1_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer2 */
 static struct omap_hwmod omap2430_timer2_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer2_mpu_irqs[] = {
-       { .irq = 38, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer2_addrs[] = {
-       {
-               .pa_start       = 0x4802a000,
-               .pa_end         = 0x4802a000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer2 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer2 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer2_hwmod,
        .clk            = "gpt2_ick",
-       .addr           = omap2430_timer2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer2_addrs),
+       .addr           = omap2xxx_timer2_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -541,8 +413,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer2_slaves[] = {
 /* timer2 hwmod */
 static struct omap_hwmod omap2430_timer2_hwmod = {
        .name           = "timer2",
-       .mpu_irqs       = omap2430_timer2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer2_mpu_irqs),
+       .mpu_irqs       = omap2_timer2_mpu_irqs,
        .main_clk       = "gpt2_fck",
        .prcm           = {
                .omap2 = {
@@ -555,31 +426,19 @@ static struct omap_hwmod omap2430_timer2_hwmod = {
        },
        .slaves         = omap2430_timer2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer2_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer3 */
 static struct omap_hwmod omap2430_timer3_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer3_mpu_irqs[] = {
-       { .irq = 39, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer3_addrs[] = {
-       {
-               .pa_start       = 0x48078000,
-               .pa_end         = 0x48078000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer3 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer3 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer3_hwmod,
        .clk            = "gpt3_ick",
-       .addr           = omap2430_timer3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer3_addrs),
+       .addr           = omap2xxx_timer3_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -591,8 +450,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer3_slaves[] = {
 /* timer3 hwmod */
 static struct omap_hwmod omap2430_timer3_hwmod = {
        .name           = "timer3",
-       .mpu_irqs       = omap2430_timer3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer3_mpu_irqs),
+       .mpu_irqs       = omap2_timer3_mpu_irqs,
        .main_clk       = "gpt3_fck",
        .prcm           = {
                .omap2 = {
@@ -605,31 +463,19 @@ static struct omap_hwmod omap2430_timer3_hwmod = {
        },
        .slaves         = omap2430_timer3_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer3_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer4 */
 static struct omap_hwmod omap2430_timer4_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer4_mpu_irqs[] = {
-       { .irq = 40, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer4_addrs[] = {
-       {
-               .pa_start       = 0x4807a000,
-               .pa_end         = 0x4807a000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer4 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer4 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer4_hwmod,
        .clk            = "gpt4_ick",
-       .addr           = omap2430_timer4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer4_addrs),
+       .addr           = omap2xxx_timer4_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -641,8 +487,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer4_slaves[] = {
 /* timer4 hwmod */
 static struct omap_hwmod omap2430_timer4_hwmod = {
        .name           = "timer4",
-       .mpu_irqs       = omap2430_timer4_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer4_mpu_irqs),
+       .mpu_irqs       = omap2_timer4_mpu_irqs,
        .main_clk       = "gpt4_fck",
        .prcm           = {
                .omap2 = {
@@ -655,31 +500,19 @@ static struct omap_hwmod omap2430_timer4_hwmod = {
        },
        .slaves         = omap2430_timer4_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer4_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer5 */
 static struct omap_hwmod omap2430_timer5_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer5_mpu_irqs[] = {
-       { .irq = 41, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer5_addrs[] = {
-       {
-               .pa_start       = 0x4807c000,
-               .pa_end         = 0x4807c000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer5 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer5 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer5_hwmod,
        .clk            = "gpt5_ick",
-       .addr           = omap2430_timer5_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer5_addrs),
+       .addr           = omap2xxx_timer5_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -691,8 +524,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer5_slaves[] = {
 /* timer5 hwmod */
 static struct omap_hwmod omap2430_timer5_hwmod = {
        .name           = "timer5",
-       .mpu_irqs       = omap2430_timer5_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer5_mpu_irqs),
+       .mpu_irqs       = omap2_timer5_mpu_irqs,
        .main_clk       = "gpt5_fck",
        .prcm           = {
                .omap2 = {
@@ -705,31 +537,19 @@ static struct omap_hwmod omap2430_timer5_hwmod = {
        },
        .slaves         = omap2430_timer5_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer5_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer6 */
 static struct omap_hwmod omap2430_timer6_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer6_mpu_irqs[] = {
-       { .irq = 42, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer6_addrs[] = {
-       {
-               .pa_start       = 0x4807e000,
-               .pa_end         = 0x4807e000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer6 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer6 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer6_hwmod,
        .clk            = "gpt6_ick",
-       .addr           = omap2430_timer6_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer6_addrs),
+       .addr           = omap2xxx_timer6_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -741,8 +561,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer6_slaves[] = {
 /* timer6 hwmod */
 static struct omap_hwmod omap2430_timer6_hwmod = {
        .name           = "timer6",
-       .mpu_irqs       = omap2430_timer6_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer6_mpu_irqs),
+       .mpu_irqs       = omap2_timer6_mpu_irqs,
        .main_clk       = "gpt6_fck",
        .prcm           = {
                .omap2 = {
@@ -755,31 +574,19 @@ static struct omap_hwmod omap2430_timer6_hwmod = {
        },
        .slaves         = omap2430_timer6_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer6_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer7 */
 static struct omap_hwmod omap2430_timer7_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer7_mpu_irqs[] = {
-       { .irq = 43, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer7_addrs[] = {
-       {
-               .pa_start       = 0x48080000,
-               .pa_end         = 0x48080000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer7 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer7 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer7_hwmod,
        .clk            = "gpt7_ick",
-       .addr           = omap2430_timer7_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer7_addrs),
+       .addr           = omap2xxx_timer7_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -791,8 +598,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer7_slaves[] = {
 /* timer7 hwmod */
 static struct omap_hwmod omap2430_timer7_hwmod = {
        .name           = "timer7",
-       .mpu_irqs       = omap2430_timer7_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer7_mpu_irqs),
+       .mpu_irqs       = omap2_timer7_mpu_irqs,
        .main_clk       = "gpt7_fck",
        .prcm           = {
                .omap2 = {
@@ -805,31 +611,19 @@ static struct omap_hwmod omap2430_timer7_hwmod = {
        },
        .slaves         = omap2430_timer7_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer7_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer8 */
 static struct omap_hwmod omap2430_timer8_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer8_mpu_irqs[] = {
-       { .irq = 44, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer8_addrs[] = {
-       {
-               .pa_start       = 0x48082000,
-               .pa_end         = 0x48082000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer8 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer8 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer8_hwmod,
        .clk            = "gpt8_ick",
-       .addr           = omap2430_timer8_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer8_addrs),
+       .addr           = omap2xxx_timer8_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -841,8 +635,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer8_slaves[] = {
 /* timer8 hwmod */
 static struct omap_hwmod omap2430_timer8_hwmod = {
        .name           = "timer8",
-       .mpu_irqs       = omap2430_timer8_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer8_mpu_irqs),
+       .mpu_irqs       = omap2_timer8_mpu_irqs,
        .main_clk       = "gpt8_fck",
        .prcm           = {
                .omap2 = {
@@ -855,31 +648,19 @@ static struct omap_hwmod omap2430_timer8_hwmod = {
        },
        .slaves         = omap2430_timer8_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer8_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer9 */
 static struct omap_hwmod omap2430_timer9_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer9_mpu_irqs[] = {
-       { .irq = 45, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer9_addrs[] = {
-       {
-               .pa_start       = 0x48084000,
-               .pa_end         = 0x48084000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer9 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer9 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer9_hwmod,
        .clk            = "gpt9_ick",
-       .addr           = omap2430_timer9_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer9_addrs),
+       .addr           = omap2xxx_timer9_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -891,8 +672,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer9_slaves[] = {
 /* timer9 hwmod */
 static struct omap_hwmod omap2430_timer9_hwmod = {
        .name           = "timer9",
-       .mpu_irqs       = omap2430_timer9_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer9_mpu_irqs),
+       .mpu_irqs       = omap2_timer9_mpu_irqs,
        .main_clk       = "gpt9_fck",
        .prcm           = {
                .omap2 = {
@@ -905,31 +685,19 @@ static struct omap_hwmod omap2430_timer9_hwmod = {
        },
        .slaves         = omap2430_timer9_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer9_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer10 */
 static struct omap_hwmod omap2430_timer10_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer10_mpu_irqs[] = {
-       { .irq = 46, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer10_addrs[] = {
-       {
-               .pa_start       = 0x48086000,
-               .pa_end         = 0x48086000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer10 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer10 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer10_hwmod,
        .clk            = "gpt10_ick",
-       .addr           = omap2430_timer10_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer10_addrs),
+       .addr           = omap2_timer10_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -941,8 +709,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer10_slaves[] = {
 /* timer10 hwmod */
 static struct omap_hwmod omap2430_timer10_hwmod = {
        .name           = "timer10",
-       .mpu_irqs       = omap2430_timer10_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer10_mpu_irqs),
+       .mpu_irqs       = omap2_timer10_mpu_irqs,
        .main_clk       = "gpt10_fck",
        .prcm           = {
                .omap2 = {
@@ -955,31 +722,19 @@ static struct omap_hwmod omap2430_timer10_hwmod = {
        },
        .slaves         = omap2430_timer10_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer10_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer11 */
 static struct omap_hwmod omap2430_timer11_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer11_mpu_irqs[] = {
-       { .irq = 47, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer11_addrs[] = {
-       {
-               .pa_start       = 0x48088000,
-               .pa_end         = 0x48088000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer11 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer11 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer11_hwmod,
        .clk            = "gpt11_ick",
-       .addr           = omap2430_timer11_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer11_addrs),
+       .addr           = omap2_timer11_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -991,8 +746,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer11_slaves[] = {
 /* timer11 hwmod */
 static struct omap_hwmod omap2430_timer11_hwmod = {
        .name           = "timer11",
-       .mpu_irqs       = omap2430_timer11_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer11_mpu_irqs),
+       .mpu_irqs       = omap2_timer11_mpu_irqs,
        .main_clk       = "gpt11_fck",
        .prcm           = {
                .omap2 = {
@@ -1005,31 +759,19 @@ static struct omap_hwmod omap2430_timer11_hwmod = {
        },
        .slaves         = omap2430_timer11_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer11_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
 /* timer12 */
 static struct omap_hwmod omap2430_timer12_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer12_mpu_irqs[] = {
-       { .irq = 48, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer12_addrs[] = {
-       {
-               .pa_start       = 0x4808a000,
-               .pa_end         = 0x4808a000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer12 */
 static struct omap_hwmod_ocp_if omap2430_l4_core__timer12 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_timer12_hwmod,
        .clk            = "gpt12_ick",
-       .addr           = omap2430_timer12_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_timer12_addrs),
+       .addr           = omap2xxx_timer12_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1041,8 +783,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer12_slaves[] = {
 /* timer12 hwmod */
 static struct omap_hwmod omap2430_timer12_hwmod = {
        .name           = "timer12",
-       .mpu_irqs       = omap2430_timer12_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_timer12_mpu_irqs),
+       .mpu_irqs       = omap2xxx_timer12_mpu_irqs,
        .main_clk       = "gpt12_fck",
        .prcm           = {
                .omap2 = {
@@ -1055,7 +796,7 @@ static struct omap_hwmod omap2430_timer12_hwmod = {
        },
        .slaves         = omap2430_timer12_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_timer12_slaves),
-       .class          = &omap2430_timer_hwmod_class,
+       .class          = &omap2xxx_timer_hwmod_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
 };
 
@@ -1066,6 +807,7 @@ static struct omap_hwmod_addr_space omap2430_wd_timer2_addrs[] = {
                .pa_end         = 0x4901607f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2430_l4_wkup__wd_timer2 = {
@@ -1073,31 +815,9 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__wd_timer2 = {
        .slave          = &omap2430_wd_timer2_hwmod,
        .clk            = "mpu_wdt_ick",
        .addr           = omap2430_wd_timer2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_wd_timer2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/*
- * 'wd_timer' class
- * 32-bit watchdog upward counter that generates a pulse on the reset pin on
- * overflow condition
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_wd_timer_sysc = {
-       .rev_offs       = 0x0,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_wd_timer_hwmod_class = {
-       .name           = "wd_timer",
-       .sysc           = &omap2430_wd_timer_sysc,
-       .pre_shutdown   = &omap2_wd_timer_disable
-};
-
 /* wd_timer2 */
 static struct omap_hwmod_ocp_if *omap2430_wd_timer2_slaves[] = {
        &omap2430_l4_wkup__wd_timer2,
@@ -1105,7 +825,7 @@ static struct omap_hwmod_ocp_if *omap2430_wd_timer2_slaves[] = {
 
 static struct omap_hwmod omap2430_wd_timer2_hwmod = {
        .name           = "wd_timer2",
-       .class          = &omap2430_wd_timer_hwmod_class,
+       .class          = &omap2xxx_wd_timer_hwmod_class,
        .main_clk       = "mpu_wdt_fck",
        .prcm           = {
                .omap2 = {
@@ -1121,45 +841,16 @@ static struct omap_hwmod omap2430_wd_timer2_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
-/* UART */
-
-static struct omap_hwmod_class_sysconfig uart_sysc = {
-       .rev_offs       = 0x50,
-       .sysc_offs      = 0x54,
-       .syss_offs      = 0x58,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE |
-                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class uart_class = {
-       .name = "uart",
-       .sysc = &uart_sysc,
-};
-
 /* UART1 */
 
-static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
-       { .irq = INT_24XX_UART1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
-       { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
-       { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
-};
-
 static struct omap_hwmod_ocp_if *omap2430_uart1_slaves[] = {
        &omap2_l4_core__uart1,
 };
 
 static struct omap_hwmod omap2430_uart1_hwmod = {
        .name           = "uart1",
-       .mpu_irqs       = uart1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart1_mpu_irqs),
-       .sdma_reqs      = uart1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart1_sdma_reqs),
+       .mpu_irqs       = omap2_uart1_mpu_irqs,
+       .sdma_reqs      = omap2_uart1_sdma_reqs,
        .main_clk       = "uart1_fck",
        .prcm           = {
                .omap2 = {
@@ -1172,31 +863,20 @@ static struct omap_hwmod omap2430_uart1_hwmod = {
        },
        .slaves         = omap2430_uart1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_uart1_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
 /* UART2 */
 
-static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
-       { .irq = INT_24XX_UART2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
-       { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
-       { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
-};
-
 static struct omap_hwmod_ocp_if *omap2430_uart2_slaves[] = {
        &omap2_l4_core__uart2,
 };
 
 static struct omap_hwmod omap2430_uart2_hwmod = {
        .name           = "uart2",
-       .mpu_irqs       = uart2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart2_mpu_irqs),
-       .sdma_reqs      = uart2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart2_sdma_reqs),
+       .mpu_irqs       = omap2_uart2_mpu_irqs,
+       .sdma_reqs      = omap2_uart2_sdma_reqs,
        .main_clk       = "uart2_fck",
        .prcm           = {
                .omap2 = {
@@ -1209,31 +889,20 @@ static struct omap_hwmod omap2430_uart2_hwmod = {
        },
        .slaves         = omap2430_uart2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_uart2_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
 /* UART3 */
 
-static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
-       { .irq = INT_24XX_UART3_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
-       { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
-       { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
-};
-
 static struct omap_hwmod_ocp_if *omap2430_uart3_slaves[] = {
        &omap2_l4_core__uart3,
 };
 
 static struct omap_hwmod omap2430_uart3_hwmod = {
        .name           = "uart3",
-       .mpu_irqs       = uart3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart3_mpu_irqs),
-       .sdma_reqs      = uart3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart3_sdma_reqs),
+       .mpu_irqs       = omap2_uart3_mpu_irqs,
+       .sdma_reqs      = omap2_uart3_sdma_reqs,
        .main_clk       = "uart3_fck",
        .prcm           = {
                .omap2 = {
@@ -1246,53 +915,22 @@ static struct omap_hwmod omap2430_uart3_hwmod = {
        },
        .slaves         = omap2430_uart3_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_uart3_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
-/*
- * 'dss' class
- * display sub-system
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_dss_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_dss_hwmod_class = {
-       .name = "dss",
-       .sysc = &omap2430_dss_sysc,
-};
-
-static struct omap_hwmod_dma_info omap2430_dss_sdma_chs[] = {
-       { .name = "dispc", .dma_req = 5 },
-};
-
 /* dss */
 /* dss master ports */
 static struct omap_hwmod_ocp_if *omap2430_dss_masters[] = {
        &omap2430_dss__l3,
 };
 
-static struct omap_hwmod_addr_space omap2430_dss_addrs[] = {
-       {
-               .pa_start       = 0x48050000,
-               .pa_end         = 0x480503FF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss */
 static struct omap_hwmod_ocp_if omap2430_l4_core__dss = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_dss_core_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap2430_dss_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_dss_addrs),
+       .addr           = omap2_dss_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1308,10 +946,9 @@ static struct omap_hwmod_opt_clk dss_opt_clks[] = {
 
 static struct omap_hwmod omap2430_dss_core_hwmod = {
        .name           = "dss_core",
-       .class          = &omap2430_dss_hwmod_class,
+       .class          = &omap2_dss_hwmod_class,
        .main_clk       = "dss1_fck", /* instead of dss_fck */
-       .sdma_reqs      = omap2430_dss_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_dss_sdma_chs),
+       .sdma_reqs      = omap2xxx_dss_sdma_chs,
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -1331,46 +968,12 @@ static struct omap_hwmod omap2430_dss_core_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_dispc_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
-                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_dispc_hwmod_class = {
-       .name = "dispc",
-       .sysc = &omap2430_dispc_sysc,
-};
-
-static struct omap_hwmod_irq_info omap2430_dispc_irqs[] = {
-       { .irq = 25 },
-};
-
-static struct omap_hwmod_addr_space omap2430_dss_dispc_addrs[] = {
-       {
-               .pa_start       = 0x48050400,
-               .pa_end         = 0x480507FF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss_dispc */
 static struct omap_hwmod_ocp_if omap2430_l4_core__dss_dispc = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_dss_dispc_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap2430_dss_dispc_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_dss_dispc_addrs),
+       .addr           = omap2_dss_dispc_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1381,9 +984,8 @@ static struct omap_hwmod_ocp_if *omap2430_dss_dispc_slaves[] = {
 
 static struct omap_hwmod omap2430_dss_dispc_hwmod = {
        .name           = "dss_dispc",
-       .class          = &omap2430_dispc_hwmod_class,
-       .mpu_irqs       = omap2430_dispc_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_dispc_irqs),
+       .class          = &omap2_dispc_hwmod_class,
+       .mpu_irqs       = omap2_dispc_irqs,
        .main_clk       = "dss1_fck",
        .prcm           = {
                .omap2 = {
@@ -1400,41 +1002,12 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'rfbi' class
- * remote frame buffer interface
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_rfbi_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_rfbi_hwmod_class = {
-       .name = "rfbi",
-       .sysc = &omap2430_rfbi_sysc,
-};
-
-static struct omap_hwmod_addr_space omap2430_dss_rfbi_addrs[] = {
-       {
-               .pa_start       = 0x48050800,
-               .pa_end         = 0x48050BFF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss_rfbi */
 static struct omap_hwmod_ocp_if omap2430_l4_core__dss_rfbi = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_dss_rfbi_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap2430_dss_rfbi_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_dss_rfbi_addrs),
+       .addr           = omap2_dss_rfbi_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1445,7 +1018,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
 
 static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
-       .class          = &omap2430_rfbi_hwmod_class,
+       .class          = &omap2_rfbi_hwmod_class,
        .main_clk       = "dss1_fck",
        .prcm           = {
                .omap2 = {
@@ -1460,31 +1033,12 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'venc' class
- * video encoder
- */
-
-static struct omap_hwmod_class omap2430_venc_hwmod_class = {
-       .name = "venc",
-};
-
-/* dss_venc */
-static struct omap_hwmod_addr_space omap2430_dss_venc_addrs[] = {
-       {
-               .pa_start       = 0x48050C00,
-               .pa_end         = 0x48050FFF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss_venc */
 static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_dss_venc_hwmod,
        .clk            = "dss_54m_fck",
-       .addr           = omap2430_dss_venc_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_dss_venc_addrs),
+       .addr           = omap2_dss_venc_addrs,
        .flags          = OCPIF_SWSUP_IDLE,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
@@ -1496,7 +1050,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
 
 static struct omap_hwmod omap2430_dss_venc_hwmod = {
        .name           = "dss_venc",
-       .class          = &omap2430_venc_hwmod_class,
+       .class          = &omap2_venc_hwmod_class,
        .main_clk       = "dss1_fck",
        .prcm           = {
                .omap2 = {
@@ -1532,25 +1086,14 @@ static struct omap_i2c_dev_attr i2c_dev_attr = {
 
 /* I2C1 */
 
-static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
-       { .irq = INT_24XX_I2C1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
-       { .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
-       { .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
-};
-
 static struct omap_hwmod_ocp_if *omap2430_i2c1_slaves[] = {
        &omap2430_l4_core__i2c1,
 };
 
 static struct omap_hwmod omap2430_i2c1_hwmod = {
        .name           = "i2c1",
-       .mpu_irqs       = i2c1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(i2c1_mpu_irqs),
-       .sdma_reqs      = i2c1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(i2c1_sdma_reqs),
+       .mpu_irqs       = omap2_i2c1_mpu_irqs,
+       .sdma_reqs      = omap2_i2c1_sdma_reqs,
        .main_clk       = "i2chs1_fck",
        .prcm           = {
                .omap2 = {
@@ -1578,25 +1121,14 @@ static struct omap_hwmod omap2430_i2c1_hwmod = {
 
 /* I2C2 */
 
-static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
-       { .irq = INT_24XX_I2C2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
-       { .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
-       { .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
-};
-
 static struct omap_hwmod_ocp_if *omap2430_i2c2_slaves[] = {
        &omap2430_l4_core__i2c2,
 };
 
 static struct omap_hwmod omap2430_i2c2_hwmod = {
        .name           = "i2c2",
-       .mpu_irqs       = i2c2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(i2c2_mpu_irqs),
-       .sdma_reqs      = i2c2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(i2c2_sdma_reqs),
+       .mpu_irqs       = omap2_i2c2_mpu_irqs,
+       .sdma_reqs      = omap2_i2c2_sdma_reqs,
        .main_clk       = "i2chs2_fck",
        .prcm           = {
                .omap2 = {
@@ -1621,6 +1153,7 @@ static struct omap_hwmod_addr_space omap2430_gpio1_addr_space[] = {
                .pa_end         = 0x4900C1ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio1 = {
@@ -1628,7 +1161,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio1 = {
        .slave          = &omap2430_gpio1_hwmod,
        .clk            = "gpios_ick",
        .addr           = omap2430_gpio1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_gpio1_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1639,6 +1171,7 @@ static struct omap_hwmod_addr_space omap2430_gpio2_addr_space[] = {
                .pa_end         = 0x4900E1ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio2 = {
@@ -1646,7 +1179,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio2 = {
        .slave          = &omap2430_gpio2_hwmod,
        .clk            = "gpios_ick",
        .addr           = omap2430_gpio2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_gpio2_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1657,6 +1189,7 @@ static struct omap_hwmod_addr_space omap2430_gpio3_addr_space[] = {
                .pa_end         = 0x490101ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio3 = {
@@ -1664,7 +1197,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio3 = {
        .slave          = &omap2430_gpio3_hwmod,
        .clk            = "gpios_ick",
        .addr           = omap2430_gpio3_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_gpio3_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1675,6 +1207,7 @@ static struct omap_hwmod_addr_space omap2430_gpio4_addr_space[] = {
                .pa_end         = 0x490121ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio4 = {
@@ -1682,7 +1215,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio4 = {
        .slave          = &omap2430_gpio4_hwmod,
        .clk            = "gpios_ick",
        .addr           = omap2430_gpio4_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_gpio4_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1693,6 +1225,7 @@ static struct omap_hwmod_addr_space omap2430_gpio5_addr_space[] = {
                .pa_end         = 0x480B61ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap2430_l4_core__gpio5 = {
@@ -1700,7 +1233,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__gpio5 = {
        .slave          = &omap2430_gpio5_hwmod,
        .clk            = "gpio5_ick",
        .addr           = omap2430_gpio5_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap2430_gpio5_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1710,32 +1242,7 @@ static struct omap_gpio_dev_attr gpio_dev_attr = {
        .dbck_flag = false,
 };
 
-static struct omap_hwmod_class_sysconfig omap243x_gpio_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
-                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
-                          SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-/*
- * 'gpio' class
- * general purpose io module
- */
-static struct omap_hwmod_class omap243x_gpio_hwmod_class = {
-       .name = "gpio",
-       .sysc = &omap243x_gpio_sysc,
-       .rev = 0,
-};
-
 /* gpio1 */
-static struct omap_hwmod_irq_info omap243x_gpio1_irqs[] = {
-       { .irq = 29 }, /* INT_24XX_GPIO_BANK1 */
-};
-
 static struct omap_hwmod_ocp_if *omap2430_gpio1_slaves[] = {
        &omap2430_l4_wkup__gpio1,
 };
@@ -1743,8 +1250,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio1_slaves[] = {
 static struct omap_hwmod omap2430_gpio1_hwmod = {
        .name           = "gpio1",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap243x_gpio1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap243x_gpio1_irqs),
+       .mpu_irqs       = omap2_gpio1_irqs,
        .main_clk       = "gpios_fck",
        .prcm           = {
                .omap2 = {
@@ -1757,16 +1263,12 @@ static struct omap_hwmod omap2430_gpio1_hwmod = {
        },
        .slaves         = omap2430_gpio1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_gpio1_slaves),
-       .class          = &omap243x_gpio_hwmod_class,
+       .class          = &omap2xxx_gpio_hwmod_class,
        .dev_attr       = &gpio_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
 /* gpio2 */
-static struct omap_hwmod_irq_info omap243x_gpio2_irqs[] = {
-       { .irq = 30 }, /* INT_24XX_GPIO_BANK2 */
-};
-
 static struct omap_hwmod_ocp_if *omap2430_gpio2_slaves[] = {
        &omap2430_l4_wkup__gpio2,
 };
@@ -1774,8 +1276,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio2_slaves[] = {
 static struct omap_hwmod omap2430_gpio2_hwmod = {
        .name           = "gpio2",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap243x_gpio2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap243x_gpio2_irqs),
+       .mpu_irqs       = omap2_gpio2_irqs,
        .main_clk       = "gpios_fck",
        .prcm           = {
                .omap2 = {
@@ -1788,16 +1289,12 @@ static struct omap_hwmod omap2430_gpio2_hwmod = {
        },
        .slaves         = omap2430_gpio2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_gpio2_slaves),
-       .class          = &omap243x_gpio_hwmod_class,
+       .class          = &omap2xxx_gpio_hwmod_class,
        .dev_attr       = &gpio_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
 /* gpio3 */
-static struct omap_hwmod_irq_info omap243x_gpio3_irqs[] = {
-       { .irq = 31 }, /* INT_24XX_GPIO_BANK3 */
-};
-
 static struct omap_hwmod_ocp_if *omap2430_gpio3_slaves[] = {
        &omap2430_l4_wkup__gpio3,
 };
@@ -1805,8 +1302,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio3_slaves[] = {
 static struct omap_hwmod omap2430_gpio3_hwmod = {
        .name           = "gpio3",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap243x_gpio3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap243x_gpio3_irqs),
+       .mpu_irqs       = omap2_gpio3_irqs,
        .main_clk       = "gpios_fck",
        .prcm           = {
                .omap2 = {
@@ -1819,16 +1315,12 @@ static struct omap_hwmod omap2430_gpio3_hwmod = {
        },
        .slaves         = omap2430_gpio3_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_gpio3_slaves),
-       .class          = &omap243x_gpio_hwmod_class,
+       .class          = &omap2xxx_gpio_hwmod_class,
        .dev_attr       = &gpio_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
 /* gpio4 */
-static struct omap_hwmod_irq_info omap243x_gpio4_irqs[] = {
-       { .irq = 32 }, /* INT_24XX_GPIO_BANK4 */
-};
-
 static struct omap_hwmod_ocp_if *omap2430_gpio4_slaves[] = {
        &omap2430_l4_wkup__gpio4,
 };
@@ -1836,8 +1328,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio4_slaves[] = {
 static struct omap_hwmod omap2430_gpio4_hwmod = {
        .name           = "gpio4",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap243x_gpio4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap243x_gpio4_irqs),
+       .mpu_irqs       = omap2_gpio4_irqs,
        .main_clk       = "gpios_fck",
        .prcm           = {
                .omap2 = {
@@ -1850,7 +1341,7 @@ static struct omap_hwmod omap2430_gpio4_hwmod = {
        },
        .slaves         = omap2430_gpio4_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_gpio4_slaves),
-       .class          = &omap243x_gpio_hwmod_class,
+       .class          = &omap2xxx_gpio_hwmod_class,
        .dev_attr       = &gpio_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
@@ -1858,6 +1349,7 @@ static struct omap_hwmod omap2430_gpio4_hwmod = {
 /* gpio5 */
 static struct omap_hwmod_irq_info omap243x_gpio5_irqs[] = {
        { .irq = 33 }, /* INT_24XX_GPIO_BANK5 */
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_ocp_if *omap2430_gpio5_slaves[] = {
@@ -1868,7 +1360,6 @@ static struct omap_hwmod omap2430_gpio5_hwmod = {
        .name           = "gpio5",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap243x_gpio5_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap243x_gpio5_irqs),
        .main_clk       = "gpio5_fck",
        .prcm           = {
                .omap2 = {
@@ -1881,28 +1372,11 @@ static struct omap_hwmod omap2430_gpio5_hwmod = {
        },
        .slaves         = omap2430_gpio5_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_gpio5_slaves),
-       .class          = &omap243x_gpio_hwmod_class,
+       .class          = &omap2xxx_gpio_hwmod_class,
        .dev_attr       = &gpio_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
-/* dma_system */
-static struct omap_hwmod_class_sysconfig omap2430_dma_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x002c,
-       .syss_offs      = 0x0028,
-       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
-                          SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
-                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_dma_hwmod_class = {
-       .name = "dma",
-       .sysc = &omap2430_dma_sysc,
-};
-
 /* dma attributes */
 static struct omap_dma_dev_attr dma_dev_attr = {
        .dev_caps  = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
@@ -1910,21 +1384,6 @@ static struct omap_dma_dev_attr dma_dev_attr = {
        .lch_count = 32,
 };
 
-static struct omap_hwmod_irq_info omap2430_dma_system_irqs[] = {
-       { .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
-       { .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
-       { .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
-       { .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
-};
-
-static struct omap_hwmod_addr_space omap2430_dma_system_addrs[] = {
-       {
-               .pa_start       = 0x48056000,
-               .pa_end         = 0x48056fff,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* dma_system -> L3 */
 static struct omap_hwmod_ocp_if omap2430_dma_system__l3 = {
        .master         = &omap2430_dma_system_hwmod,
@@ -1943,8 +1402,7 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__dma_system = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_dma_system_hwmod,
        .clk            = "sdma_ick",
-       .addr           = omap2430_dma_system_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_dma_system_addrs),
+       .addr           = omap2_dma_system_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1955,9 +1413,8 @@ static struct omap_hwmod_ocp_if *omap2430_dma_system_slaves[] = {
 
 static struct omap_hwmod omap2430_dma_system_hwmod = {
        .name           = "dma",
-       .class          = &omap2430_dma_hwmod_class,
-       .mpu_irqs       = omap2430_dma_system_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_dma_system_irqs),
+       .class          = &omap2xxx_dma_hwmod_class,
+       .mpu_irqs       = omap2_dma_system_irqs,
        .main_clk       = "core_l3_ck",
        .slaves         = omap2430_dma_system_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_dma_system_slaves),
@@ -1968,47 +1425,18 @@ static struct omap_hwmod omap2430_dma_system_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'mailbox' class
- * mailbox module allowing communication between the on-chip processors
- * using a queued mailbox-interrupt mechanism.
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_mailbox_sysc = {
-       .rev_offs       = 0x000,
-       .sysc_offs      = 0x010,
-       .syss_offs      = 0x014,
-       .sysc_flags     = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
-                               SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_mailbox_hwmod_class = {
-       .name = "mailbox",
-       .sysc = &omap2430_mailbox_sysc,
-};
-
 /* mailbox */
 static struct omap_hwmod omap2430_mailbox_hwmod;
 static struct omap_hwmod_irq_info omap2430_mailbox_irqs[] = {
        { .irq = 26 },
-};
-
-static struct omap_hwmod_addr_space omap2430_mailbox_addrs[] = {
-       {
-               .pa_start       = 0x48094000,
-               .pa_end         = 0x480941ff,
-               .flags          = ADDR_TYPE_RT,
-       },
+       { .irq = -1 }
 };
 
 /* l4_core -> mailbox */
 static struct omap_hwmod_ocp_if omap2430_l4_core__mailbox = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_mailbox_hwmod,
-       .addr           = omap2430_mailbox_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mailbox_addrs),
+       .addr           = omap2_mailbox_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2019,9 +1447,8 @@ static struct omap_hwmod_ocp_if *omap2430_mailbox_slaves[] = {
 
 static struct omap_hwmod omap2430_mailbox_hwmod = {
        .name           = "mailbox",
-       .class          = &omap2430_mailbox_hwmod_class,
+       .class          = &omap2xxx_mailbox_hwmod_class,
        .mpu_irqs       = omap2430_mailbox_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mailbox_irqs),
        .main_clk       = "mailboxes_ick",
        .prcm           = {
                .omap2 = {
@@ -2037,45 +1464,7 @@ static struct omap_hwmod omap2430_mailbox_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
-/*
- * 'mcspi' class
- * multichannel serial port interface (mcspi) / master/slave synchronous serial
- * bus
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_mcspi_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
-                               SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
-                               SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_mcspi_class = {
-       .name = "mcspi",
-       .sysc = &omap2430_mcspi_sysc,
-       .rev = OMAP2_MCSPI_REV,
-};
-
 /* mcspi1 */
-static struct omap_hwmod_irq_info omap2430_mcspi1_mpu_irqs[] = {
-       { .irq = 65 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcspi1_sdma_reqs[] = {
-       { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
-       { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
-       { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
-       { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
-       { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
-       { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
-       { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
-       { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
-};
-
 static struct omap_hwmod_ocp_if *omap2430_mcspi1_slaves[] = {
        &omap2430_l4_core__mcspi1,
 };
@@ -2086,10 +1475,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
 
 static struct omap_hwmod omap2430_mcspi1_hwmod = {
        .name           = "mcspi1_hwmod",
-       .mpu_irqs       = omap2430_mcspi1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mcspi1_mpu_irqs),
-       .sdma_reqs      = omap2430_mcspi1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mcspi1_sdma_reqs),
+       .mpu_irqs       = omap2_mcspi1_mpu_irqs,
+       .sdma_reqs      = omap2_mcspi1_sdma_reqs,
        .main_clk       = "mcspi1_fck",
        .prcm           = {
                .omap2 = {
@@ -2102,23 +1489,12 @@ static struct omap_hwmod omap2430_mcspi1_hwmod = {
        },
        .slaves         = omap2430_mcspi1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_mcspi1_slaves),
-       .class          = &omap2430_mcspi_class,
-       .dev_attr       = &omap_mcspi1_dev_attr,
+       .class          = &omap2xxx_mcspi_class,
+       .dev_attr       = &omap_mcspi1_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
 /* mcspi2 */
-static struct omap_hwmod_irq_info omap2430_mcspi2_mpu_irqs[] = {
-       { .irq = 66 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcspi2_sdma_reqs[] = {
-       { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
-       { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
-       { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
-       { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
-};
-
 static struct omap_hwmod_ocp_if *omap2430_mcspi2_slaves[] = {
        &omap2430_l4_core__mcspi2,
 };
@@ -2129,10 +1505,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
 
 static struct omap_hwmod omap2430_mcspi2_hwmod = {
        .name           = "mcspi2_hwmod",
-       .mpu_irqs       = omap2430_mcspi2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mcspi2_mpu_irqs),
-       .sdma_reqs      = omap2430_mcspi2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mcspi2_sdma_reqs),
+       .mpu_irqs       = omap2_mcspi2_mpu_irqs,
+       .sdma_reqs      = omap2_mcspi2_sdma_reqs,
        .main_clk       = "mcspi2_fck",
        .prcm           = {
                .omap2 = {
@@ -2145,14 +1519,15 @@ static struct omap_hwmod omap2430_mcspi2_hwmod = {
        },
        .slaves         = omap2430_mcspi2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_mcspi2_slaves),
-       .class          = &omap2430_mcspi_class,
-       .dev_attr       = &omap_mcspi2_dev_attr,
+       .class          = &omap2xxx_mcspi_class,
+       .dev_attr       = &omap_mcspi2_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
 /* mcspi3 */
 static struct omap_hwmod_irq_info omap2430_mcspi3_mpu_irqs[] = {
        { .irq = 91 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap2430_mcspi3_sdma_reqs[] = {
@@ -2160,6 +1535,7 @@ static struct omap_hwmod_dma_info omap2430_mcspi3_sdma_reqs[] = {
        { .name = "rx0", .dma_req = 16 }, /* DMA_SPI3_RX0 */
        { .name = "tx1", .dma_req = 23 }, /* DMA_SPI3_TX1 */
        { .name = "rx1", .dma_req = 24 }, /* DMA_SPI3_RX1 */
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_ocp_if *omap2430_mcspi3_slaves[] = {
@@ -2173,9 +1549,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
 static struct omap_hwmod omap2430_mcspi3_hwmod = {
        .name           = "mcspi3_hwmod",
        .mpu_irqs       = omap2430_mcspi3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mcspi3_mpu_irqs),
        .sdma_reqs      = omap2430_mcspi3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mcspi3_sdma_reqs),
        .main_clk       = "mcspi3_fck",
        .prcm           = {
                .omap2 = {
@@ -2188,8 +1562,8 @@ static struct omap_hwmod omap2430_mcspi3_hwmod = {
        },
        .slaves         = omap2430_mcspi3_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_mcspi3_slaves),
-       .class          = &omap2430_mcspi_class,
-       .dev_attr       = &omap_mcspi3_dev_attr,
+       .class          = &omap2xxx_mcspi_class,
+       .dev_attr       = &omap_mcspi3_dev_attr,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
@@ -2218,12 +1592,12 @@ static struct omap_hwmod_irq_info omap2430_usbhsotg_mpu_irqs[] = {
 
        { .name = "mc", .irq = 92 },
        { .name = "dma", .irq = 93 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod omap2430_usbhsotg_hwmod = {
        .name           = "usb_otg_hs",
        .mpu_irqs       = omap2430_usbhsotg_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_usbhsotg_mpu_irqs),
        .main_clk       = "usbhs_ick",
        .prcm           = {
                .omap2 = {
@@ -2273,20 +1647,7 @@ static struct omap_hwmod_irq_info omap2430_mcbsp1_irqs[] = {
        { .name = "rx",         .irq = 60 },
        { .name = "ovr",        .irq = 61 },
        { .name = "common",     .irq = 64 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcbsp1_sdma_chs[] = {
-       { .name = "rx", .dma_req = 32 },
-       { .name = "tx", .dma_req = 31 },
-};
-
-static struct omap_hwmod_addr_space omap2430_mcbsp1_addrs[] = {
-       {
-               .name           = "mpu",
-               .pa_start       = 0x48074000,
-               .pa_end         = 0x480740ff,
-               .flags          = ADDR_TYPE_RT
-       },
+       { .irq = -1 }
 };
 
 /* l4_core -> mcbsp1 */
@@ -2294,8 +1655,7 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp1 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_mcbsp1_hwmod,
        .clk            = "mcbsp1_ick",
-       .addr           = omap2430_mcbsp1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mcbsp1_addrs),
+       .addr           = omap2_mcbsp1_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2308,9 +1668,7 @@ static struct omap_hwmod omap2430_mcbsp1_hwmod = {
        .name           = "mcbsp1",
        .class          = &omap2430_mcbsp_hwmod_class,
        .mpu_irqs       = omap2430_mcbsp1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mcbsp1_irqs),
-       .sdma_reqs      = omap2430_mcbsp1_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mcbsp1_sdma_chs),
+       .sdma_reqs      = omap2_mcbsp1_sdma_reqs,
        .main_clk       = "mcbsp1_fck",
        .prcm           = {
                .omap2 = {
@@ -2331,20 +1689,7 @@ static struct omap_hwmod_irq_info omap2430_mcbsp2_irqs[] = {
        { .name = "tx",         .irq = 62 },
        { .name = "rx",         .irq = 63 },
        { .name = "common",     .irq = 16 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcbsp2_sdma_chs[] = {
-       { .name = "rx", .dma_req = 34 },
-       { .name = "tx", .dma_req = 33 },
-};
-
-static struct omap_hwmod_addr_space omap2430_mcbsp2_addrs[] = {
-       {
-               .name           = "mpu",
-               .pa_start       = 0x48076000,
-               .pa_end         = 0x480760ff,
-               .flags          = ADDR_TYPE_RT
-       },
+       { .irq = -1 }
 };
 
 /* l4_core -> mcbsp2 */
@@ -2352,8 +1697,7 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp2 = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_mcbsp2_hwmod,
        .clk            = "mcbsp2_ick",
-       .addr           = omap2430_mcbsp2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mcbsp2_addrs),
+       .addr           = omap2xxx_mcbsp2_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2366,9 +1710,7 @@ static struct omap_hwmod omap2430_mcbsp2_hwmod = {
        .name           = "mcbsp2",
        .class          = &omap2430_mcbsp_hwmod_class,
        .mpu_irqs       = omap2430_mcbsp2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mcbsp2_irqs),
-       .sdma_reqs      = omap2430_mcbsp2_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mcbsp2_sdma_chs),
+       .sdma_reqs      = omap2_mcbsp2_sdma_reqs,
        .main_clk       = "mcbsp2_fck",
        .prcm           = {
                .omap2 = {
@@ -2389,11 +1731,7 @@ static struct omap_hwmod_irq_info omap2430_mcbsp3_irqs[] = {
        { .name = "tx",         .irq = 89 },
        { .name = "rx",         .irq = 90 },
        { .name = "common",     .irq = 17 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcbsp3_sdma_chs[] = {
-       { .name = "rx", .dma_req = 18 },
-       { .name = "tx", .dma_req = 17 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap2430_mcbsp3_addrs[] = {
@@ -2403,6 +1741,7 @@ static struct omap_hwmod_addr_space omap2430_mcbsp3_addrs[] = {
                .pa_end         = 0x4808C0ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_core -> mcbsp3 */
@@ -2411,7 +1750,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp3 = {
        .slave          = &omap2430_mcbsp3_hwmod,
        .clk            = "mcbsp3_ick",
        .addr           = omap2430_mcbsp3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mcbsp3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2424,9 +1762,7 @@ static struct omap_hwmod omap2430_mcbsp3_hwmod = {
        .name           = "mcbsp3",
        .class          = &omap2430_mcbsp_hwmod_class,
        .mpu_irqs       = omap2430_mcbsp3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mcbsp3_irqs),
-       .sdma_reqs      = omap2430_mcbsp3_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mcbsp3_sdma_chs),
+       .sdma_reqs      = omap2_mcbsp3_sdma_reqs,
        .main_clk       = "mcbsp3_fck",
        .prcm           = {
                .omap2 = {
@@ -2447,11 +1783,13 @@ static struct omap_hwmod_irq_info omap2430_mcbsp4_irqs[] = {
        { .name = "tx",         .irq = 54 },
        { .name = "rx",         .irq = 55 },
        { .name = "common",     .irq = 18 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap2430_mcbsp4_sdma_chs[] = {
        { .name = "rx", .dma_req = 20 },
        { .name = "tx", .dma_req = 19 },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap2430_mcbsp4_addrs[] = {
@@ -2461,6 +1799,7 @@ static struct omap_hwmod_addr_space omap2430_mcbsp4_addrs[] = {
                .pa_end         = 0x4808E0ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_core -> mcbsp4 */
@@ -2469,7 +1808,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp4 = {
        .slave          = &omap2430_mcbsp4_hwmod,
        .clk            = "mcbsp4_ick",
        .addr           = omap2430_mcbsp4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mcbsp4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2482,9 +1820,7 @@ static struct omap_hwmod omap2430_mcbsp4_hwmod = {
        .name           = "mcbsp4",
        .class          = &omap2430_mcbsp_hwmod_class,
        .mpu_irqs       = omap2430_mcbsp4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mcbsp4_irqs),
        .sdma_reqs      = omap2430_mcbsp4_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mcbsp4_sdma_chs),
        .main_clk       = "mcbsp4_fck",
        .prcm           = {
                .omap2 = {
@@ -2505,11 +1841,13 @@ static struct omap_hwmod_irq_info omap2430_mcbsp5_irqs[] = {
        { .name = "tx",         .irq = 81 },
        { .name = "rx",         .irq = 82 },
        { .name = "common",     .irq = 19 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap2430_mcbsp5_sdma_chs[] = {
        { .name = "rx", .dma_req = 22 },
        { .name = "tx", .dma_req = 21 },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap2430_mcbsp5_addrs[] = {
@@ -2519,6 +1857,7 @@ static struct omap_hwmod_addr_space omap2430_mcbsp5_addrs[] = {
                .pa_end         = 0x480960ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_core -> mcbsp5 */
@@ -2527,7 +1866,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp5 = {
        .slave          = &omap2430_mcbsp5_hwmod,
        .clk            = "mcbsp5_ick",
        .addr           = omap2430_mcbsp5_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap2430_mcbsp5_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2540,9 +1878,7 @@ static struct omap_hwmod omap2430_mcbsp5_hwmod = {
        .name           = "mcbsp5",
        .class          = &omap2430_mcbsp_hwmod_class,
        .mpu_irqs       = omap2430_mcbsp5_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mcbsp5_irqs),
        .sdma_reqs      = omap2430_mcbsp5_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mcbsp5_sdma_chs),
        .main_clk       = "mcbsp5_fck",
        .prcm           = {
                .omap2 = {
@@ -2580,11 +1916,13 @@ static struct omap_hwmod_class omap2430_mmc_class = {
 
 static struct omap_hwmod_irq_info omap2430_mmc1_mpu_irqs[] = {
        { .irq = 83 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap2430_mmc1_sdma_reqs[] = {
        { .name = "tx", .dma_req = 61 }, /* DMA_MMC1_TX */
        { .name = "rx", .dma_req = 62 }, /* DMA_MMC1_RX */
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_opt_clk omap2430_mmc1_opt_clks[] = {
@@ -2603,9 +1941,7 @@ static struct omap_hwmod omap2430_mmc1_hwmod = {
        .name           = "mmc1",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap2430_mmc1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mmc1_mpu_irqs),
        .sdma_reqs      = omap2430_mmc1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mmc1_sdma_reqs),
        .opt_clks       = omap2430_mmc1_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(omap2430_mmc1_opt_clks),
        .main_clk       = "mmchs1_fck",
@@ -2629,11 +1965,13 @@ static struct omap_hwmod omap2430_mmc1_hwmod = {
 
 static struct omap_hwmod_irq_info omap2430_mmc2_mpu_irqs[] = {
        { .irq = 86 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap2430_mmc2_sdma_reqs[] = {
        { .name = "tx", .dma_req = 47 }, /* DMA_MMC2_TX */
        { .name = "rx", .dma_req = 48 }, /* DMA_MMC2_RX */
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_opt_clk omap2430_mmc2_opt_clks[] = {
@@ -2648,9 +1986,7 @@ static struct omap_hwmod omap2430_mmc2_hwmod = {
        .name           = "mmc2",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap2430_mmc2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap2430_mmc2_mpu_irqs),
        .sdma_reqs      = omap2430_mmc2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap2430_mmc2_sdma_reqs),
        .opt_clks       = omap2430_mmc2_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(omap2430_mmc2_opt_clks),
        .main_clk       = "mmchs2_fck",
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c
new file mode 100644
index 0000000..04637fa
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * omap_hwmod_2xxx_3xxx_interconnect_data.c - common interconnect data, OMAP2/3
+ *
+ * Copyright (C) 2009-2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * XXX handle crossbar/shared link difference for L3?
+ * XXX these should be marked initdata for multi-OMAP kernels
+ */
+#include <asm/sizes.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/serial.h>
+
+#include "omap_hwmod_common_data.h"
+
+struct omap_hwmod_addr_space omap2430_mmc1_addr_space[] = {
+       {
+               .pa_start       = 0x4809c000,
+               .pa_end         = 0x4809c1ff,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2430_mmc2_addr_space[] = {
+       {
+               .pa_start       = 0x480b4000,
+               .pa_end         = 0x480b41ff,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_i2c1_addr_space[] = {
+       {
+               .pa_start       = 0x48070000,
+               .pa_end         = 0x48070000 + SZ_128 - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_i2c2_addr_space[] = {
+       {
+               .pa_start       = 0x48072000,
+               .pa_end         = 0x48072000 + SZ_128 - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_dss_addrs[] = {
+       {
+               .pa_start       = 0x48050000,
+               .pa_end         = 0x48050000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_dss_dispc_addrs[] = {
+       {
+               .pa_start       = 0x48050400,
+               .pa_end         = 0x48050400 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_dss_rfbi_addrs[] = {
+       {
+               .pa_start       = 0x48050800,
+               .pa_end         = 0x48050800 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_dss_venc_addrs[] = {
+       {
+               .pa_start       = 0x48050C00,
+               .pa_end         = 0x48050C00 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_timer10_addrs[] = {
+       {
+               .pa_start       = 0x48086000,
+               .pa_end         = 0x48086000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_timer11_addrs[] = {
+       {
+               .pa_start       = 0x48088000,
+               .pa_end         = 0x48088000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer12_addrs[] = {
+       {
+               .pa_start       = 0x4808a000,
+               .pa_end         = 0x4808a000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_mcspi1_addr_space[] = {
+       {
+               .pa_start       = 0x48098000,
+               .pa_end         = 0x48098000 + SZ_256 - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_mcspi2_addr_space[] = {
+       {
+               .pa_start       = 0x4809a000,
+               .pa_end         = 0x4809a000 + SZ_256 - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[] = {
+       {
+               .pa_start       = 0x480b8000,
+               .pa_end         = 0x480b8000 + SZ_256 - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_dma_system_addrs[] = {
+       {
+               .pa_start       = 0x48056000,
+               .pa_end         = 0x48056000 + SZ_4K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_mailbox_addrs[] = {
+       {
+               .pa_start       = 0x48094000,
+               .pa_end         = 0x48094000 + SZ_512 - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2_mcbsp1_addrs[] = {
+       {
+               .name           = "mpu",
+               .pa_start       = 0x48074000,
+               .pa_end         = 0x480740ff,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
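
The address-space records collected above are terminated by an empty { } entry instead of carrying a separate .addr_cnt on every interconnect link. The code that sizes these arrays is not part of this diff; what follows is only a minimal sketch of how a consumer could count such a { }-terminated array, assuming the all-zero entry is the terminator (the helper name is hypothetical, not taken from this patch):

/*
 * Sketch only: count entries in a { }-terminated omap_hwmod_addr_space
 * array.  The terminator has pa_start == pa_end == 0, no name and no
 * flags; every real entry above describes a non-empty region.
 */
static unsigned int _count_addr_spaces(const struct omap_hwmod_addr_space *addr)
{
	unsigned int i = 0;

	if (!addr)
		return 0;

	while (addr[i].pa_start != addr[i].pa_end ||
	       addr[i].name || addr[i].flags)
		i++;

	return i;
}
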
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
new file mode 100644
index 0000000..c451729
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * omap_hwmod_2xxx_3xxx_ipblock_data.c - common IP block data for OMAP2/3
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <plat/omap_hwmod.h>
+#include <plat/serial.h>
+#include <plat/dma.h>
+
+#include <mach/irqs.h>
+
+#include "omap_hwmod_common_data.h"
+
+/* UART */
+
+static struct omap_hwmod_class_sysconfig omap2_uart_sysc = {
+       .rev_offs       = 0x50,
+       .sysc_offs      = 0x54,
+       .syss_offs      = 0x58,
+       .sysc_flags     = (SYSC_HAS_SIDLEMODE |
+                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_uart_class = {
+       .name   = "uart",
+       .sysc   = &omap2_uart_sysc,
+};
+
+/*
+ * 'dss' class
+ * display sub-system
+ */
+
+static struct omap_hwmod_class_sysconfig omap2_dss_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_dss_hwmod_class = {
+       .name   = "dss",
+       .sysc   = &omap2_dss_sysc,
+};
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_dispc_hwmod_class = {
+       .name   = "dispc",
+       .sysc   = &omap2_dispc_sysc,
+};
+
+/*
+ * 'rfbi' class
+ * remote frame buffer interface
+ */
+
+static struct omap_hwmod_class_sysconfig omap2_rfbi_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+                          SYSC_HAS_AUTOIDLE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_rfbi_hwmod_class = {
+       .name   = "rfbi",
+       .sysc   = &omap2_rfbi_sysc,
+};
+
+/*
+ * 'venc' class
+ * video encoder
+ */
+
+struct omap_hwmod_class omap2_venc_hwmod_class = {
+       .name = "venc",
+};
+
+
+/* Common DMA request line data */
+struct omap_hwmod_dma_info omap2_uart1_sdma_reqs[] = {
+       { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
+       { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
+       { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_uart2_sdma_reqs[] = {
+       { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
+       { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
+       { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_uart3_sdma_reqs[] = {
+       { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
+       { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
+       { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_i2c1_sdma_reqs[] = {
+       { .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
+       { .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
+       { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_i2c2_sdma_reqs[] = {
+       { .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
+       { .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
+       { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcspi1_sdma_reqs[] = {
+       { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
+       { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
+       { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
+       { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
+       { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
+       { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
+       { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
+       { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
+       { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcspi2_sdma_reqs[] = {
+       { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
+       { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
+       { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
+       { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
+       { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcbsp1_sdma_reqs[] = {
+       { .name = "rx", .dma_req = 32 },
+       { .name = "tx", .dma_req = 31 },
+       { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcbsp2_sdma_reqs[] = {
+       { .name = "rx", .dma_req = 34 },
+       { .name = "tx", .dma_req = 33 },
+       { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcbsp3_sdma_reqs[] = {
+       { .name = "rx", .dma_req = 18 },
+       { .name = "tx", .dma_req = 17 },
+       { .dma_req = -1 }
+};
+
+/* Other IP block data */
+
+
+/*
+ * omap_hwmod class data
+ */
+
+struct omap_hwmod_class l3_hwmod_class = {
+       .name = "l3"
+};
+
+struct omap_hwmod_class l4_hwmod_class = {
+       .name = "l4"
+};
+
+struct omap_hwmod_class mpu_hwmod_class = {
+       .name = "mpu"
+};
+
+struct omap_hwmod_class iva_hwmod_class = {
+       .name = "iva"
+};
+
+/* Common MPU IRQ line data */
+
+struct omap_hwmod_irq_info omap2_timer1_mpu_irqs[] = {
+       { .irq = 37, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer2_mpu_irqs[] = {
+       { .irq = 38, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer3_mpu_irqs[] = {
+       { .irq = 39, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer4_mpu_irqs[] = {
+       { .irq = 40, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer5_mpu_irqs[] = {
+       { .irq = 41, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer6_mpu_irqs[] = {
+       { .irq = 42, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer7_mpu_irqs[] = {
+       { .irq = 43, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer8_mpu_irqs[] = {
+       { .irq = 44, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer9_mpu_irqs[] = {
+       { .irq = 45, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer10_mpu_irqs[] = {
+       { .irq = 46, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer11_mpu_irqs[] = {
+       { .irq = 47, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_uart1_mpu_irqs[] = {
+       { .irq = INT_24XX_UART1_IRQ, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_uart2_mpu_irqs[] = {
+       { .irq = INT_24XX_UART2_IRQ, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[] = {
+       { .irq = INT_24XX_UART3_IRQ, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_dispc_irqs[] = {
+       { .irq = 25 },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[] = {
+       { .irq = INT_24XX_I2C1_IRQ, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[] = {
+       { .irq = INT_24XX_I2C2_IRQ, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_gpio1_irqs[] = {
+       { .irq = 29 }, /* INT_24XX_GPIO_BANK1 */
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_gpio2_irqs[] = {
+       { .irq = 30 }, /* INT_24XX_GPIO_BANK2 */
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_gpio3_irqs[] = {
+       { .irq = 31 }, /* INT_24XX_GPIO_BANK3 */
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_gpio4_irqs[] = {
+       { .irq = 32 }, /* INT_24XX_GPIO_BANK4 */
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_dma_system_irqs[] = {
+       { .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
+       { .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
+       { .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
+       { .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[] = {
+       { .irq = 65 },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[] = {
+       { .irq = 66 },
+       { .irq = -1 }
+};
+
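
The IRQ and DMA request tables in this file end with an explicit { .irq = -1 } or { .dma_req = -1 } sentinel rather than an empty entry, presumably because 0 can be a valid IRQ or DMA request number, so only a negative value can mark the end of the array. The counting side is not shown in this diff; a hedged sketch, assuming the irq and dma_req fields are signed and with hypothetical helper names:

/*
 * Sketch only: walk sentinel-terminated IRQ and DMA request arrays
 * until the negative terminator is reached, returning the number of
 * real entries.
 */
static unsigned int _count_mpu_irqs(const struct omap_hwmod_irq_info *irqs)
{
	unsigned int i = 0;

	while (irqs && irqs[i].irq != -1)
		i++;

	return i;
}

static unsigned int _count_sdma_reqs(const struct omap_hwmod_dma_info *dma)
{
	unsigned int i = 0;

	while (dma && dma[i].dma_req != -1)
		i++;

	return i;
}
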
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
new file mode 100644
index 0000000..4f3547c
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * omap_hwmod_2xxx_interconnect_data.c - common interconnect data for OMAP2xxx
+ *
+ * Copyright (C) 2009-2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * XXX handle crossbar/shared link difference for L3?
+ * XXX these should be marked initdata for multi-OMAP kernels
+ */
+#include <asm/sizes.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/serial.h>
+
+#include "omap_hwmod_common_data.h"
+
+struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[] = {
+       {
+               .pa_start       = OMAP2_UART1_BASE,
+               .pa_end         = OMAP2_UART1_BASE + SZ_8K - 1,
+               .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[] = {
+       {
+               .pa_start       = OMAP2_UART2_BASE,
+               .pa_end         = OMAP2_UART2_BASE + SZ_1K - 1,
+               .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_uart3_addr_space[] = {
+       {
+               .pa_start       = OMAP2_UART3_BASE,
+               .pa_end         = OMAP2_UART3_BASE + SZ_1K - 1,
+               .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer2_addrs[] = {
+       {
+               .pa_start       = 0x4802a000,
+               .pa_end         = 0x4802a000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer3_addrs[] = {
+       {
+               .pa_start       = 0x48078000,
+               .pa_end         = 0x48078000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer4_addrs[] = {
+       {
+               .pa_start       = 0x4807a000,
+               .pa_end         = 0x4807a000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer5_addrs[] = {
+       {
+               .pa_start       = 0x4807c000,
+               .pa_end         = 0x4807c000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer6_addrs[] = {
+       {
+               .pa_start       = 0x4807e000,
+               .pa_end         = 0x4807e000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer7_addrs[] = {
+       {
+               .pa_start       = 0x48080000,
+               .pa_end         = 0x48080000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer8_addrs[] = {
+       {
+               .pa_start       = 0x48082000,
+               .pa_end         = 0x48082000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer9_addrs[] = {
+       {
+               .pa_start       = 0x48084000,
+               .pa_end         = 0x48084000 + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_mcbsp2_addrs[] = {
+       {
+               .name           = "mpu",
+               .pa_start       = 0x48076000,
+               .pa_end         = 0x480760ff,
+               .flags          = ADDR_TYPE_RT
+       },
+       { }
+};
+
+
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
new file mode 100644
index 0000000..177dee2
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * omap_hwmod_2xxx_ipblock_data.c - common IP block data for OMAP2xxx
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <plat/omap_hwmod.h>
+#include <plat/serial.h>
+#include <plat/dma.h>
+#include <plat/dmtimer.h>
+#include <plat/mcspi.h>
+
+#include <mach/irqs.h>
+
+#include "omap_hwmod_common_data.h"
+#include "wd_timer.h"
+
+struct omap_hwmod_irq_info omap2xxx_timer12_mpu_irqs[] = {
+       { .irq = 48, },
+       { .irq = -1 }
+};
+
+struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
+       { .name = "dispc", .dma_req = 5 },
+       { .dma_req = -1 }
+};
+/* OMAP2xxx Timer Common */
+static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
+                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+                          SYSC_HAS_AUTOIDLE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_timer_hwmod_class = {
+       .name   = "timer",
+       .sysc   = &omap2xxx_timer_sysc,
+       .rev    = OMAP_TIMER_IP_VERSION_1,
+};
+
+/*
+ * 'wd_timer' class
+ * 32-bit watchdog upward counter that generates a pulse on the reset pin on
+ * overflow condition
+ */
+
+static struct omap_hwmod_class_sysconfig omap2xxx_wd_timer_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
+                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_wd_timer_hwmod_class = {
+       .name           = "wd_timer",
+       .sysc           = &omap2xxx_wd_timer_sysc,
+       .pre_shutdown   = &omap2_wd_timer_disable
+};
+
+/*
+ * 'gpio' class
+ * general purpose io module
+ */
+static struct omap_hwmod_class_sysconfig omap2xxx_gpio_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+                          SYSS_HAS_RESET_STATUS),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_gpio_hwmod_class = {
+       .name = "gpio",
+       .sysc = &omap2xxx_gpio_sysc,
+       .rev = 0,
+};
+
+/* system dma */
+static struct omap_hwmod_class_sysconfig omap2xxx_dma_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x002c,
+       .syss_offs      = 0x0028,
+       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
+                          SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
+                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+       .idlemodes      = (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_dma_hwmod_class = {
+       .name   = "dma",
+       .sysc   = &omap2xxx_dma_sysc,
+};
+
+/*
+ * 'mailbox' class
+ * mailbox module allowing communication between the on-chip processors
+ * using a queued mailbox-interrupt mechanism.
+ */
+
+static struct omap_hwmod_class_sysconfig omap2xxx_mailbox_sysc = {
+       .rev_offs       = 0x000,
+       .sysc_offs      = 0x010,
+       .syss_offs      = 0x014,
+       .sysc_flags     = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_mailbox_hwmod_class = {
+       .name   = "mailbox",
+       .sysc   = &omap2xxx_mailbox_sysc,
+};
+
+/*
+ * 'mcspi' class
+ * multichannel serial port interface (mcspi) / master/slave synchronous serial
+ * bus
+ */
+
+static struct omap_hwmod_class_sysconfig omap2xxx_mcspi_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+                               SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+                               SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_mcspi_class = {
+       .name   = "mcspi",
+       .sysc   = &omap2xxx_mcspi_sysc,
+       .rev    = OMAP2_MCSPI_REV,
+};
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 909a84d..1a52716 100644
@@ -1,7 +1,7 @@
 /*
  * omap_hwmod_3xxx_data.c - hardware modules present on the OMAP3xxx chips
  *
- * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009-2011 Nokia Corporation
  * Paul Walmsley
  *
  * This program is free software; you can redistribute it and/or modify
@@ -103,6 +103,7 @@ static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_per = {
 static struct omap_hwmod_irq_info omap3xxx_l3_main_irqs[] = {
        { .irq = INT_34XX_L3_DBG_IRQ },
        { .irq = INT_34XX_L3_APP_IRQ },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_l3_main_addrs[] = {
@@ -111,6 +112,7 @@ static struct omap_hwmod_addr_space omap3xxx_l3_main_addrs[] = {
                .pa_end         = 0x6800ffff,
                .flags          = ADDR_TYPE_RT,
        },
+       { }
 };
 
 /* MPU -> L3 interface */
@@ -118,7 +120,6 @@ static struct omap_hwmod_ocp_if omap3xxx_mpu__l3_main = {
        .master   = &omap3xxx_mpu_hwmod,
        .slave    = &omap3xxx_l3_main_hwmod,
        .addr     = omap3xxx_l3_main_addrs,
-       .addr_cnt = ARRAY_SIZE(omap3xxx_l3_main_addrs),
        .user   = OCP_USER_MPU,
 };
 
@@ -150,8 +151,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_l3_main_masters[] = {
 static struct omap_hwmod omap3xxx_l3_main_hwmod = {
        .name           = "l3_main",
        .class          = &l3_hwmod_class,
-       .mpu_irqs       = omap3xxx_l3_main_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_l3_main_irqs),
+       .mpu_irqs       = omap3xxx_l3_main_irqs,
        .masters        = omap3xxx_l3_main_masters,
        .masters_cnt    = ARRAY_SIZE(omap3xxx_l3_main_masters),
        .slaves         = omap3xxx_l3_main_slaves,
@@ -190,39 +190,21 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
 };
 
 /* L4 CORE -> MMC1 interface */
-static struct omap_hwmod_addr_space omap3xxx_mmc1_addr_space[] = {
-       {
-               .pa_start       = 0x4809c000,
-               .pa_end         = 0x4809c1ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_mmc1_hwmod,
        .clk            = "mmchs1_ick",
-       .addr           = omap3xxx_mmc1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mmc1_addr_space),
+       .addr           = omap2430_mmc1_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
        .flags          = OMAP_FIREWALL_L4
 };
 
 /* L4 CORE -> MMC2 interface */
-static struct omap_hwmod_addr_space omap3xxx_mmc2_addr_space[] = {
-       {
-               .pa_start       = 0x480b4000,
-               .pa_end         = 0x480b41ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc2 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_mmc2_hwmod,
        .clk            = "mmchs2_ick",
-       .addr           = omap3xxx_mmc2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mmc2_addr_space),
+       .addr           = omap2430_mmc2_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
        .flags          = OMAP_FIREWALL_L4
 };
@@ -234,6 +216,7 @@ static struct omap_hwmod_addr_space omap3xxx_mmc3_addr_space[] = {
                .pa_end         = 0x480ad1ff,
                .flags          = ADDR_TYPE_RT,
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = {
@@ -241,7 +224,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = {
        .slave          = &omap3xxx_mmc3_hwmod,
        .clk            = "mmchs3_ick",
        .addr           = omap3xxx_mmc3_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mmc3_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
        .flags          = OMAP_FIREWALL_L4
 };
@@ -253,6 +235,7 @@ static struct omap_hwmod_addr_space omap3xxx_uart1_addr_space[] = {
                .pa_end         = OMAP3_UART1_BASE + SZ_8K - 1,
                .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = {
@@ -260,7 +243,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = {
        .slave          = &omap3xxx_uart1_hwmod,
        .clk            = "uart1_ick",
        .addr           = omap3xxx_uart1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_uart1_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -271,6 +253,7 @@ static struct omap_hwmod_addr_space omap3xxx_uart2_addr_space[] = {
                .pa_end         = OMAP3_UART2_BASE + SZ_1K - 1,
                .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = {
@@ -278,7 +261,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = {
        .slave          = &omap3xxx_uart2_hwmod,
        .clk            = "uart2_ick",
        .addr           = omap3xxx_uart2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_uart2_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -289,6 +271,7 @@ static struct omap_hwmod_addr_space omap3xxx_uart3_addr_space[] = {
                .pa_end         = OMAP3_UART3_BASE + SZ_1K - 1,
                .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3_l4_per__uart3 = {
@@ -296,7 +279,6 @@ static struct omap_hwmod_ocp_if omap3_l4_per__uart3 = {
        .slave          = &omap3xxx_uart3_hwmod,
        .clk            = "uart3_ick",
        .addr           = omap3xxx_uart3_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_uart3_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -307,6 +289,7 @@ static struct omap_hwmod_addr_space omap3xxx_uart4_addr_space[] = {
                .pa_end         = OMAP3_UART4_BASE + SZ_1K - 1,
                .flags          = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = {
@@ -314,28 +297,15 @@ static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = {
        .slave          = &omap3xxx_uart4_hwmod,
        .clk            = "uart4_ick",
        .addr           = omap3xxx_uart4_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_uart4_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* I2C IP block address space length (in bytes) */
-#define OMAP2_I2C_AS_LEN               128
-
 /* L4 CORE -> I2C1 interface */
-static struct omap_hwmod_addr_space omap3xxx_i2c1_addr_space[] = {
-       {
-               .pa_start       = 0x48070000,
-               .pa_end         = 0x48070000 + OMAP2_I2C_AS_LEN - 1,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_i2c1_hwmod,
        .clk            = "i2c1_ick",
-       .addr           = omap3xxx_i2c1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_i2c1_addr_space),
+       .addr           = omap2_i2c1_addr_space,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP3_L4_CORE_FW_I2C1_REGION,
@@ -347,20 +317,11 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
 };
 
 /* L4 CORE -> I2C2 interface */
-static struct omap_hwmod_addr_space omap3xxx_i2c2_addr_space[] = {
-       {
-               .pa_start       = 0x48072000,
-               .pa_end         = 0x48072000 + OMAP2_I2C_AS_LEN - 1,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_i2c2_hwmod,
        .clk            = "i2c2_ick",
-       .addr           = omap3xxx_i2c2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_i2c2_addr_space),
+       .addr           = omap2_i2c2_addr_space,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP3_L4_CORE_FW_I2C2_REGION,
@@ -375,9 +336,10 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = {
 static struct omap_hwmod_addr_space omap3xxx_i2c3_addr_space[] = {
        {
                .pa_start       = 0x48060000,
-               .pa_end         = 0x48060000 + OMAP2_I2C_AS_LEN - 1,
+               .pa_end         = 0x48060000 + SZ_128 - 1,
                .flags          = ADDR_TYPE_RT,
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
@@ -385,7 +347,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
        .slave          = &omap3xxx_i2c3_hwmod,
        .clk            = "i2c3_ick",
        .addr           = omap3xxx_i2c3_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_i2c3_addr_space),
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP3_L4_CORE_FW_I2C3_REGION,
@@ -403,6 +364,7 @@ static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
                .pa_end         = OMAP34XX_SR1_BASE + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT,
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3_l4_core__sr1 = {
@@ -410,7 +372,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__sr1 = {
        .slave          = &omap34xx_sr1_hwmod,
        .clk            = "sr_l4_ick",
        .addr           = omap3_sr1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3_sr1_addr_space),
        .user           = OCP_USER_MPU,
 };
 
@@ -421,6 +382,7 @@ static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
                .pa_end         = OMAP34XX_SR2_BASE + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT,
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = {
@@ -428,7 +390,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = {
        .slave          = &omap34xx_sr2_hwmod,
        .clk            = "sr_l4_ick",
        .addr           = omap3_sr2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap3_sr2_addr_space),
        .user           = OCP_USER_MPU,
 };
 
@@ -442,6 +403,7 @@ static struct omap_hwmod_addr_space omap3xxx_usbhsotg_addrs[] = {
                .pa_end         = OMAP34XX_HSUSB_OTG_BASE + SZ_4K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_core -> usbhsotg  */
@@ -450,7 +412,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__usbhsotg = {
        .slave          = &omap3xxx_usbhsotg_hwmod,
        .clk            = "l4_ick",
        .addr           = omap3xxx_usbhsotg_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_usbhsotg_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -468,6 +429,7 @@ static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = {
                .pa_end         = AM35XX_IPSS_USBOTGSS_BASE + SZ_4K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_core -> usbhsotg  */
@@ -476,7 +438,6 @@ static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = {
        .slave          = &am35xx_usbhsotg_hwmod,
        .clk            = "l4_ick",
        .addr           = am35xx_usbhsotg_addrs,
-       .addr_cnt       = ARRAY_SIZE(am35xx_usbhsotg_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -611,9 +572,6 @@ static struct omap_hwmod_class omap3xxx_timer_hwmod_class = {
 
 /* timer1 */
 static struct omap_hwmod omap3xxx_timer1_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer1_mpu_irqs[] = {
-       { .irq = 37, },
-};
 
 static struct omap_hwmod_addr_space omap3xxx_timer1_addrs[] = {
        {
@@ -621,6 +579,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer1_addrs[] = {
                .pa_end         = 0x48318000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_wkup -> timer1 */
@@ -629,7 +588,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__timer1 = {
        .slave          = &omap3xxx_timer1_hwmod,
        .clk            = "gpt1_ick",
        .addr           = omap3xxx_timer1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -641,8 +599,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer1_slaves[] = {
 /* timer1 hwmod */
 static struct omap_hwmod omap3xxx_timer1_hwmod = {
        .name           = "timer1",
-       .mpu_irqs       = omap3xxx_timer1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer1_mpu_irqs),
+       .mpu_irqs       = omap2_timer1_mpu_irqs,
        .main_clk       = "gpt1_fck",
        .prcm           = {
                .omap2 = {
@@ -661,9 +618,6 @@ static struct omap_hwmod omap3xxx_timer1_hwmod = {
 
 /* timer2 */
 static struct omap_hwmod omap3xxx_timer2_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer2_mpu_irqs[] = {
-       { .irq = 38, },
-};
 
 static struct omap_hwmod_addr_space omap3xxx_timer2_addrs[] = {
        {
@@ -671,6 +625,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer2_addrs[] = {
                .pa_end         = 0x49032000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer2 */
@@ -679,7 +634,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer2 = {
        .slave          = &omap3xxx_timer2_hwmod,
        .clk            = "gpt2_ick",
        .addr           = omap3xxx_timer2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -691,8 +645,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer2_slaves[] = {
 /* timer2 hwmod */
 static struct omap_hwmod omap3xxx_timer2_hwmod = {
        .name           = "timer2",
-       .mpu_irqs       = omap3xxx_timer2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer2_mpu_irqs),
+       .mpu_irqs       = omap2_timer2_mpu_irqs,
        .main_clk       = "gpt2_fck",
        .prcm           = {
                .omap2 = {
@@ -711,9 +664,6 @@ static struct omap_hwmod omap3xxx_timer2_hwmod = {
 
 /* timer3 */
 static struct omap_hwmod omap3xxx_timer3_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer3_mpu_irqs[] = {
-       { .irq = 39, },
-};
 
 static struct omap_hwmod_addr_space omap3xxx_timer3_addrs[] = {
        {
@@ -721,6 +671,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer3_addrs[] = {
                .pa_end         = 0x49034000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer3 */
@@ -729,7 +680,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer3 = {
        .slave          = &omap3xxx_timer3_hwmod,
        .clk            = "gpt3_ick",
        .addr           = omap3xxx_timer3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -741,8 +691,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer3_slaves[] = {
 /* timer3 hwmod */
 static struct omap_hwmod omap3xxx_timer3_hwmod = {
        .name           = "timer3",
-       .mpu_irqs       = omap3xxx_timer3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer3_mpu_irqs),
+       .mpu_irqs       = omap2_timer3_mpu_irqs,
        .main_clk       = "gpt3_fck",
        .prcm           = {
                .omap2 = {
@@ -761,9 +710,6 @@ static struct omap_hwmod omap3xxx_timer3_hwmod = {
 
 /* timer4 */
 static struct omap_hwmod omap3xxx_timer4_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer4_mpu_irqs[] = {
-       { .irq = 40, },
-};
 
 static struct omap_hwmod_addr_space omap3xxx_timer4_addrs[] = {
        {
@@ -771,6 +717,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer4_addrs[] = {
                .pa_end         = 0x49036000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer4 */
@@ -779,7 +726,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer4 = {
        .slave          = &omap3xxx_timer4_hwmod,
        .clk            = "gpt4_ick",
        .addr           = omap3xxx_timer4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -791,8 +737,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer4_slaves[] = {
 /* timer4 hwmod */
 static struct omap_hwmod omap3xxx_timer4_hwmod = {
        .name           = "timer4",
-       .mpu_irqs       = omap3xxx_timer4_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer4_mpu_irqs),
+       .mpu_irqs       = omap2_timer4_mpu_irqs,
        .main_clk       = "gpt4_fck",
        .prcm           = {
                .omap2 = {
@@ -811,9 +756,6 @@ static struct omap_hwmod omap3xxx_timer4_hwmod = {
 
 /* timer5 */
 static struct omap_hwmod omap3xxx_timer5_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer5_mpu_irqs[] = {
-       { .irq = 41, },
-};
 
 static struct omap_hwmod_addr_space omap3xxx_timer5_addrs[] = {
        {
@@ -821,6 +763,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer5_addrs[] = {
                .pa_end         = 0x49038000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer5 */
@@ -829,7 +772,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer5 = {
        .slave          = &omap3xxx_timer5_hwmod,
        .clk            = "gpt5_ick",
        .addr           = omap3xxx_timer5_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer5_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -841,8 +783,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer5_slaves[] = {
 /* timer5 hwmod */
 static struct omap_hwmod omap3xxx_timer5_hwmod = {
        .name           = "timer5",
-       .mpu_irqs       = omap3xxx_timer5_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer5_mpu_irqs),
+       .mpu_irqs       = omap2_timer5_mpu_irqs,
        .main_clk       = "gpt5_fck",
        .prcm           = {
                .omap2 = {
@@ -861,9 +802,6 @@ static struct omap_hwmod omap3xxx_timer5_hwmod = {
 
 /* timer6 */
 static struct omap_hwmod omap3xxx_timer6_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer6_mpu_irqs[] = {
-       { .irq = 42, },
-};
 
 static struct omap_hwmod_addr_space omap3xxx_timer6_addrs[] = {
        {
@@ -871,6 +809,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer6_addrs[] = {
                .pa_end         = 0x4903A000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer6 */
@@ -879,7 +818,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer6 = {
        .slave          = &omap3xxx_timer6_hwmod,
        .clk            = "gpt6_ick",
        .addr           = omap3xxx_timer6_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer6_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -891,8 +829,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer6_slaves[] = {
 /* timer6 hwmod */
 static struct omap_hwmod omap3xxx_timer6_hwmod = {
        .name           = "timer6",
-       .mpu_irqs       = omap3xxx_timer6_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer6_mpu_irqs),
+       .mpu_irqs       = omap2_timer6_mpu_irqs,
        .main_clk       = "gpt6_fck",
        .prcm           = {
                .omap2 = {
@@ -911,9 +848,6 @@ static struct omap_hwmod omap3xxx_timer6_hwmod = {
 
 /* timer7 */
 static struct omap_hwmod omap3xxx_timer7_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer7_mpu_irqs[] = {
-       { .irq = 43, },
-};
 
 static struct omap_hwmod_addr_space omap3xxx_timer7_addrs[] = {
        {
@@ -921,6 +855,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer7_addrs[] = {
                .pa_end         = 0x4903C000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer7 */
@@ -929,7 +864,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer7 = {
        .slave          = &omap3xxx_timer7_hwmod,
        .clk            = "gpt7_ick",
        .addr           = omap3xxx_timer7_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer7_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -941,8 +875,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer7_slaves[] = {
 /* timer7 hwmod */
 static struct omap_hwmod omap3xxx_timer7_hwmod = {
        .name           = "timer7",
-       .mpu_irqs       = omap3xxx_timer7_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer7_mpu_irqs),
+       .mpu_irqs       = omap2_timer7_mpu_irqs,
        .main_clk       = "gpt7_fck",
        .prcm           = {
                .omap2 = {
@@ -961,9 +894,6 @@ static struct omap_hwmod omap3xxx_timer7_hwmod = {
 
 /* timer8 */
 static struct omap_hwmod omap3xxx_timer8_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer8_mpu_irqs[] = {
-       { .irq = 44, },
-};
 
 static struct omap_hwmod_addr_space omap3xxx_timer8_addrs[] = {
        {
@@ -971,6 +901,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer8_addrs[] = {
                .pa_end         = 0x4903E000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer8 */
@@ -979,7 +910,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer8 = {
        .slave          = &omap3xxx_timer8_hwmod,
        .clk            = "gpt8_ick",
        .addr           = omap3xxx_timer8_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer8_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -991,8 +921,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer8_slaves[] = {
 /* timer8 hwmod */
 static struct omap_hwmod omap3xxx_timer8_hwmod = {
        .name           = "timer8",
-       .mpu_irqs       = omap3xxx_timer8_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer8_mpu_irqs),
+       .mpu_irqs       = omap2_timer8_mpu_irqs,
        .main_clk       = "gpt8_fck",
        .prcm           = {
                .omap2 = {
@@ -1011,9 +940,6 @@ static struct omap_hwmod omap3xxx_timer8_hwmod = {
 
 /* timer9 */
 static struct omap_hwmod omap3xxx_timer9_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer9_mpu_irqs[] = {
-       { .irq = 45, },
-};
 
 static struct omap_hwmod_addr_space omap3xxx_timer9_addrs[] = {
        {
@@ -1021,6 +947,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer9_addrs[] = {
                .pa_end         = 0x49040000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer9 */
@@ -1029,7 +956,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer9 = {
        .slave          = &omap3xxx_timer9_hwmod,
        .clk            = "gpt9_ick",
        .addr           = omap3xxx_timer9_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer9_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1041,8 +967,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer9_slaves[] = {
 /* timer9 hwmod */
 static struct omap_hwmod omap3xxx_timer9_hwmod = {
        .name           = "timer9",
-       .mpu_irqs       = omap3xxx_timer9_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer9_mpu_irqs),
+       .mpu_irqs       = omap2_timer9_mpu_irqs,
        .main_clk       = "gpt9_fck",
        .prcm           = {
                .omap2 = {
@@ -1061,25 +986,13 @@ static struct omap_hwmod omap3xxx_timer9_hwmod = {
 
 /* timer10 */
 static struct omap_hwmod omap3xxx_timer10_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer10_mpu_irqs[] = {
-       { .irq = 46, },
-};
-
-static struct omap_hwmod_addr_space omap3xxx_timer10_addrs[] = {
-       {
-               .pa_start       = 0x48086000,
-               .pa_end         = 0x48086000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer10 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer10 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_timer10_hwmod,
        .clk            = "gpt10_ick",
-       .addr           = omap3xxx_timer10_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer10_addrs),
+       .addr           = omap2_timer10_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1091,8 +1004,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer10_slaves[] = {
 /* timer10 hwmod */
 static struct omap_hwmod omap3xxx_timer10_hwmod = {
        .name           = "timer10",
-       .mpu_irqs       = omap3xxx_timer10_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer10_mpu_irqs),
+       .mpu_irqs       = omap2_timer10_mpu_irqs,
        .main_clk       = "gpt10_fck",
        .prcm           = {
                .omap2 = {
@@ -1111,25 +1023,13 @@ static struct omap_hwmod omap3xxx_timer10_hwmod = {
 
 /* timer11 */
 static struct omap_hwmod omap3xxx_timer11_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer11_mpu_irqs[] = {
-       { .irq = 47, },
-};
-
-static struct omap_hwmod_addr_space omap3xxx_timer11_addrs[] = {
-       {
-               .pa_start       = 0x48088000,
-               .pa_end         = 0x48088000 + SZ_1K - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-};
 
 /* l4_core -> timer11 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer11 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_timer11_hwmod,
        .clk            = "gpt11_ick",
-       .addr           = omap3xxx_timer11_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer11_addrs),
+       .addr           = omap2_timer11_addrs,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1141,8 +1041,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer11_slaves[] = {
 /* timer11 hwmod */
 static struct omap_hwmod omap3xxx_timer11_hwmod = {
        .name           = "timer11",
-       .mpu_irqs       = omap3xxx_timer11_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer11_mpu_irqs),
+       .mpu_irqs       = omap2_timer11_mpu_irqs,
        .main_clk       = "gpt11_fck",
        .prcm           = {
                .omap2 = {
@@ -1163,6 +1062,7 @@ static struct omap_hwmod omap3xxx_timer11_hwmod = {
 static struct omap_hwmod omap3xxx_timer12_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_timer12_mpu_irqs[] = {
        { .irq = 95, },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_timer12_addrs[] = {
@@ -1171,6 +1071,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer12_addrs[] = {
                .pa_end         = 0x48304000 + SZ_1K - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_core -> timer12 */
@@ -1179,7 +1080,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer12 = {
        .slave          = &omap3xxx_timer12_hwmod,
        .clk            = "gpt12_ick",
        .addr           = omap3xxx_timer12_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_timer12_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1192,7 +1092,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer12_slaves[] = {
 static struct omap_hwmod omap3xxx_timer12_hwmod = {
        .name           = "timer12",
        .mpu_irqs       = omap3xxx_timer12_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_timer12_mpu_irqs),
        .main_clk       = "gpt12_fck",
        .prcm           = {
                .omap2 = {
@@ -1216,6 +1115,7 @@ static struct omap_hwmod_addr_space omap3xxx_wd_timer2_addrs[] = {
                .pa_end         = 0x4831407f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = {
@@ -1223,7 +1123,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = {
        .slave          = &omap3xxx_wd_timer2_hwmod,
        .clk            = "wdt2_ick",
        .addr           = omap3xxx_wd_timer2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_wd_timer2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1291,45 +1190,16 @@ static struct omap_hwmod omap3xxx_wd_timer2_hwmod = {
        .flags          = HWMOD_SWSUP_SIDLE,
 };
 
-/* UART common */
-
-static struct omap_hwmod_class_sysconfig uart_sysc = {
-       .rev_offs       = 0x50,
-       .sysc_offs      = 0x54,
-       .syss_offs      = 0x58,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE |
-                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class uart_class = {
-       .name = "uart",
-       .sysc = &uart_sysc,
-};
-
 /* UART1 */
 
-static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
-       { .irq = INT_24XX_UART1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
-       { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
-       { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
-};
-
 static struct omap_hwmod_ocp_if *omap3xxx_uart1_slaves[] = {
        &omap3_l4_core__uart1,
 };
 
 static struct omap_hwmod omap3xxx_uart1_hwmod = {
        .name           = "uart1",
-       .mpu_irqs       = uart1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart1_mpu_irqs),
-       .sdma_reqs      = uart1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart1_sdma_reqs),
+       .mpu_irqs       = omap2_uart1_mpu_irqs,
+       .sdma_reqs      = omap2_uart1_sdma_reqs,
        .main_clk       = "uart1_fck",
        .prcm           = {
                .omap2 = {
@@ -1342,31 +1212,20 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
        },
        .slaves         = omap3xxx_uart1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_uart1_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
 };
 
 /* UART2 */
 
-static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
-       { .irq = INT_24XX_UART2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
-       { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
-       { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
-};
-
 static struct omap_hwmod_ocp_if *omap3xxx_uart2_slaves[] = {
        &omap3_l4_core__uart2,
 };
 
 static struct omap_hwmod omap3xxx_uart2_hwmod = {
        .name           = "uart2",
-       .mpu_irqs       = uart2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart2_mpu_irqs),
-       .sdma_reqs      = uart2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart2_sdma_reqs),
+       .mpu_irqs       = omap2_uart2_mpu_irqs,
+       .sdma_reqs      = omap2_uart2_sdma_reqs,
        .main_clk       = "uart2_fck",
        .prcm           = {
                .omap2 = {
@@ -1379,31 +1238,20 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
        },
        .slaves         = omap3xxx_uart2_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_uart2_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
 };
 
 /* UART3 */
 
-static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
-       { .irq = INT_24XX_UART3_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
-       { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
-       { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
-};
-
 static struct omap_hwmod_ocp_if *omap3xxx_uart3_slaves[] = {
        &omap3_l4_per__uart3,
 };
 
 static struct omap_hwmod omap3xxx_uart3_hwmod = {
        .name           = "uart3",
-       .mpu_irqs       = uart3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart3_mpu_irqs),
-       .sdma_reqs      = uart3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart3_sdma_reqs),
+       .mpu_irqs       = omap2_uart3_mpu_irqs,
+       .sdma_reqs      = omap2_uart3_sdma_reqs,
        .main_clk       = "uart3_fck",
        .prcm           = {
                .omap2 = {
@@ -1416,7 +1264,7 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
        },
        .slaves         = omap3xxx_uart3_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_uart3_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
 };
 
@@ -1424,11 +1272,13 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
 
 static struct omap_hwmod_irq_info uart4_mpu_irqs[] = {
        { .irq = INT_36XX_UART4_IRQ, },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info uart4_sdma_reqs[] = {
        { .name = "rx", .dma_req = OMAP36XX_DMA_UART4_RX, },
        { .name = "tx", .dma_req = OMAP36XX_DMA_UART4_TX, },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_ocp_if *omap3xxx_uart4_slaves[] = {
@@ -1438,9 +1288,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_uart4_slaves[] = {
 static struct omap_hwmod omap3xxx_uart4_hwmod = {
        .name           = "uart4",
        .mpu_irqs       = uart4_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(uart4_mpu_irqs),
        .sdma_reqs      = uart4_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(uart4_sdma_reqs),
        .main_clk       = "uart4_fck",
        .prcm           = {
                .omap2 = {
@@ -1453,7 +1301,7 @@ static struct omap_hwmod omap3xxx_uart4_hwmod = {
        },
        .slaves         = omap3xxx_uart4_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_uart4_slaves),
-       .class          = &uart_class,
+       .class          = &omap2_uart_class,
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
 };
 
@@ -1462,27 +1310,10 @@ static struct omap_hwmod_class i2c_class = {
        .sysc = &i2c_sysc,
 };
 
-/*
- * 'dss' class
- * display sub-system
- */
-
-static struct omap_hwmod_class_sysconfig omap3xxx_dss_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap3xxx_dss_hwmod_class = {
-       .name = "dss",
-       .sysc = &omap3xxx_dss_sysc,
-};
-
 static struct omap_hwmod_dma_info omap3xxx_dss_sdma_chs[] = {
        { .name = "dispc", .dma_req = 5 },
        { .name = "dsi1", .dma_req = 74 },
+       { .dma_req = -1 }
 };
 
 /* dss */
@@ -1491,21 +1322,12 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_masters[] = {
        &omap3xxx_dss__l3,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_dss_addrs[] = {
-       {
-               .pa_start       = 0x48050000,
-               .pa_end         = 0x480503FF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss */
 static struct omap_hwmod_ocp_if omap3430es1_l4_core__dss = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3430es1_dss_core_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap3xxx_dss_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_dss_addrs),
+       .addr           = omap2_dss_addrs,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP3ES1_L4_CORE_FW_DSS_CORE_REGION,
@@ -1520,8 +1342,7 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_dss_core_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap3xxx_dss_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_dss_addrs),
+       .addr           = omap2_dss_addrs,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP3_L4_CORE_FW_DSS_CORE_REGION,
@@ -1549,11 +1370,9 @@ static struct omap_hwmod_opt_clk dss_opt_clks[] = {
 
 static struct omap_hwmod omap3430es1_dss_core_hwmod = {
        .name           = "dss_core",
-       .class          = &omap3xxx_dss_hwmod_class,
+       .class          = &omap2_dss_hwmod_class,
        .main_clk       = "dss1_alwon_fck", /* instead of dss_fck */
        .sdma_reqs      = omap3xxx_dss_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap3xxx_dss_sdma_chs),
-
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -1575,11 +1394,9 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = {
 
 static struct omap_hwmod omap3xxx_dss_core_hwmod = {
        .name           = "dss_core",
-       .class          = &omap3xxx_dss_hwmod_class,
+       .class          = &omap2_dss_hwmod_class,
        .main_clk       = "dss1_alwon_fck", /* instead of dss_fck */
        .sdma_reqs      = omap3xxx_dss_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap3xxx_dss_sdma_chs),
-
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -1600,47 +1417,12 @@ static struct omap_hwmod omap3xxx_dss_core_hwmod = {
                                CHIP_IS_OMAP3630ES1 | CHIP_GE_OMAP3630ES1_1),
 };
 
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap3xxx_dispc_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
-                          SYSC_HAS_MIDLEMODE | SYSC_HAS_ENAWAKEUP |
-                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap3xxx_dispc_hwmod_class = {
-       .name = "dispc",
-       .sysc = &omap3xxx_dispc_sysc,
-};
-
-static struct omap_hwmod_irq_info omap3xxx_dispc_irqs[] = {
-       { .irq = 25 },
-};
-
-static struct omap_hwmod_addr_space omap3xxx_dss_dispc_addrs[] = {
-       {
-               .pa_start       = 0x48050400,
-               .pa_end         = 0x480507FF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss_dispc */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_dss_dispc_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap3xxx_dss_dispc_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_dss_dispc_addrs),
+       .addr           = omap2_dss_dispc_addrs,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP3_L4_CORE_FW_DSS_DISPC_REGION,
@@ -1658,9 +1440,8 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
 
 static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
        .name           = "dss_dispc",
-       .class          = &omap3xxx_dispc_hwmod_class,
-       .mpu_irqs       = omap3xxx_dispc_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_dispc_irqs),
+       .class          = &omap2_dispc_hwmod_class,
+       .mpu_irqs       = omap2_dispc_irqs,
        .main_clk       = "dss1_alwon_fck",
        .prcm           = {
                .omap2 = {
@@ -1688,6 +1469,7 @@ static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
 
 static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
        { .irq = 25 },
+       { .irq = -1 }
 };
 
 /* dss_dsi1 */
@@ -1697,6 +1479,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
                .pa_end         = 0x4804FFFF,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_core -> dss_dsi1 */
@@ -1704,7 +1487,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_dss_dsi1_hwmod,
        .addr           = omap3xxx_dss_dsi1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_dss_dsi1_addrs),
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP3_L4_CORE_FW_DSS_DSI_REGION,
@@ -1724,7 +1506,6 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
        .name           = "dss_dsi1",
        .class          = &omap3xxx_dsi_hwmod_class,
        .mpu_irqs       = omap3xxx_dsi1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_dsi1_irqs),
        .main_clk       = "dss1_alwon_fck",
        .prcm           = {
                .omap2 = {
@@ -1741,41 +1522,12 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'rfbi' class
- * remote frame buffer interface
- */
-
-static struct omap_hwmod_class_sysconfig omap3xxx_rfbi_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
-                          SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap3xxx_rfbi_hwmod_class = {
-       .name = "rfbi",
-       .sysc = &omap3xxx_rfbi_sysc,
-};
-
-static struct omap_hwmod_addr_space omap3xxx_dss_rfbi_addrs[] = {
-       {
-               .pa_start       = 0x48050800,
-               .pa_end         = 0x48050BFF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss_rfbi */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_rfbi = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_dss_rfbi_hwmod,
        .clk            = "dss_ick",
-       .addr           = omap3xxx_dss_rfbi_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_dss_rfbi_addrs),
+       .addr           = omap2_dss_rfbi_addrs,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP3_L4_CORE_FW_DSS_RFBI_REGION,
@@ -1793,7 +1545,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
 
 static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
-       .class          = &omap3xxx_rfbi_hwmod_class,
+       .class          = &omap2_rfbi_hwmod_class,
        .main_clk       = "dss1_alwon_fck",
        .prcm           = {
                .omap2 = {
@@ -1810,31 +1562,12 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
        .flags          = HWMOD_NO_IDLEST,
 };
 
-/*
- * 'venc' class
- * video encoder
- */
-
-static struct omap_hwmod_class omap3xxx_venc_hwmod_class = {
-       .name = "venc",
-};
-
-/* dss_venc */
-static struct omap_hwmod_addr_space omap3xxx_dss_venc_addrs[] = {
-       {
-               .pa_start       = 0x48050C00,
-               .pa_end         = 0x48050FFF,
-               .flags          = ADDR_TYPE_RT
-       },
-};
-
 /* l4_core -> dss_venc */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_dss_venc_hwmod,
        .clk            = "dss_tv_fck",
-       .addr           = omap3xxx_dss_venc_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_dss_venc_addrs),
+       .addr           = omap2_dss_venc_addrs,
        .fw = {
                .omap2 = {
                        .l4_fw_region  = OMAP3_L4_CORE_FW_DSS_VENC_REGION,
@@ -1853,7 +1586,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
 
 static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
        .name           = "dss_venc",
-       .class          = &omap3xxx_venc_hwmod_class,
+       .class          = &omap2_venc_hwmod_class,
        .main_clk       = "dss1_alwon_fck",
        .prcm           = {
                .omap2 = {
@@ -1876,25 +1609,14 @@ static struct omap_i2c_dev_attr i2c1_dev_attr = {
        .fifo_depth     = 8, /* bytes */
 };
 
-static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
-       { .irq = INT_24XX_I2C1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
-       { .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
-       { .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
-};
-
 static struct omap_hwmod_ocp_if *omap3xxx_i2c1_slaves[] = {
        &omap3_l4_core__i2c1,
 };
 
 static struct omap_hwmod omap3xxx_i2c1_hwmod = {
        .name           = "i2c1",
-       .mpu_irqs       = i2c1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(i2c1_mpu_irqs),
-       .sdma_reqs      = i2c1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(i2c1_sdma_reqs),
+       .mpu_irqs       = omap2_i2c1_mpu_irqs,
+       .sdma_reqs      = omap2_i2c1_sdma_reqs,
        .main_clk       = "i2c1_fck",
        .prcm           = {
                .omap2 = {
@@ -1918,25 +1640,14 @@ static struct omap_i2c_dev_attr i2c2_dev_attr = {
        .fifo_depth     = 8, /* bytes */
 };
 
-static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
-       { .irq = INT_24XX_I2C2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
-       { .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
-       { .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
-};
-
 static struct omap_hwmod_ocp_if *omap3xxx_i2c2_slaves[] = {
        &omap3_l4_core__i2c2,
 };
 
 static struct omap_hwmod omap3xxx_i2c2_hwmod = {
        .name           = "i2c2",
-       .mpu_irqs       = i2c2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(i2c2_mpu_irqs),
-       .sdma_reqs      = i2c2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(i2c2_sdma_reqs),
+       .mpu_irqs       = omap2_i2c2_mpu_irqs,
+       .sdma_reqs      = omap2_i2c2_sdma_reqs,
        .main_clk       = "i2c2_fck",
        .prcm           = {
                .omap2 = {
@@ -1962,11 +1673,13 @@ static struct omap_i2c_dev_attr i2c3_dev_attr = {
 
 static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = {
        { .irq = INT_34XX_I2C3_IRQ, },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info i2c3_sdma_reqs[] = {
        { .name = "tx", .dma_req = OMAP34XX_DMA_I2C3_TX },
        { .name = "rx", .dma_req = OMAP34XX_DMA_I2C3_RX },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = {
@@ -1976,9 +1689,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = {
 static struct omap_hwmod omap3xxx_i2c3_hwmod = {
        .name           = "i2c3",
        .mpu_irqs       = i2c3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(i2c3_mpu_irqs),
        .sdma_reqs      = i2c3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(i2c3_sdma_reqs),
        .main_clk       = "i2c3_fck",
        .prcm           = {
                .omap2 = {
@@ -2003,13 +1714,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio1_addrs[] = {
                .pa_end         = 0x483101ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__gpio1 = {
        .master         = &omap3xxx_l4_wkup_hwmod,
        .slave          = &omap3xxx_gpio1_hwmod,
        .addr           = omap3xxx_gpio1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_gpio1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2020,13 +1731,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio2_addrs[] = {
                .pa_end         = 0x490501ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio2 = {
        .master         = &omap3xxx_l4_per_hwmod,
        .slave          = &omap3xxx_gpio2_hwmod,
        .addr           = omap3xxx_gpio2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_gpio2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2037,13 +1748,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio3_addrs[] = {
                .pa_end         = 0x490521ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio3 = {
        .master         = &omap3xxx_l4_per_hwmod,
        .slave          = &omap3xxx_gpio3_hwmod,
        .addr           = omap3xxx_gpio3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_gpio3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2054,13 +1765,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio4_addrs[] = {
                .pa_end         = 0x490541ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio4 = {
        .master         = &omap3xxx_l4_per_hwmod,
        .slave          = &omap3xxx_gpio4_hwmod,
        .addr           = omap3xxx_gpio4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_gpio4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2071,13 +1782,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio5_addrs[] = {
                .pa_end         = 0x490561ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio5 = {
        .master         = &omap3xxx_l4_per_hwmod,
        .slave          = &omap3xxx_gpio5_hwmod,
        .addr           = omap3xxx_gpio5_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_gpio5_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2088,13 +1799,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio6_addrs[] = {
                .pa_end         = 0x490581ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio6 = {
        .master         = &omap3xxx_l4_per_hwmod,
        .slave          = &omap3xxx_gpio6_hwmod,
        .addr           = omap3xxx_gpio6_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_gpio6_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2127,10 +1838,6 @@ static struct omap_gpio_dev_attr gpio_dev_attr = {
 };
 
 /* gpio1 */
-static struct omap_hwmod_irq_info omap3xxx_gpio1_irqs[] = {
-       { .irq = 29 }, /* INT_34XX_GPIO_BANK1 */
-};
-
 static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
        { .role = "dbclk", .clk = "gpio1_dbck", },
 };
@@ -2142,8 +1849,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio1_slaves[] = {
 static struct omap_hwmod omap3xxx_gpio1_hwmod = {
        .name           = "gpio1",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap3xxx_gpio1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_gpio1_irqs),
+       .mpu_irqs       = omap2_gpio1_irqs,
        .main_clk       = "gpio1_ick",
        .opt_clks       = gpio1_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(gpio1_opt_clks),
@@ -2164,10 +1870,6 @@ static struct omap_hwmod omap3xxx_gpio1_hwmod = {
 };
 
 /* gpio2 */
-static struct omap_hwmod_irq_info omap3xxx_gpio2_irqs[] = {
-       { .irq = 30 }, /* INT_34XX_GPIO_BANK2 */
-};
-
 static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
        { .role = "dbclk", .clk = "gpio2_dbck", },
 };
@@ -2179,8 +1881,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio2_slaves[] = {
 static struct omap_hwmod omap3xxx_gpio2_hwmod = {
        .name           = "gpio2",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap3xxx_gpio2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_gpio2_irqs),
+       .mpu_irqs       = omap2_gpio2_irqs,
        .main_clk       = "gpio2_ick",
        .opt_clks       = gpio2_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(gpio2_opt_clks),
@@ -2201,10 +1902,6 @@ static struct omap_hwmod omap3xxx_gpio2_hwmod = {
 };
 
 /* gpio3 */
-static struct omap_hwmod_irq_info omap3xxx_gpio3_irqs[] = {
-       { .irq = 31 }, /* INT_34XX_GPIO_BANK3 */
-};
-
 static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
        { .role = "dbclk", .clk = "gpio3_dbck", },
 };
@@ -2216,8 +1913,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio3_slaves[] = {
 static struct omap_hwmod omap3xxx_gpio3_hwmod = {
        .name           = "gpio3",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap3xxx_gpio3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_gpio3_irqs),
+       .mpu_irqs       = omap2_gpio3_irqs,
        .main_clk       = "gpio3_ick",
        .opt_clks       = gpio3_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(gpio3_opt_clks),
@@ -2238,10 +1934,6 @@ static struct omap_hwmod omap3xxx_gpio3_hwmod = {
 };
 
 /* gpio4 */
-static struct omap_hwmod_irq_info omap3xxx_gpio4_irqs[] = {
-       { .irq = 32 }, /* INT_34XX_GPIO_BANK4 */
-};
-
 static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
        { .role = "dbclk", .clk = "gpio4_dbck", },
 };
@@ -2253,8 +1945,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio4_slaves[] = {
 static struct omap_hwmod omap3xxx_gpio4_hwmod = {
        .name           = "gpio4",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-       .mpu_irqs       = omap3xxx_gpio4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_gpio4_irqs),
+       .mpu_irqs       = omap2_gpio4_irqs,
        .main_clk       = "gpio4_ick",
        .opt_clks       = gpio4_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(gpio4_opt_clks),
@@ -2277,6 +1968,7 @@ static struct omap_hwmod omap3xxx_gpio4_hwmod = {
 /* gpio5 */
 static struct omap_hwmod_irq_info omap3xxx_gpio5_irqs[] = {
        { .irq = 33 }, /* INT_34XX_GPIO_BANK5 */
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
@@ -2291,7 +1983,6 @@ static struct omap_hwmod omap3xxx_gpio5_hwmod = {
        .name           = "gpio5",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap3xxx_gpio5_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_gpio5_irqs),
        .main_clk       = "gpio5_ick",
        .opt_clks       = gpio5_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(gpio5_opt_clks),
@@ -2314,6 +2005,7 @@ static struct omap_hwmod omap3xxx_gpio5_hwmod = {
 /* gpio6 */
 static struct omap_hwmod_irq_info omap3xxx_gpio6_irqs[] = {
        { .irq = 34 }, /* INT_34XX_GPIO_BANK6 */
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
@@ -2328,7 +2020,6 @@ static struct omap_hwmod omap3xxx_gpio6_hwmod = {
        .name           = "gpio6",
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap3xxx_gpio6_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_gpio6_irqs),
        .main_clk       = "gpio6_ick",
        .opt_clks       = gpio6_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(gpio6_opt_clks),
@@ -2382,19 +2073,13 @@ static struct omap_hwmod_class omap3xxx_dma_hwmod_class = {
 };
 
 /* dma_system */
-static struct omap_hwmod_irq_info omap3xxx_dma_system_irqs[] = {
-       { .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
-       { .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
-       { .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
-       { .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
-};
-
 static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = {
        {
                .pa_start       = 0x48056000,
                .pa_end         = 0x48056fff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* dma_system master ports */
@@ -2408,7 +2093,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dma_system = {
        .slave          = &omap3xxx_dma_system_hwmod,
        .clk            = "core_l4_ick",
        .addr           = omap3xxx_dma_system_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_dma_system_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2420,8 +2104,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dma_system_slaves[] = {
 static struct omap_hwmod omap3xxx_dma_system_hwmod = {
        .name           = "dma",
        .class          = &omap3xxx_dma_hwmod_class,
-       .mpu_irqs       = omap3xxx_dma_system_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_dma_system_irqs),
+       .mpu_irqs       = omap2_dma_system_irqs,
        .main_clk       = "core_l3_ick",
        .prcm = {
                .omap2 = {
@@ -2466,11 +2149,7 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp1_irqs[] = {
        { .name = "irq", .irq = 16 },
        { .name = "tx", .irq = 59 },
        { .name = "rx", .irq = 60 },
-};
-
-static struct omap_hwmod_dma_info omap3xxx_mcbsp1_sdma_chs[] = {
-       { .name = "rx", .dma_req = 32 },
-       { .name = "tx", .dma_req = 31 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_mcbsp1_addrs[] = {
@@ -2480,6 +2159,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp1_addrs[] = {
                .pa_end         = 0x480740ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_core -> mcbsp1 */
@@ -2488,7 +2168,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp1 = {
        .slave          = &omap3xxx_mcbsp1_hwmod,
        .clk            = "mcbsp1_ick",
        .addr           = omap3xxx_mcbsp1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mcbsp1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2501,9 +2180,7 @@ static struct omap_hwmod omap3xxx_mcbsp1_hwmod = {
        .name           = "mcbsp1",
        .class          = &omap3xxx_mcbsp_hwmod_class,
        .mpu_irqs       = omap3xxx_mcbsp1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_mcbsp1_irqs),
-       .sdma_reqs      = omap3xxx_mcbsp1_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap3xxx_mcbsp1_sdma_chs),
+       .sdma_reqs      = omap2_mcbsp1_sdma_reqs,
        .main_clk       = "mcbsp1_fck",
        .prcm           = {
                .omap2 = {
@@ -2524,11 +2201,7 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp2_irqs[] = {
        { .name = "irq", .irq = 17 },
        { .name = "tx", .irq = 62 },
        { .name = "rx", .irq = 63 },
-};
-
-static struct omap_hwmod_dma_info omap3xxx_mcbsp2_sdma_chs[] = {
-       { .name = "rx", .dma_req = 34 },
-       { .name = "tx", .dma_req = 33 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_mcbsp2_addrs[] = {
@@ -2538,6 +2211,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp2_addrs[] = {
                .pa_end         = 0x490220ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcbsp2 */
@@ -2546,7 +2220,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2 = {
        .slave          = &omap3xxx_mcbsp2_hwmod,
        .clk            = "mcbsp2_ick",
        .addr           = omap3xxx_mcbsp2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mcbsp2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2563,9 +2236,7 @@ static struct omap_hwmod omap3xxx_mcbsp2_hwmod = {
        .name           = "mcbsp2",
        .class          = &omap3xxx_mcbsp_hwmod_class,
        .mpu_irqs       = omap3xxx_mcbsp2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_mcbsp2_irqs),
-       .sdma_reqs      = omap3xxx_mcbsp2_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap3xxx_mcbsp2_sdma_chs),
+       .sdma_reqs      = omap2_mcbsp2_sdma_reqs,
        .main_clk       = "mcbsp2_fck",
        .prcm           = {
                .omap2 = {
@@ -2587,11 +2258,7 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp3_irqs[] = {
        { .name = "irq", .irq = 22 },
        { .name = "tx", .irq = 89 },
        { .name = "rx", .irq = 90 },
-};
-
-static struct omap_hwmod_dma_info omap3xxx_mcbsp3_sdma_chs[] = {
-       { .name = "rx", .dma_req = 18 },
-       { .name = "tx", .dma_req = 17 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_mcbsp3_addrs[] = {
@@ -2601,6 +2268,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp3_addrs[] = {
                .pa_end         = 0x490240ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcbsp3 */
@@ -2609,7 +2277,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3 = {
        .slave          = &omap3xxx_mcbsp3_hwmod,
        .clk            = "mcbsp3_ick",
        .addr           = omap3xxx_mcbsp3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mcbsp3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2626,9 +2293,7 @@ static struct omap_hwmod omap3xxx_mcbsp3_hwmod = {
        .name           = "mcbsp3",
        .class          = &omap3xxx_mcbsp_hwmod_class,
        .mpu_irqs       = omap3xxx_mcbsp3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_mcbsp3_irqs),
-       .sdma_reqs      = omap3xxx_mcbsp3_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap3xxx_mcbsp3_sdma_chs),
+       .sdma_reqs      = omap2_mcbsp3_sdma_reqs,
        .main_clk       = "mcbsp3_fck",
        .prcm           = {
                .omap2 = {
@@ -2650,11 +2315,13 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp4_irqs[] = {
        { .name = "irq", .irq = 23 },
        { .name = "tx", .irq = 54 },
        { .name = "rx", .irq = 55 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap3xxx_mcbsp4_sdma_chs[] = {
        { .name = "rx", .dma_req = 20 },
        { .name = "tx", .dma_req = 19 },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_mcbsp4_addrs[] = {
@@ -2664,6 +2331,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp4_addrs[] = {
                .pa_end         = 0x490260ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcbsp4 */
@@ -2672,7 +2340,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp4 = {
        .slave          = &omap3xxx_mcbsp4_hwmod,
        .clk            = "mcbsp4_ick",
        .addr           = omap3xxx_mcbsp4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mcbsp4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2685,9 +2352,7 @@ static struct omap_hwmod omap3xxx_mcbsp4_hwmod = {
        .name           = "mcbsp4",
        .class          = &omap3xxx_mcbsp_hwmod_class,
        .mpu_irqs       = omap3xxx_mcbsp4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_mcbsp4_irqs),
        .sdma_reqs      = omap3xxx_mcbsp4_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap3xxx_mcbsp4_sdma_chs),
        .main_clk       = "mcbsp4_fck",
        .prcm           = {
                .omap2 = {
@@ -2708,11 +2373,13 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp5_irqs[] = {
        { .name = "irq", .irq = 27 },
        { .name = "tx", .irq = 81 },
        { .name = "rx", .irq = 82 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap3xxx_mcbsp5_sdma_chs[] = {
        { .name = "rx", .dma_req = 22 },
        { .name = "tx", .dma_req = 21 },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_mcbsp5_addrs[] = {
@@ -2722,6 +2389,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp5_addrs[] = {
                .pa_end         = 0x480960ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_core -> mcbsp5 */
@@ -2730,7 +2398,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp5 = {
        .slave          = &omap3xxx_mcbsp5_hwmod,
        .clk            = "mcbsp5_ick",
        .addr           = omap3xxx_mcbsp5_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mcbsp5_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2743,9 +2410,7 @@ static struct omap_hwmod omap3xxx_mcbsp5_hwmod = {
        .name           = "mcbsp5",
        .class          = &omap3xxx_mcbsp_hwmod_class,
        .mpu_irqs       = omap3xxx_mcbsp5_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_mcbsp5_irqs),
        .sdma_reqs      = omap3xxx_mcbsp5_sdma_chs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap3xxx_mcbsp5_sdma_chs),
        .main_clk       = "mcbsp5_fck",
        .prcm           = {
                .omap2 = {
@@ -2776,6 +2441,7 @@ static struct omap_hwmod_class omap3xxx_mcbsp_sidetone_hwmod_class = {
 /* mcbsp2_sidetone */
 static struct omap_hwmod_irq_info omap3xxx_mcbsp2_sidetone_irqs[] = {
        { .name = "irq", .irq = 4 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_mcbsp2_sidetone_addrs[] = {
@@ -2785,6 +2451,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp2_sidetone_addrs[] = {
                .pa_end         = 0x490280ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcbsp2_sidetone */
@@ -2793,7 +2460,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2_sidetone = {
        .slave          = &omap3xxx_mcbsp2_sidetone_hwmod,
        .clk            = "mcbsp2_ick",
        .addr           = omap3xxx_mcbsp2_sidetone_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -2806,7 +2472,6 @@ static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = {
        .name           = "mcbsp2_sidetone",
        .class          = &omap3xxx_mcbsp_sidetone_hwmod_class,
        .mpu_irqs       = omap3xxx_mcbsp2_sidetone_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_irqs),
        .main_clk       = "mcbsp2_fck",
        .prcm           = {
                .omap2 = {
@@ -2825,6 +2490,7 @@ static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = {
 /* mcbsp3_sidetone */
 static struct omap_hwmod_irq_info omap3xxx_mcbsp3_sidetone_irqs[] = {
        { .name = "irq", .irq = 5 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_mcbsp3_sidetone_addrs[] = {
@@ -2834,6 +2500,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp3_sidetone_addrs[] = {
                .pa_end         = 0x4902A0ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcbsp3_sidetone */
@@ -2842,7 +2509,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3_sidetone = {
        .slave          = &omap3xxx_mcbsp3_sidetone_hwmod,
        .clk            = "mcbsp3_ick",
        .addr           = omap3xxx_mcbsp3_sidetone_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -2855,7 +2521,6 @@ static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod = {
        .name           = "mcbsp3_sidetone",
        .class          = &omap3xxx_mcbsp_sidetone_hwmod_class,
        .mpu_irqs       = omap3xxx_mcbsp3_sidetone_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_irqs),
        .main_clk       = "mcbsp3_fck",
        .prcm           = {
                .omap2 = {
@@ -3025,6 +2690,7 @@ static struct omap_hwmod_class omap3xxx_mailbox_hwmod_class = {
 static struct omap_hwmod omap3xxx_mailbox_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mailbox_irqs[] = {
        { .irq = 26 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap3xxx_mailbox_addrs[] = {
@@ -3033,6 +2699,7 @@ static struct omap_hwmod_addr_space omap3xxx_mailbox_addrs[] = {
                .pa_end         = 0x480941ff,
                .flags          = ADDR_TYPE_RT,
        },
+       { }
 };
 
 /* l4_core -> mailbox */
@@ -3040,7 +2707,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mailbox = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_mailbox_hwmod,
        .addr           = omap3xxx_mailbox_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap3xxx_mailbox_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3053,7 +2719,6 @@ static struct omap_hwmod omap3xxx_mailbox_hwmod = {
        .name           = "mailbox",
        .class          = &omap3xxx_mailbox_hwmod_class,
        .mpu_irqs       = omap3xxx_mailbox_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_mailbox_irqs),
        .main_clk       = "mailboxes_ick",
        .prcm           = {
                .omap2 = {
@@ -3070,56 +2735,29 @@ static struct omap_hwmod omap3xxx_mailbox_hwmod = {
 };
 
 /* l4 core -> mcspi1 interface */
-static struct omap_hwmod_addr_space omap34xx_mcspi1_addr_space[] = {
-       {
-               .pa_start       = 0x48098000,
-               .pa_end         = 0x480980ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap34xx_mcspi1,
        .clk            = "mcspi1_ick",
-       .addr           = omap34xx_mcspi1_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap34xx_mcspi1_addr_space),
+       .addr           = omap2_mcspi1_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4 core -> mcspi2 interface */
-static struct omap_hwmod_addr_space omap34xx_mcspi2_addr_space[] = {
-       {
-               .pa_start       = 0x4809a000,
-               .pa_end         = 0x4809a0ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap34xx_mcspi2,
        .clk            = "mcspi2_ick",
-       .addr           = omap34xx_mcspi2_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap34xx_mcspi2_addr_space),
+       .addr           = omap2_mcspi2_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4 core -> mcspi3 interface */
-static struct omap_hwmod_addr_space omap34xx_mcspi3_addr_space[] = {
-       {
-               .pa_start       = 0x480b8000,
-               .pa_end         = 0x480b80ff,
-               .flags          = ADDR_TYPE_RT,
-       },
-};
-
 static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap34xx_mcspi3,
        .clk            = "mcspi3_ick",
-       .addr           = omap34xx_mcspi3_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap34xx_mcspi3_addr_space),
+       .addr           = omap2430_mcspi3_addr_space,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3130,6 +2768,7 @@ static struct omap_hwmod_addr_space omap34xx_mcspi4_addr_space[] = {
                .pa_end         = 0x480ba0ff,
                .flags          = ADDR_TYPE_RT,
        },
+       { }
 };
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
@@ -3137,7 +2776,6 @@ static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
        .slave          = &omap34xx_mcspi4,
        .clk            = "mcspi4_ick",
        .addr           = omap34xx_mcspi4_addr_space,
-       .addr_cnt       = ARRAY_SIZE(omap34xx_mcspi4_addr_space),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3165,21 +2803,6 @@ static struct omap_hwmod_class omap34xx_mcspi_class = {
 };
 
 /* mcspi1 */
-static struct omap_hwmod_irq_info omap34xx_mcspi1_mpu_irqs[] = {
-       { .name = "irq", .irq = 65 },
-};
-
-static struct omap_hwmod_dma_info omap34xx_mcspi1_sdma_reqs[] = {
-       { .name = "tx0", .dma_req = 35 },
-       { .name = "rx0", .dma_req = 36 },
-       { .name = "tx1", .dma_req = 37 },
-       { .name = "rx1", .dma_req = 38 },
-       { .name = "tx2", .dma_req = 39 },
-       { .name = "rx2", .dma_req = 40 },
-       { .name = "tx3", .dma_req = 41 },
-       { .name = "rx3", .dma_req = 42 },
-};
-
 static struct omap_hwmod_ocp_if *omap34xx_mcspi1_slaves[] = {
        &omap34xx_l4_core__mcspi1,
 };
@@ -3190,10 +2813,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
 
 static struct omap_hwmod omap34xx_mcspi1 = {
        .name           = "mcspi1",
-       .mpu_irqs       = omap34xx_mcspi1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap34xx_mcspi1_mpu_irqs),
-       .sdma_reqs      = omap34xx_mcspi1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap34xx_mcspi1_sdma_reqs),
+       .mpu_irqs       = omap2_mcspi1_mpu_irqs,
+       .sdma_reqs      = omap2_mcspi1_sdma_reqs,
        .main_clk       = "mcspi1_fck",
        .prcm           = {
                .omap2 = {
@@ -3212,17 +2833,6 @@ static struct omap_hwmod omap34xx_mcspi1 = {
 };
 
 /* mcspi2 */
-static struct omap_hwmod_irq_info omap34xx_mcspi2_mpu_irqs[] = {
-       { .name = "irq", .irq = 66 },
-};
-
-static struct omap_hwmod_dma_info omap34xx_mcspi2_sdma_reqs[] = {
-       { .name = "tx0", .dma_req = 43 },
-       { .name = "rx0", .dma_req = 44 },
-       { .name = "tx1", .dma_req = 45 },
-       { .name = "rx1", .dma_req = 46 },
-};
-
 static struct omap_hwmod_ocp_if *omap34xx_mcspi2_slaves[] = {
        &omap34xx_l4_core__mcspi2,
 };
@@ -3233,10 +2843,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
 
 static struct omap_hwmod omap34xx_mcspi2 = {
        .name           = "mcspi2",
-       .mpu_irqs       = omap34xx_mcspi2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap34xx_mcspi2_mpu_irqs),
-       .sdma_reqs      = omap34xx_mcspi2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap34xx_mcspi2_sdma_reqs),
+       .mpu_irqs       = omap2_mcspi2_mpu_irqs,
+       .sdma_reqs      = omap2_mcspi2_sdma_reqs,
        .main_clk       = "mcspi2_fck",
        .prcm           = {
                .omap2 = {
@@ -3257,6 +2865,7 @@ static struct omap_hwmod omap34xx_mcspi2 = {
 /* mcspi3 */
 static struct omap_hwmod_irq_info omap34xx_mcspi3_mpu_irqs[] = {
        { .name = "irq", .irq = 91 }, /* 91 */
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = {
@@ -3264,6 +2873,7 @@ static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = {
        { .name = "rx0", .dma_req = 16 },
        { .name = "tx1", .dma_req = 23 },
        { .name = "rx1", .dma_req = 24 },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_ocp_if *omap34xx_mcspi3_slaves[] = {
@@ -3277,9 +2887,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
 static struct omap_hwmod omap34xx_mcspi3 = {
        .name           = "mcspi3",
        .mpu_irqs       = omap34xx_mcspi3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap34xx_mcspi3_mpu_irqs),
        .sdma_reqs      = omap34xx_mcspi3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap34xx_mcspi3_sdma_reqs),
        .main_clk       = "mcspi3_fck",
        .prcm           = {
                .omap2 = {
@@ -3300,11 +2908,13 @@ static struct omap_hwmod omap34xx_mcspi3 = {
 /* SPI4 */
 static struct omap_hwmod_irq_info omap34xx_mcspi4_mpu_irqs[] = {
        { .name = "irq", .irq = INT_34XX_SPI4_IRQ }, /* 48 */
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap34xx_mcspi4_sdma_reqs[] = {
        { .name = "tx0", .dma_req = 70 }, /* DMA_SPI4_TX0 */
        { .name = "rx0", .dma_req = 71 }, /* DMA_SPI4_RX0 */
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_ocp_if *omap34xx_mcspi4_slaves[] = {
@@ -3318,9 +2928,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = {
 static struct omap_hwmod omap34xx_mcspi4 = {
        .name           = "mcspi4",
        .mpu_irqs       = omap34xx_mcspi4_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap34xx_mcspi4_mpu_irqs),
        .sdma_reqs      = omap34xx_mcspi4_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap34xx_mcspi4_sdma_reqs),
        .main_clk       = "mcspi4_fck",
        .prcm           = {
                .omap2 = {
@@ -3362,12 +2970,12 @@ static struct omap_hwmod_irq_info omap3xxx_usbhsotg_mpu_irqs[] = {
 
        { .name = "mc", .irq = 92 },
        { .name = "dma", .irq = 93 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
        .name           = "usb_otg_hs",
        .mpu_irqs       = omap3xxx_usbhsotg_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap3xxx_usbhsotg_mpu_irqs),
        .main_clk       = "hsotgusb_ick",
        .prcm           = {
                .omap2 = {
@@ -3399,6 +3007,7 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
 static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = {
 
        { .name = "mc", .irq = 71 },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_class am35xx_usbotg_class = {
@@ -3409,7 +3018,6 @@ static struct omap_hwmod_class am35xx_usbotg_class = {
 static struct omap_hwmod am35xx_usbhsotg_hwmod = {
        .name           = "am35x_otg_hs",
        .mpu_irqs       = am35xx_usbhsotg_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(am35xx_usbhsotg_mpu_irqs),
        .main_clk       = NULL,
        .prcm = {
                .omap2 = {
@@ -3445,11 +3053,13 @@ static struct omap_hwmod_class omap34xx_mmc_class = {
 
 static struct omap_hwmod_irq_info omap34xx_mmc1_mpu_irqs[] = {
        { .irq = 83, },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap34xx_mmc1_sdma_reqs[] = {
        { .name = "tx", .dma_req = 61, },
        { .name = "rx", .dma_req = 62, },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_opt_clk omap34xx_mmc1_opt_clks[] = {
@@ -3467,9 +3077,7 @@ static struct omap_mmc_dev_attr mmc1_dev_attr = {
 static struct omap_hwmod omap3xxx_mmc1_hwmod = {
        .name           = "mmc1",
        .mpu_irqs       = omap34xx_mmc1_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap34xx_mmc1_mpu_irqs),
        .sdma_reqs      = omap34xx_mmc1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap34xx_mmc1_sdma_reqs),
        .opt_clks       = omap34xx_mmc1_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(omap34xx_mmc1_opt_clks),
        .main_clk       = "mmchs1_fck",
@@ -3493,11 +3101,13 @@ static struct omap_hwmod omap3xxx_mmc1_hwmod = {
 
 static struct omap_hwmod_irq_info omap34xx_mmc2_mpu_irqs[] = {
        { .irq = INT_24XX_MMC2_IRQ, },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap34xx_mmc2_sdma_reqs[] = {
        { .name = "tx", .dma_req = 47, },
        { .name = "rx", .dma_req = 48, },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_opt_clk omap34xx_mmc2_opt_clks[] = {
@@ -3511,9 +3121,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = {
 static struct omap_hwmod omap3xxx_mmc2_hwmod = {
        .name           = "mmc2",
        .mpu_irqs       = omap34xx_mmc2_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap34xx_mmc2_mpu_irqs),
        .sdma_reqs      = omap34xx_mmc2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap34xx_mmc2_sdma_reqs),
        .opt_clks       = omap34xx_mmc2_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(omap34xx_mmc2_opt_clks),
        .main_clk       = "mmchs2_fck",
@@ -3536,11 +3144,13 @@ static struct omap_hwmod omap3xxx_mmc2_hwmod = {
 
 static struct omap_hwmod_irq_info omap34xx_mmc3_mpu_irqs[] = {
        { .irq = 94, },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap34xx_mmc3_sdma_reqs[] = {
        { .name = "tx", .dma_req = 77, },
        { .name = "rx", .dma_req = 78, },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_opt_clk omap34xx_mmc3_opt_clks[] = {
@@ -3554,9 +3164,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_mmc3_slaves[] = {
 static struct omap_hwmod omap3xxx_mmc3_hwmod = {
        .name           = "mmc3",
        .mpu_irqs       = omap34xx_mmc3_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap34xx_mmc3_mpu_irqs),
        .sdma_reqs      = omap34xx_mmc3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap34xx_mmc3_sdma_reqs),
        .opt_clks       = omap34xx_mmc3_opt_clks,
        .opt_clks_cnt   = ARRAY_SIZE(omap34xx_mmc3_opt_clks),
        .main_clk       = "mmchs3_fck",
index e1c69ff..e011437 100644 (file)
@@ -80,7 +80,12 @@ static struct omap_hwmod_class omap44xx_dmm_hwmod_class = {
        .name   = "dmm",
 };
 
-/* dmm interface data */
+/* dmm */
+static struct omap_hwmod_irq_info omap44xx_dmm_irqs[] = {
+       { .irq = 113 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
+};
+
 /* l3_main_1 -> dmm */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_1__dmm = {
        .master         = &omap44xx_l3_main_1_hwmod,
@@ -95,6 +100,7 @@ static struct omap_hwmod_addr_space omap44xx_dmm_addrs[] = {
                .pa_end         = 0x4e0007ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* mpu -> dmm */
@@ -103,7 +109,6 @@ static struct omap_hwmod_ocp_if omap44xx_mpu__dmm = {
        .slave          = &omap44xx_dmm_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_dmm_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dmm_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -113,17 +118,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dmm_slaves[] = {
        &omap44xx_mpu__dmm,
 };
 
-static struct omap_hwmod_irq_info omap44xx_dmm_irqs[] = {
-       { .irq = 113 + OMAP44XX_IRQ_GIC_START },
-};
-
 static struct omap_hwmod omap44xx_dmm_hwmod = {
        .name           = "dmm",
        .class          = &omap44xx_dmm_hwmod_class,
+       .mpu_irqs       = omap44xx_dmm_irqs,
        .slaves         = omap44xx_dmm_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap44xx_dmm_slaves),
-       .mpu_irqs       = omap44xx_dmm_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_dmm_irqs),
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
@@ -135,7 +135,7 @@ static struct omap_hwmod_class omap44xx_emif_fw_hwmod_class = {
        .name   = "emif_fw",
 };
 
-/* emif_fw interface data */
+/* emif_fw */
 /* dmm -> emif_fw */
 static struct omap_hwmod_ocp_if omap44xx_dmm__emif_fw = {
        .master         = &omap44xx_dmm_hwmod,
@@ -150,6 +150,7 @@ static struct omap_hwmod_addr_space omap44xx_emif_fw_addrs[] = {
                .pa_end         = 0x4a20c0ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_cfg -> emif_fw */
@@ -158,7 +159,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__emif_fw = {
        .slave          = &omap44xx_emif_fw_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_emif_fw_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_emif_fw_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -184,7 +184,7 @@ static struct omap_hwmod_class omap44xx_l3_hwmod_class = {
        .name   = "l3",
 };
 
-/* l3_instr interface data */
+/* l3_instr */
 /* iva -> l3_instr */
 static struct omap_hwmod_ocp_if omap44xx_iva__l3_instr = {
        .master         = &omap44xx_iva_hwmod,
@@ -215,7 +215,13 @@ static struct omap_hwmod omap44xx_l3_instr_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
-/* l3_main_1 interface data */
+/* l3_main_1 */
+static struct omap_hwmod_irq_info omap44xx_l3_main_1_irqs[] = {
+       { .name = "dbg_err", .irq = 9 + OMAP44XX_IRQ_GIC_START },
+       { .name = "app_err", .irq = 10 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
+};
+
 /* dsp -> l3_main_1 */
 static struct omap_hwmod_ocp_if omap44xx_dsp__l3_main_1 = {
        .master         = &omap44xx_dsp_hwmod,
@@ -264,18 +270,13 @@ static struct omap_hwmod_ocp_if omap44xx_mmc2__l3_main_1 = {
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* L3 target configuration and error log registers */
-static struct omap_hwmod_irq_info omap44xx_l3_targ_irqs[] = {
-       { .irq = 9  + OMAP44XX_IRQ_GIC_START },
-       { .irq = 10 + OMAP44XX_IRQ_GIC_START },
-};
-
 static struct omap_hwmod_addr_space omap44xx_l3_main_1_addrs[] = {
        {
                .pa_start       = 0x44000000,
                .pa_end         = 0x44000fff,
-               .flags          = ADDR_TYPE_RT,
+               .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* mpu -> l3_main_1 */
@@ -284,8 +285,7 @@ static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = {
        .slave          = &omap44xx_l3_main_1_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_l3_main_1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_l3_main_1_addrs),
-       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+       .user           = OCP_USER_MPU,
 };
 
 /* l3_main_1 slave ports */
@@ -302,14 +302,13 @@ static struct omap_hwmod_ocp_if *omap44xx_l3_main_1_slaves[] = {
 static struct omap_hwmod omap44xx_l3_main_1_hwmod = {
        .name           = "l3_main_1",
        .class          = &omap44xx_l3_hwmod_class,
-       .mpu_irqs       = omap44xx_l3_targ_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_l3_targ_irqs),
+       .mpu_irqs       = omap44xx_l3_main_1_irqs,
        .slaves         = omap44xx_l3_main_1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap44xx_l3_main_1_slaves),
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
-/* l3_main_2 interface data */
+/* l3_main_2 */
 /* dma_system -> l3_main_2 */
 static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
        .master         = &omap44xx_dma_system_hwmod,
@@ -354,8 +353,9 @@ static struct omap_hwmod_addr_space omap44xx_l3_main_2_addrs[] = {
        {
                .pa_start       = 0x44800000,
                .pa_end         = 0x44801fff,
-               .flags          = ADDR_TYPE_RT,
+               .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_1 -> l3_main_2 */
@@ -364,8 +364,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_2 = {
        .slave          = &omap44xx_l3_main_2_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_l3_main_2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_l3_main_2_addrs),
-       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+       .user           = OCP_USER_MPU,
 };
 
 /* l4_cfg -> l3_main_2 */
@@ -404,13 +403,14 @@ static struct omap_hwmod omap44xx_l3_main_2_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
-/* l3_main_3 interface data */
+/* l3_main_3 */
 static struct omap_hwmod_addr_space omap44xx_l3_main_3_addrs[] = {
        {
                .pa_start       = 0x45000000,
                .pa_end         = 0x45000fff,
-               .flags          = ADDR_TYPE_RT,
+               .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_1 -> l3_main_3 */
@@ -419,8 +419,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_3 = {
        .slave          = &omap44xx_l3_main_3_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_l3_main_3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_l3_main_3_addrs),
-       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+       .user           = OCP_USER_MPU,
 };
 
 /* l3_main_2 -> l3_main_3 */
@@ -462,7 +461,7 @@ static struct omap_hwmod_class omap44xx_l4_hwmod_class = {
        .name   = "l4",
 };
 
-/* l4_abe interface data */
+/* l4_abe */
 /* aess -> l4_abe */
 static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
        .master         = &omap44xx_aess_hwmod,
@@ -511,7 +510,7 @@ static struct omap_hwmod omap44xx_l4_abe_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
-/* l4_cfg interface data */
+/* l4_cfg */
 /* l3_main_1 -> l4_cfg */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_cfg = {
        .master         = &omap44xx_l3_main_1_hwmod,
@@ -533,7 +532,7 @@ static struct omap_hwmod omap44xx_l4_cfg_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
-/* l4_per interface data */
+/* l4_per */
 /* l3_main_2 -> l4_per */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l4_per = {
        .master         = &omap44xx_l3_main_2_hwmod,
@@ -555,7 +554,7 @@ static struct omap_hwmod omap44xx_l4_per_hwmod = {
        .omap_chip      = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
-/* l4_wkup interface data */
+/* l4_wkup */
 /* l4_cfg -> l4_wkup */
 static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l4_wkup = {
        .master         = &omap44xx_l4_cfg_hwmod,
@@ -585,7 +584,7 @@ static struct omap_hwmod_class omap44xx_mpu_bus_hwmod_class = {
        .name   = "mpu_bus",
 };
 
-/* mpu_private interface data */
+/* mpu_private */
 /* mpu -> mpu_private */
 static struct omap_hwmod_ocp_if omap44xx_mpu__mpu_private = {
        .master         = &omap44xx_mpu_hwmod,
@@ -633,7 +632,9 @@ static struct omap_hwmod omap44xx_mpu_private_hwmod = {
  *  gpmc
  *  gpu
  *  hdq1w
- *  hsi
+ *  mcasp
+ *  mpu_c0
+ *  mpu_c1
  *  ocmc_ram
  *  ocp2scp_usb_phy
  *  ocp_wp_noc
@@ -660,7 +661,8 @@ static struct omap_hwmod_class_sysconfig omap44xx_aess_sysc = {
        .sysc_offs      = 0x0010,
        .sysc_flags     = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
        .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART |
+                          MSTANDBY_SMART_WKUP),
        .sysc_fields    = &omap_hwmod_sysc_type2,
 };
 
@@ -672,6 +674,7 @@ static struct omap_hwmod_class omap44xx_aess_hwmod_class = {
 /* aess */
 static struct omap_hwmod_irq_info omap44xx_aess_irqs[] = {
        { .irq = 99 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_aess_sdma_reqs[] = {
@@ -683,6 +686,7 @@ static struct omap_hwmod_dma_info omap44xx_aess_sdma_reqs[] = {
        { .name = "fifo5", .dma_req = 105 + OMAP44XX_DMA_REQ_START },
        { .name = "fifo6", .dma_req = 106 + OMAP44XX_DMA_REQ_START },
        { .name = "fifo7", .dma_req = 107 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 /* aess master ports */
@@ -696,6 +700,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
                .pa_end         = 0x401f13ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> aess */
@@ -704,7 +709,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_aess_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_aess_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -714,6 +718,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
                .pa_end         = 0x490f13ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> aess (dma) */
@@ -722,7 +727,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_aess_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_aess_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -736,11 +740,9 @@ static struct omap_hwmod omap44xx_aess_hwmod = {
        .name           = "aess",
        .class          = &omap44xx_aess_hwmod_class,
        .mpu_irqs       = omap44xx_aess_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_aess_irqs),
        .sdma_reqs      = omap44xx_aess_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_aess_sdma_reqs),
        .main_clk       = "aess_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
                },
@@ -769,7 +771,7 @@ static struct omap_hwmod_opt_clk bandgap_opt_clks[] = {
 static struct omap_hwmod omap44xx_bandgap_hwmod = {
        .name           = "bandgap",
        .class          = &omap44xx_bandgap_hwmod_class,
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
                },
@@ -806,6 +808,7 @@ static struct omap_hwmod_addr_space omap44xx_counter_32k_addrs[] = {
                .pa_end         = 0x4a30401f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_wkup -> counter_32k */
@@ -814,7 +817,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__counter_32k = {
        .slave          = &omap44xx_counter_32k_hwmod,
        .clk            = "l4_wkup_clk_mux_ck",
        .addr           = omap44xx_counter_32k_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_counter_32k_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -828,7 +830,7 @@ static struct omap_hwmod omap44xx_counter_32k_hwmod = {
        .class          = &omap44xx_counter_hwmod_class,
        .flags          = HWMOD_SWSUP_SIDLE,
        .main_clk       = "sys_32k_ck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_WKUP_SYNCTIMER_CLKCTRL,
                },
@@ -875,6 +877,7 @@ static struct omap_hwmod_irq_info omap44xx_dma_system_irqs[] = {
        { .name = "1", .irq = 13 + OMAP44XX_IRQ_GIC_START },
        { .name = "2", .irq = 14 + OMAP44XX_IRQ_GIC_START },
        { .name = "3", .irq = 15 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 /* dma_system master ports */
@@ -888,6 +891,7 @@ static struct omap_hwmod_addr_space omap44xx_dma_system_addrs[] = {
                .pa_end         = 0x4a056fff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_cfg -> dma_system */
@@ -896,7 +900,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dma_system = {
        .slave          = &omap44xx_dma_system_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_dma_system_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dma_system_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -909,7 +912,6 @@ static struct omap_hwmod omap44xx_dma_system_hwmod = {
        .name           = "dma_system",
        .class          = &omap44xx_dma_hwmod_class,
        .mpu_irqs       = omap44xx_dma_system_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_dma_system_irqs),
        .main_clk       = "l3_div_ck",
        .prcm = {
                .omap4 = {
@@ -948,10 +950,12 @@ static struct omap_hwmod_class omap44xx_dmic_hwmod_class = {
 static struct omap_hwmod omap44xx_dmic_hwmod;
 static struct omap_hwmod_irq_info omap44xx_dmic_irqs[] = {
        { .irq = 114 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = {
        { .dma_req = 66 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
@@ -960,6 +964,7 @@ static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
                .pa_end         = 0x4012e07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> dmic */
@@ -968,7 +973,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
        .slave          = &omap44xx_dmic_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_dmic_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dmic_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -978,6 +982,7 @@ static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = {
                .pa_end         = 0x4902e07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> dmic (dma) */
@@ -986,7 +991,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic_dma = {
        .slave          = &omap44xx_dmic_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_dmic_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dmic_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -1000,11 +1004,9 @@ static struct omap_hwmod omap44xx_dmic_hwmod = {
        .name           = "dmic",
        .class          = &omap44xx_dmic_hwmod_class,
        .mpu_irqs       = omap44xx_dmic_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_dmic_irqs),
        .sdma_reqs      = omap44xx_dmic_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_dmic_sdma_reqs),
        .main_clk       = "dmic_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
                },
@@ -1026,6 +1028,7 @@ static struct omap_hwmod_class omap44xx_dsp_hwmod_class = {
 /* dsp */
 static struct omap_hwmod_irq_info omap44xx_dsp_irqs[] = {
        { .irq = 28 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_rst_info omap44xx_dsp_resets[] = {
@@ -1082,7 +1085,6 @@ static struct omap_hwmod omap44xx_dsp_hwmod = {
        .name           = "dsp",
        .class          = &omap44xx_dsp_hwmod_class,
        .mpu_irqs       = omap44xx_dsp_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_dsp_irqs),
        .rst_lines      = omap44xx_dsp_resets,
        .rst_lines_cnt  = ARRAY_SIZE(omap44xx_dsp_resets),
        .main_clk       = "dsp_fck",
@@ -1127,6 +1129,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dma_addrs[] = {
                .pa_end         = 0x5800007f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_2 -> dss */
@@ -1135,7 +1138,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss = {
        .slave          = &omap44xx_dss_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_dss_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -1145,6 +1147,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_addrs[] = {
                .pa_end         = 0x4804007f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> dss */
@@ -1153,7 +1156,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss = {
        .slave          = &omap44xx_dss_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_dss_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -1215,10 +1217,12 @@ static struct omap_hwmod_class omap44xx_dispc_hwmod_class = {
 static struct omap_hwmod omap44xx_dss_dispc_hwmod;
 static struct omap_hwmod_irq_info omap44xx_dss_dispc_irqs[] = {
        { .irq = 25 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_dss_dispc_sdma_reqs[] = {
        { .dma_req = 5 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_dss_dispc_dma_addrs[] = {
@@ -1227,6 +1231,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_dma_addrs[] = {
                .pa_end         = 0x58001fff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_2 -> dss_dispc */
@@ -1235,7 +1240,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dispc = {
        .slave          = &omap44xx_dss_dispc_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_dss_dispc_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_dispc_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -1245,6 +1249,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
                .pa_end         = 0x48041fff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> dss_dispc */
@@ -1253,7 +1258,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
        .slave          = &omap44xx_dss_dispc_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_dss_dispc_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_dispc_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -1267,9 +1271,7 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
        .name           = "dss_dispc",
        .class          = &omap44xx_dispc_hwmod_class,
        .mpu_irqs       = omap44xx_dss_dispc_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_dss_dispc_irqs),
        .sdma_reqs      = omap44xx_dss_dispc_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_dss_dispc_sdma_reqs),
        .main_clk       = "dss_fck",
        .prcm = {
                .omap4 = {
@@ -1306,10 +1308,12 @@ static struct omap_hwmod_class omap44xx_dsi_hwmod_class = {
 static struct omap_hwmod omap44xx_dss_dsi1_hwmod;
 static struct omap_hwmod_irq_info omap44xx_dss_dsi1_irqs[] = {
        { .irq = 53 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_dss_dsi1_sdma_reqs[] = {
        { .dma_req = 74 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_dss_dsi1_dma_addrs[] = {
@@ -1318,6 +1322,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dsi1_dma_addrs[] = {
                .pa_end         = 0x580041ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_2 -> dss_dsi1 */
@@ -1326,7 +1331,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi1 = {
        .slave          = &omap44xx_dss_dsi1_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_dss_dsi1_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_dsi1_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -1336,6 +1340,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dsi1_addrs[] = {
                .pa_end         = 0x480441ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> dss_dsi1 */
@@ -1344,7 +1349,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi1 = {
        .slave          = &omap44xx_dss_dsi1_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_dss_dsi1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_dsi1_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -1358,9 +1362,7 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
        .name           = "dss_dsi1",
        .class          = &omap44xx_dsi_hwmod_class,
        .mpu_irqs       = omap44xx_dss_dsi1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_dss_dsi1_irqs),
        .sdma_reqs      = omap44xx_dss_dsi1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_dss_dsi1_sdma_reqs),
        .main_clk       = "dss_fck",
        .prcm = {
                .omap4 = {
@@ -1376,10 +1378,12 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
 static struct omap_hwmod omap44xx_dss_dsi2_hwmod;
 static struct omap_hwmod_irq_info omap44xx_dss_dsi2_irqs[] = {
        { .irq = 84 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_dss_dsi2_sdma_reqs[] = {
        { .dma_req = 83 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_dss_dsi2_dma_addrs[] = {
@@ -1388,6 +1392,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dsi2_dma_addrs[] = {
                .pa_end         = 0x580051ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_2 -> dss_dsi2 */
@@ -1396,7 +1401,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi2 = {
        .slave          = &omap44xx_dss_dsi2_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_dss_dsi2_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_dsi2_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -1406,6 +1410,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dsi2_addrs[] = {
                .pa_end         = 0x480451ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> dss_dsi2 */
@@ -1414,7 +1419,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi2 = {
        .slave          = &omap44xx_dss_dsi2_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_dss_dsi2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_dsi2_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -1428,9 +1432,7 @@ static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
        .name           = "dss_dsi2",
        .class          = &omap44xx_dsi_hwmod_class,
        .mpu_irqs       = omap44xx_dss_dsi2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_dss_dsi2_irqs),
        .sdma_reqs      = omap44xx_dss_dsi2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_dss_dsi2_sdma_reqs),
        .main_clk       = "dss_fck",
        .prcm = {
                .omap4 = {
@@ -1466,10 +1468,12 @@ static struct omap_hwmod_class omap44xx_hdmi_hwmod_class = {
 static struct omap_hwmod omap44xx_dss_hdmi_hwmod;
 static struct omap_hwmod_irq_info omap44xx_dss_hdmi_irqs[] = {
        { .irq = 101 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_dss_hdmi_sdma_reqs[] = {
        { .dma_req = 75 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_dss_hdmi_dma_addrs[] = {
@@ -1478,6 +1482,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_hdmi_dma_addrs[] = {
                .pa_end         = 0x58006fff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_2 -> dss_hdmi */
@@ -1486,7 +1491,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_hdmi = {
        .slave          = &omap44xx_dss_hdmi_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_dss_hdmi_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_hdmi_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -1496,6 +1500,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_hdmi_addrs[] = {
                .pa_end         = 0x48046fff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> dss_hdmi */
@@ -1504,7 +1509,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_hdmi = {
        .slave          = &omap44xx_dss_hdmi_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_dss_hdmi_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_hdmi_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -1518,9 +1522,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
        .name           = "dss_hdmi",
        .class          = &omap44xx_hdmi_hwmod_class,
        .mpu_irqs       = omap44xx_dss_hdmi_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_dss_hdmi_irqs),
        .sdma_reqs      = omap44xx_dss_hdmi_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_dss_hdmi_sdma_reqs),
        .main_clk       = "dss_fck",
        .prcm = {
                .omap4 = {
@@ -1556,6 +1558,7 @@ static struct omap_hwmod_class omap44xx_rfbi_hwmod_class = {
 static struct omap_hwmod omap44xx_dss_rfbi_hwmod;
 static struct omap_hwmod_dma_info omap44xx_dss_rfbi_sdma_reqs[] = {
        { .dma_req = 13 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_dss_rfbi_dma_addrs[] = {
@@ -1564,6 +1567,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_rfbi_dma_addrs[] = {
                .pa_end         = 0x580020ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_2 -> dss_rfbi */
@@ -1572,7 +1576,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_rfbi = {
        .slave          = &omap44xx_dss_rfbi_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_dss_rfbi_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_rfbi_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -1582,6 +1585,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_rfbi_addrs[] = {
                .pa_end         = 0x480420ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> dss_rfbi */
@@ -1590,7 +1594,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_rfbi = {
        .slave          = &omap44xx_dss_rfbi_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_dss_rfbi_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_rfbi_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -1604,7 +1607,6 @@ static struct omap_hwmod omap44xx_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
        .class          = &omap44xx_rfbi_hwmod_class,
        .sdma_reqs      = omap44xx_dss_rfbi_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_dss_rfbi_sdma_reqs),
        .main_clk       = "dss_fck",
        .prcm = {
                .omap4 = {
@@ -1633,6 +1635,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_venc_dma_addrs[] = {
                .pa_end         = 0x580030ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_2 -> dss_venc */
@@ -1641,7 +1644,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_venc = {
        .slave          = &omap44xx_dss_venc_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_dss_venc_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_venc_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -1651,6 +1653,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_venc_addrs[] = {
                .pa_end         = 0x480430ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> dss_venc */
@@ -1659,7 +1662,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_venc = {
        .slave          = &omap44xx_dss_venc_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_dss_venc_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_dss_venc_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -1716,6 +1718,7 @@ static struct omap_gpio_dev_attr gpio_dev_attr = {
 static struct omap_hwmod omap44xx_gpio1_hwmod;
 static struct omap_hwmod_irq_info omap44xx_gpio1_irqs[] = {
        { .irq = 29 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_gpio1_addrs[] = {
@@ -1724,6 +1727,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio1_addrs[] = {
                .pa_end         = 0x4a3101ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_wkup -> gpio1 */
@@ -1732,7 +1736,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__gpio1 = {
        .slave          = &omap44xx_gpio1_hwmod,
        .clk            = "l4_wkup_clk_mux_ck",
        .addr           = omap44xx_gpio1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_gpio1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1749,7 +1752,6 @@ static struct omap_hwmod omap44xx_gpio1_hwmod = {
        .name           = "gpio1",
        .class          = &omap44xx_gpio_hwmod_class,
        .mpu_irqs       = omap44xx_gpio1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_gpio1_irqs),
        .main_clk       = "gpio1_ick",
        .prcm = {
                .omap4 = {
@@ -1768,6 +1770,7 @@ static struct omap_hwmod omap44xx_gpio1_hwmod = {
 static struct omap_hwmod omap44xx_gpio2_hwmod;
 static struct omap_hwmod_irq_info omap44xx_gpio2_irqs[] = {
        { .irq = 30 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_gpio2_addrs[] = {
@@ -1776,6 +1779,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio2_addrs[] = {
                .pa_end         = 0x480551ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> gpio2 */
@@ -1784,7 +1788,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio2 = {
        .slave          = &omap44xx_gpio2_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_gpio2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_gpio2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1802,7 +1805,6 @@ static struct omap_hwmod omap44xx_gpio2_hwmod = {
        .class          = &omap44xx_gpio_hwmod_class,
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap44xx_gpio2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_gpio2_irqs),
        .main_clk       = "gpio2_ick",
        .prcm = {
                .omap4 = {
@@ -1821,6 +1823,7 @@ static struct omap_hwmod omap44xx_gpio2_hwmod = {
 static struct omap_hwmod omap44xx_gpio3_hwmod;
 static struct omap_hwmod_irq_info omap44xx_gpio3_irqs[] = {
        { .irq = 31 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_gpio3_addrs[] = {
@@ -1829,6 +1832,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio3_addrs[] = {
                .pa_end         = 0x480571ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> gpio3 */
@@ -1837,7 +1841,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio3 = {
        .slave          = &omap44xx_gpio3_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_gpio3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_gpio3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1855,7 +1858,6 @@ static struct omap_hwmod omap44xx_gpio3_hwmod = {
        .class          = &omap44xx_gpio_hwmod_class,
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap44xx_gpio3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_gpio3_irqs),
        .main_clk       = "gpio3_ick",
        .prcm = {
                .omap4 = {
@@ -1874,6 +1876,7 @@ static struct omap_hwmod omap44xx_gpio3_hwmod = {
 static struct omap_hwmod omap44xx_gpio4_hwmod;
 static struct omap_hwmod_irq_info omap44xx_gpio4_irqs[] = {
        { .irq = 32 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_gpio4_addrs[] = {
@@ -1882,6 +1885,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio4_addrs[] = {
                .pa_end         = 0x480591ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> gpio4 */
@@ -1890,7 +1894,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio4 = {
        .slave          = &omap44xx_gpio4_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_gpio4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_gpio4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1908,7 +1911,6 @@ static struct omap_hwmod omap44xx_gpio4_hwmod = {
        .class          = &omap44xx_gpio_hwmod_class,
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap44xx_gpio4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_gpio4_irqs),
        .main_clk       = "gpio4_ick",
        .prcm = {
                .omap4 = {
@@ -1927,6 +1929,7 @@ static struct omap_hwmod omap44xx_gpio4_hwmod = {
 static struct omap_hwmod omap44xx_gpio5_hwmod;
 static struct omap_hwmod_irq_info omap44xx_gpio5_irqs[] = {
        { .irq = 33 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_gpio5_addrs[] = {
@@ -1935,6 +1938,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio5_addrs[] = {
                .pa_end         = 0x4805b1ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> gpio5 */
@@ -1943,7 +1947,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio5 = {
        .slave          = &omap44xx_gpio5_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_gpio5_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_gpio5_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -1961,7 +1964,6 @@ static struct omap_hwmod omap44xx_gpio5_hwmod = {
        .class          = &omap44xx_gpio_hwmod_class,
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap44xx_gpio5_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_gpio5_irqs),
        .main_clk       = "gpio5_ick",
        .prcm = {
                .omap4 = {
@@ -1980,6 +1982,7 @@ static struct omap_hwmod omap44xx_gpio5_hwmod = {
 static struct omap_hwmod omap44xx_gpio6_hwmod;
 static struct omap_hwmod_irq_info omap44xx_gpio6_irqs[] = {
        { .irq = 34 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_gpio6_addrs[] = {
@@ -1988,6 +1991,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio6_addrs[] = {
                .pa_end         = 0x4805d1ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> gpio6 */
@@ -1996,7 +2000,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio6 = {
        .slave          = &omap44xx_gpio6_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_gpio6_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_gpio6_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2014,7 +2017,6 @@ static struct omap_hwmod omap44xx_gpio6_hwmod = {
        .class          = &omap44xx_gpio_hwmod_class,
        .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .mpu_irqs       = omap44xx_gpio6_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_gpio6_irqs),
        .main_clk       = "gpio6_ick",
        .prcm = {
                .omap4 = {
@@ -2044,7 +2046,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_hsi_sysc = {
                           SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
        .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
                           SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
-                          MSTANDBY_SMART),
+                          MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
        .sysc_fields    = &omap_hwmod_sysc_type1,
 };
 
@@ -2058,6 +2060,7 @@ static struct omap_hwmod_irq_info omap44xx_hsi_irqs[] = {
        { .name = "mpu_p1", .irq = 67 + OMAP44XX_IRQ_GIC_START },
        { .name = "mpu_p2", .irq = 68 + OMAP44XX_IRQ_GIC_START },
        { .name = "mpu_dma", .irq = 71 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 /* hsi master ports */
@@ -2071,6 +2074,7 @@ static struct omap_hwmod_addr_space omap44xx_hsi_addrs[] = {
                .pa_end         = 0x4a05bfff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_cfg -> hsi */
@@ -2079,7 +2083,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__hsi = {
        .slave          = &omap44xx_hsi_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_hsi_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_hsi_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2092,9 +2095,8 @@ static struct omap_hwmod omap44xx_hsi_hwmod = {
        .name           = "hsi",
        .class          = &omap44xx_hsi_hwmod_class,
        .mpu_irqs       = omap44xx_hsi_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_hsi_irqs),
        .main_clk       = "hsi_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
                },
@@ -2131,11 +2133,13 @@ static struct omap_hwmod_class omap44xx_i2c_hwmod_class = {
 static struct omap_hwmod omap44xx_i2c1_hwmod;
 static struct omap_hwmod_irq_info omap44xx_i2c1_irqs[] = {
        { .irq = 56 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_i2c1_sdma_reqs[] = {
        { .name = "tx", .dma_req = 26 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 27 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_i2c1_addrs[] = {
@@ -2144,6 +2148,7 @@ static struct omap_hwmod_addr_space omap44xx_i2c1_addrs[] = {
                .pa_end         = 0x480700ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> i2c1 */
@@ -2152,7 +2157,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c1 = {
        .slave          = &omap44xx_i2c1_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_i2c1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_i2c1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2166,9 +2170,7 @@ static struct omap_hwmod omap44xx_i2c1_hwmod = {
        .class          = &omap44xx_i2c_hwmod_class,
        .flags          = HWMOD_INIT_NO_RESET,
        .mpu_irqs       = omap44xx_i2c1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_i2c1_irqs),
        .sdma_reqs      = omap44xx_i2c1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_i2c1_sdma_reqs),
        .main_clk       = "i2c1_fck",
        .prcm = {
                .omap4 = {
@@ -2184,11 +2186,13 @@ static struct omap_hwmod omap44xx_i2c1_hwmod = {
 static struct omap_hwmod omap44xx_i2c2_hwmod;
 static struct omap_hwmod_irq_info omap44xx_i2c2_irqs[] = {
        { .irq = 57 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_i2c2_sdma_reqs[] = {
        { .name = "tx", .dma_req = 28 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 29 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_i2c2_addrs[] = {
@@ -2197,6 +2201,7 @@ static struct omap_hwmod_addr_space omap44xx_i2c2_addrs[] = {
                .pa_end         = 0x480720ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> i2c2 */
@@ -2205,7 +2210,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c2 = {
        .slave          = &omap44xx_i2c2_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_i2c2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_i2c2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2219,9 +2223,7 @@ static struct omap_hwmod omap44xx_i2c2_hwmod = {
        .class          = &omap44xx_i2c_hwmod_class,
        .flags          = HWMOD_INIT_NO_RESET,
        .mpu_irqs       = omap44xx_i2c2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_i2c2_irqs),
        .sdma_reqs      = omap44xx_i2c2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_i2c2_sdma_reqs),
        .main_clk       = "i2c2_fck",
        .prcm = {
                .omap4 = {
@@ -2237,11 +2239,13 @@ static struct omap_hwmod omap44xx_i2c2_hwmod = {
 static struct omap_hwmod omap44xx_i2c3_hwmod;
 static struct omap_hwmod_irq_info omap44xx_i2c3_irqs[] = {
        { .irq = 61 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_i2c3_sdma_reqs[] = {
        { .name = "tx", .dma_req = 24 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 25 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_i2c3_addrs[] = {
@@ -2250,6 +2254,7 @@ static struct omap_hwmod_addr_space omap44xx_i2c3_addrs[] = {
                .pa_end         = 0x480600ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> i2c3 */
@@ -2258,7 +2263,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c3 = {
        .slave          = &omap44xx_i2c3_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_i2c3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_i2c3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2272,9 +2276,7 @@ static struct omap_hwmod omap44xx_i2c3_hwmod = {
        .class          = &omap44xx_i2c_hwmod_class,
        .flags          = HWMOD_INIT_NO_RESET,
        .mpu_irqs       = omap44xx_i2c3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_i2c3_irqs),
        .sdma_reqs      = omap44xx_i2c3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_i2c3_sdma_reqs),
        .main_clk       = "i2c3_fck",
        .prcm = {
                .omap4 = {
@@ -2290,11 +2292,13 @@ static struct omap_hwmod omap44xx_i2c3_hwmod = {
 static struct omap_hwmod omap44xx_i2c4_hwmod;
 static struct omap_hwmod_irq_info omap44xx_i2c4_irqs[] = {
        { .irq = 62 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_i2c4_sdma_reqs[] = {
        { .name = "tx", .dma_req = 123 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 124 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_i2c4_addrs[] = {
@@ -2303,6 +2307,7 @@ static struct omap_hwmod_addr_space omap44xx_i2c4_addrs[] = {
                .pa_end         = 0x483500ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> i2c4 */
@@ -2311,7 +2316,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c4 = {
        .slave          = &omap44xx_i2c4_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_i2c4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_i2c4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2325,9 +2329,7 @@ static struct omap_hwmod omap44xx_i2c4_hwmod = {
        .class          = &omap44xx_i2c_hwmod_class,
        .flags          = HWMOD_INIT_NO_RESET,
        .mpu_irqs       = omap44xx_i2c4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_i2c4_irqs),
        .sdma_reqs      = omap44xx_i2c4_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_i2c4_sdma_reqs),
        .main_clk       = "i2c4_fck",
        .prcm = {
                .omap4 = {
@@ -2351,6 +2353,7 @@ static struct omap_hwmod_class omap44xx_ipu_hwmod_class = {
 /* ipu */
 static struct omap_hwmod_irq_info omap44xx_ipu_irqs[] = {
        { .irq = 100 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_rst_info omap44xx_ipu_c0_resets[] = {
@@ -2390,7 +2393,7 @@ static struct omap_hwmod omap44xx_ipu_c0_hwmod = {
        .flags          = HWMOD_INIT_NO_RESET,
        .rst_lines      = omap44xx_ipu_c0_resets,
        .rst_lines_cnt  = ARRAY_SIZE(omap44xx_ipu_c0_resets),
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
                },
@@ -2405,7 +2408,7 @@ static struct omap_hwmod omap44xx_ipu_c1_hwmod = {
        .flags          = HWMOD_INIT_NO_RESET,
        .rst_lines      = omap44xx_ipu_c1_resets,
        .rst_lines_cnt  = ARRAY_SIZE(omap44xx_ipu_c1_resets),
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
                },
@@ -2417,11 +2420,10 @@ static struct omap_hwmod omap44xx_ipu_hwmod = {
        .name           = "ipu",
        .class          = &omap44xx_ipu_hwmod_class,
        .mpu_irqs       = omap44xx_ipu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_ipu_irqs),
        .rst_lines      = omap44xx_ipu_resets,
        .rst_lines_cnt  = ARRAY_SIZE(omap44xx_ipu_resets),
        .main_clk       = "ipu_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
                        .rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
@@ -2446,7 +2448,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = {
                           SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
        .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
                           SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
-                          MSTANDBY_SMART),
+                          MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
        .sysc_fields    = &omap_hwmod_sysc_type2,
 };
 
@@ -2458,6 +2460,7 @@ static struct omap_hwmod_class omap44xx_iss_hwmod_class = {
 /* iss */
 static struct omap_hwmod_irq_info omap44xx_iss_irqs[] = {
        { .irq = 24 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_iss_sdma_reqs[] = {
@@ -2465,6 +2468,7 @@ static struct omap_hwmod_dma_info omap44xx_iss_sdma_reqs[] = {
        { .name = "2", .dma_req = 9 + OMAP44XX_DMA_REQ_START },
        { .name = "3", .dma_req = 11 + OMAP44XX_DMA_REQ_START },
        { .name = "4", .dma_req = 12 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 /* iss master ports */
@@ -2478,6 +2482,7 @@ static struct omap_hwmod_addr_space omap44xx_iss_addrs[] = {
                .pa_end         = 0x520000ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_2 -> iss */
@@ -2486,7 +2491,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
        .slave          = &omap44xx_iss_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_iss_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_iss_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2503,11 +2507,9 @@ static struct omap_hwmod omap44xx_iss_hwmod = {
        .name           = "iss",
        .class          = &omap44xx_iss_hwmod_class,
        .mpu_irqs       = omap44xx_iss_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_iss_irqs),
        .sdma_reqs      = omap44xx_iss_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_iss_sdma_reqs),
        .main_clk       = "iss_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_CAM_ISS_CLKCTRL,
                },
@@ -2535,6 +2537,7 @@ static struct omap_hwmod_irq_info omap44xx_iva_irqs[] = {
        { .name = "sync_1", .irq = 103 + OMAP44XX_IRQ_GIC_START },
        { .name = "sync_0", .irq = 104 + OMAP44XX_IRQ_GIC_START },
        { .name = "mailbox_0", .irq = 107 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_rst_info omap44xx_iva_resets[] = {
@@ -2561,6 +2564,7 @@ static struct omap_hwmod_addr_space omap44xx_iva_addrs[] = {
                .pa_end         = 0x5a07ffff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l3_main_2 -> iva */
@@ -2569,7 +2573,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iva = {
        .slave          = &omap44xx_iva_hwmod,
        .clk            = "l3_div_ck",
        .addr           = omap44xx_iva_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_iva_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -2613,7 +2616,6 @@ static struct omap_hwmod omap44xx_iva_hwmod = {
        .name           = "iva",
        .class          = &omap44xx_iva_hwmod_class,
        .mpu_irqs       = omap44xx_iva_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_iva_irqs),
        .rst_lines      = omap44xx_iva_resets,
        .rst_lines_cnt  = ARRAY_SIZE(omap44xx_iva_resets),
        .main_clk       = "iva_fck",
@@ -2656,6 +2658,7 @@ static struct omap_hwmod_class omap44xx_kbd_hwmod_class = {
 static struct omap_hwmod omap44xx_kbd_hwmod;
 static struct omap_hwmod_irq_info omap44xx_kbd_irqs[] = {
        { .irq = 120 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_kbd_addrs[] = {
@@ -2664,6 +2667,7 @@ static struct omap_hwmod_addr_space omap44xx_kbd_addrs[] = {
                .pa_end         = 0x4a31c07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_wkup -> kbd */
@@ -2672,7 +2676,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__kbd = {
        .slave          = &omap44xx_kbd_hwmod,
        .clk            = "l4_wkup_clk_mux_ck",
        .addr           = omap44xx_kbd_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_kbd_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2685,9 +2688,8 @@ static struct omap_hwmod omap44xx_kbd_hwmod = {
        .name           = "kbd",
        .class          = &omap44xx_kbd_hwmod_class,
        .mpu_irqs       = omap44xx_kbd_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_kbd_irqs),
        .main_clk       = "kbd_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL,
                },
@@ -2721,6 +2723,7 @@ static struct omap_hwmod_class omap44xx_mailbox_hwmod_class = {
 static struct omap_hwmod omap44xx_mailbox_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mailbox_irqs[] = {
        { .irq = 26 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mailbox_addrs[] = {
@@ -2729,6 +2732,7 @@ static struct omap_hwmod_addr_space omap44xx_mailbox_addrs[] = {
                .pa_end         = 0x4a0f41ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_cfg -> mailbox */
@@ -2737,7 +2741,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__mailbox = {
        .slave          = &omap44xx_mailbox_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mailbox_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mailbox_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2750,8 +2753,7 @@ static struct omap_hwmod omap44xx_mailbox_hwmod = {
        .name           = "mailbox",
        .class          = &omap44xx_mailbox_hwmod_class,
        .mpu_irqs       = omap44xx_mailbox_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mailbox_irqs),
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_L4CFG_MAILBOX_CLKCTRL,
                },
@@ -2784,11 +2786,13 @@ static struct omap_hwmod_class omap44xx_mcbsp_hwmod_class = {
 static struct omap_hwmod omap44xx_mcbsp1_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mcbsp1_irqs[] = {
        { .irq = 17 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
        { .name = "tx", .dma_req = 32 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 33 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mcbsp1_addrs[] = {
@@ -2798,6 +2802,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp1_addrs[] = {
                .pa_end         = 0x401220ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> mcbsp1 */
@@ -2806,7 +2811,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1 = {
        .slave          = &omap44xx_mcbsp1_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_mcbsp1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcbsp1_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -2817,6 +2821,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp1_dma_addrs[] = {
                .pa_end         = 0x490220ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> mcbsp1 (dma) */
@@ -2825,7 +2830,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1_dma = {
        .slave          = &omap44xx_mcbsp1_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_mcbsp1_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcbsp1_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -2839,9 +2843,7 @@ static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
        .name           = "mcbsp1",
        .class          = &omap44xx_mcbsp_hwmod_class,
        .mpu_irqs       = omap44xx_mcbsp1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mcbsp1_irqs),
        .sdma_reqs      = omap44xx_mcbsp1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mcbsp1_sdma_reqs),
        .main_clk       = "mcbsp1_fck",
        .prcm = {
                .omap4 = {
@@ -2857,11 +2859,13 @@ static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
 static struct omap_hwmod omap44xx_mcbsp2_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mcbsp2_irqs[] = {
        { .irq = 22 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
        { .name = "tx", .dma_req = 16 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 17 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mcbsp2_addrs[] = {
@@ -2871,6 +2875,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp2_addrs[] = {
                .pa_end         = 0x401240ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> mcbsp2 */
@@ -2879,7 +2884,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2 = {
        .slave          = &omap44xx_mcbsp2_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_mcbsp2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcbsp2_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -2890,6 +2894,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp2_dma_addrs[] = {
                .pa_end         = 0x490240ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> mcbsp2 (dma) */
@@ -2898,7 +2903,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2_dma = {
        .slave          = &omap44xx_mcbsp2_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_mcbsp2_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcbsp2_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -2912,9 +2916,7 @@ static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
        .name           = "mcbsp2",
        .class          = &omap44xx_mcbsp_hwmod_class,
        .mpu_irqs       = omap44xx_mcbsp2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mcbsp2_irqs),
        .sdma_reqs      = omap44xx_mcbsp2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mcbsp2_sdma_reqs),
        .main_clk       = "mcbsp2_fck",
        .prcm = {
                .omap4 = {
@@ -2930,11 +2932,13 @@ static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
 static struct omap_hwmod omap44xx_mcbsp3_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mcbsp3_irqs[] = {
        { .irq = 23 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
        { .name = "tx", .dma_req = 18 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 19 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mcbsp3_addrs[] = {
@@ -2944,6 +2948,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp3_addrs[] = {
                .pa_end         = 0x401260ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> mcbsp3 */
@@ -2952,7 +2957,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3 = {
        .slave          = &omap44xx_mcbsp3_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_mcbsp3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcbsp3_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -2963,6 +2967,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp3_dma_addrs[] = {
                .pa_end         = 0x490260ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> mcbsp3 (dma) */
@@ -2971,7 +2976,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3_dma = {
        .slave          = &omap44xx_mcbsp3_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_mcbsp3_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcbsp3_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -2985,9 +2989,7 @@ static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
        .name           = "mcbsp3",
        .class          = &omap44xx_mcbsp_hwmod_class,
        .mpu_irqs       = omap44xx_mcbsp3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mcbsp3_irqs),
        .sdma_reqs      = omap44xx_mcbsp3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mcbsp3_sdma_reqs),
        .main_clk       = "mcbsp3_fck",
        .prcm = {
                .omap4 = {
@@ -3003,11 +3005,13 @@ static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
 static struct omap_hwmod omap44xx_mcbsp4_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mcbsp4_irqs[] = {
        { .irq = 16 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
        { .name = "tx", .dma_req = 30 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 31 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mcbsp4_addrs[] = {
@@ -3016,6 +3020,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp4_addrs[] = {
                .pa_end         = 0x480960ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcbsp4 */
@@ -3024,7 +3029,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcbsp4 = {
        .slave          = &omap44xx_mcbsp4_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mcbsp4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcbsp4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3037,9 +3041,7 @@ static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
        .name           = "mcbsp4",
        .class          = &omap44xx_mcbsp_hwmod_class,
        .mpu_irqs       = omap44xx_mcbsp4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mcbsp4_irqs),
        .sdma_reqs      = omap44xx_mcbsp4_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mcbsp4_sdma_reqs),
        .main_clk       = "mcbsp4_fck",
        .prcm = {
                .omap4 = {
@@ -3076,11 +3078,13 @@ static struct omap_hwmod_class omap44xx_mcpdm_hwmod_class = {
 static struct omap_hwmod omap44xx_mcpdm_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mcpdm_irqs[] = {
        { .irq = 112 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mcpdm_sdma_reqs[] = {
        { .name = "up_link", .dma_req = 64 + OMAP44XX_DMA_REQ_START },
        { .name = "dn_link", .dma_req = 65 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mcpdm_addrs[] = {
@@ -3089,6 +3093,7 @@ static struct omap_hwmod_addr_space omap44xx_mcpdm_addrs[] = {
                .pa_end         = 0x4013207f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> mcpdm */
@@ -3097,7 +3102,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm = {
        .slave          = &omap44xx_mcpdm_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_mcpdm_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcpdm_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -3107,6 +3111,7 @@ static struct omap_hwmod_addr_space omap44xx_mcpdm_dma_addrs[] = {
                .pa_end         = 0x4903207f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> mcpdm (dma) */
@@ -3115,7 +3120,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm_dma = {
        .slave          = &omap44xx_mcpdm_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_mcpdm_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcpdm_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -3129,11 +3133,9 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
        .name           = "mcpdm",
        .class          = &omap44xx_mcpdm_hwmod_class,
        .mpu_irqs       = omap44xx_mcpdm_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mcpdm_irqs),
        .sdma_reqs      = omap44xx_mcpdm_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mcpdm_sdma_reqs),
        .main_clk       = "mcpdm_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM1_ABE_PDM_CLKCTRL,
                },
@@ -3169,6 +3171,7 @@ static struct omap_hwmod_class omap44xx_mcspi_hwmod_class = {
 static struct omap_hwmod omap44xx_mcspi1_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mcspi1_irqs[] = {
        { .irq = 65 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mcspi1_sdma_reqs[] = {
@@ -3180,6 +3183,7 @@ static struct omap_hwmod_dma_info omap44xx_mcspi1_sdma_reqs[] = {
        { .name = "rx2", .dma_req = 39 + OMAP44XX_DMA_REQ_START },
        { .name = "tx3", .dma_req = 40 + OMAP44XX_DMA_REQ_START },
        { .name = "rx3", .dma_req = 41 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mcspi1_addrs[] = {
@@ -3188,6 +3192,7 @@ static struct omap_hwmod_addr_space omap44xx_mcspi1_addrs[] = {
                .pa_end         = 0x480981ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcspi1 */
@@ -3196,7 +3201,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi1 = {
        .slave          = &omap44xx_mcspi1_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mcspi1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcspi1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3214,9 +3218,7 @@ static struct omap_hwmod omap44xx_mcspi1_hwmod = {
        .name           = "mcspi1",
        .class          = &omap44xx_mcspi_hwmod_class,
        .mpu_irqs       = omap44xx_mcspi1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mcspi1_irqs),
        .sdma_reqs      = omap44xx_mcspi1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mcspi1_sdma_reqs),
        .main_clk       = "mcspi1_fck",
        .prcm = {
                .omap4 = {
@@ -3233,6 +3235,7 @@ static struct omap_hwmod omap44xx_mcspi1_hwmod = {
 static struct omap_hwmod omap44xx_mcspi2_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mcspi2_irqs[] = {
        { .irq = 66 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mcspi2_sdma_reqs[] = {
@@ -3240,6 +3243,7 @@ static struct omap_hwmod_dma_info omap44xx_mcspi2_sdma_reqs[] = {
        { .name = "rx0", .dma_req = 43 + OMAP44XX_DMA_REQ_START },
        { .name = "tx1", .dma_req = 44 + OMAP44XX_DMA_REQ_START },
        { .name = "rx1", .dma_req = 45 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mcspi2_addrs[] = {
@@ -3248,6 +3252,7 @@ static struct omap_hwmod_addr_space omap44xx_mcspi2_addrs[] = {
                .pa_end         = 0x4809a1ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcspi2 */
@@ -3256,7 +3261,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi2 = {
        .slave          = &omap44xx_mcspi2_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mcspi2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcspi2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3274,9 +3278,7 @@ static struct omap_hwmod omap44xx_mcspi2_hwmod = {
        .name           = "mcspi2",
        .class          = &omap44xx_mcspi_hwmod_class,
        .mpu_irqs       = omap44xx_mcspi2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mcspi2_irqs),
        .sdma_reqs      = omap44xx_mcspi2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mcspi2_sdma_reqs),
        .main_clk       = "mcspi2_fck",
        .prcm = {
                .omap4 = {
@@ -3293,6 +3295,7 @@ static struct omap_hwmod omap44xx_mcspi2_hwmod = {
 static struct omap_hwmod omap44xx_mcspi3_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mcspi3_irqs[] = {
        { .irq = 91 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mcspi3_sdma_reqs[] = {
@@ -3300,6 +3303,7 @@ static struct omap_hwmod_dma_info omap44xx_mcspi3_sdma_reqs[] = {
        { .name = "rx0", .dma_req = 15 + OMAP44XX_DMA_REQ_START },
        { .name = "tx1", .dma_req = 22 + OMAP44XX_DMA_REQ_START },
        { .name = "rx1", .dma_req = 23 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mcspi3_addrs[] = {
@@ -3308,6 +3312,7 @@ static struct omap_hwmod_addr_space omap44xx_mcspi3_addrs[] = {
                .pa_end         = 0x480b81ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcspi3 */
@@ -3316,7 +3321,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi3 = {
        .slave          = &omap44xx_mcspi3_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mcspi3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcspi3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3334,9 +3338,7 @@ static struct omap_hwmod omap44xx_mcspi3_hwmod = {
        .name           = "mcspi3",
        .class          = &omap44xx_mcspi_hwmod_class,
        .mpu_irqs       = omap44xx_mcspi3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mcspi3_irqs),
        .sdma_reqs      = omap44xx_mcspi3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mcspi3_sdma_reqs),
        .main_clk       = "mcspi3_fck",
        .prcm = {
                .omap4 = {
@@ -3353,11 +3355,13 @@ static struct omap_hwmod omap44xx_mcspi3_hwmod = {
 static struct omap_hwmod omap44xx_mcspi4_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mcspi4_irqs[] = {
        { .irq = 48 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mcspi4_sdma_reqs[] = {
        { .name = "tx0", .dma_req = 69 + OMAP44XX_DMA_REQ_START },
        { .name = "rx0", .dma_req = 70 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mcspi4_addrs[] = {
@@ -3366,6 +3370,7 @@ static struct omap_hwmod_addr_space omap44xx_mcspi4_addrs[] = {
                .pa_end         = 0x480ba1ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mcspi4 */
@@ -3374,7 +3379,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi4 = {
        .slave          = &omap44xx_mcspi4_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mcspi4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mcspi4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3392,9 +3396,7 @@ static struct omap_hwmod omap44xx_mcspi4_hwmod = {
        .name           = "mcspi4",
        .class          = &omap44xx_mcspi_hwmod_class,
        .mpu_irqs       = omap44xx_mcspi4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mcspi4_irqs),
        .sdma_reqs      = omap44xx_mcspi4_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mcspi4_sdma_reqs),
        .main_clk       = "mcspi4_fck",
        .prcm = {
                .omap4 = {
@@ -3420,7 +3422,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_mmc_sysc = {
                           SYSC_HAS_SOFTRESET),
        .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
                           SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
-                          MSTANDBY_SMART),
+                          MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
        .sysc_fields    = &omap_hwmod_sysc_type2,
 };
 
@@ -3430,14 +3432,15 @@ static struct omap_hwmod_class omap44xx_mmc_hwmod_class = {
 };
 
 /* mmc1 */
-
 static struct omap_hwmod_irq_info omap44xx_mmc1_irqs[] = {
        { .irq = 83 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mmc1_sdma_reqs[] = {
        { .name = "tx", .dma_req = 60 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 61 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 /* mmc1 master ports */
@@ -3451,6 +3454,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc1_addrs[] = {
                .pa_end         = 0x4809c3ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mmc1 */
@@ -3459,7 +3463,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc1 = {
        .slave          = &omap44xx_mmc1_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mmc1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mmc1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3477,11 +3480,9 @@ static struct omap_hwmod omap44xx_mmc1_hwmod = {
        .name           = "mmc1",
        .class          = &omap44xx_mmc_hwmod_class,
        .mpu_irqs       = omap44xx_mmc1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mmc1_irqs),
        .sdma_reqs      = omap44xx_mmc1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mmc1_sdma_reqs),
        .main_clk       = "mmc1_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
                },
@@ -3497,11 +3498,13 @@ static struct omap_hwmod omap44xx_mmc1_hwmod = {
 /* mmc2 */
 static struct omap_hwmod_irq_info omap44xx_mmc2_irqs[] = {
        { .irq = 86 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mmc2_sdma_reqs[] = {
        { .name = "tx", .dma_req = 46 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 47 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 /* mmc2 master ports */
@@ -3515,6 +3518,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc2_addrs[] = {
                .pa_end         = 0x480b43ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mmc2 */
@@ -3523,7 +3527,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc2 = {
        .slave          = &omap44xx_mmc2_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mmc2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mmc2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3536,11 +3539,9 @@ static struct omap_hwmod omap44xx_mmc2_hwmod = {
        .name           = "mmc2",
        .class          = &omap44xx_mmc_hwmod_class,
        .mpu_irqs       = omap44xx_mmc2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mmc2_irqs),
        .sdma_reqs      = omap44xx_mmc2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mmc2_sdma_reqs),
        .main_clk       = "mmc2_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
                },
@@ -3556,11 +3557,13 @@ static struct omap_hwmod omap44xx_mmc2_hwmod = {
 static struct omap_hwmod omap44xx_mmc3_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mmc3_irqs[] = {
        { .irq = 94 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mmc3_sdma_reqs[] = {
        { .name = "tx", .dma_req = 76 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 77 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mmc3_addrs[] = {
@@ -3569,6 +3572,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc3_addrs[] = {
                .pa_end         = 0x480ad3ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mmc3 */
@@ -3577,7 +3581,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc3 = {
        .slave          = &omap44xx_mmc3_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mmc3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mmc3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3590,11 +3593,9 @@ static struct omap_hwmod omap44xx_mmc3_hwmod = {
        .name           = "mmc3",
        .class          = &omap44xx_mmc_hwmod_class,
        .mpu_irqs       = omap44xx_mmc3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mmc3_irqs),
        .sdma_reqs      = omap44xx_mmc3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mmc3_sdma_reqs),
        .main_clk       = "mmc3_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_L4PER_MMCSD3_CLKCTRL,
                },
@@ -3608,11 +3609,13 @@ static struct omap_hwmod omap44xx_mmc3_hwmod = {
 static struct omap_hwmod omap44xx_mmc4_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mmc4_irqs[] = {
        { .irq = 96 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mmc4_sdma_reqs[] = {
        { .name = "tx", .dma_req = 56 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 57 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mmc4_addrs[] = {
@@ -3621,6 +3624,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc4_addrs[] = {
                .pa_end         = 0x480d13ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mmc4 */
@@ -3629,7 +3633,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc4 = {
        .slave          = &omap44xx_mmc4_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mmc4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mmc4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3642,11 +3645,10 @@ static struct omap_hwmod omap44xx_mmc4_hwmod = {
        .name           = "mmc4",
        .class          = &omap44xx_mmc_hwmod_class,
        .mpu_irqs       = omap44xx_mmc4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mmc4_irqs),
+
        .sdma_reqs      = omap44xx_mmc4_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mmc4_sdma_reqs),
        .main_clk       = "mmc4_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_L4PER_MMCSD4_CLKCTRL,
                },
@@ -3660,11 +3662,13 @@ static struct omap_hwmod omap44xx_mmc4_hwmod = {
 static struct omap_hwmod omap44xx_mmc5_hwmod;
 static struct omap_hwmod_irq_info omap44xx_mmc5_irqs[] = {
        { .irq = 59 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_mmc5_sdma_reqs[] = {
        { .name = "tx", .dma_req = 58 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 59 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_mmc5_addrs[] = {
@@ -3673,6 +3677,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc5_addrs[] = {
                .pa_end         = 0x480d53ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> mmc5 */
@@ -3681,7 +3686,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc5 = {
        .slave          = &omap44xx_mmc5_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_mmc5_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_mmc5_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3694,11 +3698,9 @@ static struct omap_hwmod omap44xx_mmc5_hwmod = {
        .name           = "mmc5",
        .class          = &omap44xx_mmc_hwmod_class,
        .mpu_irqs       = omap44xx_mmc5_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mmc5_irqs),
        .sdma_reqs      = omap44xx_mmc5_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_mmc5_sdma_reqs),
        .main_clk       = "mmc5_fck",
-       .prcm           = {
+       .prcm = {
                .omap4 = {
                        .clkctrl_reg = OMAP4430_CM_L4PER_MMCSD5_CLKCTRL,
                },
@@ -3722,6 +3724,7 @@ static struct omap_hwmod_irq_info omap44xx_mpu_irqs[] = {
        { .name = "pl310", .irq = 0 + OMAP44XX_IRQ_GIC_START },
        { .name = "cti0", .irq = 1 + OMAP44XX_IRQ_GIC_START },
        { .name = "cti1", .irq = 2 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 /* mpu master ports */
@@ -3734,9 +3737,8 @@ static struct omap_hwmod_ocp_if *omap44xx_mpu_masters[] = {
 static struct omap_hwmod omap44xx_mpu_hwmod = {
        .name           = "mpu",
        .class          = &omap44xx_mpu_hwmod_class,
-       .flags          = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+       .flags          = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
        .mpu_irqs       = omap44xx_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_mpu_irqs),
        .main_clk       = "dpll_mpu_m2_ck",
        .prcm = {
                .omap4 = {
@@ -3778,6 +3780,7 @@ static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = {
 static struct omap_hwmod omap44xx_smartreflex_core_hwmod;
 static struct omap_hwmod_irq_info omap44xx_smartreflex_core_irqs[] = {
        { .irq = 19 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_smartreflex_core_addrs[] = {
@@ -3786,6 +3789,7 @@ static struct omap_hwmod_addr_space omap44xx_smartreflex_core_addrs[] = {
                .pa_end         = 0x4a0dd03f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_cfg -> smartreflex_core */
@@ -3794,7 +3798,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_core = {
        .slave          = &omap44xx_smartreflex_core_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_smartreflex_core_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_smartreflex_core_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3807,7 +3810,7 @@ static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
        .name           = "smartreflex_core",
        .class          = &omap44xx_smartreflex_hwmod_class,
        .mpu_irqs       = omap44xx_smartreflex_core_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_smartreflex_core_irqs),
+
        .main_clk       = "smartreflex_core_fck",
        .vdd_name       = "core",
        .prcm = {
@@ -3824,6 +3827,7 @@ static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
 static struct omap_hwmod omap44xx_smartreflex_iva_hwmod;
 static struct omap_hwmod_irq_info omap44xx_smartreflex_iva_irqs[] = {
        { .irq = 102 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_smartreflex_iva_addrs[] = {
@@ -3832,6 +3836,7 @@ static struct omap_hwmod_addr_space omap44xx_smartreflex_iva_addrs[] = {
                .pa_end         = 0x4a0db03f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_cfg -> smartreflex_iva */
@@ -3840,7 +3845,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_iva = {
        .slave          = &omap44xx_smartreflex_iva_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_smartreflex_iva_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_smartreflex_iva_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3853,7 +3857,6 @@ static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
        .name           = "smartreflex_iva",
        .class          = &omap44xx_smartreflex_hwmod_class,
        .mpu_irqs       = omap44xx_smartreflex_iva_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_smartreflex_iva_irqs),
        .main_clk       = "smartreflex_iva_fck",
        .vdd_name       = "iva",
        .prcm = {
@@ -3870,6 +3873,7 @@ static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
 static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod;
 static struct omap_hwmod_irq_info omap44xx_smartreflex_mpu_irqs[] = {
        { .irq = 18 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_smartreflex_mpu_addrs[] = {
@@ -3878,6 +3882,7 @@ static struct omap_hwmod_addr_space omap44xx_smartreflex_mpu_addrs[] = {
                .pa_end         = 0x4a0d903f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_cfg -> smartreflex_mpu */
@@ -3886,7 +3891,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_mpu = {
        .slave          = &omap44xx_smartreflex_mpu_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_smartreflex_mpu_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_smartreflex_mpu_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3899,7 +3903,6 @@ static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
        .name           = "smartreflex_mpu",
        .class          = &omap44xx_smartreflex_hwmod_class,
        .mpu_irqs       = omap44xx_smartreflex_mpu_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_smartreflex_mpu_irqs),
        .main_clk       = "smartreflex_mpu_fck",
        .vdd_name       = "mpu",
        .prcm = {
@@ -3943,6 +3946,7 @@ static struct omap_hwmod_addr_space omap44xx_spinlock_addrs[] = {
                .pa_end         = 0x4a0f6fff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_cfg -> spinlock */
@@ -3951,7 +3955,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__spinlock = {
        .slave          = &omap44xx_spinlock_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_spinlock_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_spinlock_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4015,6 +4018,7 @@ static struct omap_hwmod_class omap44xx_timer_hwmod_class = {
 static struct omap_hwmod omap44xx_timer1_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer1_irqs[] = {
        { .irq = 37 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer1_addrs[] = {
@@ -4023,6 +4027,7 @@ static struct omap_hwmod_addr_space omap44xx_timer1_addrs[] = {
                .pa_end         = 0x4a31807f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_wkup -> timer1 */
@@ -4031,7 +4036,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__timer1 = {
        .slave          = &omap44xx_timer1_hwmod,
        .clk            = "l4_wkup_clk_mux_ck",
        .addr           = omap44xx_timer1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4044,7 +4048,6 @@ static struct omap_hwmod omap44xx_timer1_hwmod = {
        .name           = "timer1",
        .class          = &omap44xx_timer_1ms_hwmod_class,
        .mpu_irqs       = omap44xx_timer1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer1_irqs),
        .main_clk       = "timer1_fck",
        .prcm = {
                .omap4 = {
@@ -4060,6 +4063,7 @@ static struct omap_hwmod omap44xx_timer1_hwmod = {
 static struct omap_hwmod omap44xx_timer2_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer2_irqs[] = {
        { .irq = 38 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer2_addrs[] = {
@@ -4068,6 +4072,7 @@ static struct omap_hwmod_addr_space omap44xx_timer2_addrs[] = {
                .pa_end         = 0x4803207f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer2 */
@@ -4076,7 +4081,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer2 = {
        .slave          = &omap44xx_timer2_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_timer2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4089,7 +4093,6 @@ static struct omap_hwmod omap44xx_timer2_hwmod = {
        .name           = "timer2",
        .class          = &omap44xx_timer_1ms_hwmod_class,
        .mpu_irqs       = omap44xx_timer2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer2_irqs),
        .main_clk       = "timer2_fck",
        .prcm = {
                .omap4 = {
@@ -4105,6 +4108,7 @@ static struct omap_hwmod omap44xx_timer2_hwmod = {
 static struct omap_hwmod omap44xx_timer3_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer3_irqs[] = {
        { .irq = 39 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer3_addrs[] = {
@@ -4113,6 +4117,7 @@ static struct omap_hwmod_addr_space omap44xx_timer3_addrs[] = {
                .pa_end         = 0x4803407f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer3 */
@@ -4121,7 +4126,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer3 = {
        .slave          = &omap44xx_timer3_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_timer3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4134,7 +4138,6 @@ static struct omap_hwmod omap44xx_timer3_hwmod = {
        .name           = "timer3",
        .class          = &omap44xx_timer_hwmod_class,
        .mpu_irqs       = omap44xx_timer3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer3_irqs),
        .main_clk       = "timer3_fck",
        .prcm = {
                .omap4 = {
@@ -4150,6 +4153,7 @@ static struct omap_hwmod omap44xx_timer3_hwmod = {
 static struct omap_hwmod omap44xx_timer4_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer4_irqs[] = {
        { .irq = 40 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer4_addrs[] = {
@@ -4158,6 +4162,7 @@ static struct omap_hwmod_addr_space omap44xx_timer4_addrs[] = {
                .pa_end         = 0x4803607f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer4 */
@@ -4166,7 +4171,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer4 = {
        .slave          = &omap44xx_timer4_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_timer4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4179,7 +4183,6 @@ static struct omap_hwmod omap44xx_timer4_hwmod = {
        .name           = "timer4",
        .class          = &omap44xx_timer_hwmod_class,
        .mpu_irqs       = omap44xx_timer4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer4_irqs),
        .main_clk       = "timer4_fck",
        .prcm = {
                .omap4 = {
@@ -4195,6 +4198,7 @@ static struct omap_hwmod omap44xx_timer4_hwmod = {
 static struct omap_hwmod omap44xx_timer5_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer5_irqs[] = {
        { .irq = 41 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer5_addrs[] = {
@@ -4203,6 +4207,7 @@ static struct omap_hwmod_addr_space omap44xx_timer5_addrs[] = {
                .pa_end         = 0x4013807f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> timer5 */
@@ -4211,7 +4216,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5 = {
        .slave          = &omap44xx_timer5_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_timer5_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer5_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -4221,6 +4225,7 @@ static struct omap_hwmod_addr_space omap44xx_timer5_dma_addrs[] = {
                .pa_end         = 0x4903807f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> timer5 (dma) */
@@ -4229,7 +4234,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5_dma = {
        .slave          = &omap44xx_timer5_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_timer5_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer5_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -4243,7 +4247,6 @@ static struct omap_hwmod omap44xx_timer5_hwmod = {
        .name           = "timer5",
        .class          = &omap44xx_timer_hwmod_class,
        .mpu_irqs       = omap44xx_timer5_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer5_irqs),
        .main_clk       = "timer5_fck",
        .prcm = {
                .omap4 = {
@@ -4259,6 +4262,7 @@ static struct omap_hwmod omap44xx_timer5_hwmod = {
 static struct omap_hwmod omap44xx_timer6_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer6_irqs[] = {
        { .irq = 42 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer6_addrs[] = {
@@ -4267,6 +4271,7 @@ static struct omap_hwmod_addr_space omap44xx_timer6_addrs[] = {
                .pa_end         = 0x4013a07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> timer6 */
@@ -4275,7 +4280,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6 = {
        .slave          = &omap44xx_timer6_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_timer6_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer6_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -4285,6 +4289,7 @@ static struct omap_hwmod_addr_space omap44xx_timer6_dma_addrs[] = {
                .pa_end         = 0x4903a07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> timer6 (dma) */
@@ -4293,7 +4298,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6_dma = {
        .slave          = &omap44xx_timer6_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_timer6_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer6_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -4307,7 +4311,7 @@ static struct omap_hwmod omap44xx_timer6_hwmod = {
        .name           = "timer6",
        .class          = &omap44xx_timer_hwmod_class,
        .mpu_irqs       = omap44xx_timer6_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer6_irqs),
+
        .main_clk       = "timer6_fck",
        .prcm = {
                .omap4 = {
@@ -4323,6 +4327,7 @@ static struct omap_hwmod omap44xx_timer6_hwmod = {
 static struct omap_hwmod omap44xx_timer7_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer7_irqs[] = {
        { .irq = 43 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer7_addrs[] = {
@@ -4331,6 +4336,7 @@ static struct omap_hwmod_addr_space omap44xx_timer7_addrs[] = {
                .pa_end         = 0x4013c07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> timer7 */
@@ -4339,7 +4345,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7 = {
        .slave          = &omap44xx_timer7_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_timer7_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer7_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -4349,6 +4354,7 @@ static struct omap_hwmod_addr_space omap44xx_timer7_dma_addrs[] = {
                .pa_end         = 0x4903c07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> timer7 (dma) */
@@ -4357,7 +4363,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7_dma = {
        .slave          = &omap44xx_timer7_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_timer7_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer7_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -4371,7 +4376,6 @@ static struct omap_hwmod omap44xx_timer7_hwmod = {
        .name           = "timer7",
        .class          = &omap44xx_timer_hwmod_class,
        .mpu_irqs       = omap44xx_timer7_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer7_irqs),
        .main_clk       = "timer7_fck",
        .prcm = {
                .omap4 = {
@@ -4387,6 +4391,7 @@ static struct omap_hwmod omap44xx_timer7_hwmod = {
 static struct omap_hwmod omap44xx_timer8_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer8_irqs[] = {
        { .irq = 44 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer8_addrs[] = {
@@ -4395,6 +4400,7 @@ static struct omap_hwmod_addr_space omap44xx_timer8_addrs[] = {
                .pa_end         = 0x4013e07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> timer8 */
@@ -4403,7 +4409,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8 = {
        .slave          = &omap44xx_timer8_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_timer8_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer8_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -4413,6 +4418,7 @@ static struct omap_hwmod_addr_space omap44xx_timer8_dma_addrs[] = {
                .pa_end         = 0x4903e07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> timer8 (dma) */
@@ -4421,7 +4427,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8_dma = {
        .slave          = &omap44xx_timer8_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_timer8_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer8_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -4435,7 +4440,6 @@ static struct omap_hwmod omap44xx_timer8_hwmod = {
        .name           = "timer8",
        .class          = &omap44xx_timer_hwmod_class,
        .mpu_irqs       = omap44xx_timer8_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer8_irqs),
        .main_clk       = "timer8_fck",
        .prcm = {
                .omap4 = {
@@ -4451,6 +4455,7 @@ static struct omap_hwmod omap44xx_timer8_hwmod = {
 static struct omap_hwmod omap44xx_timer9_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer9_irqs[] = {
        { .irq = 45 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer9_addrs[] = {
@@ -4459,6 +4464,7 @@ static struct omap_hwmod_addr_space omap44xx_timer9_addrs[] = {
                .pa_end         = 0x4803e07f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer9 */
@@ -4467,7 +4473,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer9 = {
        .slave          = &omap44xx_timer9_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_timer9_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer9_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4480,7 +4485,6 @@ static struct omap_hwmod omap44xx_timer9_hwmod = {
        .name           = "timer9",
        .class          = &omap44xx_timer_hwmod_class,
        .mpu_irqs       = omap44xx_timer9_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer9_irqs),
        .main_clk       = "timer9_fck",
        .prcm = {
                .omap4 = {
@@ -4496,6 +4500,7 @@ static struct omap_hwmod omap44xx_timer9_hwmod = {
 static struct omap_hwmod omap44xx_timer10_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer10_irqs[] = {
        { .irq = 46 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer10_addrs[] = {
@@ -4504,6 +4509,7 @@ static struct omap_hwmod_addr_space omap44xx_timer10_addrs[] = {
                .pa_end         = 0x4808607f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer10 */
@@ -4512,7 +4518,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer10 = {
        .slave          = &omap44xx_timer10_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_timer10_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer10_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4525,7 +4530,6 @@ static struct omap_hwmod omap44xx_timer10_hwmod = {
        .name           = "timer10",
        .class          = &omap44xx_timer_1ms_hwmod_class,
        .mpu_irqs       = omap44xx_timer10_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer10_irqs),
        .main_clk       = "timer10_fck",
        .prcm = {
                .omap4 = {
@@ -4541,6 +4545,7 @@ static struct omap_hwmod omap44xx_timer10_hwmod = {
 static struct omap_hwmod omap44xx_timer11_hwmod;
 static struct omap_hwmod_irq_info omap44xx_timer11_irqs[] = {
        { .irq = 47 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_timer11_addrs[] = {
@@ -4549,6 +4554,7 @@ static struct omap_hwmod_addr_space omap44xx_timer11_addrs[] = {
                .pa_end         = 0x4808807f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> timer11 */
@@ -4557,7 +4563,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer11 = {
        .slave          = &omap44xx_timer11_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_timer11_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_timer11_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4570,7 +4575,6 @@ static struct omap_hwmod omap44xx_timer11_hwmod = {
        .name           = "timer11",
        .class          = &omap44xx_timer_hwmod_class,
        .mpu_irqs       = omap44xx_timer11_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_timer11_irqs),
        .main_clk       = "timer11_fck",
        .prcm = {
                .omap4 = {
@@ -4608,11 +4612,13 @@ static struct omap_hwmod_class omap44xx_uart_hwmod_class = {
 static struct omap_hwmod omap44xx_uart1_hwmod;
 static struct omap_hwmod_irq_info omap44xx_uart1_irqs[] = {
        { .irq = 72 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_uart1_sdma_reqs[] = {
        { .name = "tx", .dma_req = 48 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 49 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_uart1_addrs[] = {
@@ -4621,6 +4627,7 @@ static struct omap_hwmod_addr_space omap44xx_uart1_addrs[] = {
                .pa_end         = 0x4806a0ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> uart1 */
@@ -4629,7 +4636,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart1 = {
        .slave          = &omap44xx_uart1_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_uart1_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_uart1_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4642,9 +4648,7 @@ static struct omap_hwmod omap44xx_uart1_hwmod = {
        .name           = "uart1",
        .class          = &omap44xx_uart_hwmod_class,
        .mpu_irqs       = omap44xx_uart1_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_uart1_irqs),
        .sdma_reqs      = omap44xx_uart1_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_uart1_sdma_reqs),
        .main_clk       = "uart1_fck",
        .prcm = {
                .omap4 = {
@@ -4660,11 +4664,13 @@ static struct omap_hwmod omap44xx_uart1_hwmod = {
 static struct omap_hwmod omap44xx_uart2_hwmod;
 static struct omap_hwmod_irq_info omap44xx_uart2_irqs[] = {
        { .irq = 73 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_uart2_sdma_reqs[] = {
        { .name = "tx", .dma_req = 50 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 51 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_uart2_addrs[] = {
@@ -4673,6 +4679,7 @@ static struct omap_hwmod_addr_space omap44xx_uart2_addrs[] = {
                .pa_end         = 0x4806c0ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> uart2 */
@@ -4681,7 +4688,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart2 = {
        .slave          = &omap44xx_uart2_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_uart2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_uart2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4694,9 +4700,7 @@ static struct omap_hwmod omap44xx_uart2_hwmod = {
        .name           = "uart2",
        .class          = &omap44xx_uart_hwmod_class,
        .mpu_irqs       = omap44xx_uart2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_uart2_irqs),
        .sdma_reqs      = omap44xx_uart2_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_uart2_sdma_reqs),
        .main_clk       = "uart2_fck",
        .prcm = {
                .omap4 = {
@@ -4712,11 +4716,13 @@ static struct omap_hwmod omap44xx_uart2_hwmod = {
 static struct omap_hwmod omap44xx_uart3_hwmod;
 static struct omap_hwmod_irq_info omap44xx_uart3_irqs[] = {
        { .irq = 74 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_uart3_sdma_reqs[] = {
        { .name = "tx", .dma_req = 52 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 53 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_uart3_addrs[] = {
@@ -4725,6 +4731,7 @@ static struct omap_hwmod_addr_space omap44xx_uart3_addrs[] = {
                .pa_end         = 0x480200ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> uart3 */
@@ -4733,7 +4740,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart3 = {
        .slave          = &omap44xx_uart3_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_uart3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_uart3_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4745,11 +4751,9 @@ static struct omap_hwmod_ocp_if *omap44xx_uart3_slaves[] = {
 static struct omap_hwmod omap44xx_uart3_hwmod = {
        .name           = "uart3",
        .class          = &omap44xx_uart_hwmod_class,
-       .flags          = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+       .flags          = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
        .mpu_irqs       = omap44xx_uart3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_uart3_irqs),
        .sdma_reqs      = omap44xx_uart3_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_uart3_sdma_reqs),
        .main_clk       = "uart3_fck",
        .prcm = {
                .omap4 = {
@@ -4765,11 +4769,13 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
 static struct omap_hwmod omap44xx_uart4_hwmod;
 static struct omap_hwmod_irq_info omap44xx_uart4_irqs[] = {
        { .irq = 70 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_dma_info omap44xx_uart4_sdma_reqs[] = {
        { .name = "tx", .dma_req = 54 + OMAP44XX_DMA_REQ_START },
        { .name = "rx", .dma_req = 55 + OMAP44XX_DMA_REQ_START },
+       { .dma_req = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_uart4_addrs[] = {
@@ -4778,6 +4784,7 @@ static struct omap_hwmod_addr_space omap44xx_uart4_addrs[] = {
                .pa_end         = 0x4806e0ff,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_per -> uart4 */
@@ -4786,7 +4793,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart4 = {
        .slave          = &omap44xx_uart4_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_uart4_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_uart4_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4799,9 +4805,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
        .name           = "uart4",
        .class          = &omap44xx_uart_hwmod_class,
        .mpu_irqs       = omap44xx_uart4_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_uart4_irqs),
        .sdma_reqs      = omap44xx_uart4_sdma_reqs,
-       .sdma_reqs_cnt  = ARRAY_SIZE(omap44xx_uart4_sdma_reqs),
        .main_clk       = "uart4_fck",
        .prcm = {
                .omap4 = {
@@ -4832,14 +4836,15 @@ static struct omap_hwmod_class_sysconfig omap44xx_usb_otg_hs_sysc = {
 };
 
 static struct omap_hwmod_class omap44xx_usb_otg_hs_hwmod_class = {
-       .name = "usb_otg_hs",
-       .sysc = &omap44xx_usb_otg_hs_sysc,
+       .name   = "usb_otg_hs",
+       .sysc   = &omap44xx_usb_otg_hs_sysc,
 };
 
 /* usb_otg_hs */
 static struct omap_hwmod_irq_info omap44xx_usb_otg_hs_irqs[] = {
        { .name = "mc", .irq = 92 + OMAP44XX_IRQ_GIC_START },
        { .name = "dma", .irq = 93 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 /* usb_otg_hs master ports */
@@ -4853,6 +4858,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_otg_hs_addrs[] = {
                .pa_end         = 0x4a0ab003,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_cfg -> usb_otg_hs */
@@ -4861,7 +4867,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_otg_hs = {
        .slave          = &omap44xx_usb_otg_hs_hwmod,
        .clk            = "l4_div_ck",
        .addr           = omap44xx_usb_otg_hs_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_usb_otg_hs_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4879,7 +4884,6 @@ static struct omap_hwmod omap44xx_usb_otg_hs_hwmod = {
        .class          = &omap44xx_usb_otg_hs_hwmod_class,
        .flags          = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
        .mpu_irqs       = omap44xx_usb_otg_hs_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_usb_otg_hs_irqs),
        .main_clk       = "usb_otg_hs_ick",
        .prcm = {
                .omap4 = {
@@ -4887,7 +4891,7 @@ static struct omap_hwmod omap44xx_usb_otg_hs_hwmod = {
                },
        },
        .opt_clks       = usb_otg_hs_opt_clks,
-       .opt_clks_cnt = ARRAY_SIZE(usb_otg_hs_opt_clks),
+       .opt_clks_cnt   = ARRAY_SIZE(usb_otg_hs_opt_clks),
        .slaves         = omap44xx_usb_otg_hs_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap44xx_usb_otg_hs_slaves),
        .masters        = omap44xx_usb_otg_hs_masters,
@@ -4922,6 +4926,7 @@ static struct omap_hwmod_class omap44xx_wd_timer_hwmod_class = {
 static struct omap_hwmod omap44xx_wd_timer2_hwmod;
 static struct omap_hwmod_irq_info omap44xx_wd_timer2_irqs[] = {
        { .irq = 80 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_wd_timer2_addrs[] = {
@@ -4930,6 +4935,7 @@ static struct omap_hwmod_addr_space omap44xx_wd_timer2_addrs[] = {
                .pa_end         = 0x4a31407f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_wkup -> wd_timer2 */
@@ -4938,7 +4944,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__wd_timer2 = {
        .slave          = &omap44xx_wd_timer2_hwmod,
        .clk            = "l4_wkup_clk_mux_ck",
        .addr           = omap44xx_wd_timer2_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_wd_timer2_addrs),
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -4951,7 +4956,6 @@ static struct omap_hwmod omap44xx_wd_timer2_hwmod = {
        .name           = "wd_timer2",
        .class          = &omap44xx_wd_timer_hwmod_class,
        .mpu_irqs       = omap44xx_wd_timer2_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_wd_timer2_irqs),
        .main_clk       = "wd_timer2_fck",
        .prcm = {
                .omap4 = {
@@ -4967,6 +4971,7 @@ static struct omap_hwmod omap44xx_wd_timer2_hwmod = {
 static struct omap_hwmod omap44xx_wd_timer3_hwmod;
 static struct omap_hwmod_irq_info omap44xx_wd_timer3_irqs[] = {
        { .irq = 36 + OMAP44XX_IRQ_GIC_START },
+       { .irq = -1 }
 };
 
 static struct omap_hwmod_addr_space omap44xx_wd_timer3_addrs[] = {
@@ -4975,6 +4980,7 @@ static struct omap_hwmod_addr_space omap44xx_wd_timer3_addrs[] = {
                .pa_end         = 0x4013007f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> wd_timer3 */
@@ -4983,7 +4989,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3 = {
        .slave          = &omap44xx_wd_timer3_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_wd_timer3_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_wd_timer3_addrs),
        .user           = OCP_USER_MPU,
 };
 
@@ -4993,6 +4998,7 @@ static struct omap_hwmod_addr_space omap44xx_wd_timer3_dma_addrs[] = {
                .pa_end         = 0x4903007f,
                .flags          = ADDR_TYPE_RT
        },
+       { }
 };
 
 /* l4_abe -> wd_timer3 (dma) */
@@ -5001,7 +5007,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3_dma = {
        .slave          = &omap44xx_wd_timer3_hwmod,
        .clk            = "ocp_abe_iclk",
        .addr           = omap44xx_wd_timer3_dma_addrs,
-       .addr_cnt       = ARRAY_SIZE(omap44xx_wd_timer3_dma_addrs),
        .user           = OCP_USER_SDMA,
 };
 
@@ -5015,7 +5020,6 @@ static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
        .name           = "wd_timer3",
        .class          = &omap44xx_wd_timer_hwmod_class,
        .mpu_irqs       = omap44xx_wd_timer3_irqs,
-       .mpu_irqs_cnt   = ARRAY_SIZE(omap44xx_wd_timer3_irqs),
        .main_clk       = "wd_timer3_fck",
        .prcm = {
                .omap4 = {
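
The hunks above all apply one conversion: the .mpu_irqs_cnt, .sdma_reqs_cnt and .addr_cnt fields are dropped from the hwmod and interconnect-link structures, and every IRQ, SDMA-request and address-space array instead ends in a terminator entry ({ .irq = -1 }, { .dma_req = -1 }, or an empty { }), so the array length no longer has to be carried in a separate field and can be recovered by walking to the sentinel. A minimal standalone sketch of the idiom follows; the struct, field names and count_irqs() helper are illustrative only, not the kernel's actual hwmod API.

#include <stdio.h>

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

struct irq_info {
        int irq;
};

/* Old style: the array length is tracked in a separate _cnt value. */
static struct irq_info timer_irqs_counted[] = {
        { .irq = 37 },
};
static const int timer_irqs_cnt = ARRAY_SIZE(timer_irqs_counted);

/*
 * New style: a sentinel entry (.irq = -1) terminates the array, so no
 * count has to be kept in sync when entries are added or removed.
 */
static struct irq_info timer_irqs_terminated[] = {
        { .irq = 37 },
        { .irq = -1 }
};

/* Walk a sentinel-terminated array to recover its length. */
static int count_irqs(const struct irq_info *irqs)
{
        int i = 0;

        while (irqs[i].irq != -1)
                i++;
        return i;
}

int main(void)
{
        printf("counted: %d, terminated: %d\n",
               timer_irqs_cnt, count_irqs(timer_irqs_terminated));
        return 0;
}
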
index 08a1342..de832eb 100644 (file)
@@ -49,23 +49,3 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
        .srst_shift     = SYSC_TYPE2_SOFTRESET_SHIFT,
 };
 
-
-/*
- * omap_hwmod class data
- */
-
-struct omap_hwmod_class l3_hwmod_class = {
-       .name = "l3"
-};
-
-struct omap_hwmod_class l4_hwmod_class = {
-       .name = "l4"
-};
-
-struct omap_hwmod_class mpu_hwmod_class = {
-       .name = "mpu"
-};
-
-struct omap_hwmod_class iva_hwmod_class = {
-       .name = "iva"
-};
index c34e98b..39a7c37 100644
@@ -1,10 +1,10 @@
 /*
  * omap_hwmod_common_data.h - OMAP hwmod common macros and declarations
  *
- * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010-2011 Nokia Corporation
  * Paul Walmsley
  *
- * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
  * Benoît Cousson
  *
  * This program is free software; you can redistribute it and/or modify
 
 #include <plat/omap_hwmod.h>
 
+/* Common address space across OMAP2xxx */
+extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[];
+extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[];
+extern struct omap_hwmod_addr_space omap2xxx_uart3_addr_space[];
+extern struct omap_hwmod_addr_space omap2xxx_timer2_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer3_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer4_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer5_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer6_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer7_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer8_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer9_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer12_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_mcbsp2_addrs[];
+
+/* Common address space across OMAP2xxx/3xxx */
+extern struct omap_hwmod_addr_space omap2_i2c1_addr_space[];
+extern struct omap_hwmod_addr_space omap2_i2c2_addr_space[];
+extern struct omap_hwmod_addr_space omap2_dss_addrs[];
+extern struct omap_hwmod_addr_space omap2_dss_dispc_addrs[];
+extern struct omap_hwmod_addr_space omap2_dss_rfbi_addrs[];
+extern struct omap_hwmod_addr_space omap2_dss_venc_addrs[];
+extern struct omap_hwmod_addr_space omap2_timer10_addrs[];
+extern struct omap_hwmod_addr_space omap2_timer11_addrs[];
+extern struct omap_hwmod_addr_space omap2430_mmc1_addr_space[];
+extern struct omap_hwmod_addr_space omap2430_mmc2_addr_space[];
+extern struct omap_hwmod_addr_space omap2_mcspi1_addr_space[];
+extern struct omap_hwmod_addr_space omap2_mcspi2_addr_space[];
+extern struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[];
+extern struct omap_hwmod_addr_space omap2_dma_system_addrs[];
+extern struct omap_hwmod_addr_space omap2_mailbox_addrs[];
+extern struct omap_hwmod_addr_space omap2_mcbsp1_addrs[];
+
+/* Common IP block data across OMAP2xxx */
+extern struct omap_hwmod_irq_info omap2xxx_timer12_mpu_irqs[];
+extern struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[];
+
+/* Common IP block data */
+extern struct omap_hwmod_dma_info omap2_uart1_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_uart2_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_uart3_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_i2c1_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_i2c2_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_mcspi1_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_mcspi2_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_mcbsp1_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_mcbsp2_sdma_reqs[];
+
+/* Common IP block data on OMAP2430/OMAP3 */
+extern struct omap_hwmod_dma_info omap2_mcbsp3_sdma_reqs[];
+
+/* Common IP block data across OMAP2/3 */
+extern struct omap_hwmod_irq_info omap2_timer1_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer2_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer3_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer4_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer5_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer6_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer7_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer8_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer9_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer10_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer11_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_uart1_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_uart2_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_dispc_irqs[];
+extern struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_gpio1_irqs[];
+extern struct omap_hwmod_irq_info omap2_gpio2_irqs[];
+extern struct omap_hwmod_irq_info omap2_gpio3_irqs[];
+extern struct omap_hwmod_irq_info omap2_gpio4_irqs[];
+extern struct omap_hwmod_irq_info omap2_dma_system_irqs[];
+extern struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[];
+
 /* OMAP hwmod classes - forward declarations */
 extern struct omap_hwmod_class l3_hwmod_class;
 extern struct omap_hwmod_class l4_hwmod_class;
 extern struct omap_hwmod_class mpu_hwmod_class;
 extern struct omap_hwmod_class iva_hwmod_class;
+extern struct omap_hwmod_class omap2_uart_class;
+extern struct omap_hwmod_class omap2_dss_hwmod_class;
+extern struct omap_hwmod_class omap2_dispc_hwmod_class;
+extern struct omap_hwmod_class omap2_rfbi_hwmod_class;
+extern struct omap_hwmod_class omap2_venc_hwmod_class;
+
+extern struct omap_hwmod_class omap2xxx_timer_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_wd_timer_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_gpio_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_mcspi_class;
 
 #endif
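
These externs let each per-SoC hwmod data file point at a single shared copy of the register maps, IRQ lists, DMA requests and classes instead of duplicating them. A hedged sketch of how a chip-specific file might use them; the timer1 hwmod below is a simplified illustration, and the "gpt1_fck" clock name is an assumption, not taken from this patch:

/* Illustrative per-SoC hwmod built on the shared declarations above. */
static struct omap_hwmod omap2420_timer1_hwmod = {
        .name           = "timer1",
        .class          = &omap2xxx_timer_hwmod_class, /* shared class */
        .mpu_irqs       = omap2_timer1_mpu_irqs,       /* shared IRQ list */
        .main_clk       = "gpt1_fck",                  /* assumed clock name */
};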
index e01da45..4411163 100644
 #include "prm2xxx_3xxx.h"
 #include "pm.h"
 
-int omap2_pm_debug;
 u32 enable_off_mode;
-u32 sleep_while_idle;
-u32 wakeup_timer_seconds;
-u32 wakeup_timer_milliseconds;
-
-#define DUMP_PRM_MOD_REG(mod, reg)    \
-       regs[reg_count].name = #mod "." #reg; \
-       regs[reg_count++].val = omap2_prm_read_mod_reg(mod, reg)
-#define DUMP_CM_MOD_REG(mod, reg)     \
-       regs[reg_count].name = #mod "." #reg; \
-       regs[reg_count++].val = omap2_cm_read_mod_reg(mod, reg)
-#define DUMP_PRM_REG(reg) \
-       regs[reg_count].name = #reg; \
-       regs[reg_count++].val = __raw_readl(reg)
-#define DUMP_CM_REG(reg) \
-       regs[reg_count].name = #reg; \
-       regs[reg_count++].val = __raw_readl(reg)
-#define DUMP_INTC_REG(reg, off) \
-       regs[reg_count].name = #reg; \
-       regs[reg_count++].val = \
-                        __raw_readl(OMAP2_L4_IO_ADDRESS(0x480fe000 + (off)))
-
-void omap2_pm_dump(int mode, int resume, unsigned int us)
-{
-       struct reg {
-               const char *name;
-               u32 val;
-       } regs[32];
-       int reg_count = 0, i;
-       const char *s1 = NULL, *s2 = NULL;
-
-       if (!resume) {
-#if 0
-               /* MPU */
-               DUMP_PRM_MOD_REG(OCP_MOD, OMAP2_PRM_IRQENABLE_MPU_OFFSET);
-               DUMP_CM_MOD_REG(MPU_MOD, OMAP2_CM_CLKSTCTRL);
-               DUMP_PRM_MOD_REG(MPU_MOD, OMAP2_PM_PWSTCTRL);
-               DUMP_PRM_MOD_REG(MPU_MOD, OMAP2_PM_PWSTST);
-               DUMP_PRM_MOD_REG(MPU_MOD, PM_WKDEP);
-#endif
-#if 0
-               /* INTC */
-               DUMP_INTC_REG(INTC_MIR0, 0x0084);
-               DUMP_INTC_REG(INTC_MIR1, 0x00a4);
-               DUMP_INTC_REG(INTC_MIR2, 0x00c4);
-#endif
-#if 0
-               DUMP_CM_MOD_REG(CORE_MOD, CM_FCLKEN1);
-               if (cpu_is_omap24xx()) {
-                       DUMP_CM_MOD_REG(CORE_MOD, OMAP24XX_CM_FCLKEN2);
-                       DUMP_PRM_MOD_REG(OMAP24XX_GR_MOD,
-                                       OMAP2_PRCM_CLKEMUL_CTRL_OFFSET);
-                       DUMP_PRM_MOD_REG(OMAP24XX_GR_MOD,
-                                       OMAP2_PRCM_CLKSRC_CTRL_OFFSET);
-               }
-               DUMP_CM_MOD_REG(WKUP_MOD, CM_FCLKEN);
-               DUMP_CM_MOD_REG(CORE_MOD, CM_ICLKEN1);
-               DUMP_CM_MOD_REG(CORE_MOD, CM_ICLKEN2);
-               DUMP_CM_MOD_REG(WKUP_MOD, CM_ICLKEN);
-               DUMP_CM_MOD_REG(PLL_MOD, CM_CLKEN);
-               DUMP_CM_MOD_REG(PLL_MOD, CM_AUTOIDLE);
-               DUMP_PRM_MOD_REG(CORE_MOD, OMAP2_PM_PWSTST);
-#endif
-#if 0
-               /* DSP */
-               if (cpu_is_omap24xx()) {
-                       DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_FCLKEN);
-                       DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_ICLKEN);
-                       DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_IDLEST);
-                       DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_AUTOIDLE);
-                       DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_CLKSEL);
-                       DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_CM_CLKSTCTRL);
-                       DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_RM_RSTCTRL);
-                       DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_RM_RSTST);
-                       DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_PM_PWSTCTRL);
-                       DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_PM_PWSTST);
-               }
-#endif
-       } else {
-               DUMP_PRM_MOD_REG(CORE_MOD, PM_WKST1);
-               if (cpu_is_omap24xx())
-                       DUMP_PRM_MOD_REG(CORE_MOD, OMAP24XX_PM_WKST2);
-               DUMP_PRM_MOD_REG(WKUP_MOD, PM_WKST);
-               DUMP_PRM_MOD_REG(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
-#if 1
-               DUMP_INTC_REG(INTC_PENDING_IRQ0, 0x0098);
-               DUMP_INTC_REG(INTC_PENDING_IRQ1, 0x00b8);
-               DUMP_INTC_REG(INTC_PENDING_IRQ2, 0x00d8);
-#endif
-       }
-
-       switch (mode) {
-       case 0:
-               s1 = "full";
-               s2 = "retention";
-               break;
-       case 1:
-               s1 = "MPU";
-               s2 = "retention";
-               break;
-       case 2:
-               s1 = "MPU";
-               s2 = "idle";
-               break;
-       }
-
-       if (!resume)
-#ifdef CONFIG_NO_HZ
-               printk(KERN_INFO
-                      "--- Going to %s %s (next timer after %u ms)\n", s1, s2,
-                      jiffies_to_msecs(get_next_timer_interrupt(jiffies) -
-                                       jiffies));
-#else
-               printk(KERN_INFO "--- Going to %s %s\n", s1, s2);
-#endif
-       else
-               printk(KERN_INFO "--- Woke up (slept for %u.%03u ms)\n",
-                       us / 1000, us % 1000);
-
-       for (i = 0; i < reg_count; i++)
-               printk(KERN_INFO "%-20s: 0x%08x\n", regs[i].name, regs[i].val);
-}
-
-void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds)
-{
-       u32 tick_rate, cycles;
-
-       if (!seconds && !milliseconds)
-               return;
-
-       tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer_wakeup));
-       cycles = tick_rate * seconds + tick_rate * milliseconds / 1000;
-       omap_dm_timer_stop(gptimer_wakeup);
-       omap_dm_timer_set_load_start(gptimer_wakeup, 0, 0xffffffff - cycles);
-
-       pr_info("PM: Resume timer in %u.%03u secs"
-               " (%d ticks at %d ticks/sec.)\n",
-               seconds, milliseconds, cycles, tick_rate);
-}
 
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
-static void pm_dbg_regset_store(u32 *ptr);
-
-static struct dentry *pm_dbg_dir;
-
 static int pm_dbg_init_done;
 
 static int pm_dbg_init(void);
@@ -196,160 +53,6 @@ enum {
        DEBUG_FILE_TIMERS,
 };
 
-struct pm_module_def {
-       char name[8]; /* Name of the module */
-       short type; /* CM or PRM */
-       unsigned short offset;
-       int low; /* First register address on this module */
-       int high; /* Last register address on this module */
-};
-
-#define MOD_CM 0
-#define MOD_PRM 1
-
-static const struct pm_module_def *pm_dbg_reg_modules;
-static const struct pm_module_def omap3_pm_reg_modules[] = {
-       { "IVA2", MOD_CM, OMAP3430_IVA2_MOD, 0, 0x4c },
-       { "OCP", MOD_CM, OCP_MOD, 0, 0x10 },
-       { "MPU", MOD_CM, MPU_MOD, 4, 0x4c },
-       { "CORE", MOD_CM, CORE_MOD, 0, 0x4c },
-       { "SGX", MOD_CM, OMAP3430ES2_SGX_MOD, 0, 0x4c },
-       { "WKUP", MOD_CM, WKUP_MOD, 0, 0x40 },
-       { "CCR", MOD_CM, PLL_MOD, 0, 0x70 },
-       { "DSS", MOD_CM, OMAP3430_DSS_MOD, 0, 0x4c },
-       { "CAM", MOD_CM, OMAP3430_CAM_MOD, 0, 0x4c },
-       { "PER", MOD_CM, OMAP3430_PER_MOD, 0, 0x4c },
-       { "EMU", MOD_CM, OMAP3430_EMU_MOD, 0x40, 0x54 },
-       { "NEON", MOD_CM, OMAP3430_NEON_MOD, 0x20, 0x48 },
-       { "USB", MOD_CM, OMAP3430ES2_USBHOST_MOD, 0, 0x4c },
-
-       { "IVA2", MOD_PRM, OMAP3430_IVA2_MOD, 0x50, 0xfc },
-       { "OCP", MOD_PRM, OCP_MOD, 4, 0x1c },
-       { "MPU", MOD_PRM, MPU_MOD, 0x58, 0xe8 },
-       { "CORE", MOD_PRM, CORE_MOD, 0x58, 0xf8 },
-       { "SGX", MOD_PRM, OMAP3430ES2_SGX_MOD, 0x58, 0xe8 },
-       { "WKUP", MOD_PRM, WKUP_MOD, 0xa0, 0xb0 },
-       { "CCR", MOD_PRM, PLL_MOD, 0x40, 0x70 },
-       { "DSS", MOD_PRM, OMAP3430_DSS_MOD, 0x58, 0xe8 },
-       { "CAM", MOD_PRM, OMAP3430_CAM_MOD, 0x58, 0xe8 },
-       { "PER", MOD_PRM, OMAP3430_PER_MOD, 0x58, 0xe8 },
-       { "EMU", MOD_PRM, OMAP3430_EMU_MOD, 0x58, 0xe4 },
-       { "GLBL", MOD_PRM, OMAP3430_GR_MOD, 0x20, 0xe4 },
-       { "NEON", MOD_PRM, OMAP3430_NEON_MOD, 0x58, 0xe8 },
-       { "USB", MOD_PRM, OMAP3430ES2_USBHOST_MOD, 0x58, 0xe8 },
-       { "", 0, 0, 0, 0 },
-};
-
-#define PM_DBG_MAX_REG_SETS 4
-
-static void *pm_dbg_reg_set[PM_DBG_MAX_REG_SETS];
-
-static int pm_dbg_get_regset_size(void)
-{
-       static int regset_size;
-
-       if (regset_size == 0) {
-               int i = 0;
-
-               while (pm_dbg_reg_modules[i].name[0] != 0) {
-                       regset_size += pm_dbg_reg_modules[i].high +
-                               4 - pm_dbg_reg_modules[i].low;
-                       i++;
-               }
-       }
-       return regset_size;
-}
-
-static int pm_dbg_show_regs(struct seq_file *s, void *unused)
-{
-       int i, j;
-       unsigned long val;
-       int reg_set = (int)s->private;
-       u32 *ptr;
-       void *store = NULL;
-       int regs;
-       int linefeed;
-
-       if (reg_set == 0) {
-               store = kmalloc(pm_dbg_get_regset_size(), GFP_KERNEL);
-               ptr = store;
-               pm_dbg_regset_store(ptr);
-       } else {
-               ptr = pm_dbg_reg_set[reg_set - 1];
-       }
-
-       i = 0;
-
-       while (pm_dbg_reg_modules[i].name[0] != 0) {
-               regs = 0;
-               linefeed = 0;
-               if (pm_dbg_reg_modules[i].type == MOD_CM)
-                       seq_printf(s, "MOD: CM_%s (%08x)\n",
-                               pm_dbg_reg_modules[i].name,
-                               (u32)(OMAP3430_CM_BASE +
-                               pm_dbg_reg_modules[i].offset));
-               else
-                       seq_printf(s, "MOD: PRM_%s (%08x)\n",
-                               pm_dbg_reg_modules[i].name,
-                               (u32)(OMAP3430_PRM_BASE +
-                               pm_dbg_reg_modules[i].offset));
-
-               for (j = pm_dbg_reg_modules[i].low;
-                       j <= pm_dbg_reg_modules[i].high; j += 4) {
-                       val = *(ptr++);
-                       if (val != 0) {
-                               regs++;
-                               if (linefeed) {
-                                       seq_printf(s, "\n");
-                                       linefeed = 0;
-                               }
-                               seq_printf(s, "  %02x => %08lx", j, val);
-                               if (regs % 4 == 0)
-                                       linefeed = 1;
-                       }
-               }
-               seq_printf(s, "\n");
-               i++;
-       }
-
-       if (store != NULL)
-               kfree(store);
-
-       return 0;
-}
-
-static void pm_dbg_regset_store(u32 *ptr)
-{
-       int i, j;
-       u32 val;
-
-       i = 0;
-
-       while (pm_dbg_reg_modules[i].name[0] != 0) {
-               for (j = pm_dbg_reg_modules[i].low;
-                       j <= pm_dbg_reg_modules[i].high; j += 4) {
-                       if (pm_dbg_reg_modules[i].type == MOD_CM)
-                               val = omap2_cm_read_mod_reg(
-                                       pm_dbg_reg_modules[i].offset, j);
-                       else
-                               val = omap2_prm_read_mod_reg(
-                                       pm_dbg_reg_modules[i].offset, j);
-                       *(ptr++) = val;
-               }
-               i++;
-       }
-}
-
-int pm_dbg_regset_save(int reg_set)
-{
-       if (pm_dbg_reg_set[reg_set-1] == NULL)
-               return -EINVAL;
-
-       pm_dbg_regset_store(pm_dbg_reg_set[reg_set-1]);
-
-       return 0;
-}
-
 static const char pwrdm_state_names[][PWRDM_MAX_PWRSTS] = {
        "OFF",
        "RET",
@@ -469,11 +172,6 @@ static int pm_dbg_open(struct inode *inode, struct file *file)
        };
 }
 
-static int pm_dbg_reg_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, pm_dbg_show_regs, inode->i_private);
-}
-
 static const struct file_operations debug_fops = {
        .open           = pm_dbg_open,
        .read           = seq_read,
@@ -481,40 +179,6 @@ static const struct file_operations debug_fops = {
        .release        = single_release,
 };
 
-static const struct file_operations debug_reg_fops = {
-       .open           = pm_dbg_reg_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-int pm_dbg_regset_init(int reg_set)
-{
-       char name[2];
-
-       if (!pm_dbg_init_done)
-               pm_dbg_init();
-
-       if (reg_set < 1 || reg_set > PM_DBG_MAX_REG_SETS ||
-               pm_dbg_reg_set[reg_set-1] != NULL)
-               return -EINVAL;
-
-       pm_dbg_reg_set[reg_set-1] =
-               kmalloc(pm_dbg_get_regset_size(), GFP_KERNEL);
-
-       if (pm_dbg_reg_set[reg_set-1] == NULL)
-               return -ENOMEM;
-
-       if (pm_dbg_dir != NULL) {
-               sprintf(name, "%d", reg_set);
-
-               (void) debugfs_create_file(name, S_IRUGO,
-                       pm_dbg_dir, (void *)reg_set, &debug_reg_fops);
-       }
-
-       return 0;
-}
-
 static int pwrdm_suspend_get(void *data, u64 *val)
 {
        int ret = -EINVAL;
@@ -576,9 +240,6 @@ static int option_set(void *data, u64 val)
 {
        u32 *option = data;
 
-       if (option == &wakeup_timer_milliseconds && val >= 1000)
-               return -EINVAL;
-
        *option = val;
 
        if (option == &enable_off_mode) {
@@ -595,22 +256,13 @@ static int option_set(void *data, u64 val)
 
 DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
 
-static int pm_dbg_init(void)
+static int __init pm_dbg_init(void)
 {
-       int i;
        struct dentry *d;
-       char name[2];
 
        if (pm_dbg_init_done)
                return 0;
 
-       if (cpu_is_omap34xx())
-               pm_dbg_reg_modules = omap3_pm_reg_modules;
-       else {
-               printk(KERN_ERR "%s: only OMAP3 supported\n", __func__);
-               return -ENODEV;
-       }
-
        d = debugfs_create_dir("pm_debug", NULL);
        if (IS_ERR(d))
                return PTR_ERR(d);
@@ -622,30 +274,8 @@ static int pm_dbg_init(void)
 
        pwrdm_for_each(pwrdms_setup, (void *)d);
 
-       pm_dbg_dir = debugfs_create_dir("registers", d);
-       if (IS_ERR(pm_dbg_dir))
-               return PTR_ERR(pm_dbg_dir);
-
-       (void) debugfs_create_file("current", S_IRUGO,
-               pm_dbg_dir, (void *)0, &debug_reg_fops);
-
-       for (i = 0; i < PM_DBG_MAX_REG_SETS; i++)
-               if (pm_dbg_reg_set[i] != NULL) {
-                       sprintf(name, "%d", i+1);
-                       (void) debugfs_create_file(name, S_IRUGO,
-                               pm_dbg_dir, (void *)(i+1), &debug_reg_fops);
-
-               }
-
        (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
                                   &enable_off_mode, &pm_dbg_option_fops);
-       (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
-                                  &sleep_while_idle, &pm_dbg_option_fops);
-       (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
-                                  &wakeup_timer_seconds, &pm_dbg_option_fops);
-       (void) debugfs_create_file("wakeup_timer_milliseconds",
-                       S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
-                       &pm_dbg_option_fops);
        pm_dbg_init_done = 1;
 
        return 0;
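
After this cleanup the only debugfs knob left in pm-debug.c is enable_off_mode, exposed through the simple-attribute pattern kept above (option_get/option_set plus DEFINE_SIMPLE_ATTRIBUTE). A standalone sketch of that pattern; the directory, file and variable names below are made up for illustration:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>

static u32 example_option;      /* hypothetical option variable */

static int example_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}

static int example_set(void *data, u64 val)
{
        *(u32 *)data = val;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

static int __init example_dbg_init(void)
{
        struct dentry *d = debugfs_create_dir("pm_debug_example", NULL);

        if (IS_ERR_OR_NULL(d))
                return -ENOMEM;

        (void) debugfs_create_file("example_option", S_IRUGO | S_IWUSR, d,
                                   &example_option, &example_fops);
        return 0;
}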
index 45bcfce..babac19 100644
@@ -60,32 +60,16 @@ inline void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
 extern int omap3_pm_get_suspend_state(struct powerdomain *pwrdm);
 extern int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state);
 
-extern u32 wakeup_timer_seconds;
-extern u32 wakeup_timer_milliseconds;
-extern struct omap_dm_timer *gptimer_wakeup;
-
 #ifdef CONFIG_PM_DEBUG
-extern void omap2_pm_dump(int mode, int resume, unsigned int us);
-extern void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds);
-extern int omap2_pm_debug;
 extern u32 enable_off_mode;
-extern u32 sleep_while_idle;
 #else
-#define omap2_pm_dump(mode, resume, us)                do {} while (0);
-#define omap2_pm_wakeup_on_timer(seconds, milliseconds)        do {} while (0);
-#define omap2_pm_debug                         0
 #define enable_off_mode 0
-#define sleep_while_idle 0
 #endif
 
 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
 extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
-extern int pm_dbg_regset_save(int reg_set);
-extern int pm_dbg_regset_init(int reg_set);
 #else
 #define pm_dbg_update_time(pwrdm, prev) do {} while (0);
-#define pm_dbg_regset_save(reg_set) do {} while (0);
-#define pm_dbg_regset_init(reg_set) do {} while (0);
 #endif /* CONFIG_PM_DEBUG */
 
 extern void omap24xx_idle_loop_suspend(void);
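
pm.h keeps the usual kernel idiom here: real declarations under CONFIG_PM_DEBUG, do-nothing macros otherwise, so callers never need their own #ifdef blocks. A generic illustration of the same idiom (the feature and hook names are made up):

#ifdef CONFIG_EXAMPLE_DEBUG
extern void example_debug_hook(int event);
#else
#define example_debug_hook(event)       do { } while (0)
#endif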
index df3ded6..bf089e7 100644
@@ -53,6 +53,8 @@
 #include "powerdomain.h"
 #include "clockdomain.h"
 
+static int omap2_pm_debug;
+
 #ifdef CONFIG_SUSPEND
 static suspend_state_t suspend_state = PM_SUSPEND_ON;
 static inline bool is_suspending(void)
@@ -123,7 +125,6 @@ static void omap2_enter_full_retention(void)
        omap2_gpio_prepare_for_idle(0);
 
        if (omap2_pm_debug) {
-               omap2_pm_dump(0, 0, 0);
                getnstimeofday(&ts_preidle);
        }
 
@@ -160,7 +161,6 @@ no_sleep:
                getnstimeofday(&ts_postidle);
                ts_idle = timespec_sub(ts_postidle, ts_preidle);
                tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
-               omap2_pm_dump(0, 1, tmp);
        }
        omap2_gpio_resume_after_idle();
 
@@ -247,7 +247,6 @@ static void omap2_enter_mpu_retention(void)
        }
 
        if (omap2_pm_debug) {
-               omap2_pm_dump(only_idle ? 2 : 1, 0, 0);
                getnstimeofday(&ts_preidle);
        }
 
@@ -259,7 +258,6 @@ static void omap2_enter_mpu_retention(void)
                getnstimeofday(&ts_postidle);
                ts_idle = timespec_sub(ts_postidle, ts_preidle);
                tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
-               omap2_pm_dump(only_idle ? 2 : 1, 1, tmp);
        }
 }
 
index c155c9d..96a7624 100644
@@ -497,8 +497,6 @@ console_still_active:
 
 int omap3_can_sleep(void)
 {
-       if (!sleep_while_idle)
-               return 0;
        if (!omap_uart_can_sleep())
                return 0;
        return 1;
@@ -534,10 +532,6 @@ static int omap3_pm_suspend(void)
        struct power_state *pwrst;
        int state, ret = 0;
 
-       if (wakeup_timer_seconds || wakeup_timer_milliseconds)
-               omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
-                                        wakeup_timer_milliseconds);
-
        /* Read current next_pwrsts */
        list_for_each_entry(pwrst, &pwrst_list, node)
                pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
index c4222c7..3a7e678 100644
@@ -53,7 +53,7 @@ static struct powerdomain core_44xx_pwrdm = {
                [3] = PWRSTS_ON,        /* ducati_l2ram */
                [4] = PWRSTS_ON,        /* ducati_unicache */
        },
-       .flags          = PWRDM_HAS_LOWPOWERSTATECHANGE,
+       .flags            = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* gfx_44xx_pwrdm: 3D accelerator power domain */
@@ -70,7 +70,7 @@ static struct powerdomain gfx_44xx_pwrdm = {
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* gfx_mem */
        },
-       .flags          = PWRDM_HAS_LOWPOWERSTATECHANGE,
+       .flags            = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* abe_44xx_pwrdm: Audio back end power domain */
@@ -90,7 +90,7 @@ static struct powerdomain abe_44xx_pwrdm = {
                [0] = PWRSTS_ON,        /* aessmem */
                [1] = PWRSTS_ON,        /* periphmem */
        },
-       .flags          = PWRDM_HAS_LOWPOWERSTATECHANGE,
+       .flags            = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* dss_44xx_pwrdm: Display subsystem power domain */
@@ -108,7 +108,7 @@ static struct powerdomain dss_44xx_pwrdm = {
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dss_mem */
        },
-       .flags          = PWRDM_HAS_LOWPOWERSTATECHANGE,
+       .flags            = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* tesla_44xx_pwrdm: Tesla processor power domain */
@@ -130,7 +130,7 @@ static struct powerdomain tesla_44xx_pwrdm = {
                [1] = PWRSTS_ON,        /* tesla_l1 */
                [2] = PWRSTS_ON,        /* tesla_l2 */
        },
-       .flags          = PWRDM_HAS_LOWPOWERSTATECHANGE,
+       .flags            = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* wkup_44xx_pwrdm: Wake-up power domain */
@@ -241,7 +241,7 @@ static struct powerdomain ivahd_44xx_pwrdm = {
                [2] = PWRSTS_ON,        /* tcm1_mem */
                [3] = PWRSTS_ON,        /* tcm2_mem */
        },
-       .flags          = PWRDM_HAS_LOWPOWERSTATECHANGE,
+       .flags            = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* cam_44xx_pwrdm: Camera subsystem power domain */
@@ -258,7 +258,7 @@ static struct powerdomain cam_44xx_pwrdm = {
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* cam_mem */
        },
-       .flags          = PWRDM_HAS_LOWPOWERSTATECHANGE,
+       .flags            = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* l3init_44xx_pwrdm: L3 initiator peripherals power domain */
@@ -276,7 +276,7 @@ static struct powerdomain l3init_44xx_pwrdm = {
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* l3init_bank1 */
        },
-       .flags          = PWRDM_HAS_LOWPOWERSTATECHANGE,
+       .flags            = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /* l4per_44xx_pwrdm: Target peripherals power domain */
@@ -296,7 +296,7 @@ static struct powerdomain l4per_44xx_pwrdm = {
                [0] = PWRSTS_ON,        /* nonretained_bank */
                [1] = PWRSTS_ON,        /* retained_bank */
        },
-       .flags          = PWRDM_HAS_LOWPOWERSTATECHANGE,
+       .flags            = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
 
 /*
index d22d1b4..8a6e250 100644
@@ -31,7 +31,6 @@
        OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE + (inst) + (reg))
 
 /* PRCM_MPU instances */
-
 #define OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_INST 0x0000
 #define OMAP4430_PRCM_MPU_DEVICE_PRM_INST      0x0200
 #define OMAP4430_PRCM_MPU_CPU0_INST            0x0400
  */
 
 /* PRCM_MPU.OCP_SOCKET_PRCM register offsets */
-#define OMAP4_REVISION_PRCM_OFFSET                     0x0000
-#define OMAP4430_REVISION_PRCM                         OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_INST, 0x0000)
+#define OMAP4_REVISION_PRCM_OFFSET             0x0000
+#define OMAP4430_REVISION_PRCM                 OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_INST, 0x0000)
 
 /* PRCM_MPU.DEVICE_PRM register offsets */
-#define OMAP4_PRCM_MPU_PRM_RSTST_OFFSET                        0x0000
-#define OMAP4430_PRCM_MPU_PRM_RSTST                    OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0000)
-#define OMAP4_PRCM_MPU_PRM_PSCON_COUNT_OFFSET          0x0004
-#define OMAP4430_PRCM_MPU_PRM_PSCON_COUNT              OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0004)
+#define OMAP4_PRCM_MPU_PRM_RSTST_OFFSET                0x0000
+#define OMAP4430_PRCM_MPU_PRM_RSTST            OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0000)
+#define OMAP4_PRCM_MPU_PRM_PSCON_COUNT_OFFSET  0x0004
+#define OMAP4430_PRCM_MPU_PRM_PSCON_COUNT      OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0004)
 
 /* PRCM_MPU.CPU0 register offsets */
-#define OMAP4_PM_CPU0_PWRSTCTRL_OFFSET                 0x0000
-#define OMAP4430_PM_CPU0_PWRSTCTRL                     OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0000)
-#define OMAP4_PM_CPU0_PWRSTST_OFFSET                   0x0004
-#define OMAP4430_PM_CPU0_PWRSTST                       OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0004)
-#define OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET              0x0008
-#define OMAP4430_RM_CPU0_CPU0_CONTEXT                  OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0008)
-#define OMAP4_RM_CPU0_CPU0_RSTCTRL_OFFSET              0x000c
-#define OMAP4430_RM_CPU0_CPU0_RSTCTRL                  OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x000c)
-#define OMAP4_RM_CPU0_CPU0_RSTST_OFFSET                        0x0010
-#define OMAP4430_RM_CPU0_CPU0_RSTST                    OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0010)
-#define OMAP4_CM_CPU0_CPU0_CLKCTRL_OFFSET              0x0014
-#define OMAP4430_CM_CPU0_CPU0_CLKCTRL                  OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0014)
-#define OMAP4_CM_CPU0_CLKSTCTRL_OFFSET                 0x0018
-#define OMAP4430_CM_CPU0_CLKSTCTRL                     OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0018)
+#define OMAP4_PM_CPU0_PWRSTCTRL_OFFSET         0x0000
+#define OMAP4430_PM_CPU0_PWRSTCTRL             OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0000)
+#define OMAP4_PM_CPU0_PWRSTST_OFFSET           0x0004
+#define OMAP4430_PM_CPU0_PWRSTST               OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0004)
+#define OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET      0x0008
+#define OMAP4430_RM_CPU0_CPU0_CONTEXT          OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0008)
+#define OMAP4_RM_CPU0_CPU0_RSTCTRL_OFFSET      0x000c
+#define OMAP4430_RM_CPU0_CPU0_RSTCTRL          OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x000c)
+#define OMAP4_RM_CPU0_CPU0_RSTST_OFFSET                0x0010
+#define OMAP4430_RM_CPU0_CPU0_RSTST            OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0010)
+#define OMAP4_CM_CPU0_CPU0_CLKCTRL_OFFSET      0x0014
+#define OMAP4430_CM_CPU0_CPU0_CLKCTRL          OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0014)
+#define OMAP4_CM_CPU0_CLKSTCTRL_OFFSET         0x0018
+#define OMAP4430_CM_CPU0_CLKSTCTRL             OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0018)
 
 /* PRCM_MPU.CPU1 register offsets */
-#define OMAP4_PM_CPU1_PWRSTCTRL_OFFSET                 0x0000
-#define OMAP4430_PM_CPU1_PWRSTCTRL                     OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0000)
-#define OMAP4_PM_CPU1_PWRSTST_OFFSET                   0x0004
-#define OMAP4430_PM_CPU1_PWRSTST                       OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0004)
-#define OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET              0x0008
-#define OMAP4430_RM_CPU1_CPU1_CONTEXT                  OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0008)
-#define OMAP4_RM_CPU1_CPU1_RSTCTRL_OFFSET              0x000c
-#define OMAP4430_RM_CPU1_CPU1_RSTCTRL                  OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x000c)
-#define OMAP4_RM_CPU1_CPU1_RSTST_OFFSET                        0x0010
-#define OMAP4430_RM_CPU1_CPU1_RSTST                    OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0010)
-#define OMAP4_CM_CPU1_CPU1_CLKCTRL_OFFSET              0x0014
-#define OMAP4430_CM_CPU1_CPU1_CLKCTRL                  OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0014)
-#define OMAP4_CM_CPU1_CLKSTCTRL_OFFSET                 0x0018
-#define OMAP4430_CM_CPU1_CLKSTCTRL                     OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0018)
+#define OMAP4_PM_CPU1_PWRSTCTRL_OFFSET         0x0000
+#define OMAP4430_PM_CPU1_PWRSTCTRL             OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0000)
+#define OMAP4_PM_CPU1_PWRSTST_OFFSET           0x0004
+#define OMAP4430_PM_CPU1_PWRSTST               OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0004)
+#define OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET      0x0008
+#define OMAP4430_RM_CPU1_CPU1_CONTEXT          OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0008)
+#define OMAP4_RM_CPU1_CPU1_RSTCTRL_OFFSET      0x000c
+#define OMAP4430_RM_CPU1_CPU1_RSTCTRL          OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x000c)
+#define OMAP4_RM_CPU1_CPU1_RSTST_OFFSET                0x0010
+#define OMAP4430_RM_CPU1_CPU1_RSTST            OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0010)
+#define OMAP4_CM_CPU1_CPU1_CLKCTRL_OFFSET      0x0014
+#define OMAP4430_CM_CPU1_CPU1_CLKCTRL          OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0014)
+#define OMAP4_CM_CPU1_CLKSTCTRL_OFFSET         0x0018
+#define OMAP4430_CM_CPU1_CLKSTCTRL             OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0018)
 
 /* Function prototypes */
 # ifndef __ASSEMBLER__
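
Every register definition in this header is composed the same way: the PRCM_MPU base address plus an instance offset plus a register offset, wrapped in OMAP2_L4_IO_ADDRESS() to yield a virtual address. A small illustrative read helper built on the macros shown above; the helper itself is hypothetical:

#include <linux/io.h>

/* Illustrative: read CPU0's power-state status register. */
static u32 prcm_mpu_read_cpu0_pwrstst(void)
{
        return __raw_readl(OMAP44XX_PRCM_MPU_REGADDR(
                        OMAP4430_PRCM_MPU_CPU0_INST,
                        OMAP4_PM_CPU0_PWRSTST_OFFSET));
}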
index 67a0d3f..6e53120 100644
@@ -31,7 +31,7 @@
 #define OMAP4430_PRM_BASE              0x4a306000
 
 #define OMAP44XX_PRM_REGADDR(inst, reg)                                \
-       OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (inst) + (reg))
+       OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (inst) + (reg))
 
 
 /* PRM instances */
 #define OMAP4430_PRM_CAM_INST          0x1000
 #define OMAP4430_PRM_DSS_INST          0x1100
 #define OMAP4430_PRM_GFX_INST          0x1200
-#define OMAP4430_PRM_L3INIT_INST               0x1300
+#define OMAP4430_PRM_L3INIT_INST       0x1300
 #define OMAP4430_PRM_L4PER_INST                0x1400
-#define OMAP4430_PRM_CEFUSE_INST               0x1600
+#define OMAP4430_PRM_CEFUSE_INST       0x1600
 #define OMAP4430_PRM_WKUP_INST         0x1700
 #define OMAP4430_PRM_WKUP_CM_INST      0x1800
 #define OMAP4430_PRM_EMU_INST          0x1900
-#define OMAP4430_PRM_EMU_CM_INST               0x1a00
-#define OMAP4430_PRM_DEVICE_INST               0x1b00
+#define OMAP4430_PRM_EMU_CM_INST       0x1a00
+#define OMAP4430_PRM_DEVICE_INST       0x1b00
 #define OMAP4430_PRM_INSTR_INST                0x1f00
 
 /* PRM clockdomain register offsets (from instance start) */
-#define OMAP4430_PRM_MPU_MPU_CDOFFS            0x0000
-#define OMAP4430_PRM_TESLA_TESLA_CDOFFS                0x0000
-#define OMAP4430_PRM_ABE_ABE_CDOFFS            0x0000
-#define OMAP4430_PRM_CORE_CORE_CDOFFS          0x0000
-#define OMAP4430_PRM_IVAHD_IVAHD_CDOFFS                0x0000
-#define OMAP4430_PRM_CAM_CAM_CDOFFS            0x0000
-#define OMAP4430_PRM_DSS_DSS_CDOFFS            0x0000
-#define OMAP4430_PRM_GFX_GFX_CDOFFS            0x0000
-#define OMAP4430_PRM_L3INIT_L3INIT_CDOFFS      0x0000
-#define OMAP4430_PRM_L4PER_L4PER_CDOFFS                0x0000
-#define OMAP4430_PRM_CEFUSE_CEFUSE_CDOFFS      0x0000
 #define OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS       0x0000
-#define OMAP4430_PRM_EMU_EMU_CDOFFS            0x0000
 #define OMAP4430_PRM_EMU_CM_EMU_CDOFFS         0x0000
 
 /* OMAP4 specific register offsets */
 #define OMAP4430_RM_MEMIF_DLL_H_CONTEXT                        OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0464)
 #define OMAP4_RM_D2D_SAD2D_CONTEXT_OFFSET              0x0524
 #define OMAP4430_RM_D2D_SAD2D_CONTEXT                  OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0524)
-#define OMAP4_RM_D2D_INSTEM_ICR_CONTEXT_OFFSET         0x052c
-#define OMAP4430_RM_D2D_INSTEM_ICR_CONTEXT             OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x052c)
+#define OMAP4_RM_D2D_MODEM_ICR_CONTEXT_OFFSET          0x052c
+#define OMAP4430_RM_D2D_MODEM_ICR_CONTEXT              OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x052c)
 #define OMAP4_RM_D2D_SAD2D_FW_CONTEXT_OFFSET           0x0534
 #define OMAP4430_RM_D2D_SAD2D_FW_CONTEXT               OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0534)
 #define OMAP4_RM_L4CFG_L4_CFG_CONTEXT_OFFSET           0x0624
 #define OMAP4430_PRM_VC_VAL_BYPASS                     OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a0)
 #define OMAP4_PRM_VC_CFG_CHANNEL_OFFSET                        0x00a4
 #define OMAP4430_PRM_VC_CFG_CHANNEL                    OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a4)
-#define OMAP4_PRM_VC_CFG_I2C_INSTE_OFFSET              0x00a8
-#define OMAP4430_PRM_VC_CFG_I2C_INSTE                  OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a8)
+#define OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET               0x00a8
+#define OMAP4430_PRM_VC_CFG_I2C_MODE                   OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a8)
 #define OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET                        0x00ac
 #define OMAP4430_PRM_VC_CFG_I2C_CLK                    OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00ac)
 #define OMAP4_PRM_SRAM_COUNT_OFFSET                    0x00b0
 #define OMAP4430_PRM_PHASE2A_CNDP                      OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00ec)
 #define OMAP4_PRM_PHASE2B_CNDP_OFFSET                  0x00f0
 #define OMAP4430_PRM_PHASE2B_CNDP                      OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f0)
-#define OMAP4_PRM_INSTEM_IF_CTRL_OFFSET                        0x00f4
-#define OMAP4430_PRM_INSTEM_IF_CTRL                    OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f4)
+#define OMAP4_PRM_MODEM_IF_CTRL_OFFSET                 0x00f4
+#define OMAP4430_PRM_MODEM_IF_CTRL                     OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f4)
 #define OMAP4_PRM_VC_ERRST_OFFSET                      0x00f8
 #define OMAP4430_PRM_VC_ERRST                          OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f8)
 
index fb7dc52..2ce2fb7 100644
@@ -143,7 +143,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
                sr_write_reg(sr_info, IRQSTATUS, status);
        }
 
-       if (sr_class->class_type == SR_CLASS2 && sr_class->notify)
+       if (sr_class->notify)
                sr_class->notify(sr_info->voltdm, status);
 
        return IRQ_HANDLED;
@@ -258,9 +258,7 @@ static int sr_late_init(struct omap_sr *sr_info)
        struct resource *mem;
        int ret = 0;
 
-       if (sr_class->class_type == SR_CLASS2 &&
-               sr_class->notify_flags && sr_info->irq) {
-
+       if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
                name = kasprintf(GFP_KERNEL, "sr_%s", sr_info->voltdm->name);
                if (name == NULL) {
                        ret = -ENOMEM;
@@ -270,6 +268,7 @@ static int sr_late_init(struct omap_sr *sr_info)
                                0, name, (void *)sr_info);
                if (ret)
                        goto error;
+               disable_irq(sr_info->irq);
        }
 
        if (pdata && pdata->enable_on_init)
@@ -278,16 +277,16 @@ static int sr_late_init(struct omap_sr *sr_info)
        return ret;
 
 error:
-               iounmap(sr_info->base);
-               mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0);
-               release_mem_region(mem->start, resource_size(mem));
-               list_del(&sr_info->node);
-               dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
-                       "interrupt handler. Smartreflex will"
-                       "not function as desired\n", __func__);
-               kfree(name);
-               kfree(sr_info);
-               return ret;
+       iounmap(sr_info->base);
+       mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0);
+       release_mem_region(mem->start, resource_size(mem));
+       list_del(&sr_info->node);
+       dev_err(&sr_info->pdev->dev, "%s: ERROR in registering "
+               "interrupt handler. Smartreflex will "
+               "not function as desired\n", __func__);
+       kfree(name);
+       kfree(sr_info);
+       return ret;
 }
 
 static void sr_v1_disable(struct omap_sr *sr)
@@ -808,10 +807,13 @@ static int omap_sr_autocomp_store(void *data, u64 val)
                return -EINVAL;
        }
 
-       if (!val)
-               sr_stop_vddautocomp(sr_info);
-       else
-               sr_start_vddautocomp(sr_info);
+       /* control enable/disable only if there is a delta in value */
+       if (sr_info->autocomp_active != val) {
+               if (!val)
+                       sr_stop_vddautocomp(sr_info);
+               else
+                       sr_start_vddautocomp(sr_info);
+       }
 
        return 0;
 }
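
The last hunk makes the debugfs store callback act only when the requested value actually differs from the current auto-compensation state, so repeated writes of the same value become no-ops. A reduced sketch of that guard; the function below is a simplified stand-in for omap_sr_autocomp_store, not a drop-in replacement:

/* Illustrative: toggle auto-compensation only on a change of state. */
static int autocomp_store(struct omap_sr *sr_info, u64 val)
{
        if (sr_info->autocomp_active == !!val)
                return 0;               /* nothing to do */

        if (val)
                sr_start_vddautocomp(sr_info);
        else
                sr_stop_vddautocomp(sr_info);

        return 0;
}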
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
deleted file mode 100644
index 3b9cf85..0000000
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/timer-gp.c
- *
- * OMAP2 GP timer support.
- *
- * Copyright (C) 2009 Nokia Corporation
- *
- * Update to use new clocksource/clockevent layers
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- * Copyright (C) 2007 MontaVista Software, Inc.
- *
- * Original driver:
- * Copyright (C) 2005 Nokia Corporation
- * Author: Paul Mundt <paul.mundt@nokia.com>
- *         Juha Yrjölä <juha.yrjola@nokia.com>
- * OMAP Dual-mode timer framework support by Timo Teras
- *
- * Some parts based off of TI's 24xx code:
- *
- * Copyright (C) 2004-2009 Texas Instruments, Inc.
- *
- * Roughly modelled after the OMAP1 MPU timer code.
- * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-
-#include <asm/mach/time.h>
-#include <plat/dmtimer.h>
-#include <asm/localtimer.h>
-#include <asm/sched_clock.h>
-#include <plat/common.h>
-#include <plat/omap_hwmod.h>
-
-#include "timer-gp.h"
-
-
-/* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
-#define MAX_GPTIMER_ID         12
-
-static struct omap_dm_timer *gptimer;
-static struct clock_event_device clockevent_gpt;
-static u8 __initdata gptimer_id = 1;
-static u8 __initdata inited;
-struct omap_dm_timer *gptimer_wakeup;
-
-static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
-{
-       struct omap_dm_timer *gpt = (struct omap_dm_timer *)dev_id;
-       struct clock_event_device *evt = &clockevent_gpt;
-
-       omap_dm_timer_write_status(gpt, OMAP_TIMER_INT_OVERFLOW);
-
-       evt->event_handler(evt);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction omap2_gp_timer_irq = {
-       .name           = "gp timer",
-       .flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
-       .handler        = omap2_gp_timer_interrupt,
-};
-
-static int omap2_gp_timer_set_next_event(unsigned long cycles,
-                                        struct clock_event_device *evt)
-{
-       omap_dm_timer_set_load_start(gptimer, 0, 0xffffffff - cycles);
-
-       return 0;
-}
-
-static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
-                                   struct clock_event_device *evt)
-{
-       u32 period;
-
-       omap_dm_timer_stop(gptimer);
-
-       switch (mode) {
-       case CLOCK_EVT_MODE_PERIODIC:
-               period = clk_get_rate(omap_dm_timer_get_fclk(gptimer)) / HZ;
-               period -= 1;
-               omap_dm_timer_set_load_start(gptimer, 1, 0xffffffff - period);
-               break;
-       case CLOCK_EVT_MODE_ONESHOT:
-               break;
-       case CLOCK_EVT_MODE_UNUSED:
-       case CLOCK_EVT_MODE_SHUTDOWN:
-       case CLOCK_EVT_MODE_RESUME:
-               break;
-       }
-}
-
-static struct clock_event_device clockevent_gpt = {
-       .name           = "gp timer",
-       .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-       .shift          = 32,
-       .set_next_event = omap2_gp_timer_set_next_event,
-       .set_mode       = omap2_gp_timer_set_mode,
-};
-
-/**
- * omap2_gp_clockevent_set_gptimer - set which GPTIMER is used for clockevents
- * @id: GPTIMER to use (1..MAX_GPTIMER_ID)
- *
- * Define the GPTIMER that the system should use for the tick timer.
- * Meant to be called from board-*.c files in the event that GPTIMER1, the
- * default, is unsuitable.  Returns -EINVAL on error or 0 on success.
- */
-int __init omap2_gp_clockevent_set_gptimer(u8 id)
-{
-       if (id < 1 || id > MAX_GPTIMER_ID)
-               return -EINVAL;
-
-       BUG_ON(inited);
-
-       gptimer_id = id;
-
-       return 0;
-}
-
-static void __init omap2_gp_clockevent_init(void)
-{
-       u32 tick_rate;
-       int src;
-       char clockevent_hwmod_name[8]; /* 8 = sizeof("timerXX0") */
-
-       inited = 1;
-
-       sprintf(clockevent_hwmod_name, "timer%d", gptimer_id);
-       omap_hwmod_setup_one(clockevent_hwmod_name);
-
-       gptimer = omap_dm_timer_request_specific(gptimer_id);
-       BUG_ON(gptimer == NULL);
-       gptimer_wakeup = gptimer;
-
-#if defined(CONFIG_OMAP_32K_TIMER)
-       src = OMAP_TIMER_SRC_32_KHZ;
-#else
-       src = OMAP_TIMER_SRC_SYS_CLK;
-       WARN(gptimer_id == 12, "WARNING: GPTIMER12 can only use the "
-            "secure 32KiHz clock source\n");
-#endif
-
-       if (gptimer_id != 12)
-               WARN(IS_ERR_VALUE(omap_dm_timer_set_source(gptimer, src)),
-                    "timer-gp: omap_dm_timer_set_source() failed\n");
-
-       tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer));
-
-       pr_info("OMAP clockevent source: GPTIMER%d at %u Hz\n",
-               gptimer_id, tick_rate);
-
-       omap2_gp_timer_irq.dev_id = (void *)gptimer;
-       setup_irq(omap_dm_timer_get_irq(gptimer), &omap2_gp_timer_irq);
-       omap_dm_timer_set_int_enable(gptimer, OMAP_TIMER_INT_OVERFLOW);
-
-       clockevent_gpt.mult = div_sc(tick_rate, NSEC_PER_SEC,
-                                    clockevent_gpt.shift);
-       clockevent_gpt.max_delta_ns =
-               clockevent_delta2ns(0xffffffff, &clockevent_gpt);
-       clockevent_gpt.min_delta_ns =
-               clockevent_delta2ns(3, &clockevent_gpt);
-               /* Timer internal resynch latency. */
-
-       clockevent_gpt.cpumask = cpumask_of(0);
-       clockevents_register_device(&clockevent_gpt);
-}
-
-/* Clocksource code */
-
-#ifdef CONFIG_OMAP_32K_TIMER
-/* 
- * When 32k-timer is enabled, don't use GPTimer for clocksource
- * instead, just leave default clocksource which uses the 32k
- * sync counter.  See clocksource setup in plat-omap/counter_32k.c
- */
-
-static void __init omap2_gp_clocksource_init(void)
-{
-       omap_init_clocksource_32k();
-}
-
-#else
-/*
- * clocksource
- */
-static DEFINE_CLOCK_DATA(cd);
-static struct omap_dm_timer *gpt_clocksource;
-static cycle_t clocksource_read_cycles(struct clocksource *cs)
-{
-       return (cycle_t)omap_dm_timer_read_counter(gpt_clocksource);
-}
-
-static struct clocksource clocksource_gpt = {
-       .name           = "gp timer",
-       .rating         = 300,
-       .read           = clocksource_read_cycles,
-       .mask           = CLOCKSOURCE_MASK(32),
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static void notrace dmtimer_update_sched_clock(void)
-{
-       u32 cyc;
-
-       cyc = omap_dm_timer_read_counter(gpt_clocksource);
-
-       update_sched_clock(&cd, cyc, (u32)~0);
-}
-
-/* Setup free-running counter for clocksource */
-static void __init omap2_gp_clocksource_init(void)
-{
-       static struct omap_dm_timer *gpt;
-       u32 tick_rate;
-       static char err1[] __initdata = KERN_ERR
-               "%s: failed to request dm-timer\n";
-       static char err2[] __initdata = KERN_ERR
-               "%s: can't register clocksource!\n";
-
-       gpt = omap_dm_timer_request();
-       if (!gpt)
-               printk(err1, clocksource_gpt.name);
-       gpt_clocksource = gpt;
-
-       omap_dm_timer_set_source(gpt, OMAP_TIMER_SRC_SYS_CLK);
-       tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gpt));
-
-       omap_dm_timer_set_load_start(gpt, 1, 0);
-
-       init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
-
-       if (clocksource_register_hz(&clocksource_gpt, tick_rate))
-               printk(err2, clocksource_gpt.name);
-}
-#endif
-
-static void __init omap2_gp_timer_init(void)
-{
-#ifdef CONFIG_LOCAL_TIMERS
-       if (cpu_is_omap44xx()) {
-               twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_256);
-               BUG_ON(!twd_base);
-       }
-#endif
-       omap_dm_timer_init();
-
-       omap2_gp_clockevent_init();
-       omap2_gp_clocksource_init();
-}
-
-struct sys_timer omap_timer = {
-       .init   = omap2_gp_timer_init,
-};
diff --git a/arch/arm/mach-omap2/timer-gp.h b/arch/arm/mach-omap2/timer-gp.h
deleted file mode 100644 (file)
index 5c1072c..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * OMAP2/3 GPTIMER support.headers
- *
- * Copyright (C) 2009 Nokia Corporation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_TIMER_GP_H
-#define __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_TIMER_GP_H
-
-extern int __init omap2_gp_clockevent_set_gptimer(u8 id);
-
-#endif
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
new file mode 100644 (file)
index 0000000..e964072
--- /dev/null
@@ -0,0 +1,342 @@
+/*
+ * linux/arch/arm/mach-omap2/timer.c
+ *
+ * OMAP2 GP timer support.
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Update to use new clocksource/clockevent layers
+ * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ *
+ * Original driver:
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *         Juha Yrjölä <juha.yrjola@nokia.com>
+ * OMAP Dual-mode timer framework support by Timo Teras
+ *
+ * Some parts based off of TI's 24xx code:
+ *
+ * Copyright (C) 2004-2009 Texas Instruments, Inc.
+ *
+ * Roughly modelled after the OMAP1 MPU timer code.
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+
+#include <asm/mach/time.h>
+#include <plat/dmtimer.h>
+#include <asm/localtimer.h>
+#include <asm/sched_clock.h>
+#include <plat/common.h>
+#include <plat/omap_hwmod.h>
+
+/* Parent clocks, eventually these will come from the clock framework */
+
+#define OMAP2_MPU_SOURCE       "sys_ck"
+#define OMAP3_MPU_SOURCE       OMAP2_MPU_SOURCE
+#define OMAP4_MPU_SOURCE       "sys_clkin_ck"
+#define OMAP2_32K_SOURCE       "func_32k_ck"
+#define OMAP3_32K_SOURCE       "omap_32k_fck"
+#define OMAP4_32K_SOURCE       "sys_32k_ck"
+
+#ifdef CONFIG_OMAP_32K_TIMER
+#define OMAP2_CLKEV_SOURCE     OMAP2_32K_SOURCE
+#define OMAP3_CLKEV_SOURCE     OMAP3_32K_SOURCE
+#define OMAP4_CLKEV_SOURCE     OMAP4_32K_SOURCE
+#define OMAP3_SECURE_TIMER     12
+#else
+#define OMAP2_CLKEV_SOURCE     OMAP2_MPU_SOURCE
+#define OMAP3_CLKEV_SOURCE     OMAP3_MPU_SOURCE
+#define OMAP4_CLKEV_SOURCE     OMAP4_MPU_SOURCE
+#define OMAP3_SECURE_TIMER     1
+#endif
+
+/* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
+#define MAX_GPTIMER_ID         12
+
+u32 sys_timer_reserved;
+
+/* Clockevent code */
+
+static struct omap_dm_timer clkev;
+static struct clock_event_device clockevent_gpt;
+
+static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *evt = &clockevent_gpt;
+
+       __omap_dm_timer_write_status(clkev.io_base, OMAP_TIMER_INT_OVERFLOW);
+
+       evt->event_handler(evt);
+       return IRQ_HANDLED;
+}
+
+static struct irqaction omap2_gp_timer_irq = {
+       .name           = "gp timer",
+       .flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+       .handler        = omap2_gp_timer_interrupt,
+};
+
+static int omap2_gp_timer_set_next_event(unsigned long cycles,
+                                        struct clock_event_device *evt)
+{
+       __omap_dm_timer_load_start(clkev.io_base, OMAP_TIMER_CTRL_ST,
+                                               0xffffffff - cycles, 1);
+
+       return 0;
+}
+
+static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
+                                   struct clock_event_device *evt)
+{
+       u32 period;
+
+       __omap_dm_timer_stop(clkev.io_base, 1, clkev.rate);
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+               period = clkev.rate / HZ;
+               period -= 1;
+               /* Looks like we need to first set the load value separately */
+               __omap_dm_timer_write(clkev.io_base, OMAP_TIMER_LOAD_REG,
+                                       0xffffffff - period, 1);
+               __omap_dm_timer_load_start(clkev.io_base,
+                                       OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+                                               0xffffffff - period, 1);
+               break;
+       case CLOCK_EVT_MODE_ONESHOT:
+               break;
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+       case CLOCK_EVT_MODE_RESUME:
+               break;
+       }
+}
+
+static struct clock_event_device clockevent_gpt = {
+       .name           = "gp timer",
+       .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+       .shift          = 32,
+       .set_next_event = omap2_gp_timer_set_next_event,
+       .set_mode       = omap2_gp_timer_set_mode,
+};
+
+static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
+                                               int gptimer_id,
+                                               const char *fck_source)
+{
+       char name[10]; /* 10 = sizeof("gptXX_Xck0") */
+       struct omap_hwmod *oh;
+       size_t size;
+       int res = 0;
+
+       sprintf(name, "timer%d", gptimer_id);
+       omap_hwmod_setup_one(name);
+       oh = omap_hwmod_lookup(name);
+       if (!oh)
+               return -ENODEV;
+
+       timer->irq = oh->mpu_irqs[0].irq;
+       timer->phys_base = oh->slaves[0]->addr->pa_start;
+       size = oh->slaves[0]->addr->pa_end - timer->phys_base;
+
+       /* Static mapping, never released */
+       timer->io_base = ioremap(timer->phys_base, size);
+       if (!timer->io_base)
+               return -ENXIO;
+
+       /* After the dmtimer is using hwmod these clocks won't be needed */
+       sprintf(name, "gpt%d_fck", gptimer_id);
+       timer->fclk = clk_get(NULL, name);
+       if (IS_ERR(timer->fclk))
+               return -ENODEV;
+
+       sprintf(name, "gpt%d_ick", gptimer_id);
+       timer->iclk = clk_get(NULL, name);
+       if (IS_ERR(timer->iclk)) {
+               clk_put(timer->fclk);
+               return -ENODEV;
+       }
+
+       omap_hwmod_enable(oh);
+
+       sys_timer_reserved |= (1 << (gptimer_id - 1));
+
+       if (gptimer_id != 12) {
+               struct clk *src;
+
+               src = clk_get(NULL, fck_source);
+               if (IS_ERR(src)) {
+                       res = -EINVAL;
+               } else {
+                       res = __omap_dm_timer_set_source(timer->fclk, src);
+                       if (IS_ERR_VALUE(res))
+                               pr_warning("%s: timer%i cannot set source\n",
+                                               __func__, gptimer_id);
+                       clk_put(src);
+               }
+       }
+       __omap_dm_timer_reset(timer->io_base, 1, 1);
+       timer->posted = 1;
+
+       timer->rate = clk_get_rate(timer->fclk);
+
+       timer->reserved = 1;
+
+       return res;
+}
+
+static void __init omap2_gp_clockevent_init(int gptimer_id,
+                                               const char *fck_source)
+{
+       int res;
+
+       res = omap_dm_timer_init_one(&clkev, gptimer_id, fck_source);
+       BUG_ON(res);
+
+       omap2_gp_timer_irq.dev_id = (void *)&clkev;
+       setup_irq(clkev.irq, &omap2_gp_timer_irq);
+
+       __omap_dm_timer_int_enable(clkev.io_base, OMAP_TIMER_INT_OVERFLOW);
+
+       clockevent_gpt.mult = div_sc(clkev.rate, NSEC_PER_SEC,
+                                    clockevent_gpt.shift);
+       clockevent_gpt.max_delta_ns =
+               clockevent_delta2ns(0xffffffff, &clockevent_gpt);
+       clockevent_gpt.min_delta_ns =
+               clockevent_delta2ns(3, &clockevent_gpt);
+               /* Timer internal resynch latency. */
+
+       clockevent_gpt.cpumask = cpumask_of(0);
+       clockevents_register_device(&clockevent_gpt);
+
+       pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n",
+               gptimer_id, clkev.rate);
+}
+
+/* Clocksource code */
+
+#ifdef CONFIG_OMAP_32K_TIMER
+/*
+ * When the 32k timer is enabled, don't use a GPTimer for the
+ * clocksource; instead, leave the default clocksource, which uses the
+ * 32k sync counter.  See the clocksource setup in plat-omap/counter_32k.c
+ */
+
+static void __init omap2_gp_clocksource_init(int unused, const char *dummy)
+{
+       omap_init_clocksource_32k();
+}
+
+#else
+
+static struct omap_dm_timer clksrc;
+
+/*
+ * clocksource
+ */
+static DEFINE_CLOCK_DATA(cd);
+static cycle_t clocksource_read_cycles(struct clocksource *cs)
+{
+       return (cycle_t)__omap_dm_timer_read_counter(clksrc.io_base, 1);
+}
+
+static struct clocksource clocksource_gpt = {
+       .name           = "gp timer",
+       .rating         = 300,
+       .read           = clocksource_read_cycles,
+       .mask           = CLOCKSOURCE_MASK(32),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void notrace dmtimer_update_sched_clock(void)
+{
+       u32 cyc;
+
+       cyc = __omap_dm_timer_read_counter(clksrc.io_base, 1);
+
+       update_sched_clock(&cd, cyc, (u32)~0);
+}
+
+unsigned long long notrace sched_clock(void)
+{
+       u32 cyc = 0;
+
+       if (clksrc.reserved)
+               cyc = __omap_dm_timer_read_counter(clksrc.io_base, 1);
+
+       return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+/* Setup free-running counter for clocksource */
+static void __init omap2_gp_clocksource_init(int gptimer_id,
+                                               const char *fck_source)
+{
+       int res;
+
+       res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source);
+       BUG_ON(res);
+
+       pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
+               gptimer_id, clksrc.rate);
+
+       __omap_dm_timer_load_start(clksrc.io_base, OMAP_TIMER_CTRL_ST, 0, 1);
+       init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate);
+
+       if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
+               pr_err("Could not register clocksource %s\n",
+                       clocksource_gpt.name);
+}
+#endif
+
+#define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src,                 \
+                               clksrc_nr, clksrc_src)                  \
+static void __init omap##name##_timer_init(void)                       \
+{                                                                      \
+       omap2_gp_clockevent_init((clkev_nr), clkev_src);                \
+       omap2_gp_clocksource_init((clksrc_nr), clksrc_src);             \
+}
+
+#define OMAP_SYS_TIMER(name)                                           \
+struct sys_timer omap##name##_timer = {                                        \
+       .init   = omap##name##_timer_init,                              \
+};
+
+#ifdef CONFIG_ARCH_OMAP2
+OMAP_SYS_TIMER_INIT(2, 1, OMAP2_CLKEV_SOURCE, 2, OMAP2_MPU_SOURCE)
+OMAP_SYS_TIMER(2)
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+OMAP_SYS_TIMER_INIT(3, 1, OMAP3_CLKEV_SOURCE, 2, OMAP3_MPU_SOURCE)
+OMAP_SYS_TIMER(3)
+OMAP_SYS_TIMER_INIT(3_secure, OMAP3_SECURE_TIMER, OMAP3_CLKEV_SOURCE,
+                       2, OMAP3_MPU_SOURCE)
+OMAP_SYS_TIMER(3_secure)
+#endif
+
+#ifdef CONFIG_ARCH_OMAP4
+static void __init omap4_timer_init(void)
+{
+#ifdef CONFIG_LOCAL_TIMERS
+       twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_256);
+       BUG_ON(!twd_base);
+#endif
+       omap2_gp_clockevent_init(1, OMAP4_CLKEV_SOURCE);
+       omap2_gp_clocksource_init(2, OMAP4_MPU_SOURCE);
+}
+OMAP_SYS_TIMER(4)
+#endif
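
For reference, here is a rough, illustrative expansion (not part of the patch) of the OMAP_SYS_TIMER_INIT()/OMAP_SYS_TIMER() pair above for the plain OMAP2 case; OMAP2_CLKEV_SOURCE and OMAP2_MPU_SOURCE are assumed to be defined earlier in timer.c:

static void __init omap2_timer_init(void)
{
        /* GPTIMER1 drives the clockevent, GPTIMER2 the clocksource */
        omap2_gp_clockevent_init(1, OMAP2_CLKEV_SOURCE);
        omap2_gp_clocksource_init(2, OMAP2_MPU_SOURCE);
}

struct sys_timer omap2_timer = {
        .init   = omap2_timer_init,
};
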
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
new file mode 100644 (file)
index 0000000..3aaa46f
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * twl-common.c
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c/twl.h>
+#include <linux/gpio.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+
+#include <plat/i2c.h>
+#include <plat/usb.h>
+
+#include "twl-common.h"
+
+static struct i2c_board_info __initdata pmic_i2c_board_info = {
+       .addr           = 0x48,
+       .flags          = I2C_CLIENT_WAKE,
+};
+
+void __init omap_pmic_init(int bus, u32 clkrate,
+                          const char *pmic_type, int pmic_irq,
+                          struct twl4030_platform_data *pmic_data)
+{
+       strncpy(pmic_i2c_board_info.type, pmic_type,
+               sizeof(pmic_i2c_board_info.type));
+       pmic_i2c_board_info.irq = pmic_irq;
+       pmic_i2c_board_info.platform_data = pmic_data;
+
+       omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
+}
+
+static struct twl4030_usb_data omap4_usb_pdata = {
+       .phy_init       = omap4430_phy_init,
+       .phy_exit       = omap4430_phy_exit,
+       .phy_power      = omap4430_phy_power,
+       .phy_set_clock  = omap4430_phy_set_clk,
+       .phy_suspend    = omap4430_phy_suspend,
+};
+
+static struct twl4030_usb_data omap3_usb_pdata = {
+       .usb_mode       = T2_USB_MODE_ULPI,
+};
+
+static int omap3_batt_table[] = {
+/* 0 C */
+30800, 29500, 28300, 27100,
+26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
+17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
+11600, 11200, 10800, 10400, 10000, 9630,  9280,  8950,  8620,  8310,
+8020,  7730,  7460,  7200,  6950,  6710,  6470,  6250,  6040,  5830,
+5640,  5450,  5260,  5090,  4920,  4760,  4600,  4450,  4310,  4170,
+4040,  3910,  3790,  3670,  3550
+};
+
+static struct twl4030_bci_platform_data omap3_bci_pdata = {
+       .battery_tmp_tbl        = omap3_batt_table,
+       .tblsize                = ARRAY_SIZE(omap3_batt_table),
+};
+
+static struct twl4030_madc_platform_data omap3_madc_pdata = {
+       .irq_line       = 1,
+};
+
+static struct twl4030_codec_audio_data omap3_audio;
+
+static struct twl4030_codec_data omap3_codec_pdata = {
+       .audio_mclk = 26000000,
+       .audio = &omap3_audio,
+};
+
+static struct regulator_consumer_supply omap3_vdda_dac_supplies[] = {
+       REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"),
+};
+
+static struct regulator_init_data omap3_vdac_idata = {
+       .constraints = {
+               .min_uV                 = 1800000,
+               .max_uV                 = 1800000,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+       .num_consumer_supplies  = ARRAY_SIZE(omap3_vdda_dac_supplies),
+       .consumer_supplies      = omap3_vdda_dac_supplies,
+};
+
+static struct regulator_consumer_supply omap3_vpll2_supplies[] = {
+       REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
+       REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
+};
+
+static struct regulator_init_data omap3_vpll2_idata = {
+       .constraints = {
+               .min_uV                 = 1800000,
+               .max_uV                 = 1800000,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+       .num_consumer_supplies          = ARRAY_SIZE(omap3_vpll2_supplies),
+       .consumer_supplies              = omap3_vpll2_supplies,
+};
+
+static struct regulator_init_data omap4_vdac_idata = {
+       .constraints = {
+               .min_uV                 = 1800000,
+               .max_uV                 = 1800000,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+};
+
+static struct regulator_init_data omap4_vaux2_idata = {
+       .constraints = {
+               .min_uV                 = 1200000,
+               .max_uV                 = 2800000,
+               .apply_uV               = true,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE
+                                       | REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+};
+
+static struct regulator_init_data omap4_vaux3_idata = {
+       .constraints = {
+               .min_uV                 = 1000000,
+               .max_uV                 = 3000000,
+               .apply_uV               = true,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE
+                                       | REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+};
+
+static struct regulator_consumer_supply omap4_vmmc_supply[] = {
+       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
+
+/* VMMC1 for MMC1 card */
+static struct regulator_init_data omap4_vmmc_idata = {
+       .constraints = {
+               .min_uV                 = 1200000,
+               .max_uV                 = 3000000,
+               .apply_uV               = true,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE
+                                       | REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+       .num_consumer_supplies  = ARRAY_SIZE(omap4_vmmc_supply),
+       .consumer_supplies      = omap4_vmmc_supply,
+};
+
+static struct regulator_init_data omap4_vpp_idata = {
+       .constraints = {
+               .min_uV                 = 1800000,
+               .max_uV                 = 2500000,
+               .apply_uV               = true,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE
+                                       | REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+};
+
+static struct regulator_init_data omap4_vana_idata = {
+       .constraints = {
+               .min_uV                 = 2100000,
+               .max_uV                 = 2100000,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+};
+
+static struct regulator_init_data omap4_vcxio_idata = {
+       .constraints = {
+               .min_uV                 = 1800000,
+               .max_uV                 = 1800000,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+};
+
+static struct regulator_init_data omap4_vusb_idata = {
+       .constraints = {
+               .min_uV                 = 3300000,
+               .max_uV                 = 3300000,
+               .apply_uV               = true,
+               .valid_modes_mask       = REGULATOR_MODE_NORMAL
+                                       | REGULATOR_MODE_STANDBY,
+               .valid_ops_mask         = REGULATOR_CHANGE_MODE
+                                       | REGULATOR_CHANGE_STATUS,
+       },
+};
+
+static struct regulator_init_data omap4_clk32kg_idata = {
+       .constraints = {
+               .valid_ops_mask         = REGULATOR_CHANGE_STATUS,
+       },
+};
+
+void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
+                                 u32 pdata_flags, u32 regulators_flags)
+{
+       if (!pmic_data->irq_base)
+               pmic_data->irq_base = TWL6030_IRQ_BASE;
+       if (!pmic_data->irq_end)
+               pmic_data->irq_end = TWL6030_IRQ_END;
+
+       /* Common platform data configurations */
+       if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
+               pmic_data->usb = &omap4_usb_pdata;
+
+       /* Common regulator configurations */
+       if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
+               pmic_data->vdac = &omap4_vdac_idata;
+
+       if (regulators_flags & TWL_COMMON_REGULATOR_VAUX2 && !pmic_data->vaux2)
+               pmic_data->vaux2 = &omap4_vaux2_idata;
+
+       if (regulators_flags & TWL_COMMON_REGULATOR_VAUX3 && !pmic_data->vaux3)
+               pmic_data->vaux3 = &omap4_vaux3_idata;
+
+       if (regulators_flags & TWL_COMMON_REGULATOR_VMMC && !pmic_data->vmmc)
+               pmic_data->vmmc = &omap4_vmmc_idata;
+
+       if (regulators_flags & TWL_COMMON_REGULATOR_VPP && !pmic_data->vpp)
+               pmic_data->vpp = &omap4_vpp_idata;
+
+       if (regulators_flags & TWL_COMMON_REGULATOR_VANA && !pmic_data->vana)
+               pmic_data->vana = &omap4_vana_idata;
+
+       if (regulators_flags & TWL_COMMON_REGULATOR_VCXIO && !pmic_data->vcxio)
+               pmic_data->vcxio = &omap4_vcxio_idata;
+
+       if (regulators_flags & TWL_COMMON_REGULATOR_VUSB && !pmic_data->vusb)
+               pmic_data->vusb = &omap4_vusb_idata;
+
+       if (regulators_flags & TWL_COMMON_REGULATOR_CLK32KG &&
+           !pmic_data->clk32kg)
+               pmic_data->clk32kg = &omap4_clk32kg_idata;
+}
+
+void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
+                                 u32 pdata_flags, u32 regulators_flags)
+{
+       if (!pmic_data->irq_base)
+               pmic_data->irq_base = TWL4030_IRQ_BASE;
+       if (!pmic_data->irq_end)
+               pmic_data->irq_end = TWL4030_IRQ_END;
+
+       /* Common platform data configurations */
+       if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
+               pmic_data->usb = &omap3_usb_pdata;
+
+       if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci)
+               pmic_data->bci = &omap3_bci_pdata;
+
+       if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc)
+               pmic_data->madc = &omap3_madc_pdata;
+
+       if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->codec)
+               pmic_data->codec = &omap3_codec_pdata;
+
+       /* Common regulator configurations */
+       if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
+               pmic_data->vdac = &omap3_vdac_idata;
+
+       if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2)
+               pmic_data->vpll2 = &omap3_vpll2_idata;
+}
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
new file mode 100644 (file)
index 0000000..5e83a5b
--- /dev/null
@@ -0,0 +1,59 @@
+#ifndef __OMAP_PMIC_COMMON__
+#define __OMAP_PMIC_COMMON__
+
+#define TWL_COMMON_PDATA_USB           (1 << 0)
+#define TWL_COMMON_PDATA_BCI           (1 << 1)
+#define TWL_COMMON_PDATA_MADC          (1 << 2)
+#define TWL_COMMON_PDATA_AUDIO         (1 << 3)
+
+/* Common LDO regulators for TWL4030/TWL6030 */
+#define TWL_COMMON_REGULATOR_VDAC      (1 << 0)
+#define TWL_COMMON_REGULATOR_VAUX1     (1 << 1)
+#define TWL_COMMON_REGULATOR_VAUX2     (1 << 2)
+#define TWL_COMMON_REGULATOR_VAUX3     (1 << 3)
+
+/* TWL6030 LDO regulators */
+#define TWL_COMMON_REGULATOR_VMMC      (1 << 4)
+#define TWL_COMMON_REGULATOR_VPP       (1 << 5)
+#define TWL_COMMON_REGULATOR_VUSIM     (1 << 6)
+#define TWL_COMMON_REGULATOR_VANA      (1 << 7)
+#define TWL_COMMON_REGULATOR_VCXIO     (1 << 8)
+#define TWL_COMMON_REGULATOR_VUSB      (1 << 9)
+#define TWL_COMMON_REGULATOR_CLK32KG   (1 << 10)
+
+/* TWL4030 LDO regulators */
+#define TWL_COMMON_REGULATOR_VPLL1     (1 << 4)
+#define TWL_COMMON_REGULATOR_VPLL2     (1 << 5)
+
+
+struct twl4030_platform_data;
+
+void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
+                   struct twl4030_platform_data *pmic_data);
+
+static inline void omap2_pmic_init(const char *pmic_type,
+                                  struct twl4030_platform_data *pmic_data)
+{
+       omap_pmic_init(2, 2600, pmic_type, INT_24XX_SYS_NIRQ, pmic_data);
+}
+
+static inline void omap3_pmic_init(const char *pmic_type,
+                                  struct twl4030_platform_data *pmic_data)
+{
+       omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data);
+}
+
+static inline void omap4_pmic_init(const char *pmic_type,
+                                  struct twl4030_platform_data *pmic_data)
+{
+       /* Phoenix Audio IC needs I2C1 to start with 400 kHz or less */
+       omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data);
+}
+
+void omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
+                          u32 pdata_flags, u32 regulators_flags);
+
+void omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
+                          u32 pdata_flags, u32 regulators_flags);
+
+#endif /* __OMAP_PMIC_COMMON__ */
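
As an illustration (not part of the patch), an OMAP3 board file might combine these helpers roughly as follows; the example_twldata instance and the "twl4030" chip name are hypothetical, while the flag names are the ones defined in twl-common.h above:

static struct twl4030_platform_data example_twldata;   /* hypothetical */

static void __init example_pmic_setup(void)
{
        /* Let twl-common supply the shared USB/audio platform data and
         * the VDAC/VPLL2 regulator setup the board does not override. */
        omap3_pmic_get_config(&example_twldata,
                              TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
                              TWL_COMMON_REGULATOR_VDAC |
                              TWL_COMMON_REGULATOR_VPLL2);

        /* Registers the PMIC on I2C bus 1, with INT_34XX_SYS_NIRQ as its
         * interrupt (see the inline helper above). */
        omap3_pmic_init("twl4030", &example_twldata);
}
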
index b2248e7..b199596 100644 (file)
@@ -12,6 +12,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#define pr_fmt(fmt) "%s: " fmt, __func__
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -161,10 +162,10 @@ static mfp_cfg_t cm_x3xx_mfp_cfg[] __initdata = {
        GPIO99_GPIO,                    /* Ethernet IRQ */
 
        /* RTC GPIOs */
-       GPIO95_GPIO,                    /* RTC CS */
-       GPIO96_GPIO,                    /* RTC WR */
-       GPIO97_GPIO,                    /* RTC RD */
-       GPIO98_GPIO,                    /* RTC IO */
+       GPIO95_GPIO | MFP_LPM_DRIVE_HIGH,       /* RTC CS */
+       GPIO96_GPIO | MFP_LPM_DRIVE_HIGH,       /* RTC WR */
+       GPIO97_GPIO | MFP_LPM_DRIVE_HIGH,       /* RTC RD */
+       GPIO98_GPIO,                            /* RTC IO */
 
        /* Standard I2C */
        GPIO21_I2C_SCL,
@@ -484,14 +485,13 @@ static int cm_x300_ulpi_phy_reset(void)
        int err;
 
        /* reset the PHY */
-       err = gpio_request(GPIO_ULPI_PHY_RST, "ulpi reset");
+       err = gpio_request_one(GPIO_ULPI_PHY_RST, GPIOF_OUT_INIT_LOW,
+                              "ulpi reset");
        if (err) {
-               pr_err("%s: failed to request ULPI reset GPIO: %d\n",
-                      __func__, err);
+               pr_err("failed to request ULPI reset GPIO: %d\n", err);
                return err;
        }
 
-       gpio_direction_output(GPIO_ULPI_PHY_RST, 0);
        msleep(10);
        gpio_set_value(GPIO_ULPI_PHY_RST, 1);
        msleep(10);
@@ -510,8 +510,7 @@ static inline int cm_x300_u2d_init(struct device *dev)
                pout_clk = clk_get(NULL, "CLK_POUT");
                if (IS_ERR(pout_clk)) {
                        err = PTR_ERR(pout_clk);
-                       pr_err("%s: failed to get CLK_POUT: %d\n",
-                              __func__, err);
+                       pr_err("failed to get CLK_POUT: %d\n", err);
                        return err;
                }
                clk_enable(pout_clk);
@@ -768,39 +767,36 @@ static void __init cm_x300_init_da9030(void)
        irq_set_irq_wake(IRQ_WAKEUP0, 1);
 }
 
+/* wi2wi gpio setting for system_rev >= 130 */
+static struct gpio cm_x300_wi2wi_gpios[] __initdata = {
+       { 71, GPIOF_OUT_INIT_HIGH, "wlan en" },
+       { 70, GPIOF_OUT_INIT_HIGH, "bt reset" },
+};
+
 static void __init cm_x300_init_wi2wi(void)
 {
        int bt_reset, wlan_en;
        int err;
 
        if (system_rev < 130) {
-               wlan_en = 77;
-               bt_reset = 78;
-       } else {
-               wlan_en = 71;
-               bt_reset = 70;
+               cm_x300_wi2wi_gpios[0].gpio = 77;       /* wlan en */
+               cm_x300_wi2wi_gpios[1].gpio = 78;       /* bt reset */
        }
 
        /* Libertas and CSR reset */
-       err = gpio_request(wlan_en, "wlan en");
+       err = gpio_request_array(ARRAY_AND_SIZE(cm_x300_wi2wi_gpios));
        if (err) {
-               pr_err("CM-X300: failed to request wlan en gpio: %d\n", err);
-       } else {
-               gpio_direction_output(wlan_en, 1);
-               gpio_free(wlan_en);
+               pr_err("failed to request wifi/bt gpios: %d\n", err);
+               return;
        }
 
-       err = gpio_request(bt_reset, "bt reset");
-       if (err) {
-               pr_err("CM-X300: failed to request bt reset gpio: %d\n", err);
-       } else {
-               gpio_direction_output(bt_reset, 1);
-               udelay(10);
-               gpio_set_value(bt_reset, 0);
-               udelay(10);
-               gpio_set_value(bt_reset, 1);
-               gpio_free(bt_reset);
-       }
+       /* Pick up the (possibly rev-adjusted) GPIO numbers from the array */
+       wlan_en = cm_x300_wi2wi_gpios[0].gpio;
+       bt_reset = cm_x300_wi2wi_gpios[1].gpio;
+
+       udelay(10);
+       gpio_set_value(bt_reset, 0);
+       udelay(10);
+       gpio_set_value(bt_reset, 1);
+
+       gpio_free(wlan_en);
+       gpio_free(bt_reset);
 }
 
 /* MFP */
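
The board conversions below (and the cm-x300 one above) all move to the struct gpio / gpio_request_array() gpiolib interface. A minimal, self-contained sketch of that pattern, with made-up GPIO numbers and labels:

#include <linux/gpio.h>

static struct gpio example_gpios[] = {
        { 10, GPIOF_IN,            "example irq"   },
        { 11, GPIOF_OUT_INIT_HIGH, "example reset" },
};

static int example_setup(void)
{
        int err;

        /* Requests every entry and applies its GPIOF_* setting; on
         * failure, anything already requested is released again. */
        err = gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
        if (err)
                return err;

        /* ... use the lines ... */

        gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));
        return 0;
}
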
index f941a49..99960a1 100644 (file)
@@ -135,42 +135,6 @@ static unsigned long hx4700_pin_config[] __initdata = {
        GPIO66_GPIO,    /* nSDIO_IRQ */
 };
 
-#define HX4700_GPIO_IN(num, _desc) \
-       { .gpio = (num), .dir = 0, .desc = (_desc) }
-#define HX4700_GPIO_OUT(num, _init, _desc) \
-       { .gpio = (num), .dir = 1, .init = (_init), .desc = (_desc) }
-struct gpio_ress {
-       unsigned gpio : 8;
-       unsigned dir : 1;
-       unsigned init : 1;
-       char *desc;
-};
-
-static int hx4700_gpio_request(struct gpio_ress *gpios, int size)
-{
-       int i, rc = 0;
-       int gpio;
-       int dir;
-
-       for (i = 0; (!rc) && (i < size); i++) {
-               gpio = gpios[i].gpio;
-               dir = gpios[i].dir;
-               rc = gpio_request(gpio, gpios[i].desc);
-               if (rc) {
-                       pr_err("Error requesting GPIO %d(%s) : %d\n",
-                              gpio, gpios[i].desc, rc);
-                       continue;
-               }
-               if (dir)
-                       gpio_direction_output(gpio, gpios[i].init);
-               else
-                       gpio_direction_input(gpio);
-       }
-       while ((rc) && (--i >= 0))
-               gpio_free(gpios[i].gpio);
-       return rc;
-}
-
 /*
  * IRDA
  */
@@ -829,26 +793,30 @@ static struct platform_device *devices[] __initdata = {
        &pcmcia,
 };
 
-static struct gpio_ress global_gpios[] = {
-       HX4700_GPIO_IN(GPIO12_HX4700_ASIC3_IRQ, "ASIC3_IRQ"),
-       HX4700_GPIO_IN(GPIO13_HX4700_W3220_IRQ, "W3220_IRQ"),
-       HX4700_GPIO_IN(GPIO14_HX4700_nWLAN_IRQ, "WLAN_IRQ"),
-       HX4700_GPIO_OUT(GPIO59_HX4700_LCD_PC1,          1, "LCD_PC1"),
-       HX4700_GPIO_OUT(GPIO62_HX4700_LCD_nRESET,       1, "LCD_RESET"),
-       HX4700_GPIO_OUT(GPIO70_HX4700_LCD_SLIN1,        1, "LCD_SLIN1"),
-       HX4700_GPIO_OUT(GPIO84_HX4700_LCD_SQN,          1, "LCD_SQN"),
-       HX4700_GPIO_OUT(GPIO110_HX4700_LCD_LVDD_3V3_ON, 1, "LCD_LVDD"),
-       HX4700_GPIO_OUT(GPIO111_HX4700_LCD_AVDD_3V3_ON, 1, "LCD_AVDD"),
-       HX4700_GPIO_OUT(GPIO32_HX4700_RS232_ON,         1, "RS232_ON"),
-       HX4700_GPIO_OUT(GPIO71_HX4700_ASIC3_nRESET,     1, "ASIC3_nRESET"),
-       HX4700_GPIO_OUT(GPIO82_HX4700_EUART_RESET,      1, "EUART_RESET"),
-       HX4700_GPIO_OUT(GPIO105_HX4700_nIR_ON,          1, "nIR_EN"),
+static struct gpio global_gpios[] = {
+       { GPIO12_HX4700_ASIC3_IRQ, GPIOF_IN, "ASIC3_IRQ" },
+       { GPIO13_HX4700_W3220_IRQ, GPIOF_IN, "W3220_IRQ" },
+       { GPIO14_HX4700_nWLAN_IRQ, GPIOF_IN, "WLAN_IRQ" },
+       { GPIO59_HX4700_LCD_PC1,          GPIOF_OUT_INIT_HIGH, "LCD_PC1" },
+       { GPIO62_HX4700_LCD_nRESET,       GPIOF_OUT_INIT_HIGH, "LCD_RESET" },
+       { GPIO70_HX4700_LCD_SLIN1,        GPIOF_OUT_INIT_HIGH, "LCD_SLIN1" },
+       { GPIO84_HX4700_LCD_SQN,          GPIOF_OUT_INIT_HIGH, "LCD_SQN" },
+       { GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
+       { GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
+       { GPIO32_HX4700_RS232_ON,         GPIOF_OUT_INIT_HIGH, "RS232_ON" },
+       { GPIO71_HX4700_ASIC3_nRESET,     GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
+       { GPIO82_HX4700_EUART_RESET,      GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
+       { GPIO105_HX4700_nIR_ON,          GPIOF_OUT_INIT_HIGH, "nIR_EN" },
 };
 
 static void __init hx4700_init(void)
 {
+       int ret;
+
        pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
-       hx4700_gpio_request(ARRAY_AND_SIZE(global_gpios));
+       ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
+       if (ret)
+               pr_err("hx4700: Failed to request GPIOs: %d\n", ret);
 
        pxa_set_ffuart_info(NULL);
        pxa_set_btuart_info(NULL);
index 0a2efcf..7cbfc5d 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef _MAGICIAN_H_
 #define _MAGICIAN_H_
 
+#include <linux/gpio.h>
 #include <mach/irqs.h>
 
 /*
@@ -77,7 +78,7 @@
  * CPLD EGPIOs
  */
 
-#define MAGICIAN_EGPIO_BASE                    0x80 /* GPIO_BOARD_START */
+#define MAGICIAN_EGPIO_BASE                    NR_BUILTIN_GPIO
 #define MAGICIAN_EGPIO(reg,bit) \
        (MAGICIAN_EGPIO_BASE + 8*reg + bit)
 
index e192057..0e42798 100644 (file)
@@ -344,22 +344,14 @@ static struct pxafb_mach_info samsung_info = {
  * Backlight
  */
 
+static struct gpio magician_bl_gpios[] = {
+       { EGPIO_MAGICIAN_BL_POWER,  GPIOF_DIR_OUT, "Backlight power" },
+       { EGPIO_MAGICIAN_BL_POWER2, GPIOF_DIR_OUT, "Backlight power 2" },
+};
+
 static int magician_backlight_init(struct device *dev)
 {
-       int ret;
-
-       ret = gpio_request(EGPIO_MAGICIAN_BL_POWER, "BL_POWER");
-       if (ret)
-               goto err;
-       ret = gpio_request(EGPIO_MAGICIAN_BL_POWER2, "BL_POWER2");
-       if (ret)
-               goto err2;
-       return 0;
-
-err2:
-       gpio_free(EGPIO_MAGICIAN_BL_POWER);
-err:
-       return ret;
+       return gpio_request_array(ARRAY_AND_SIZE(magician_bl_gpios));
 }
 
 static int magician_backlight_notify(struct device *dev, int brightness)
@@ -376,8 +368,7 @@ static int magician_backlight_notify(struct device *dev, int brightness)
 
 static void magician_backlight_exit(struct device *dev)
 {
-       gpio_free(EGPIO_MAGICIAN_BL_POWER);
-       gpio_free(EGPIO_MAGICIAN_BL_POWER2);
+       gpio_free_array(ARRAY_AND_SIZE(magician_bl_gpios));
 }
 
 static struct platform_pwm_backlight_data backlight_data = {
@@ -712,16 +703,25 @@ static struct platform_device *devices[] __initdata = {
        &leds_gpio,
 };
 
+static struct gpio magician_global_gpios[] = {
+       { GPIO13_MAGICIAN_CPLD_IRQ,   GPIOF_IN, "CPLD_IRQ" },
+       { GPIO107_MAGICIAN_DS1WM_IRQ, GPIOF_IN, "DS1WM_IRQ" },
+       { GPIO104_MAGICIAN_LCD_POWER_1, GPIOF_OUT_INIT_LOW, "LCD power 1" },
+       { GPIO105_MAGICIAN_LCD_POWER_2, GPIOF_OUT_INIT_LOW, "LCD power 2" },
+       { GPIO106_MAGICIAN_LCD_POWER_3, GPIOF_OUT_INIT_LOW, "LCD power 3" },
+       { GPIO83_MAGICIAN_nIR_EN, GPIOF_OUT_INIT_HIGH, "nIR_EN" },
+};
+
 static void __init magician_init(void)
 {
        void __iomem *cpld;
        int lcd_select;
        int err;
 
-       gpio_request(GPIO13_MAGICIAN_CPLD_IRQ, "CPLD_IRQ");
-       gpio_request(GPIO107_MAGICIAN_DS1WM_IRQ, "DS1WM_IRQ");
-
        pxa2xx_mfp_config(ARRAY_AND_SIZE(magician_pin_config));
+       err = gpio_request_array(ARRAY_AND_SIZE(magician_global_gpios));
+       if (err)
+               pr_err("magician: Failed to request GPIOs: %d\n", err);
 
        pxa_set_ffuart_info(NULL);
        pxa_set_btuart_info(NULL);
@@ -729,11 +729,7 @@ static void __init magician_init(void)
 
        platform_add_devices(ARRAY_AND_SIZE(devices));
 
-       err = gpio_request(GPIO83_MAGICIAN_nIR_EN, "nIR_EN");
-       if (!err) {
-               gpio_direction_output(GPIO83_MAGICIAN_nIR_EN, 1);
-               pxa_set_ficp_info(&magician_ficp_info);
-       }
+       pxa_set_ficp_info(&magician_ficp_info);
        pxa27x_set_i2c_power_info(NULL);
        pxa_set_i2c_info(&i2c_info);
        pxa_set_mci_info(&magician_mci_info);
@@ -747,16 +743,9 @@ static void __init magician_init(void)
                system_rev = board_id & 0x7;
                lcd_select = board_id & 0x8;
                pr_info("LCD type: %s\n", lcd_select ? "Samsung" : "Toppoly");
-               if (lcd_select && (system_rev < 3)) {
-                       gpio_request(GPIO75_MAGICIAN_SAMSUNG_POWER, "SAMSUNG_POWER");
-                       gpio_direction_output(GPIO75_MAGICIAN_SAMSUNG_POWER, 0);
-               }
-               gpio_request(GPIO104_MAGICIAN_LCD_POWER_1, "LCD_POWER_1");
-               gpio_request(GPIO105_MAGICIAN_LCD_POWER_2, "LCD_POWER_2");
-               gpio_request(GPIO106_MAGICIAN_LCD_POWER_3, "LCD_POWER_3");
-               gpio_direction_output(GPIO104_MAGICIAN_LCD_POWER_1, 0);
-               gpio_direction_output(GPIO105_MAGICIAN_LCD_POWER_2, 0);
-               gpio_direction_output(GPIO106_MAGICIAN_LCD_POWER_3, 0);
+               if (lcd_select && (system_rev < 3))
+                       gpio_request_one(GPIO75_MAGICIAN_SAMSUNG_POWER,
+                                        GPIOF_OUT_INIT_LOW, "SAMSUNG_POWER");
                pxa_set_fb_info(NULL, lcd_select ? &samsung_info : &toppoly_info);
        } else
                pr_err("LCD detection: CPLD mapping failed\n");
index e347013..aa67637 100644 (file)
@@ -177,50 +177,6 @@ static unsigned long mioa701_pin_config[] = {
        MFP_CFG_OUT(GPIO116, AF0, DRIVE_HIGH),
 };
 
-#define MIO_GPIO_IN(num, _desc) \
-       { .gpio = (num), .dir = 0, .desc = (_desc) }
-#define MIO_GPIO_OUT(num, _init, _desc) \
-       { .gpio = (num), .dir = 1, .init = (_init), .desc = (_desc) }
-struct gpio_ress {
-       unsigned gpio : 8;
-       unsigned dir : 1;
-       unsigned init : 1;
-       char *desc;
-};
-
-static int mio_gpio_request(struct gpio_ress *gpios, int size)
-{
-       int i, rc = 0;
-       int gpio;
-       int dir;
-
-       for (i = 0; (!rc) && (i < size); i++) {
-               gpio = gpios[i].gpio;
-               dir = gpios[i].dir;
-               rc = gpio_request(gpio, gpios[i].desc);
-               if (rc) {
-                       printk(KERN_ERR "Error requesting GPIO %d(%s) : %d\n",
-                              gpio, gpios[i].desc, rc);
-                       continue;
-               }
-               if (dir)
-                       gpio_direction_output(gpio, gpios[i].init);
-               else
-                       gpio_direction_input(gpio);
-       }
-       while ((rc) && (--i >= 0))
-               gpio_free(gpios[i].gpio);
-       return rc;
-}
-
-static void mio_gpio_free(struct gpio_ress *gpios, int size)
-{
-       int i;
-
-       for (i = 0; i < size; i++)
-               gpio_free(gpios[i].gpio);
-}
-
 /* LCD Screen and Backlight */
 static struct platform_pwm_backlight_data mioa701_backlight_data = {
        .pwm_id         = 0,
@@ -346,16 +302,16 @@ irqreturn_t gsm_on_irq(int irq, void *p)
        return IRQ_HANDLED;
 }
 
-struct gpio_ress gsm_gpios[] = {
-       MIO_GPIO_IN(GPIO25_GSM_MOD_ON_STATE, "GSM state"),
-       MIO_GPIO_IN(GPIO113_GSM_EVENT, "GSM event"),
+static struct gpio gsm_gpios[] = {
+       { GPIO25_GSM_MOD_ON_STATE, GPIOF_IN, "GSM state" },
+       { GPIO113_GSM_EVENT, GPIOF_IN, "GSM event" },
 };
 
 static int __init gsm_init(void)
 {
        int rc;
 
-       rc = mio_gpio_request(ARRAY_AND_SIZE(gsm_gpios));
+       rc = gpio_request_array(ARRAY_AND_SIZE(gsm_gpios));
        if (rc)
                goto err_gpio;
        rc = request_irq(gpio_to_irq(GPIO25_GSM_MOD_ON_STATE), gsm_on_irq,
@@ -369,7 +325,7 @@ static int __init gsm_init(void)
 
 err_irq:
        printk(KERN_ERR "Mioa701: Can't request GSM_ON irq\n");
-       mio_gpio_free(ARRAY_AND_SIZE(gsm_gpios));
+       gpio_free_array(ARRAY_AND_SIZE(gsm_gpios));
 err_gpio:
        printk(KERN_ERR "Mioa701: gsm not available\n");
        return rc;
@@ -378,7 +334,7 @@ err_gpio:
 static void gsm_exit(void)
 {
        free_irq(gpio_to_irq(GPIO25_GSM_MOD_ON_STATE), NULL);
-       mio_gpio_free(ARRAY_AND_SIZE(gsm_gpios));
+       gpio_free_array(ARRAY_AND_SIZE(gsm_gpios));
 }
 
 /*
@@ -749,14 +705,16 @@ static void mioa701_restart(char c, const char *cmd)
        arm_machine_restart('s', cmd);
 }
 
-static struct gpio_ress global_gpios[] = {
-       MIO_GPIO_OUT(GPIO9_CHARGE_EN, 1, "Charger enable"),
-       MIO_GPIO_OUT(GPIO18_POWEROFF, 0, "Power Off"),
-       MIO_GPIO_OUT(GPIO87_LCD_POWER, 0, "LCD Power"),
+static struct gpio global_gpios[] = {
+       { GPIO9_CHARGE_EN, GPIOF_OUT_INIT_HIGH, "Charger enable" },
+       { GPIO18_POWEROFF, GPIOF_OUT_INIT_LOW, "Power Off" },
+       { GPIO87_LCD_POWER, GPIOF_OUT_INIT_LOW, "LCD Power" },
 };
 
 static void __init mioa701_machine_init(void)
 {
+       int rc;
+
        PSLR  = 0xff100000; /* SYSDEL=125ms, PWRDEL=125ms, PSLR_SL_ROD=1 */
        PCFR = PCFR_DC_EN | PCFR_GPR_EN | PCFR_OPDE;
        RTTR = 32768 - 1; /* Reset crazy WinCE value */
@@ -766,7 +724,9 @@ static void __init mioa701_machine_init(void)
        pxa_set_ffuart_info(NULL);
        pxa_set_btuart_info(NULL);
        pxa_set_stuart_info(NULL);
-       mio_gpio_request(ARRAY_AND_SIZE(global_gpios));
+       rc = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
+       if (rc)
+               pr_err("MioA701: Failed to request GPIOs: %d\n", rc);
        bootstrap_init();
        pxa_set_fb_info(NULL, &mioa701_pxafb_info);
        pxa_set_mci_info(&mioa701_mci_info);
index 9322fe5..e53a333 100644 (file)
@@ -104,7 +104,7 @@ static void __init saarb_init(void)
 
 MACHINE_START(SAARB, "PXA955 Handheld Platform (aka SAARB)")
        .boot_params    = 0xa0000100,
-       .map_io         = pxa_map_io,
+       .map_io         = pxa3xx_map_io,
        .nr_irqs        = SAARB_NR_IRQS,
        .init_irq       = pxa95x_init_irq,
        .timer          = &pxa_timer,
index 1e2aba2..ce5c251 100644 (file)
@@ -381,7 +381,7 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
        gpio_set_value(GPIO_PORT114, state);
 }
 
-static struct sh_mobile_sdhi_info sh_sdhi1_platdata = {
+static struct sh_mobile_sdhi_info sh_sdhi1_info = {
        .tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE,
        .tmio_caps      = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
        .tmio_ocr_mask  = MMC_VDD_32_33 | MMC_VDD_33_34,
@@ -413,7 +413,7 @@ static struct platform_device sdhi1_device = {
        .name           = "sh_mobile_sdhi",
        .id             = 1,
        .dev            = {
-               .platform_data  = &sh_sdhi1_platdata,
+               .platform_data  = &sh_sdhi1_info,
        },
        .num_resources  = ARRAY_SIZE(sdhi1_resources),
        .resource       = sdhi1_resources,
index f6b687f..803bc6e 100644 (file)
@@ -913,7 +913,7 @@ static struct i2c_board_info imx074_info = {
        I2C_BOARD_INFO("imx074", 0x1a),
 };
 
-struct soc_camera_link imx074_link = {
+static struct soc_camera_link imx074_link = {
        .bus_id         = 0,
        .board_info     = &imx074_info,
        .i2c_adapter_id = 0,
index 7e1d375..3802f2a 100644 (file)
@@ -1287,9 +1287,9 @@ static struct platform_device *mackerel_devices[] __initdata = {
        &nor_flash_device,
        &smc911x_device,
        &lcdc_device,
-       &usbhs0_device,
        &usb1_host_device,
        &usbhs1_device,
+       &usbhs0_device,
        &leds_device,
        &fsi_device,
        &fsi_ak4643_device,
index fd4cf1c..70cdbd6 100644 (file)
@@ -110,10 +110,18 @@ static pin_cfg_t mop500_pins_common[] = {
        GPIO168_KP_O0,
 
        /* UART */
-       GPIO0_U0_CTSn   | PIN_INPUT_PULLUP,
-       GPIO1_U0_RTSn   | PIN_OUTPUT_HIGH,
-       GPIO2_U0_RXD    | PIN_INPUT_PULLUP,
-       GPIO3_U0_TXD    | PIN_OUTPUT_HIGH,
+       /* The uart-0 pins are kept in GPIO mode here to prevent a
+        * glitch on the TX line when the tty device is opened; they
+        * are switched to UART mode later via mop500_pins_uart0.
+        *
+        * This will be replaced with a plain UART configuration once
+        * the issue is solved.
+        */
+       GPIO0_GPIO      | PIN_INPUT_PULLUP,
+       GPIO1_GPIO      | PIN_OUTPUT_HIGH,
+       GPIO2_GPIO      | PIN_INPUT_PULLUP,
+       GPIO3_GPIO      | PIN_OUTPUT_HIGH,
 
        GPIO29_U2_RXD   | PIN_INPUT_PULLUP,
        GPIO30_U2_TXD   | PIN_OUTPUT_HIGH,
index bb26f40..2a08c07 100644 (file)
 #include <linux/leds-lp5521.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
+#include <linux/delay.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
 #include <plat/i2c.h>
 #include <plat/ste_dma40.h>
+#include <plat/pincfg.h>
 
 #include <mach/hardware.h>
 #include <mach/setup.h>
 #include <mach/devices.h>
 #include <mach/irqs.h>
 
+#include "pins-db8500.h"
 #include "ste-dma40-db8500.h"
 #include "devices-db8500.h"
 #include "board-mop500.h"
@@ -393,12 +396,63 @@ static struct stedma40_chan_cfg uart2_dma_cfg_tx = {
 };
 #endif
 
+
+static pin_cfg_t mop500_pins_uart0[] = {
+       GPIO0_U0_CTSn   | PIN_INPUT_PULLUP,
+       GPIO1_U0_RTSn   | PIN_OUTPUT_HIGH,
+       GPIO2_U0_RXD    | PIN_INPUT_PULLUP,
+       GPIO3_U0_TXD    | PIN_OUTPUT_HIGH,
+};
+
+#define PRCC_K_SOFTRST_SET      0x18
+#define PRCC_K_SOFTRST_CLEAR    0x1C
+static void ux500_uart0_reset(void)
+{
+       void __iomem *prcc_rst_set, *prcc_rst_clr;
+
+       prcc_rst_set = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE +
+                       PRCC_K_SOFTRST_SET);
+       prcc_rst_clr = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE +
+                       PRCC_K_SOFTRST_CLEAR);
+
+       /* Activate soft reset PRCC_K_SOFTRST_CLEAR */
+       writel((readl(prcc_rst_clr) | 0x1), prcc_rst_clr);
+       udelay(1);
+
+       /* Release soft reset PRCC_K_SOFTRST_SET */
+       writel((readl(prcc_rst_set) | 0x1), prcc_rst_set);
+       udelay(1);
+}
+
+static void ux500_uart0_init(void)
+{
+       int ret;
+
+       ret = nmk_config_pins(mop500_pins_uart0,
+                       ARRAY_SIZE(mop500_pins_uart0));
+       if (ret < 0)
+               pr_err("pl011: uart pins_enable failed\n");
+}
+
+static void ux500_uart0_exit(void)
+{
+       int ret;
+
+       ret = nmk_config_pins_sleep(mop500_pins_uart0,
+                       ARRAY_SIZE(mop500_pins_uart0));
+       if (ret < 0)
+               pr_err("pl011: uart pins_disable failed\n");
+}
+
 static struct amba_pl011_data uart0_plat = {
 #ifdef CONFIG_STE_DMA40
        .dma_filter = stedma40_filter,
        .dma_rx_param = &uart0_dma_cfg_rx,
        .dma_tx_param = &uart0_dma_cfg_tx,
 #endif
+       .init = ux500_uart0_init,
+       .exit = ux500_uart0_exit,
+       .reset = ux500_uart0_reset,
 };
 
 static struct amba_pl011_data uart1_plat = {
index 3c38678..089c0b5 100644 (file)
@@ -210,19 +210,21 @@ cpu_v7_name:
 
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl cpu_v7_suspend_size
-.equ   cpu_v7_suspend_size, 4 * 8
+.equ   cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v7_do_suspend)
        stmfd   sp!, {r4 - r11, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
        mrc     p15, 0, r5, c13, c0, 1  @ Context ID
+       mrc     p15, 0, r6, c13, c0, 3  @ User r/o thread ID
+       stmia   r0!, {r4 - r6}
        mrc     p15, 0, r6, c3, c0, 0   @ Domain ID
        mrc     p15, 0, r7, c2, c0, 0   @ TTB 0
        mrc     p15, 0, r8, c2, c0, 1   @ TTB 1
        mrc     p15, 0, r9, c1, c0, 0   @ Control register
        mrc     p15, 0, r10, c1, c0, 1  @ Auxiliary control register
        mrc     p15, 0, r11, c1, c0, 2  @ Co-processor access control
-       stmia   r0, {r4 - r11}
+       stmia   r0, {r6 - r11}
        ldmfd   sp!, {r4 - r11, pc}
 ENDPROC(cpu_v7_do_suspend)
 
@@ -230,9 +232,11 @@ ENTRY(cpu_v7_do_resume)
        mov     ip, #0
        mcr     p15, 0, ip, c8, c7, 0   @ invalidate TLBs
        mcr     p15, 0, ip, c7, c5, 0   @ invalidate I cache
-       ldmia   r0, {r4 - r11}
+       ldmia   r0!, {r4 - r6}
        mcr     p15, 0, r4, c13, c0, 0  @ FCSE/PID
        mcr     p15, 0, r5, c13, c0, 1  @ Context ID
+       mcr     p15, 0, r6, c13, c0, 3  @ User r/o thread ID
+       ldmia   r0, {r6 - r11}
        mcr     p15, 0, r6, c3, c0, 0   @ Domain ID
        mcr     p15, 0, r7, c2, c0, 0   @ TTB 0
        mcr     p15, 0, r8, c2, c0, 1   @ TTB 1
@@ -418,9 +422,9 @@ ENTRY(v7_processor_functions)
        .word   cpu_v7_dcache_clean_area
        .word   cpu_v7_switch_mm
        .word   cpu_v7_set_pte_ext
-       .word   0
-       .word   0
-       .word   0
+       .word   cpu_v7_suspend_size
+       .word   cpu_v7_do_suspend
+       .word   cpu_v7_do_resume
        .size   v7_processor_functions, . - v7_processor_functions
 
        .section ".rodata"
index 9612a87..bab73e2 100644 (file)
@@ -18,6 +18,7 @@
  */
 #include <linux/init.h>
 #include <asm/traps.h>
+#include <asm/ptrace.h>
 
 static int cp6_trap(struct pt_regs *regs, unsigned int instr)
 {
index 49a4c75..6e6735f 100644 (file)
@@ -211,9 +211,6 @@ choice
        depends on ARCH_OMAP
        default OMAP_PM_NOOP
 
-config OMAP_PM_NONE
-       bool "No PM layer"
-
 config OMAP_PM_NOOP
        bool "No-op/debug PM layer"
 
index f7fed60..a6cbb71 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/clocksource.h>
 
 #include <asm/sched_clock.h>
 
 
 #include <plat/clock.h>
 
-
 /*
  * 32KHz clocksource ... always available, on pretty most chips except
  * OMAP 730 and 1510.  Other timers could be used as clocksources, with
  * higher resolution in free-running counter modes (e.g. 12 MHz xtal),
  * but systems won't necessarily want to spend resources that way.
  */
+static void __iomem *timer_32k_base;
 
 #define OMAP16XX_TIMER_32K_SYNCHRONIZED                0xfffbc410
 
-#include <linux/clocksource.h>
-
-/*
- * offset_32k holds the init time counter value. It is then subtracted
- * from every counter read to achieve a counter that counts time from the
- * kernel boot (needed for sched_clock()).
- */
-static u32 offset_32k __read_mostly;
-
-#ifdef CONFIG_ARCH_OMAP16XX
-static cycle_t notrace omap16xx_32k_read(struct clocksource *cs)
-{
-       return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED) - offset_32k;
-}
-#else
-#define omap16xx_32k_read      NULL
-#endif
-
-#ifdef CONFIG_SOC_OMAP2420
-static cycle_t notrace omap2420_32k_read(struct clocksource *cs)
-{
-       return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap2420_32k_read      NULL
-#endif
-
-#ifdef CONFIG_SOC_OMAP2430
-static cycle_t notrace omap2430_32k_read(struct clocksource *cs)
-{
-       return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap2430_32k_read      NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP3
-static cycle_t notrace omap34xx_32k_read(struct clocksource *cs)
-{
-       return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap34xx_32k_read      NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP4
-static cycle_t notrace omap44xx_32k_read(struct clocksource *cs)
-{
-       return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap44xx_32k_read      NULL
-#endif
-
-/*
- * Kernel assumes that sched_clock can be called early but may not have
- * things ready yet.
- */
-static cycle_t notrace omap_32k_read_dummy(struct clocksource *cs)
-{
-       return 0;
-}
-
-static struct clocksource clocksource_32k = {
-       .name           = "32k_counter",
-       .rating         = 250,
-       .read           = omap_32k_read_dummy,
-       .mask           = CLOCKSOURCE_MASK(32),
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
 /*
  * Returns current time from boot in nsecs. It's OK for this to wrap
  * around for now, as it's just a relative time stamp.
@@ -122,11 +52,11 @@ static DEFINE_CLOCK_DATA(cd);
 
 static inline unsigned long long notrace _omap_32k_sched_clock(void)
 {
-       u32 cyc = clocksource_32k.read(&clocksource_32k);
+       u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
        return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
 }
 
-#ifndef CONFIG_OMAP_MPU_TIMER
+#if defined(CONFIG_OMAP_32K_TIMER) && !defined(CONFIG_OMAP_MPU_TIMER)
 unsigned long long notrace sched_clock(void)
 {
        return _omap_32k_sched_clock();
@@ -140,7 +70,7 @@ unsigned long long notrace omap_32k_sched_clock(void)
 
 static void notrace omap_update_sched_clock(void)
 {
-       u32 cyc = clocksource_32k.read(&clocksource_32k);
+       u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
        update_sched_clock(&cd, cyc, (u32)~0);
 }
 
@@ -153,6 +83,7 @@ static void notrace omap_update_sched_clock(void)
  */
 static struct timespec persistent_ts;
 static cycles_t cycles, last_cycles;
+static unsigned int persistent_mult, persistent_shift;
 void read_persistent_clock(struct timespec *ts)
 {
        unsigned long long nsecs;
@@ -160,11 +91,10 @@ void read_persistent_clock(struct timespec *ts)
        struct timespec *tsp = &persistent_ts;
 
        last_cycles = cycles;
-       cycles = clocksource_32k.read(&clocksource_32k);
+       cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
        delta = cycles - last_cycles;
 
-       nsecs = clocksource_cyc2ns(delta,
-                                  clocksource_32k.mult, clocksource_32k.shift);
+       nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
 
        timespec_add_ns(tsp, nsecs);
        *ts = *tsp;
@@ -176,29 +106,46 @@ int __init omap_init_clocksource_32k(void)
                        "%s: can't register clocksource!\n";
 
        if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
+               u32 pbase;
+               unsigned long size = SZ_4K;
+               void __iomem *base;
                struct clk *sync_32k_ick;
 
-               if (cpu_is_omap16xx())
-                       clocksource_32k.read = omap16xx_32k_read;
-               else if (cpu_is_omap2420())
-                       clocksource_32k.read = omap2420_32k_read;
+               if (cpu_is_omap16xx()) {
+                       pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
+                       size = SZ_1K;
+               } else if (cpu_is_omap2420())
+                       pbase = OMAP2420_32KSYNCT_BASE + 0x10;
                else if (cpu_is_omap2430())
-                       clocksource_32k.read = omap2430_32k_read;
+                       pbase = OMAP2430_32KSYNCT_BASE + 0x10;
                else if (cpu_is_omap34xx())
-                       clocksource_32k.read = omap34xx_32k_read;
+                       pbase = OMAP3430_32KSYNCT_BASE + 0x10;
                else if (cpu_is_omap44xx())
-                       clocksource_32k.read = omap44xx_32k_read;
+                       pbase = OMAP4430_32KSYNCT_BASE + 0x10;
                else
                        return -ENODEV;
 
+               /* For this to work we must have a static mapping in io.c for this area */
+               base = ioremap(pbase, size);
+               if (!base)
+                       return -ENODEV;
+
                sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
                if (!IS_ERR(sync_32k_ick))
                        clk_enable(sync_32k_ick);
 
-               offset_32k = clocksource_32k.read(&clocksource_32k);
+               timer_32k_base = base;
+
+               /*
+                * 120000 is a rough estimate taken from the calculations
+                * in __clocksource_updatefreq_scale().
+                */
+               clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
+                               32768, NSEC_PER_SEC, 120000);
 
-               if (clocksource_register_hz(&clocksource_32k, 32768))
-                       printk(err, clocksource_32k.name);
+               if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
+                                         clocksource_mmio_readl_up))
+                       printk(err, "32k_counter");
 
                init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
                                       32768, SC_MULT, SC_SHIFT);
index ee9f6eb..8dfb818 100644 (file)
 #include <plat/dmtimer.h>
 #include <mach/irqs.h>
 
-/* register offsets */
-#define _OMAP_TIMER_ID_OFFSET          0x00
-#define _OMAP_TIMER_OCP_CFG_OFFSET     0x10
-#define _OMAP_TIMER_SYS_STAT_OFFSET    0x14
-#define _OMAP_TIMER_STAT_OFFSET                0x18
-#define _OMAP_TIMER_INT_EN_OFFSET      0x1c
-#define _OMAP_TIMER_WAKEUP_EN_OFFSET   0x20
-#define _OMAP_TIMER_CTRL_OFFSET                0x24
-#define                OMAP_TIMER_CTRL_GPOCFG          (1 << 14)
-#define                OMAP_TIMER_CTRL_CAPTMODE        (1 << 13)
-#define                OMAP_TIMER_CTRL_PT              (1 << 12)
-#define                OMAP_TIMER_CTRL_TCM_LOWTOHIGH   (0x1 << 8)
-#define                OMAP_TIMER_CTRL_TCM_HIGHTOLOW   (0x2 << 8)
-#define                OMAP_TIMER_CTRL_TCM_BOTHEDGES   (0x3 << 8)
-#define                OMAP_TIMER_CTRL_SCPWM           (1 << 7)
-#define                OMAP_TIMER_CTRL_CE              (1 << 6) /* compare enable */
-#define                OMAP_TIMER_CTRL_PRE             (1 << 5) /* prescaler enable */
-#define                OMAP_TIMER_CTRL_PTV_SHIFT       2 /* prescaler value shift */
-#define                OMAP_TIMER_CTRL_POSTED          (1 << 2)
-#define                OMAP_TIMER_CTRL_AR              (1 << 1) /* auto-reload enable */
-#define                OMAP_TIMER_CTRL_ST              (1 << 0) /* start timer */
-#define _OMAP_TIMER_COUNTER_OFFSET     0x28
-#define _OMAP_TIMER_LOAD_OFFSET                0x2c
-#define _OMAP_TIMER_TRIGGER_OFFSET     0x30
-#define _OMAP_TIMER_WRITE_PEND_OFFSET  0x34
-#define                WP_NONE                 0       /* no write pending bit */
-#define                WP_TCLR                 (1 << 0)
-#define                WP_TCRR                 (1 << 1)
-#define                WP_TLDR                 (1 << 2)
-#define                WP_TTGR                 (1 << 3)
-#define                WP_TMAR                 (1 << 4)
-#define                WP_TPIR                 (1 << 5)
-#define                WP_TNIR                 (1 << 6)
-#define                WP_TCVR                 (1 << 7)
-#define                WP_TOCR                 (1 << 8)
-#define                WP_TOWR                 (1 << 9)
-#define _OMAP_TIMER_MATCH_OFFSET       0x38
-#define _OMAP_TIMER_CAPTURE_OFFSET     0x3c
-#define _OMAP_TIMER_IF_CTRL_OFFSET     0x40
-#define _OMAP_TIMER_CAPTURE2_OFFSET            0x44    /* TCAR2, 34xx only */
-#define _OMAP_TIMER_TICK_POS_OFFSET            0x48    /* TPIR, 34xx only */
-#define _OMAP_TIMER_TICK_NEG_OFFSET            0x4c    /* TNIR, 34xx only */
-#define _OMAP_TIMER_TICK_COUNT_OFFSET          0x50    /* TCVR, 34xx only */
-#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET   0x54    /* TOCR, 34xx only */
-#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58    /* TOWR, 34xx only */
-
-/* register offsets with the write pending bit encoded */
-#define        WPSHIFT                                 16
-
-#define OMAP_TIMER_ID_REG                      (_OMAP_TIMER_ID_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_OCP_CFG_REG                 (_OMAP_TIMER_OCP_CFG_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_SYS_STAT_REG                        (_OMAP_TIMER_SYS_STAT_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_STAT_REG                    (_OMAP_TIMER_STAT_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_INT_EN_REG                  (_OMAP_TIMER_INT_EN_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_WAKEUP_EN_REG               (_OMAP_TIMER_WAKEUP_EN_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CTRL_REG                    (_OMAP_TIMER_CTRL_OFFSET \
-                                                       | (WP_TCLR << WPSHIFT))
-
-#define OMAP_TIMER_COUNTER_REG                 (_OMAP_TIMER_COUNTER_OFFSET \
-                                                       | (WP_TCRR << WPSHIFT))
-
-#define OMAP_TIMER_LOAD_REG                    (_OMAP_TIMER_LOAD_OFFSET \
-                                                       | (WP_TLDR << WPSHIFT))
-
-#define OMAP_TIMER_TRIGGER_REG                 (_OMAP_TIMER_TRIGGER_OFFSET \
-                                                       | (WP_TTGR << WPSHIFT))
-
-#define OMAP_TIMER_WRITE_PEND_REG              (_OMAP_TIMER_WRITE_PEND_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_MATCH_REG                   (_OMAP_TIMER_MATCH_OFFSET \
-                                                       | (WP_TMAR << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE_REG                 (_OMAP_TIMER_CAPTURE_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_IF_CTRL_REG                 (_OMAP_TIMER_IF_CTRL_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE2_REG                        (_OMAP_TIMER_CAPTURE2_OFFSET \
-                                                       | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_TICK_POS_REG                        (_OMAP_TIMER_TICK_POS_OFFSET \
-                                                       | (WP_TPIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_NEG_REG                        (_OMAP_TIMER_TICK_NEG_OFFSET \
-                                                       | (WP_TNIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_COUNT_REG              (_OMAP_TIMER_TICK_COUNT_OFFSET \
-                                                       | (WP_TCVR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_SET_REG                               \
-               (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG                             \
-               (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
-
-struct omap_dm_timer {
-       unsigned long phys_base;
-       int irq;
-#ifdef CONFIG_ARCH_OMAP2PLUS
-       struct clk *iclk, *fclk;
-#endif
-       void __iomem *io_base;
-       unsigned reserved:1;
-       unsigned enabled:1;
-       unsigned posted:1;
-};
-
 static int dm_timer_count;
 
 #ifdef CONFIG_ARCH_OMAP1
@@ -291,11 +170,7 @@ static spinlock_t dm_timer_lock;
  */
 static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
 {
-       if (timer->posted)
-               while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
-                               & (reg >> WPSHIFT))
-                       cpu_relax();
-       return readl(timer->io_base + (reg & 0xff));
+       return __omap_dm_timer_read(timer->io_base, reg, timer->posted);
 }
 
 /*
@@ -307,11 +182,7 @@ static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
 static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
                                                u32 value)
 {
-       if (timer->posted)
-               while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
-                               & (reg >> WPSHIFT))
-                       cpu_relax();
-       writel(value, timer->io_base + (reg & 0xff));
+       __omap_dm_timer_write(timer->io_base, reg, value, timer->posted);
 }
 
 static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
@@ -330,7 +201,7 @@ static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
 
 static void omap_dm_timer_reset(struct omap_dm_timer *timer)
 {
-       u32 l;
+       int autoidle = 0, wakeup = 0;
 
        if (!cpu_class_is_omap2() || timer != &dm_timers[0]) {
                omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
@@ -338,28 +209,21 @@ static void omap_dm_timer_reset(struct omap_dm_timer *timer)
        }
        omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
 
-       l = omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG);
-       l |= 0x02 << 3;  /* Set to smart-idle mode */
-       l |= 0x2 << 8;   /* Set clock activity to preserve f-clock on idle */
-
        /* Enable autoidle on OMAP2 / OMAP3 */
        if (cpu_is_omap24xx() || cpu_is_omap34xx())
-               l |= 0x1 << 0;
+               autoidle = 1;
 
        /*
         * Enable wake-up on OMAP2 CPUs.
         */
        if (cpu_class_is_omap2())
-               l |= 1 << 2;
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, l);
+               wakeup = 1;
 
-       /* Match hardware reset default of posted mode */
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
-                       OMAP_TIMER_CTRL_POSTED);
+       __omap_dm_timer_reset(timer->io_base, autoidle, wakeup);
        timer->posted = 1;
 }
 
-static void omap_dm_timer_prepare(struct omap_dm_timer *timer)
+void omap_dm_timer_prepare(struct omap_dm_timer *timer)
 {
        omap_dm_timer_enable(timer);
        omap_dm_timer_reset(timer);
@@ -531,25 +395,13 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_start);
 
 void omap_dm_timer_stop(struct omap_dm_timer *timer)
 {
-       u32 l;
+       unsigned long rate = 0;
 
-       l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-       if (l & OMAP_TIMER_CTRL_ST) {
-               l &= ~0x1;
-               omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
 #ifdef CONFIG_ARCH_OMAP2PLUS
-               /* Readback to make sure write has completed */
-               omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-                /*
-                 * Wait for functional clock period x 3.5 to make sure that
-                 * timer is stopped
-                 */
-               udelay(3500000 / clk_get_rate(timer->fclk) + 1);
+       rate = clk_get_rate(timer->fclk);
 #endif
-       }
-       /* Ack possibly pending interrupt */
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
-                       OMAP_TIMER_INT_OVERFLOW);
+
+       __omap_dm_timer_stop(timer->io_base, timer->posted, rate);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
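
The stop path above still waits 3.5 functional-clock periods; only the clk_get_rate() lookup moved out of the helper, and on OMAP1 builds the rate stays 0, which is harmless because the udelay() in __omap_dm_timer_stop() is compiled out without CONFIG_ARCH_OMAP2PLUS. A minimal standalone sketch of the delay arithmetic, assuming the 32 kHz functional clock selected in omap_dm_timer_reset():

#include <stdio.h>

int main(void)
{
	unsigned long rate = 32768;	/* assumed 32 kHz functional clock */
	/* 3.5 clock periods expressed in microseconds, rounded up */
	unsigned long us = 3500000 / rate + 1;

	printf("udelay(%lu)\n", us);	/* prints udelay(107) */
	return 0;
}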
 
@@ -572,22 +424,11 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
 
 int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
 {
-       int ret = -EINVAL;
-
        if (source < 0 || source >= 3)
                return -EINVAL;
 
-       clk_disable(timer->fclk);
-       ret = clk_set_parent(timer->fclk, dm_source_clocks[source]);
-       clk_enable(timer->fclk);
-
-       /*
-        * When the functional clock disappears, too quick writes seem
-        * to cause an abort. XXX Is this still necessary?
-        */
-       __delay(300000);
-
-       return ret;
+       return __omap_dm_timer_set_source(timer->fclk,
+                                               dm_source_clocks[source]);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
 
@@ -625,8 +466,7 @@ void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
        }
        l |= OMAP_TIMER_CTRL_ST;
 
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, load);
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+       __omap_dm_timer_load_start(timer->io_base, l, load, timer->posted);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start);
 
@@ -679,8 +519,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler);
 void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
                                  unsigned int value)
 {
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
+       __omap_dm_timer_int_enable(timer->io_base, value);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable);
 
@@ -696,17 +535,13 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_read_status);
 
 void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
 {
-       omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
+       __omap_dm_timer_write_status(timer->io_base, value);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);
 
 unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
 {
-       unsigned int l;
-
-       l = omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
-
-       return l;
+       return __omap_dm_timer_read_counter(timer->io_base, timer->posted);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter);
 
@@ -737,7 +572,7 @@ int omap_dm_timers_active(void)
 }
 EXPORT_SYMBOL_GPL(omap_dm_timers_active);
 
-int __init omap_dm_timer_init(void)
+static int __init omap_dm_timer_init(void)
 {
        struct omap_dm_timer *timer;
        int i, map_size = SZ_8K;        /* Module 4KB + L4 4KB except on omap1 */
@@ -790,8 +625,16 @@ int __init omap_dm_timer_init(void)
                        sprintf(clk_name, "gpt%d_fck", i + 1);
                        timer->fclk = clk_get(NULL, clk_name);
                }
+
+               /* One or two timers may be set up early for sys_timer */
+               if (sys_timer_reserved & (1 << i)) {
+                       timer->reserved = 1;
+                       timer->posted = 1;
+               }
 #endif
        }
 
        return 0;
 }
+
+arch_initcall(omap_dm_timer_init);
index 006e599..f57e064 100644 (file)
@@ -152,7 +152,7 @@ struct dpll_data {
        u16                     max_multiplier;
        u8                      last_rounded_n;
        u8                      min_divider;
-       u8                      max_divider;
+       u16                     max_divider;
        u8                      modes;
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
        void __iomem            *autoidle_reg;
index 5288130..4564cc6 100644 (file)
 struct sys_timer;
 
 extern void omap_map_common_io(void);
-extern struct sys_timer omap_timer;
+extern struct sys_timer omap1_timer;
+extern struct sys_timer omap2_timer;
+extern struct sys_timer omap3_timer;
+extern struct sys_timer omap3_secure_timer;
+extern struct sys_timer omap4_timer;
 extern bool omap_32k_timer_init(void);
 extern int __init omap_init_clocksource_32k(void);
 extern unsigned long long notrace omap_32k_sched_clock(void);
index d6c70d2..eb5d16c 100644 (file)
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
 #ifndef __ASM_ARCH_DMTIMER_H
 #define __ASM_ARCH_DMTIMER_H
 
  */
 #define OMAP_TIMER_IP_VERSION_1                        0x1
 struct omap_dm_timer;
-extern struct omap_dm_timer *gptimer_wakeup;
-extern struct sys_timer omap_timer;
 struct clk;
 
-int omap_dm_timer_init(void);
-
 struct omap_dm_timer *omap_dm_timer_request(void);
 struct omap_dm_timer *omap_dm_timer_request_specific(int timer_id);
 void omap_dm_timer_free(struct omap_dm_timer *timer);
@@ -93,5 +93,248 @@ void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value
 
 int omap_dm_timers_active(void);
 
+/*
+ * Do not use the defines below; they are intended only for dmtimer.c and
+ * sys_timer related code.
+ */
+
+/* register offsets */
+#define _OMAP_TIMER_ID_OFFSET          0x00
+#define _OMAP_TIMER_OCP_CFG_OFFSET     0x10
+#define _OMAP_TIMER_SYS_STAT_OFFSET    0x14
+#define _OMAP_TIMER_STAT_OFFSET                0x18
+#define _OMAP_TIMER_INT_EN_OFFSET      0x1c
+#define _OMAP_TIMER_WAKEUP_EN_OFFSET   0x20
+#define _OMAP_TIMER_CTRL_OFFSET                0x24
+#define                OMAP_TIMER_CTRL_GPOCFG          (1 << 14)
+#define                OMAP_TIMER_CTRL_CAPTMODE        (1 << 13)
+#define                OMAP_TIMER_CTRL_PT              (1 << 12)
+#define                OMAP_TIMER_CTRL_TCM_LOWTOHIGH   (0x1 << 8)
+#define                OMAP_TIMER_CTRL_TCM_HIGHTOLOW   (0x2 << 8)
+#define                OMAP_TIMER_CTRL_TCM_BOTHEDGES   (0x3 << 8)
+#define                OMAP_TIMER_CTRL_SCPWM           (1 << 7)
+#define                OMAP_TIMER_CTRL_CE              (1 << 6) /* compare enable */
+#define                OMAP_TIMER_CTRL_PRE             (1 << 5) /* prescaler enable */
+#define                OMAP_TIMER_CTRL_PTV_SHIFT       2 /* prescaler value shift */
+#define                OMAP_TIMER_CTRL_POSTED          (1 << 2)
+#define                OMAP_TIMER_CTRL_AR              (1 << 1) /* auto-reload enable */
+#define                OMAP_TIMER_CTRL_ST              (1 << 0) /* start timer */
+#define _OMAP_TIMER_COUNTER_OFFSET     0x28
+#define _OMAP_TIMER_LOAD_OFFSET                0x2c
+#define _OMAP_TIMER_TRIGGER_OFFSET     0x30
+#define _OMAP_TIMER_WRITE_PEND_OFFSET  0x34
+#define                WP_NONE                 0       /* no write pending bit */
+#define                WP_TCLR                 (1 << 0)
+#define                WP_TCRR                 (1 << 1)
+#define                WP_TLDR                 (1 << 2)
+#define                WP_TTGR                 (1 << 3)
+#define                WP_TMAR                 (1 << 4)
+#define                WP_TPIR                 (1 << 5)
+#define                WP_TNIR                 (1 << 6)
+#define                WP_TCVR                 (1 << 7)
+#define                WP_TOCR                 (1 << 8)
+#define                WP_TOWR                 (1 << 9)
+#define _OMAP_TIMER_MATCH_OFFSET       0x38
+#define _OMAP_TIMER_CAPTURE_OFFSET     0x3c
+#define _OMAP_TIMER_IF_CTRL_OFFSET     0x40
+#define _OMAP_TIMER_CAPTURE2_OFFSET            0x44    /* TCAR2, 34xx only */
+#define _OMAP_TIMER_TICK_POS_OFFSET            0x48    /* TPIR, 34xx only */
+#define _OMAP_TIMER_TICK_NEG_OFFSET            0x4c    /* TNIR, 34xx only */
+#define _OMAP_TIMER_TICK_COUNT_OFFSET          0x50    /* TCVR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET   0x54    /* TOCR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58    /* TOWR, 34xx only */
+
+/* register offsets with the write pending bit encoded */
+#define        WPSHIFT                                 16
+
+#define OMAP_TIMER_ID_REG                      (_OMAP_TIMER_ID_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_OCP_CFG_REG                 (_OMAP_TIMER_OCP_CFG_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_SYS_STAT_REG                        (_OMAP_TIMER_SYS_STAT_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_STAT_REG                    (_OMAP_TIMER_STAT_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_INT_EN_REG                  (_OMAP_TIMER_INT_EN_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_WAKEUP_EN_REG               (_OMAP_TIMER_WAKEUP_EN_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CTRL_REG                    (_OMAP_TIMER_CTRL_OFFSET \
+                                                       | (WP_TCLR << WPSHIFT))
+
+#define OMAP_TIMER_COUNTER_REG                 (_OMAP_TIMER_COUNTER_OFFSET \
+                                                       | (WP_TCRR << WPSHIFT))
+
+#define OMAP_TIMER_LOAD_REG                    (_OMAP_TIMER_LOAD_OFFSET \
+                                                       | (WP_TLDR << WPSHIFT))
+
+#define OMAP_TIMER_TRIGGER_REG                 (_OMAP_TIMER_TRIGGER_OFFSET \
+                                                       | (WP_TTGR << WPSHIFT))
+
+#define OMAP_TIMER_WRITE_PEND_REG              (_OMAP_TIMER_WRITE_PEND_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_MATCH_REG                   (_OMAP_TIMER_MATCH_OFFSET \
+                                                       | (WP_TMAR << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE_REG                 (_OMAP_TIMER_CAPTURE_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_IF_CTRL_REG                 (_OMAP_TIMER_IF_CTRL_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE2_REG                        (_OMAP_TIMER_CAPTURE2_OFFSET \
+                                                       | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_TICK_POS_REG                        (_OMAP_TIMER_TICK_POS_OFFSET \
+                                                       | (WP_TPIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_NEG_REG                        (_OMAP_TIMER_TICK_NEG_OFFSET \
+                                                       | (WP_TNIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_COUNT_REG              (_OMAP_TIMER_TICK_COUNT_OFFSET \
+                                                       | (WP_TCVR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_SET_REG                               \
+               (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG                             \
+               (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
+
+struct omap_dm_timer {
+       unsigned long phys_base;
+       int irq;
+#ifdef CONFIG_ARCH_OMAP2PLUS
+       struct clk *iclk, *fclk;
+#endif
+       void __iomem *io_base;
+       unsigned long rate;
+       unsigned reserved:1;
+       unsigned enabled:1;
+       unsigned posted:1;
+};
+
+extern u32 sys_timer_reserved;
+void omap_dm_timer_prepare(struct omap_dm_timer *timer);
+
+static inline u32 __omap_dm_timer_read(void __iomem *base, u32 reg,
+                                               int posted)
+{
+       if (posted)
+               while (__raw_readl(base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
+                               & (reg >> WPSHIFT))
+                       cpu_relax();
+
+       return __raw_readl(base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_write(void __iomem *base, u32 reg, u32 val,
+                                               int posted)
+{
+       if (posted)
+               while (__raw_readl(base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
+                               & (reg >> WPSHIFT))
+                       cpu_relax();
+
+       __raw_writel(val, base + (reg & 0xff));
+}
+
+/* Assumes the source clock has been set by caller */
+static inline void __omap_dm_timer_reset(void __iomem *base, int autoidle,
+                                               int wakeup)
+{
+       u32 l;
+
+       l = __omap_dm_timer_read(base, OMAP_TIMER_OCP_CFG_REG, 0);
+       l |= 0x02 << 3;  /* Set to smart-idle mode */
+       l |= 0x2 << 8;   /* Set clock activity to preserve f-clock on idle */
+
+       if (autoidle)
+               l |= 0x1 << 0;
+
+       if (wakeup)
+               l |= 1 << 2;
+
+       __omap_dm_timer_write(base, OMAP_TIMER_OCP_CFG_REG, l, 0);
+
+       /* Match hardware reset default of posted mode */
+       __omap_dm_timer_write(base, OMAP_TIMER_IF_CTRL_REG,
+                                       OMAP_TIMER_CTRL_POSTED, 0);
+}
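
For reference, the OCP_CFG value this helper composes can be checked in isolation. A small user-space sketch, assuming the register reads back as 0 and that both autoidle and wakeup are requested (the OMAP2/OMAP3 case in omap_dm_timer_reset() above):

#include <stdio.h>

int main(void)
{
	unsigned int l = 0;	/* assume OCP_CFG read back as 0 */

	l |= 0x02 << 3;		/* smart-idle mode */
	l |= 0x2 << 8;		/* keep the f-clock active on idle */
	l |= 0x1 << 0;		/* autoidle (OMAP2/OMAP3 only) */
	l |= 1 << 2;		/* wake-up enable (OMAP2+ only) */

	printf("OCP_CFG = 0x%x\n", l);	/* prints 0x215 */
	return 0;
}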
+
+static inline int __omap_dm_timer_set_source(struct clk *timer_fck,
+                                               struct clk *parent)
+{
+       int ret;
+
+       clk_disable(timer_fck);
+       ret = clk_set_parent(timer_fck, parent);
+       clk_enable(timer_fck);
+
+       /*
+        * When the functional clock disappears, too quick writes seem
+        * to cause an abort. XXX Is this still necessary?
+        */
+       __delay(300000);
+
+       return ret;
+}
+
+static inline void __omap_dm_timer_stop(void __iomem *base, int posted,
+                                               unsigned long rate)
+{
+       u32 l;
+
+       l = __omap_dm_timer_read(base, OMAP_TIMER_CTRL_REG, posted);
+       if (l & OMAP_TIMER_CTRL_ST) {
+               l &= ~0x1;
+               __omap_dm_timer_write(base, OMAP_TIMER_CTRL_REG, l, posted);
+#ifdef CONFIG_ARCH_OMAP2PLUS
+               /* Readback to make sure write has completed */
+               __omap_dm_timer_read(base, OMAP_TIMER_CTRL_REG, posted);
+               /*
+                * Wait for functional clock period x 3.5 to make sure that
+                * timer is stopped
+                */
+               udelay(3500000 / rate + 1);
+#endif
+       }
+
+       /* Ack possibly pending interrupt */
+       __omap_dm_timer_write(base, OMAP_TIMER_STAT_REG,
+                                       OMAP_TIMER_INT_OVERFLOW, 0);
+}
+
+static inline void __omap_dm_timer_load_start(void __iomem *base, u32 ctrl,
+                                               unsigned int load, int posted)
+{
+       __omap_dm_timer_write(base, OMAP_TIMER_COUNTER_REG, load, posted);
+       __omap_dm_timer_write(base, OMAP_TIMER_CTRL_REG, ctrl, posted);
+}
+
+static inline void __omap_dm_timer_int_enable(void __iomem *base,
+                                               unsigned int value)
+{
+       __omap_dm_timer_write(base, OMAP_TIMER_INT_EN_REG, value, 0);
+       __omap_dm_timer_write(base, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+}
+
+static inline unsigned int __omap_dm_timer_read_counter(void __iomem *base,
+                                                       int posted)
+{
+       return __omap_dm_timer_read(base, OMAP_TIMER_COUNTER_REG, posted);
+}
+
+static inline void __omap_dm_timer_write_status(void __iomem *base,
+                                               unsigned int value)
+{
+       __omap_dm_timer_write(base, OMAP_TIMER_STAT_REG, value, 0);
+}
 
 #endif /* __ASM_ARCH_DMTIMER_H */
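
The OMAP_TIMER_*_REG constants above pack two values into one: the MMIO offset in the low byte and, above WPSHIFT, the write-pending bit that posted-mode accesses must poll. This is why __omap_dm_timer_read() and __omap_dm_timer_write() mask with 0xff for the access and shift by WPSHIFT for the pending test. A standalone illustration of the encoding (mirroring the defines, not kernel code):

#include <stdio.h>

#define WPSHIFT			16
#define WP_TCLR			(1 << 0)
#define _TIMER_CTRL_OFFSET	0x24
#define TIMER_CTRL_REG		(_TIMER_CTRL_OFFSET | (WP_TCLR << WPSHIFT))

int main(void)
{
	unsigned int reg = TIMER_CTRL_REG;

	/* low byte -> register offset, upper half -> write-pending bit to poll */
	printf("offset=0x%02x wp_mask=0x%x\n", reg & 0xff, reg >> WPSHIFT);
	return 0;	/* prints offset=0x24 wp_mask=0x1 */
}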
index 5a25098..c884320 100644 (file)
 #define INTCPS_NR_IRQS         96
 
 #ifndef __ASSEMBLY__
-extern void omap_init_irq(void);
+extern void __iomem *omap_irq_base;
+void omap1_init_irq(void);
+void omap2_init_irq(void);
+void omap3_init_irq(void);
+void ti816x_init_irq(void);
 extern int omap_irq_pending(void);
 void omap_intc_save_context(void);
 void omap_intc_restore_context(void);
index f8f690a..9882c65 100644 (file)
@@ -24,7 +24,6 @@
 #ifndef __ASM_ARCH_OMAP_MCBSP_H
 #define __ASM_ARCH_OMAP_MCBSP_H
 
-#include <linux/completion.h>
 #include <linux/spinlock.h>
 
 #include <mach/hardware.h>
@@ -34,7 +33,7 @@
 #define OMAP_MCBSP_PLATFORM_DEVICE(port_nr)            \
 static struct platform_device omap_mcbsp##port_nr = {  \
        .name   = "omap-mcbsp-dai",                     \
-       .id     = OMAP_MCBSP##port_nr,                  \
+       .id     = port_nr - 1,                  \
 }
 
 #define MCBSP_CONFIG_TYPE2     0x2
@@ -332,18 +331,6 @@ struct omap_mcbsp_reg_cfg {
        u16 rccr;
 };
 
-typedef enum {
-       OMAP_MCBSP1 = 0,
-       OMAP_MCBSP2,
-       OMAP_MCBSP3,
-       OMAP_MCBSP4,
-       OMAP_MCBSP5
-} omap_mcbsp_id;
-
-typedef int __bitwise omap_mcbsp_io_type_t;
-#define OMAP_MCBSP_IRQ_IO ((__force omap_mcbsp_io_type_t) 1)
-#define OMAP_MCBSP_POLL_IO ((__force omap_mcbsp_io_type_t) 2)
-
 typedef enum {
        OMAP_MCBSP_WORD_8 = 0,
        OMAP_MCBSP_WORD_12,
@@ -353,38 +340,6 @@ typedef enum {
        OMAP_MCBSP_WORD_32,
 } omap_mcbsp_word_length;
 
-typedef enum {
-       OMAP_MCBSP_CLK_RISING = 0,
-       OMAP_MCBSP_CLK_FALLING,
-} omap_mcbsp_clk_polarity;
-
-typedef enum {
-       OMAP_MCBSP_FS_ACTIVE_HIGH = 0,
-       OMAP_MCBSP_FS_ACTIVE_LOW,
-} omap_mcbsp_fs_polarity;
-
-typedef enum {
-       OMAP_MCBSP_CLK_STP_MODE_NO_DELAY = 0,
-       OMAP_MCBSP_CLK_STP_MODE_DELAY,
-} omap_mcbsp_clk_stp_mode;
-
-
-/******* SPI specific mode **********/
-typedef enum {
-       OMAP_MCBSP_SPI_MASTER = 0,
-       OMAP_MCBSP_SPI_SLAVE,
-} omap_mcbsp_spi_mode;
-
-struct omap_mcbsp_spi_cfg {
-       omap_mcbsp_spi_mode             spi_mode;
-       omap_mcbsp_clk_polarity         rx_clock_polarity;
-       omap_mcbsp_clk_polarity         tx_clock_polarity;
-       omap_mcbsp_fs_polarity          fsx_polarity;
-       u8                              clk_div;
-       omap_mcbsp_clk_stp_mode         clk_stp_mode;
-       omap_mcbsp_word_length          word_length;
-};
-
 /* Platform specific configuration */
 struct omap_mcbsp_ops {
        void (*request)(unsigned int);
@@ -422,25 +377,13 @@ struct omap_mcbsp {
        void __iomem *io_base;
        u8 id;
        u8 free;
-       omap_mcbsp_word_length rx_word_length;
-       omap_mcbsp_word_length tx_word_length;
 
-       omap_mcbsp_io_type_t io_type; /* IRQ or poll */
-       /* IRQ based TX/RX */
        int rx_irq;
        int tx_irq;
 
        /* DMA stuff */
        u8 dma_rx_sync;
-       short dma_rx_lch;
        u8 dma_tx_sync;
-       short dma_tx_lch;
-
-       /* Completion queues */
-       struct completion tx_irq_completion;
-       struct completion rx_irq_completion;
-       struct completion tx_dma_completion;
-       struct completion rx_dma_completion;
 
        /* Protect the field .free, while checking if the mcbsp is in use */
        spinlock_t lock;
@@ -499,24 +442,9 @@ int omap_mcbsp_request(unsigned int id);
 void omap_mcbsp_free(unsigned int id);
 void omap_mcbsp_start(unsigned int id, int tx, int rx);
 void omap_mcbsp_stop(unsigned int id, int tx, int rx);
-void omap_mcbsp_xmit_word(unsigned int id, u32 word);
-u32 omap_mcbsp_recv_word(unsigned int id);
-
-int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int length);
-int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int length);
-int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word);
-int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 * word);
-
 
 /* McBSP functional clock source changing function */
 extern int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id);
-/* SPI specific API */
-void omap_mcbsp_set_spi_mode(unsigned int id, const struct omap_mcbsp_spi_cfg * spi_cfg);
-
-/* Polled read/write functions */
-int omap_mcbsp_pollread(unsigned int id, u16 * buf);
-int omap_mcbsp_pollwrite(unsigned int id, u16 buf);
-int omap_mcbsp_set_io_type(unsigned int id, omap_mcbsp_io_type_t io_type);
 
 /* McBSP signal muxing API */
 void omap2_mcbsp1_mux_clkr_src(u8 mux);
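
With the OMAP_MCBSP1..OMAP_MCBSP5 enum gone, OMAP_MCBSP_PLATFORM_DEVICE() now derives the platform device id directly from the port number, which yields the same zero-based ids as before. A quick user-space check of the expansion, using a mock structure in place of struct platform_device:

#include <stdio.h>

struct mock_pdev { const char *name; int id; };	/* stand-in, not the kernel type */

#define OMAP_MCBSP_PLATFORM_DEVICE(port_nr)		\
static struct mock_pdev omap_mcbsp##port_nr = {	\
	.name	= "omap-mcbsp-dai",			\
	.id	= port_nr - 1,				\
}

OMAP_MCBSP_PLATFORM_DEVICE(1);
OMAP_MCBSP_PLATFORM_DEVICE(2);

int main(void)
{
	/* ids stay zero-based: McBSP1 -> 0, McBSP2 -> 1 */
	printf("%s.%d %s.%d\n", omap_mcbsp1.name, omap_mcbsp1.id,
	       omap_mcbsp2.name, omap_mcbsp2.id);
	return 0;
}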
index d86d1ec..67fc506 100644 (file)
@@ -19,15 +19,11 @@ enum nand_io {
 };
 
 struct omap_nand_platform_data {
-       unsigned int            options;
        int                     cs;
-       int                     gpio_irq;
        struct mtd_partition    *parts;
        struct gpmc_timings     *gpmc_t;
        int                     nr_parts;
-       int                     (*nand_setup)(void);
-       int                     (*dev_ready)(struct omap_nand_platform_data *);
-       int                     dma_channel;
+       bool                    dev_ready;
        int                     gpmc_irq;
        enum nand_io            xfer_type;
        unsigned long           phys_base;
index c0a7520..0840df8 100644 (file)
  * framework starts.  The "_if_" is to avoid name collisions with the
  * PM idle-loop code.
  */
-#ifdef CONFIG_OMAP_PM_NONE
-#define omap_pm_if_early_init() 0
-#else
 int __init omap_pm_if_early_init(void);
-#endif
 
 /**
  * omap_pm_if_init - OMAP PM init code called after clock fw init
@@ -52,11 +48,7 @@ int __init omap_pm_if_early_init(void);
  * The main initialization code.  OPP tables are passed in here.  The
  * "_if_" is to avoid name collisions with the PM idle-loop code.
  */
-#ifdef CONFIG_OMAP_PM_NONE
-#define omap_pm_if_init() 0
-#else
 int __init omap_pm_if_init(void);
-#endif
 
 /**
  * omap_pm_if_exit - OMAP PM exit code
index 1adea9c..ce06ac6 100644 (file)
@@ -77,7 +77,6 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
 #define HWMOD_IDLEMODE_FORCE           (1 << 0)
 #define HWMOD_IDLEMODE_NO              (1 << 1)
 #define HWMOD_IDLEMODE_SMART           (1 << 2)
-/* Slave idle mode flag only */
 #define HWMOD_IDLEMODE_SMART_WKUP      (1 << 3)
 
 /**
@@ -98,7 +97,7 @@ struct omap_hwmod_mux_info {
 /**
  * struct omap_hwmod_irq_info - MPU IRQs used by the hwmod
  * @name: name of the IRQ channel (module local name)
- * @irq_ch: IRQ channel ID
+ * @irq: IRQ channel ID (non-negative; an entry of -1 terminates the array)
  *
  * @name should be something short, e.g., "tx" or "rx".  It is for use
  * by platform_get_resource_byname().  It is defined locally to the
@@ -106,13 +105,13 @@ struct omap_hwmod_mux_info {
  */
 struct omap_hwmod_irq_info {
        const char      *name;
-       u16             irq;
+       s16             irq;
 };
 
 /**
  * struct omap_hwmod_dma_info - DMA channels used by the hwmod
  * @name: name of the DMA channel (module local name)
- * @dma_req: DMA request ID
+ * @dma_req: DMA request ID (non-negative; an entry of -1 terminates the array)
  *
  * @name should be something short, e.g., "tx" or "rx".  It is for use
  * by platform_get_resource_byname().  It is defined locally to the
@@ -120,7 +119,7 @@ struct omap_hwmod_irq_info {
  */
 struct omap_hwmod_dma_info {
        const char      *name;
-       u16             dma_req;
+       s16             dma_req;
 };
 
 /**
@@ -220,7 +219,6 @@ struct omap_hwmod_addr_space {
  * @clk: interface clock: OMAP clock name
  * @_clk: pointer to the interface struct clk (filled in at runtime)
  * @fw: interface firewall data
- * @addr_cnt: ARRAY_SIZE(@addr)
  * @width: OCP data width
  * @user: initiators using this interface (see OCP_USER_* macros above)
  * @flags: OCP interface flags (see OCPIF_* macros above)
@@ -239,7 +237,6 @@ struct omap_hwmod_ocp_if {
        union {
                struct omap_hwmod_omap2_firewall omap2;
        }                               fw;
-       u8                              addr_cnt;
        u8                              width;
        u8                              user;
        u8                              flags;
@@ -258,6 +255,7 @@ struct omap_hwmod_ocp_if {
 #define MSTANDBY_FORCE         (HWMOD_IDLEMODE_FORCE << MASTER_STANDBY_SHIFT)
 #define MSTANDBY_NO            (HWMOD_IDLEMODE_NO << MASTER_STANDBY_SHIFT)
 #define MSTANDBY_SMART         (HWMOD_IDLEMODE_SMART << MASTER_STANDBY_SHIFT)
+#define MSTANDBY_SMART_WKUP    (HWMOD_IDLEMODE_SMART_WKUP << MASTER_STANDBY_SHIFT)
 
 /* omap_hwmod_sysconfig.sysc_flags capability flags */
 #define SYSC_HAS_AUTOIDLE      (1 << 0)
@@ -468,8 +466,8 @@ struct omap_hwmod_class {
  * @name: name of the hwmod
  * @class: struct omap_hwmod_class * to the class of this hwmod
  * @od: struct omap_device currently associated with this hwmod (internal use)
- * @mpu_irqs: ptr to an array of MPU IRQs (see also mpu_irqs_cnt)
- * @sdma_reqs: ptr to an array of System DMA request IDs (see sdma_reqs_cnt)
+ * @mpu_irqs: ptr to an array of MPU IRQs
+ * @sdma_reqs: ptr to an array of System DMA request IDs
  * @prcm: PRCM data pertaining to this hwmod
  * @main_clk: main clock: OMAP clock name
  * @_clk: pointer to the main struct clk (filled in at runtime)
@@ -482,8 +480,6 @@ struct omap_hwmod_class {
  * @_sysc_cache: internal-use hwmod flags
  * @_mpu_rt_va: cached register target start address (internal use)
  * @_mpu_port_index: cached MPU register target slave ID (internal use)
- * @mpu_irqs_cnt: number of @mpu_irqs
- * @sdma_reqs_cnt: number of @sdma_reqs
  * @opt_clks_cnt: number of @opt_clks
  * @master_cnt: number of @master entries
  * @slaves_cnt: number of @slave entries
@@ -531,8 +527,6 @@ struct omap_hwmod {
        u16                             flags;
        u8                              _mpu_port_index;
        u8                              response_lat;
-       u8                              mpu_irqs_cnt;
-       u8                              sdma_reqs_cnt;
        u8                              rst_lines_cnt;
        u8                              opt_clks_cnt;
        u8                              masters_cnt;
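
Dropping mpu_irqs_cnt and sdma_reqs_cnt implies the IRQ and DMA arrays are now terminated by a -1 entry, which is also why the irq and dma_req fields change from u16 to s16 (they must be able to hold -1). A standalone sketch of the sentinel convention, with made-up names and numbers:

#include <stdio.h>

struct irq_info { const char *name; short irq; };	/* mirrors omap_hwmod_irq_info */

static const struct irq_info timer_irqs[] = {
	{ .name = "tint", .irq = 37 },	/* hypothetical entry */
	{ .irq = -1 },			/* terminator replaces the old count field */
};

int main(void)
{
	const struct irq_info *p;

	for (p = timer_irqs; p->irq != -1; p++)
		printf("%s -> %d\n", p->name, p->irq);
	return 0;
}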
index 5587acf..3c1fbdc 100644 (file)
@@ -16,8 +16,6 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
-#include <linux/wait.h>
-#include <linux/completion.h>
 #include <linux/interrupt.h>
 #include <linux/err.h>
 #include <linux/clk.h>
@@ -25,7 +23,6 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 
-#include <plat/dma.h>
 #include <plat/mcbsp.h>
 #include <plat/omap_device.h>
 #include <linux/pm_runtime.h>
@@ -136,8 +133,6 @@ static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
                        irqst_spcr2);
                /* Writing zero to XSYNC_ERR clears the IRQ */
                MCBSP_WRITE(mcbsp_tx, SPCR2, MCBSP_READ_CACHE(mcbsp_tx, SPCR2));
-       } else {
-               complete(&mcbsp_tx->tx_irq_completion);
        }
 
        return IRQ_HANDLED;
@@ -156,41 +151,11 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
                        irqst_spcr1);
                /* Writing zero to RSYNC_ERR clears the IRQ */
                MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
-       } else {
-               complete(&mcbsp_rx->rx_irq_completion);
        }
 
        return IRQ_HANDLED;
 }
 
-static void omap_mcbsp_tx_dma_callback(int lch, u16 ch_status, void *data)
-{
-       struct omap_mcbsp *mcbsp_dma_tx = data;
-
-       dev_dbg(mcbsp_dma_tx->dev, "TX DMA callback : 0x%x\n",
-               MCBSP_READ(mcbsp_dma_tx, SPCR2));
-
-       /* We can free the channels */
-       omap_free_dma(mcbsp_dma_tx->dma_tx_lch);
-       mcbsp_dma_tx->dma_tx_lch = -1;
-
-       complete(&mcbsp_dma_tx->tx_dma_completion);
-}
-
-static void omap_mcbsp_rx_dma_callback(int lch, u16 ch_status, void *data)
-{
-       struct omap_mcbsp *mcbsp_dma_rx = data;
-
-       dev_dbg(mcbsp_dma_rx->dev, "RX DMA callback : 0x%x\n",
-               MCBSP_READ(mcbsp_dma_rx, SPCR2));
-
-       /* We can free the channels */
-       omap_free_dma(mcbsp_dma_rx->dma_rx_lch);
-       mcbsp_dma_rx->dma_rx_lch = -1;
-
-       complete(&mcbsp_dma_rx->rx_dma_completion);
-}
-
 /*
  * omap_mcbsp_config simply write a config to the
  * appropriate McBSP.
@@ -758,37 +723,6 @@ static inline void omap_st_start(struct omap_mcbsp *mcbsp) {}
 static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {}
 #endif
 
-/*
- * We can choose between IRQ based or polled IO.
- * This needs to be called before omap_mcbsp_request().
- */
-int omap_mcbsp_set_io_type(unsigned int id, omap_mcbsp_io_type_t io_type)
-{
-       struct omap_mcbsp *mcbsp;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return -ENODEV;
-       }
-       mcbsp = id_to_mcbsp_ptr(id);
-
-       spin_lock(&mcbsp->lock);
-
-       if (!mcbsp->free) {
-               dev_err(mcbsp->dev, "McBSP%d is currently in use\n",
-                       mcbsp->id);
-               spin_unlock(&mcbsp->lock);
-               return -EINVAL;
-       }
-
-       mcbsp->io_type = io_type;
-
-       spin_unlock(&mcbsp->lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_set_io_type);
-
 int omap_mcbsp_request(unsigned int id)
 {
        struct omap_mcbsp *mcbsp;
@@ -833,29 +767,24 @@ int omap_mcbsp_request(unsigned int id)
        MCBSP_WRITE(mcbsp, SPCR1, 0);
        MCBSP_WRITE(mcbsp, SPCR2, 0);
 
-       if (mcbsp->io_type == OMAP_MCBSP_IRQ_IO) {
-               /* We need to get IRQs here */
-               init_completion(&mcbsp->tx_irq_completion);
-               err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler,
-                                       0, "McBSP", (void *)mcbsp);
+       err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler,
+                               0, "McBSP", (void *)mcbsp);
+       if (err != 0) {
+               dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
+                               "for McBSP%d\n", mcbsp->tx_irq,
+                               mcbsp->id);
+               goto err_clk_disable;
+       }
+
+       if (mcbsp->rx_irq) {
+               err = request_irq(mcbsp->rx_irq,
+                               omap_mcbsp_rx_irq_handler,
+                               0, "McBSP", (void *)mcbsp);
                if (err != 0) {
-                       dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
-                                       "for McBSP%d\n", mcbsp->tx_irq,
+                       dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
+                                       "for McBSP%d\n", mcbsp->rx_irq,
                                        mcbsp->id);
-                       goto err_clk_disable;
-               }
-
-               if (mcbsp->rx_irq) {
-                       init_completion(&mcbsp->rx_irq_completion);
-                       err = request_irq(mcbsp->rx_irq,
-                                       omap_mcbsp_rx_irq_handler,
-                                       0, "McBSP", (void *)mcbsp);
-                       if (err != 0) {
-                               dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
-                                               "for McBSP%d\n", mcbsp->rx_irq,
-                                               mcbsp->id);
-                               goto err_free_irq;
-                       }
+                       goto err_free_irq;
                }
        }
 
@@ -901,12 +830,9 @@ void omap_mcbsp_free(unsigned int id)
 
        pm_runtime_put_sync(mcbsp->dev);
 
-       if (mcbsp->io_type == OMAP_MCBSP_IRQ_IO) {
-               /* Free IRQs */
-               if (mcbsp->rx_irq)
-                       free_irq(mcbsp->rx_irq, (void *)mcbsp);
-               free_irq(mcbsp->tx_irq, (void *)mcbsp);
-       }
+       if (mcbsp->rx_irq)
+               free_irq(mcbsp->rx_irq, (void *)mcbsp);
+       free_irq(mcbsp->tx_irq, (void *)mcbsp);
 
        reg_cache = mcbsp->reg_cache;
 
@@ -943,9 +869,6 @@ void omap_mcbsp_start(unsigned int id, int tx, int rx)
        if (cpu_is_omap34xx())
                omap_st_start(mcbsp);
 
-       mcbsp->rx_word_length = (MCBSP_READ_CACHE(mcbsp, RCR1) >> 5) & 0x7;
-       mcbsp->tx_word_length = (MCBSP_READ_CACHE(mcbsp, XCR1) >> 5) & 0x7;
-
        /* Only enable SRG, if McBSP is master */
        w = MCBSP_READ_CACHE(mcbsp, PCR0);
        if (w & (FSXM | FSRM | CLKXM | CLKRM))
@@ -1043,485 +966,6 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx)
 }
 EXPORT_SYMBOL(omap_mcbsp_stop);
 
-/* polled mcbsp i/o operations */
-int omap_mcbsp_pollwrite(unsigned int id, u16 buf)
-{
-       struct omap_mcbsp *mcbsp;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return -ENODEV;
-       }
-
-       mcbsp = id_to_mcbsp_ptr(id);
-
-       MCBSP_WRITE(mcbsp, DXR1, buf);
-       /* if frame sync error - clear the error */
-       if (MCBSP_READ(mcbsp, SPCR2) & XSYNC_ERR) {
-               /* clear error */
-               MCBSP_WRITE(mcbsp, SPCR2, MCBSP_READ_CACHE(mcbsp, SPCR2));
-               /* resend */
-               return -1;
-       } else {
-               /* wait for transmit confirmation */
-               int attemps = 0;
-               while (!(MCBSP_READ(mcbsp, SPCR2) & XRDY)) {
-                       if (attemps++ > 1000) {
-                               MCBSP_WRITE(mcbsp, SPCR2,
-                                               MCBSP_READ_CACHE(mcbsp, SPCR2) &
-                                               (~XRST));
-                               udelay(10);
-                               MCBSP_WRITE(mcbsp, SPCR2,
-                                               MCBSP_READ_CACHE(mcbsp, SPCR2) |
-                                               (XRST));
-                               udelay(10);
-                               dev_err(mcbsp->dev, "Could not write to"
-                                       " McBSP%d Register\n", mcbsp->id);
-                               return -2;
-                       }
-               }
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_pollwrite);
-
-int omap_mcbsp_pollread(unsigned int id, u16 *buf)
-{
-       struct omap_mcbsp *mcbsp;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return -ENODEV;
-       }
-       mcbsp = id_to_mcbsp_ptr(id);
-
-       /* if frame sync error - clear the error */
-       if (MCBSP_READ(mcbsp, SPCR1) & RSYNC_ERR) {
-               /* clear error */
-               MCBSP_WRITE(mcbsp, SPCR1, MCBSP_READ_CACHE(mcbsp, SPCR1));
-               /* resend */
-               return -1;
-       } else {
-               /* wait for receive confirmation */
-               int attemps = 0;
-               while (!(MCBSP_READ(mcbsp, SPCR1) & RRDY)) {
-                       if (attemps++ > 1000) {
-                               MCBSP_WRITE(mcbsp, SPCR1,
-                                               MCBSP_READ_CACHE(mcbsp, SPCR1) &
-                                               (~RRST));
-                               udelay(10);
-                               MCBSP_WRITE(mcbsp, SPCR1,
-                                               MCBSP_READ_CACHE(mcbsp, SPCR1) |
-                                               (RRST));
-                               udelay(10);
-                               dev_err(mcbsp->dev, "Could not read from"
-                                       " McBSP%d Register\n", mcbsp->id);
-                               return -2;
-                       }
-               }
-       }
-       *buf = MCBSP_READ(mcbsp, DRR1);
-
-       return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_pollread);
-
-/*
- * IRQ based word transmission.
- */
-void omap_mcbsp_xmit_word(unsigned int id, u32 word)
-{
-       struct omap_mcbsp *mcbsp;
-       omap_mcbsp_word_length word_length;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return;
-       }
-
-       mcbsp = id_to_mcbsp_ptr(id);
-       word_length = mcbsp->tx_word_length;
-
-       wait_for_completion(&mcbsp->tx_irq_completion);
-
-       if (word_length > OMAP_MCBSP_WORD_16)
-               MCBSP_WRITE(mcbsp, DXR2, word >> 16);
-       MCBSP_WRITE(mcbsp, DXR1, word & 0xffff);
-}
-EXPORT_SYMBOL(omap_mcbsp_xmit_word);
-
-u32 omap_mcbsp_recv_word(unsigned int id)
-{
-       struct omap_mcbsp *mcbsp;
-       u16 word_lsb, word_msb = 0;
-       omap_mcbsp_word_length word_length;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return -ENODEV;
-       }
-       mcbsp = id_to_mcbsp_ptr(id);
-
-       word_length = mcbsp->rx_word_length;
-
-       wait_for_completion(&mcbsp->rx_irq_completion);
-
-       if (word_length > OMAP_MCBSP_WORD_16)
-               word_msb = MCBSP_READ(mcbsp, DRR2);
-       word_lsb = MCBSP_READ(mcbsp, DRR1);
-
-       return (word_lsb | (word_msb << 16));
-}
-EXPORT_SYMBOL(omap_mcbsp_recv_word);
-
-int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word)
-{
-       struct omap_mcbsp *mcbsp;
-       omap_mcbsp_word_length tx_word_length;
-       omap_mcbsp_word_length rx_word_length;
-       u16 spcr2, spcr1, attempts = 0, word_lsb, word_msb = 0;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return -ENODEV;
-       }
-       mcbsp = id_to_mcbsp_ptr(id);
-       tx_word_length = mcbsp->tx_word_length;
-       rx_word_length = mcbsp->rx_word_length;
-
-       if (tx_word_length != rx_word_length)
-               return -EINVAL;
-
-       /* First we wait for the transmitter to be ready */
-       spcr2 = MCBSP_READ(mcbsp, SPCR2);
-       while (!(spcr2 & XRDY)) {
-               spcr2 = MCBSP_READ(mcbsp, SPCR2);
-               if (attempts++ > 1000) {
-                       /* We must reset the transmitter */
-                       MCBSP_WRITE(mcbsp, SPCR2,
-                                   MCBSP_READ_CACHE(mcbsp, SPCR2) & (~XRST));
-                       udelay(10);
-                       MCBSP_WRITE(mcbsp, SPCR2,
-                                   MCBSP_READ_CACHE(mcbsp, SPCR2) | XRST);
-                       udelay(10);
-                       dev_err(mcbsp->dev, "McBSP%d transmitter not "
-                               "ready\n", mcbsp->id);
-                       return -EAGAIN;
-               }
-       }
-
-       /* Now we can push the data */
-       if (tx_word_length > OMAP_MCBSP_WORD_16)
-               MCBSP_WRITE(mcbsp, DXR2, word >> 16);
-       MCBSP_WRITE(mcbsp, DXR1, word & 0xffff);
-
-       /* We wait for the receiver to be ready */
-       spcr1 = MCBSP_READ(mcbsp, SPCR1);
-       while (!(spcr1 & RRDY)) {
-               spcr1 = MCBSP_READ(mcbsp, SPCR1);
-               if (attempts++ > 1000) {
-                       /* We must reset the receiver */
-                       MCBSP_WRITE(mcbsp, SPCR1,
-                                   MCBSP_READ_CACHE(mcbsp, SPCR1) & (~RRST));
-                       udelay(10);
-                       MCBSP_WRITE(mcbsp, SPCR1,
-                                   MCBSP_READ_CACHE(mcbsp, SPCR1) | RRST);
-                       udelay(10);
-                       dev_err(mcbsp->dev, "McBSP%d receiver not "
-                               "ready\n", mcbsp->id);
-                       return -EAGAIN;
-               }
-       }
-
-       /* Receiver is ready, let's read the dummy data */
-       if (rx_word_length > OMAP_MCBSP_WORD_16)
-               word_msb = MCBSP_READ(mcbsp, DRR2);
-       word_lsb = MCBSP_READ(mcbsp, DRR1);
-
-       return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_spi_master_xmit_word_poll);
-
-int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 *word)
-{
-       struct omap_mcbsp *mcbsp;
-       u32 clock_word = 0;
-       omap_mcbsp_word_length tx_word_length;
-       omap_mcbsp_word_length rx_word_length;
-       u16 spcr2, spcr1, attempts = 0, word_lsb, word_msb = 0;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return -ENODEV;
-       }
-
-       mcbsp = id_to_mcbsp_ptr(id);
-
-       tx_word_length = mcbsp->tx_word_length;
-       rx_word_length = mcbsp->rx_word_length;
-
-       if (tx_word_length != rx_word_length)
-               return -EINVAL;
-
-       /* First we wait for the transmitter to be ready */
-       spcr2 = MCBSP_READ(mcbsp, SPCR2);
-       while (!(spcr2 & XRDY)) {
-               spcr2 = MCBSP_READ(mcbsp, SPCR2);
-               if (attempts++ > 1000) {
-                       /* We must reset the transmitter */
-                       MCBSP_WRITE(mcbsp, SPCR2,
-                                   MCBSP_READ_CACHE(mcbsp, SPCR2) & (~XRST));
-                       udelay(10);
-                       MCBSP_WRITE(mcbsp, SPCR2,
-                                   MCBSP_READ_CACHE(mcbsp, SPCR2) | XRST);
-                       udelay(10);
-                       dev_err(mcbsp->dev, "McBSP%d transmitter not "
-                               "ready\n", mcbsp->id);
-                       return -EAGAIN;
-               }
-       }
-
-       /* We first need to enable the bus clock */
-       if (tx_word_length > OMAP_MCBSP_WORD_16)
-               MCBSP_WRITE(mcbsp, DXR2, clock_word >> 16);
-       MCBSP_WRITE(mcbsp, DXR1, clock_word & 0xffff);
-
-       /* We wait for the receiver to be ready */
-       spcr1 = MCBSP_READ(mcbsp, SPCR1);
-       while (!(spcr1 & RRDY)) {
-               spcr1 = MCBSP_READ(mcbsp, SPCR1);
-               if (attempts++ > 1000) {
-                       /* We must reset the receiver */
-                       MCBSP_WRITE(mcbsp, SPCR1,
-                                   MCBSP_READ_CACHE(mcbsp, SPCR1) & (~RRST));
-                       udelay(10);
-                       MCBSP_WRITE(mcbsp, SPCR1,
-                                   MCBSP_READ_CACHE(mcbsp, SPCR1) | RRST);
-                       udelay(10);
-                       dev_err(mcbsp->dev, "McBSP%d receiver not "
-                               "ready\n", mcbsp->id);
-                       return -EAGAIN;
-               }
-       }
-
-       /* Receiver is ready, there is something for us */
-       if (rx_word_length > OMAP_MCBSP_WORD_16)
-               word_msb = MCBSP_READ(mcbsp, DRR2);
-       word_lsb = MCBSP_READ(mcbsp, DRR1);
-
-       word[0] = (word_lsb | (word_msb << 16));
-
-       return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_spi_master_recv_word_poll);
-
-/*
- * Simple DMA based buffer rx/tx routines.
- * Nothing fancy, just a single buffer tx/rx through DMA.
- * The DMA resources are released once the transfer is done.
- * For anything fancier, you should use your own customized DMA
- * routines and callbacks.
- */
-int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer,
-                               unsigned int length)
-{
-       struct omap_mcbsp *mcbsp;
-       int dma_tx_ch;
-       int src_port = 0;
-       int dest_port = 0;
-       int sync_dev = 0;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return -ENODEV;
-       }
-       mcbsp = id_to_mcbsp_ptr(id);
-
-       if (omap_request_dma(mcbsp->dma_tx_sync, "McBSP TX",
-                               omap_mcbsp_tx_dma_callback,
-                               mcbsp,
-                               &dma_tx_ch)) {
-               dev_err(mcbsp->dev, " Unable to request DMA channel for "
-                               "McBSP%d TX. Trying IRQ based TX\n",
-                               mcbsp->id);
-               return -EAGAIN;
-       }
-       mcbsp->dma_tx_lch = dma_tx_ch;
-
-       dev_err(mcbsp->dev, "McBSP%d TX DMA on channel %d\n", mcbsp->id,
-               dma_tx_ch);
-
-       init_completion(&mcbsp->tx_dma_completion);
-
-       if (cpu_class_is_omap1()) {
-               src_port = OMAP_DMA_PORT_TIPB;
-               dest_port = OMAP_DMA_PORT_EMIFF;
-       }
-       if (cpu_class_is_omap2())
-               sync_dev = mcbsp->dma_tx_sync;
-
-       omap_set_dma_transfer_params(mcbsp->dma_tx_lch,
-                                    OMAP_DMA_DATA_TYPE_S16,
-                                    length >> 1, 1,
-                                    OMAP_DMA_SYNC_ELEMENT,
-        sync_dev, 0);
-
-       omap_set_dma_dest_params(mcbsp->dma_tx_lch,
-                                src_port,
-                                OMAP_DMA_AMODE_CONSTANT,
-                                mcbsp->phys_base + OMAP_MCBSP_REG_DXR1,
-                                0, 0);
-
-       omap_set_dma_src_params(mcbsp->dma_tx_lch,
-                               dest_port,
-                               OMAP_DMA_AMODE_POST_INC,
-                               buffer,
-                               0, 0);
-
-       omap_start_dma(mcbsp->dma_tx_lch);
-       wait_for_completion(&mcbsp->tx_dma_completion);
-
-       return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_xmit_buffer);
-
-int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer,
-                               unsigned int length)
-{
-       struct omap_mcbsp *mcbsp;
-       int dma_rx_ch;
-       int src_port = 0;
-       int dest_port = 0;
-       int sync_dev = 0;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return -ENODEV;
-       }
-       mcbsp = id_to_mcbsp_ptr(id);
-
-       if (omap_request_dma(mcbsp->dma_rx_sync, "McBSP RX",
-                               omap_mcbsp_rx_dma_callback,
-                               mcbsp,
-                               &dma_rx_ch)) {
-               dev_err(mcbsp->dev, "Unable to request DMA channel for "
-                               "McBSP%d RX. Trying IRQ based RX\n",
-                               mcbsp->id);
-               return -EAGAIN;
-       }
-       mcbsp->dma_rx_lch = dma_rx_ch;
-
-       dev_err(mcbsp->dev, "McBSP%d RX DMA on channel %d\n", mcbsp->id,
-               dma_rx_ch);
-
-       init_completion(&mcbsp->rx_dma_completion);
-
-       if (cpu_class_is_omap1()) {
-               src_port = OMAP_DMA_PORT_TIPB;
-               dest_port = OMAP_DMA_PORT_EMIFF;
-       }
-       if (cpu_class_is_omap2())
-               sync_dev = mcbsp->dma_rx_sync;
-
-       omap_set_dma_transfer_params(mcbsp->dma_rx_lch,
-                                       OMAP_DMA_DATA_TYPE_S16,
-                                       length >> 1, 1,
-                                       OMAP_DMA_SYNC_ELEMENT,
-                                       sync_dev, 0);
-
-       omap_set_dma_src_params(mcbsp->dma_rx_lch,
-                               src_port,
-                               OMAP_DMA_AMODE_CONSTANT,
-                               mcbsp->phys_base + OMAP_MCBSP_REG_DRR1,
-                               0, 0);
-
-       omap_set_dma_dest_params(mcbsp->dma_rx_lch,
-                                       dest_port,
-                                       OMAP_DMA_AMODE_POST_INC,
-                                       buffer,
-                                       0, 0);
-
-       omap_start_dma(mcbsp->dma_rx_lch);
-       wait_for_completion(&mcbsp->rx_dma_completion);
-
-       return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_recv_buffer);
-
-/*
- * SPI wrapper.
- * Since SPI setup is much simpler than the generic McBSP one,
- * this wrapper just need an omap_mcbsp_spi_cfg structure as an input.
- * Once this is done, you can call omap_mcbsp_start().
- */
-void omap_mcbsp_set_spi_mode(unsigned int id,
-                               const struct omap_mcbsp_spi_cfg *spi_cfg)
-{
-       struct omap_mcbsp *mcbsp;
-       struct omap_mcbsp_reg_cfg mcbsp_cfg;
-
-       if (!omap_mcbsp_check_valid_id(id)) {
-               printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
-               return;
-       }
-       mcbsp = id_to_mcbsp_ptr(id);
-
-       memset(&mcbsp_cfg, 0, sizeof(struct omap_mcbsp_reg_cfg));
-
-       /* SPI has only one frame */
-       mcbsp_cfg.rcr1 |= (RWDLEN1(spi_cfg->word_length) | RFRLEN1(0));
-       mcbsp_cfg.xcr1 |= (XWDLEN1(spi_cfg->word_length) | XFRLEN1(0));
-
-       /* Clock stop mode */
-       if (spi_cfg->clk_stp_mode == OMAP_MCBSP_CLK_STP_MODE_NO_DELAY)
-               mcbsp_cfg.spcr1 |= (1 << 12);
-       else
-               mcbsp_cfg.spcr1 |= (3 << 11);
-
-       /* Set clock parities */
-       if (spi_cfg->rx_clock_polarity == OMAP_MCBSP_CLK_RISING)
-               mcbsp_cfg.pcr0 |= CLKRP;
-       else
-               mcbsp_cfg.pcr0 &= ~CLKRP;
-
-       if (spi_cfg->tx_clock_polarity == OMAP_MCBSP_CLK_RISING)
-               mcbsp_cfg.pcr0 &= ~CLKXP;
-       else
-               mcbsp_cfg.pcr0 |= CLKXP;
-
-       /* Set SCLKME to 0 and CLKSM to 1 */
-       mcbsp_cfg.pcr0 &= ~SCLKME;
-       mcbsp_cfg.srgr2 |= CLKSM;
-
-       /* Set FSXP */
-       if (spi_cfg->fsx_polarity == OMAP_MCBSP_FS_ACTIVE_HIGH)
-               mcbsp_cfg.pcr0 &= ~FSXP;
-       else
-               mcbsp_cfg.pcr0 |= FSXP;
-
-       if (spi_cfg->spi_mode == OMAP_MCBSP_SPI_MASTER) {
-               mcbsp_cfg.pcr0 |= CLKXM;
-               mcbsp_cfg.srgr1 |= CLKGDV(spi_cfg->clk_div - 1);
-               mcbsp_cfg.pcr0 |= FSXM;
-               mcbsp_cfg.srgr2 &= ~FSGM;
-               mcbsp_cfg.xcr2 |= XDATDLY(1);
-               mcbsp_cfg.rcr2 |= RDATDLY(1);
-       } else {
-               mcbsp_cfg.pcr0 &= ~CLKXM;
-               mcbsp_cfg.srgr1 |= CLKGDV(1);
-               mcbsp_cfg.pcr0 &= ~FSXM;
-               mcbsp_cfg.xcr2 &= ~XDATDLY(3);
-               mcbsp_cfg.rcr2 &= ~RDATDLY(3);
-       }
-
-       mcbsp_cfg.xcr2 &= ~XPHASE;
-       mcbsp_cfg.rcr2 &= ~RPHASE;
-
-       omap_mcbsp_config(id, &mcbsp_cfg);
-}
-EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
-
 #ifdef CONFIG_ARCH_OMAP3
 #define max_thres(m)                   (mcbsp->pdata->buffer_size)
 #define valid_threshold(m, val)                ((val) <= max_thres(m))
@@ -1833,8 +1277,6 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
        spin_lock_init(&mcbsp->lock);
        mcbsp->id = id + 1;
        mcbsp->free = true;
-       mcbsp->dma_tx_lch = -1;
-       mcbsp->dma_rx_lch = -1;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
        if (!res) {
@@ -1860,9 +1302,6 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
        else
                mcbsp->phys_dma_base = res->start;
 
-       /* Default I/O is IRQ based */
-       mcbsp->io_type = OMAP_MCBSP_IRQ_IO;
-
        mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
        mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
 
index 49fc0df..c8b9cd1 100644 (file)
@@ -236,11 +236,6 @@ static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat)
        return 0;
 }
 
-static inline struct omap_device *_find_by_pdev(struct platform_device *pdev)
-{
-       return container_of(pdev, struct omap_device, pdev);
-}
-
 /**
  * _add_optional_clock_clkdev - Add clkdev entry for hwmod optional clocks
  * @od: struct omap_device *od
@@ -316,7 +311,7 @@ u32 omap_device_get_context_loss_count(struct platform_device *pdev)
        struct omap_device *od;
        u32 ret = 0;
 
-       od = _find_by_pdev(pdev);
+       od = to_omap_device(pdev);
 
        if (od->hwmods_cnt)
                ret = omap_hwmod_get_context_loss_count(od->hwmods[0]);
@@ -611,7 +606,7 @@ int omap_device_enable(struct platform_device *pdev)
        int ret;
        struct omap_device *od;
 
-       od = _find_by_pdev(pdev);
+       od = to_omap_device(pdev);
 
        if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
                WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n",
@@ -650,7 +645,7 @@ int omap_device_idle(struct platform_device *pdev)
        int ret;
        struct omap_device *od;
 
-       od = _find_by_pdev(pdev);
+       od = to_omap_device(pdev);
 
        if (od->_state != OMAP_DEVICE_STATE_ENABLED) {
                WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n",
@@ -681,7 +676,7 @@ int omap_device_shutdown(struct platform_device *pdev)
        int ret, i;
        struct omap_device *od;
 
-       od = _find_by_pdev(pdev);
+       od = to_omap_device(pdev);
 
        if (od->_state != OMAP_DEVICE_STATE_ENABLED &&
            od->_state != OMAP_DEVICE_STATE_IDLE) {
@@ -722,7 +717,7 @@ int omap_device_align_pm_lat(struct platform_device *pdev,
        int ret = -EINVAL;
        struct omap_device *od;
 
-       od = _find_by_pdev(pdev);
+       od = to_omap_device(pdev);
 
        if (new_wakeup_lat_limit == od->dev_wakeup_lat)
                return 0;
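
The removed _find_by_pdev() was a container_of() wrapper; to_omap_device() is presumably the equivalent helper now provided by the omap_device header. A user-space sketch of the underlying pointer arithmetic, with mock types and a simplified container_of() standing in for the kernel definitions:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pdev { int id; };			/* stand-in for platform_device */
struct odev { int state; struct pdev pdev; };	/* stand-in for omap_device */

int main(void)
{
	struct odev od = { .state = 2, .pdev = { .id = 7 } };
	struct pdev *p = &od.pdev;
	struct odev *back = container_of(p, struct odev, pdev);

	/* recovers the enclosing structure from the embedded member */
	printf("state=%d id=%d\n", back->state, back->pdev.id);
	return 0;
}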
index c151c5f..116edfe 100644 (file)
 #define S5PV210_UFSTAT_RXMASK  (255<<0)
 #define S5PV210_UFSTAT_RXSHIFT (0)
 
+#define NO_NEED_CHECK_CLKSRC   1
+
 #ifndef __ASSEMBLY__
 
 /* struct s3c24xx_uart_clksrc
index 9f3b5ac..115ced3 100644 (file)
@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)         (node_data[nid])
 
 #define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)                                              \
-({                                                                     \
-       pg_data_t *__pgdat = NODE_DATA(nid);                            \
-       __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1;      \
-})
 
 #define pmd_page(pmd)          (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 /*
@@ -44,7 +38,7 @@ static __inline__ int pfn_to_nid(unsigned long pfn)
        int node;
 
        for (node = 0 ; node < MAX_NUMNODES ; node++)
-               if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node))
+               if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node))
                        break;
 
        return node;
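
The alpha change above is more than macro removal: the generic node_end_pfn() that replaces the deleted local macro evaluates to one past the node's last PFN (node_start_pfn + node_spanned_pages), not the last valid PFN, so the membership test in pfn_to_nid() switches from '<=' to a strict '<'. A minimal illustration of the half-open convention (not the kernel definitions):

        /* Half-open node range [start, end): membership is start <= pfn < end. */
        static int pfn_in_node(unsigned long pfn, unsigned long start_pfn,
                               unsigned long spanned_pages)
        {
                unsigned long end_pfn = start_pfn + spanned_pages; /* one past the last PFN */

                return pfn >= start_pfn && pfn < end_pfn;
        }
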
index 3d6e60d..780560b 100644 (file)
@@ -15,6 +15,7 @@
  * User space memory access functions
  */
 #include <linux/thread_info.h>
+#include <linux/kernel.h>
 #include <asm/page.h>
 #include <asm/errno.h>
 
index 9608d2c..e67eb9c 100644 (file)
@@ -14,13 +14,6 @@ extern struct node_map_data node_data[];
 
 #define NODE_DATA(nid)          (&node_data[nid].pg_data)
 
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)                                              \
-({                                                                     \
-       pg_data_t *__pgdat = NODE_DATA(nid);                            \
-       __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
-})
-
 /* We have these possible memory map layouts:
  * Astro: 0-3.75, 67.75-68, 4-64
  * zx1: 0-1, 257-260, 4-256
index 4f685a7..98d9426 100644 (file)
                        wm8776:codec@1a {
                                compatible = "wlf,wm8776";
                                reg = <0x1a>;
-                               /* MCLK source is a stand-alone oscillator */
-                               clock-frequency = <12288000>;
+                               /*
+                                * clock-frequency will be set by U-Boot if
+                                * the clock is enabled.
+                                */
                        };
                };
 
                        codec-handle = <&wm8776>;
                        fsl,playback-dma = <&dma00>;
                        fsl,capture-dma = <&dma01>;
-                       fsl,fifo-depth = <16>;
+                       fsl,fifo-depth = <15>;
+                       fsl,ssi-asynchronous;
                };
 
                dma@c300 {
index c9f212b..80bc5de 100644 (file)
@@ -148,7 +148,6 @@ CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_SCSI_CXGB3_ISCSI=m
 CONFIG_SCSI_CXGB4_ISCSI=m
 CONFIG_SCSI_BNX2_ISCSI=m
-CONFIG_SCSI_BNX2_ISCSI=m
 CONFIG_BE2ISCSI=m
 CONFIG_SCSI_IBMVSCSI=y
 CONFIG_SCSI_IBMVFC=m
index fd3fd58..7b58917 100644 (file)
@@ -38,13 +38,6 @@ u64 memory_hotplug_max(void);
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)      (NODE_DATA(nid)->node_end_pfn)
-
 #else
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
index 77578c0..c57c193 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/init.h>
 #include <linux/rtc.h>
 #include <linux/delay.h>
+#include <linux/ratelimit.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/time.h>
@@ -29,9 +30,10 @@ unsigned long __init rtas_get_boot_time(void)
                }
        } while (wait_time && (get_tb() < max_wait_tb));
 
-       if (error != 0 && printk_ratelimit()) {
-               printk(KERN_WARNING "error: reading the clock failed (%d)\n",
-                       error);
+       if (error != 0) {
+               printk_ratelimited(KERN_WARNING
+                                  "error: reading the clock failed (%d)\n",
+                                  error);
                return 0;
        }
 
@@ -55,19 +57,21 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm)
 
                wait_time = rtas_busy_delay_time(error);
                if (wait_time) {
-                       if (in_interrupt() && printk_ratelimit()) {
+                       if (in_interrupt()) {
                                memset(rtc_tm, 0, sizeof(struct rtc_time));
-                               printk(KERN_WARNING "error: reading clock"
-                                      " would delay interrupt\n");
+                               printk_ratelimited(KERN_WARNING
+                                                  "error: reading clock "
+                                                  "would delay interrupt\n");
                                return; /* delay not allowed */
                        }
                        msleep(wait_time);
                }
        } while (wait_time && (get_tb() < max_wait_tb));
 
-        if (error != 0 && printk_ratelimit()) {
-                printk(KERN_WARNING "error: reading the clock failed (%d)\n",
-                      error);
+       if (error != 0) {
+               printk_ratelimited(KERN_WARNING
+                                  "error: reading the clock failed (%d)\n",
+                                  error);
                return;
         }
 
@@ -99,9 +103,10 @@ int rtas_set_rtc_time(struct rtc_time *tm)
                }
        } while (wait_time && (get_tb() < max_wait_tb));
 
-        if (error != 0 && printk_ratelimit())
-                printk(KERN_WARNING "error: setting the clock failed (%d)\n",
-                      error);
+       if (error != 0)
+               printk_ratelimited(KERN_WARNING
+                                  "error: setting the clock failed (%d)\n",
+                                  error);
 
         return 0;
 }
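
The same mechanical conversion repeats through the powerpc hunks that follow: an open-coded printk_ratelimit() guard around printk() collapses into a single printk_ratelimited() call, which is why each file also gains <linux/ratelimit.h>. A minimal before/after sketch with a hypothetical message, not text from the patch:

        #include <linux/kernel.h>
        #include <linux/ratelimit.h>

        static void report_error_old(int error)
        {
                if (error && printk_ratelimit())
                        printk(KERN_WARNING "operation failed (%d)\n", error);
        }

        static void report_error_new(int error)
        {
                if (error)
                        printk_ratelimited(KERN_WARNING
                                           "operation failed (%d)\n", error);
        }

The replacement also moves from the single global rate-limit state to a per-call-site state, so one noisy message no longer suppresses unrelated ones.
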
index b96a3a0..78b76dc 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/errno.h>
 #include <linux/elf.h>
 #include <linux/ptrace.h>
+#include <linux/ratelimit.h>
 #ifdef CONFIG_PPC64
 #include <linux/syscalls.h>
 #include <linux/compat.h>
@@ -892,11 +893,12 @@ badframe:
        printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
               regs, frame, newsp);
 #endif
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: "
-                       "%p nip %08lx lr %08lx\n",
-                       current->comm, current->pid,
-                       addr, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(KERN_INFO
+                                  "%s[%d]: bad frame in handle_rt_signal32: "
+                                  "%p nip %08lx lr %08lx\n",
+                                  current->comm, current->pid,
+                                  addr, regs->nip, regs->link);
 
        force_sigsegv(sig, current);
        return 0;
@@ -1058,11 +1060,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
        return 0;
 
  bad:
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: "
-                       "%p nip %08lx lr %08lx\n",
-                       current->comm, current->pid,
-                       rt_sf, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(KERN_INFO
+                                  "%s[%d]: bad frame in sys_rt_sigreturn: "
+                                  "%p nip %08lx lr %08lx\n",
+                                  current->comm, current->pid,
+                                  rt_sf, regs->nip, regs->link);
 
        force_sig(SIGSEGV, current);
        return 0;
@@ -1149,12 +1152,12 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
         * We kill the task with a SIGSEGV in this situation.
         */
        if (do_setcontext(ctx, regs, 1)) {
-               if (show_unhandled_signals && printk_ratelimit())
-                       printk(KERN_INFO "%s[%d]: bad frame in "
-                               "sys_debug_setcontext: %p nip %08lx "
-                               "lr %08lx\n",
-                               current->comm, current->pid,
-                               ctx, regs->nip, regs->link);
+               if (show_unhandled_signals)
+                       printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
+                                          "sys_debug_setcontext: %p nip %08lx "
+                                          "lr %08lx\n",
+                                          current->comm, current->pid,
+                                          ctx, regs->nip, regs->link);
 
                force_sig(SIGSEGV, current);
                goto out;
@@ -1236,11 +1239,12 @@ badframe:
        printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
               regs, frame, newsp);
 #endif
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: "
-                       "%p nip %08lx lr %08lx\n",
-                       current->comm, current->pid,
-                       frame, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(KERN_INFO
+                                  "%s[%d]: bad frame in handle_signal32: "
+                                  "%p nip %08lx lr %08lx\n",
+                                  current->comm, current->pid,
+                                  frame, regs->nip, regs->link);
 
        force_sigsegv(sig, current);
        return 0;
@@ -1288,11 +1292,12 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
        return 0;
 
 badframe:
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: "
-                       "%p nip %08lx lr %08lx\n",
-                       current->comm, current->pid,
-                       addr, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(KERN_INFO
+                                  "%s[%d]: bad frame in sys_sigreturn: "
+                                  "%p nip %08lx lr %08lx\n",
+                                  current->comm, current->pid,
+                                  addr, regs->nip, regs->link);
 
        force_sig(SIGSEGV, current);
        return 0;
index da989ff..e91c736 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/elf.h>
 #include <linux/ptrace.h>
 #include <linux/module.h>
+#include <linux/ratelimit.h>
 
 #include <asm/sigcontext.h>
 #include <asm/ucontext.h>
@@ -380,10 +381,10 @@ badframe:
        printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
               regs, uc, &uc->uc_mcontext);
 #endif
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
-                       current->comm, current->pid, "rt_sigreturn",
-                       (long)uc, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
+                                  current->comm, current->pid, "rt_sigreturn",
+                                  (long)uc, regs->nip, regs->link);
 
        force_sig(SIGSEGV, current);
        return 0;
@@ -468,10 +469,10 @@ badframe:
        printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
               regs, frame, newsp);
 #endif
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
-                       current->comm, current->pid, "setup_rt_frame",
-                       (long)frame, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
+                                  current->comm, current->pid, "setup_rt_frame",
+                                  (long)frame, regs->nip, regs->link);
 
        force_sigsegv(signr, current);
        return 0;
index 0ff4ab9..1a01414 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/bug.h>
 #include <linux/kdebug.h>
 #include <linux/debugfs.h>
+#include <linux/ratelimit.h>
 
 #include <asm/emulated_ops.h>
 #include <asm/pgtable.h>
@@ -197,12 +198,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
                if (die("Exception in kernel mode", regs, signr))
                        return;
        } else if (show_unhandled_signals &&
-                   unhandled_signal(current, signr) &&
-                   printk_ratelimit()) {
-                       printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
-                               current->comm, current->pid, signr,
-                               addr, regs->nip, regs->link, code);
-               }
+                  unhandled_signal(current, signr)) {
+               printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
+                                  current->comm, current->pid, signr,
+                                  addr, regs->nip, regs->link, code);
+       }
 
        memset(&info, 0, sizeof(info));
        info.si_signo = signr;
@@ -425,7 +425,7 @@ int machine_check_e500mc(struct pt_regs *regs)
        unsigned long reason = mcsr;
        int recoverable = 1;
 
-       if (reason & MCSR_BUS_RBERR) {
+       if (reason & MCSR_LD) {
                recoverable = fsl_rio_mcheck_exception(regs);
                if (recoverable == 1)
                        goto silent_out;
@@ -1342,9 +1342,8 @@ void altivec_assist_exception(struct pt_regs *regs)
        } else {
                /* didn't recognize the instruction */
                /* XXX quick hack for now: set the non-Java bit in the VSCR */
-               if (printk_ratelimit())
-                       printk(KERN_ERR "Unrecognized altivec instruction "
-                              "in %s at %lx\n", current->comm, regs->nip);
+               printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
+                                  "in %s at %lx\n", current->comm, regs->nip);
                current->thread.vscr.u[3] |= 0x10000;
        }
 }
@@ -1548,9 +1547,8 @@ u32 ppc_warn_emulated;
 
 void ppc_warn_emulated_print(const char *type)
 {
-       if (printk_ratelimit())
-               pr_warning("%s used emulated %s instruction\n", current->comm,
-                          type);
+       pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
+                           type);
 }
 
 static int __init ppc_warn_emulated_init(void)
index 54f4fb9..ad35f66 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>
 #include <linux/magic.h>
+#include <linux/ratelimit.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -346,11 +347,10 @@ bad_area_nosemaphore:
                return 0;
        }
 
-       if (is_exec && (error_code & DSISR_PROTFAULT)
-           && printk_ratelimit())
-               printk(KERN_CRIT "kernel tried to execute NX-protected"
-                      " page (%lx) - exploit attempt? (uid: %d)\n",
-                      address, current_uid());
+       if (is_exec && (error_code & DSISR_PROTFAULT))
+               printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
+                                  " page (%lx) - exploit attempt? (uid: %d)\n",
+                                  address, current_uid());
 
        return SIGSEGV;
 
index 5b206a2..b3fd081 100644 (file)
@@ -283,23 +283,24 @@ static void __iomem *rio_regs_win;
 #ifdef CONFIG_E500
 int fsl_rio_mcheck_exception(struct pt_regs *regs)
 {
-       const struct exception_table_entry *entry = NULL;
-       unsigned long reason = mfspr(SPRN_MCSR);
-
-       if (reason & MCSR_BUS_RBERR) {
-               reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
-               if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
-                       /* Check if we are prepared to handle this fault */
-                       entry = search_exception_tables(regs->nip);
-                       if (entry) {
-                               pr_debug("RIO: %s - MC Exception handled\n",
-                                        __func__);
-                               out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
-                                        0);
-                               regs->msr |= MSR_RI;
-                               regs->nip = entry->fixup;
-                               return 1;
-                       }
+       const struct exception_table_entry *entry;
+       unsigned long reason;
+
+       if (!rio_regs_win)
+               return 0;
+
+       reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
+       if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
+               /* Check if we are prepared to handle this fault */
+               entry = search_exception_tables(regs->nip);
+               if (entry) {
+                       pr_debug("RIO: %s - MC Exception handled\n",
+                                __func__);
+                       out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
+                                0);
+                       regs->msr |= MSR_RI;
+                       regs->nip = entry->fixup;
+                       return 1;
                }
        }
 
index 3a8de5b..58d7a53 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/syscore_ops.h>
+#include <linux/ratelimit.h>
 
 #include <asm/ptrace.h>
 #include <asm/signal.h>
@@ -1648,9 +1649,8 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
                return NO_IRQ;
        }
        if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
-               if (printk_ratelimit())
-                       printk(KERN_WARNING "%s: Got protected source %d !\n",
-                              mpic->name, (int)src);
+               printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
+                                  mpic->name, (int)src);
                mpic_eoi(mpic);
                return NO_IRQ;
        }
@@ -1688,9 +1688,8 @@ unsigned int mpic_get_coreint_irq(void)
                return NO_IRQ;
        }
        if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
-               if (printk_ratelimit())
-                       printk(KERN_WARNING "%s: Got protected source %d !\n",
-                              mpic->name, (int)src);
+               printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
+                                  mpic->name, (int)src);
                return NO_IRQ;
        }
 
index 90d77bd..c03fef7 100644 (file)
@@ -579,6 +579,7 @@ config S390_GUEST
        def_bool y
        prompt "s390 guest support for KVM (EXPERIMENTAL)"
        depends on 64BIT && EXPERIMENTAL
+       select VIRTUALIZATION
        select VIRTIO
        select VIRTIO_RING
        select VIRTIO_CONSOLE
index 52420d2..1d55c95 100644 (file)
@@ -262,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit)
 
        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-       parms.orvals[cr] = 1 << bit;
+       parms.orvals[cr] = 1UL << bit;
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -276,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 
        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-       parms.andvals[cr] = ~(1L << bit);
+       parms.andvals[cr] = ~(1UL << bit);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
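
The s390 fix above is an integer-promotion issue: orvals[] and andvals[] are arrays of unsigned long, but 1 << bit is evaluated as a 32-bit int, which misbehaves once bit reaches 31 on a 64-bit kernel. A standalone illustration (not kernel code):

        #include <stdio.h>

        /* The mask must be built in unsigned long so the shift happens at
         * the full register width on an LP64 target. */
        static unsigned long cr_bit_mask(int bit)
        {
                return 1UL << bit;  /* 1 << bit would be a 32-bit shift */
        }

        int main(void)
        {
                printf("%#lx\n", cr_bit_mask(33));  /* prints 0x200000000 */
                return 0;
        }
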
index 5995e9b..0e358c2 100644 (file)
@@ -25,7 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
 
 #include "hwsampler.h"
 
-#define DEFAULT_INTERVAL       4096
+#define DEFAULT_INTERVAL       4127518
 
 #define DEFAULT_SDBT_BLOCKS    1
 #define DEFAULT_SDB_BLOCKS     511
@@ -151,6 +151,12 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
        if (oprofile_max_interval == 0)
                return -ENODEV;
 
+       /* The initial value should be sane */
+       if (oprofile_hw_interval < oprofile_min_interval)
+               oprofile_hw_interval = oprofile_min_interval;
+       if (oprofile_hw_interval > oprofile_max_interval)
+               oprofile_hw_interval = oprofile_max_interval;
+
        if (oprofile_timer_init(ops))
                return -ENODEV;
 
index f03338c..bbdeb48 100644 (file)
@@ -348,6 +348,7 @@ config CPU_SUBTYPE_SH7720
        select SYS_SUPPORTS_CMT
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select USB_ARCH_HAS_OHCI
+       select USB_OHCI_SH if USB_OHCI_HCD
        help
          Select SH7720 if you have a SH3-DSP SH7720 CPU.
 
@@ -357,6 +358,7 @@ config CPU_SUBTYPE_SH7721
        select CPU_HAS_DSP
        select SYS_SUPPORTS_CMT
        select USB_ARCH_HAS_OHCI
+       select USB_OHCI_SH if USB_OHCI_HCD
        help
          Select SH7721 if you have a SH3-DSP SH7721 CPU.
 
@@ -440,6 +442,7 @@ config CPU_SUBTYPE_SH7763
        bool "Support SH7763 processor"
        select CPU_SH4A
        select USB_ARCH_HAS_OHCI
+       select USB_OHCI_SH if USB_OHCI_HCD
        help
          Select SH7763 if you have a SH4A SH7763(R5S77631) CPU.
 
@@ -467,7 +470,9 @@ config CPU_SUBTYPE_SH7786
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select USB_ARCH_HAS_OHCI
+       select USB_OHCI_SH if USB_OHCI_HCD
        select USB_ARCH_HAS_EHCI
+       select USB_EHCI_SH if USB_EHCI_HCD
 
 config CPU_SUBTYPE_SHX3
        bool "Support SH-X3 processor"
index 33ddb13..cfde98d 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
@@ -39,8 +38,6 @@ CONFIG_IPV6=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_M25P80=y
@@ -56,18 +53,19 @@ CONFIG_SH_ETH=y
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_MOUSE_PS2 is not set
 # CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=3
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
 CONFIG_SPI_SH=y
 # CONFIG_HWMON is not set
-CONFIG_MFD_SH_MOBILE_SDHI=y
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_SH=y
 CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_SH=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHI=y
index 8887baf..15a8496 100644 (file)
@@ -9,10 +9,6 @@
 extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)         (node_data[nid])
 
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)      (NODE_DATA(nid)->node_start_pfn + \
-                                NODE_DATA(nid)->node_spanned_pages)
-
 static inline int pfn_to_nid(unsigned long pfn)
 {
        int nid;
index 423dabf..e915dea 100644 (file)
@@ -183,7 +183,7 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_SCIF2_RX,
                .addr           = 0x1f4b0014,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x22,
        },
@@ -197,7 +197,7 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_SCIF3_RX,
                .addr           = 0x1f4c0014,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x2a,
        },
@@ -211,7 +211,7 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_SCIF4_RX,
                .addr           = 0x1f4d0014,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x42,
        },
@@ -228,7 +228,7 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC0_RX,
                .addr           = 0x1e500013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x22,
        },
@@ -242,7 +242,7 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC1_RX,
                .addr           = 0x1e510013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x2a,
        },
@@ -256,7 +256,7 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC2_RX,
                .addr           = 0x1e520013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0xa2,
        },
@@ -265,12 +265,12 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
                .addr           = 0x1e530012,
                .chcr           = SM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
-               .mid_rid        = 0xab,
+               .mid_rid        = 0xa9,
        },
        {
                .slave_id       = SHDMA_SLAVE_RIIC3_RX,
                .addr           = 0x1e530013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0xaf,
        },
@@ -279,14 +279,14 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
                .addr           = 0x1e540012,
                .chcr           = SM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
-               .mid_rid        = 0xc1,
+               .mid_rid        = 0xc5,
        },
        {
                .slave_id       = SHDMA_SLAVE_RIIC4_RX,
                .addr           = 0x1e540013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
-               .mid_rid        = 0xc2,
+               .mid_rid        = 0xc6,
        },
 };
 
@@ -301,7 +301,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC5_RX,
                .addr           = 0x1e550013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x22,
        },
@@ -315,7 +315,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC6_RX,
                .addr           = 0x1e560013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x2a,
        },
@@ -329,7 +329,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC7_RX,
                .addr           = 0x1e570013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x42,
        },
@@ -343,7 +343,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC8_RX,
                .addr           = 0x1e580013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x46,
        },
@@ -357,7 +357,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC9_RX,
                .addr           = 0x1e590013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x52,
        },
@@ -659,6 +659,54 @@ static struct platform_device spi0_device = {
        .resource       = spi0_resources,
 };
 
+static struct resource usb_ehci_resources[] = {
+       [0] = {
+               .start  = 0xfe4f1000,
+               .end    = 0xfe4f10ff,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = 57,
+               .end    = 57,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device usb_ehci_device = {
+       .name           = "sh_ehci",
+       .id             = -1,
+       .dev = {
+               .dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
+       },
+       .num_resources  = ARRAY_SIZE(usb_ehci_resources),
+       .resource       = usb_ehci_resources,
+};
+
+static struct resource usb_ohci_resources[] = {
+       [0] = {
+               .start  = 0xfe4f1800,
+               .end    = 0xfe4f18ff,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = 57,
+               .end    = 57,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device usb_ohci_device = {
+       .name           = "sh_ohci",
+       .id             = -1,
+       .dev = {
+               .dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
+       },
+       .num_resources  = ARRAY_SIZE(usb_ohci_resources),
+       .resource       = usb_ohci_resources,
+};
+
 static struct platform_device *sh7757_devices[] __initdata = {
        &scif2_device,
        &scif3_device,
@@ -670,6 +718,8 @@ static struct platform_device *sh7757_devices[] __initdata = {
        &dma2_device,
        &dma3_device,
        &spi0_device,
+       &usb_ehci_device,
+       &usb_ohci_device,
 };
 
 static int __init sh7757_devices_setup(void)
@@ -1039,13 +1089,13 @@ static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups,
 
 /* Support for external interrupt pins in IRQ mode */
 static struct intc_vect vectors_irq0123[] __initdata = {
-       INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
-       INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+       INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
+       INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
 };
 
 static struct intc_vect vectors_irq4567[] __initdata = {
-       INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
-       INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
+       INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340),
+       INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
 };
 
 static struct intc_sense_reg sense_registers[] __initdata = {
@@ -1079,14 +1129,14 @@ static struct intc_vect vectors_irl0123[] __initdata = {
 };
 
 static struct intc_vect vectors_irl4567[] __initdata = {
-       INTC_VECT(IRL4_LLLL, 0xb00), INTC_VECT(IRL4_LLLH, 0xb20),
-       INTC_VECT(IRL4_LLHL, 0xb40), INTC_VECT(IRL4_LLHH, 0xb60),
-       INTC_VECT(IRL4_LHLL, 0xb80), INTC_VECT(IRL4_LHLH, 0xba0),
-       INTC_VECT(IRL4_LHHL, 0xbc0), INTC_VECT(IRL4_LHHH, 0xbe0),
-       INTC_VECT(IRL4_HLLL, 0xc00), INTC_VECT(IRL4_HLLH, 0xc20),
-       INTC_VECT(IRL4_HLHL, 0xc40), INTC_VECT(IRL4_HLHH, 0xc60),
-       INTC_VECT(IRL4_HHLL, 0xc80), INTC_VECT(IRL4_HHLH, 0xca0),
-       INTC_VECT(IRL4_HHHL, 0xcc0),
+       INTC_VECT(IRL4_LLLL, 0x200), INTC_VECT(IRL4_LLLH, 0x220),
+       INTC_VECT(IRL4_LLHL, 0x240), INTC_VECT(IRL4_LLHH, 0x260),
+       INTC_VECT(IRL4_LHLL, 0x280), INTC_VECT(IRL4_LHLH, 0x2a0),
+       INTC_VECT(IRL4_LHHL, 0x2c0), INTC_VECT(IRL4_LHHH, 0x2e0),
+       INTC_VECT(IRL4_HLLL, 0x300), INTC_VECT(IRL4_HLLH, 0x320),
+       INTC_VECT(IRL4_HLHL, 0x340), INTC_VECT(IRL4_HLHH, 0x360),
+       INTC_VECT(IRL4_HHLL, 0x380), INTC_VECT(IRL4_HHLH, 0x3a0),
+       INTC_VECT(IRL4_HHHL, 0x3c0),
 };
 
 static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123,
index 9197110..a3ee919 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
 #include <linux/delay.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
@@ -268,9 +269,8 @@ void migrate_irqs(void)
                        unsigned int newcpu = cpumask_any_and(data->affinity,
                                                              cpu_online_mask);
                        if (newcpu >= nr_cpu_ids) {
-                               if (printk_ratelimit())
-                                       printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-                                              irq, cpu);
+                               pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+                                                   irq, cpu);
 
                                cpumask_setall(data->affinity);
                                newcpu = cpumask_any_and(data->affinity,
index b2595b8..620fa7f 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/uaccess.h>
+#include <linux/ratelimit.h>
 #include <asm/alignment.h>
 #include <asm/processor.h>
 
@@ -95,13 +96,13 @@ int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
 void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
                             struct pt_regs *regs)
 {
-       if (user_mode(regs) && (se_usermode & UM_WARN) && printk_ratelimit())
-               pr_notice("Fixing up unaligned userspace access "
+       if (user_mode(regs) && (se_usermode & UM_WARN))
+               pr_notice_ratelimited("Fixing up unaligned userspace access "
                          "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
                          tsk->comm, task_pid_nr(tsk),
                          (void *)instruction_pointer(regs), insn);
-       else if (se_kernmode_warn && printk_ratelimit())
-               pr_notice("Fixing up unaligned kernel access "
+       else if (se_kernmode_warn)
+               pr_notice_ratelimited("Fixing up unaligned kernel access "
                          "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
                          tsk->comm, task_pid_nr(tsk),
                          (void *)instruction_pointer(regs), insn);
index e8c6487..99d9b9f 100644 (file)
@@ -8,8 +8,6 @@
 extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)         (node_data[nid])
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)      (NODE_DATA(nid)->node_end_pfn)
 
 extern int numa_cpu_lookup_table[];
 extern cpumask_t numa_cpumask_lookup_table[];
index c6344c4..9d3dbce 100644 (file)
@@ -40,17 +40,6 @@ static inline int pfn_to_nid(unsigned long pfn)
        return highbits_to_node[__pfn_to_highbits(pfn)];
 }
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)                                              \
-({                                                                     \
-       pg_data_t *__pgdat = NODE_DATA(nid);                            \
-       __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
-})
-
 #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
 
 static inline int pfn_valid(int pfn)
diff --git a/arch/um/include/asm/percpu.h b/arch/um/include/asm/percpu.h
new file mode 100644 (file)
index 0000000..efe7508
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __UM_PERCPU_H
+#define __UM_PERCPU_H
+
+#include <asm-generic/percpu.h>
+
+#endif /* __UM_PERCPU_H */
index 2fefa50..af60d8a 100644 (file)
@@ -62,7 +62,7 @@ extern int sfi_mtimer_num;
 #else /* CONFIG_APB_TIMER */
 
 static inline unsigned long apbt_quick_calibrate(void) {return 0; }
-static inline void apbt_time_init(void) {return 0; }
+static inline void apbt_time_init(void) { }
 
 #endif
 #endif /* ASM_X86_APBT_H */
index 5e83a41..224e8c5 100644 (file)
@@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 #endif
 }
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)                                              \
-({                                                                     \
-       pg_data_t *__pgdat = NODE_DATA(nid);                            \
-       __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
-})
-
 static inline int pfn_valid(int pfn)
 {
        int nid = pfn_to_nid(pfn);
index b3f88d7..129d9aa 100644 (file)
@@ -13,8 +13,5 @@ extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)         (node_data[nid])
 
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn +      \
-                                NODE_DATA(nid)->node_spanned_pages)
 #endif
 #endif /* _ASM_X86_MMZONE_64_H */
index 6df88c7..adc9867 100644 (file)
@@ -3372,7 +3372,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
        bool op_prefix = false;
        struct opcode opcode;
-       struct operand memop = { .type = OP_NONE };
+       struct operand memop = { .type = OP_NONE }, *memopp = NULL;
 
        c->eip = ctxt->eip;
        c->fetch.start = c->eip;
@@ -3547,9 +3547,6 @@ done_prefixes:
        if (memop.type == OP_MEM && c->ad_bytes != 8)
                memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
-       if (memop.type == OP_MEM && c->rip_relative)
-               memop.addr.mem.ea += c->eip;
-
        /*
         * Decode and fetch the source operand: register, memory
         * or immediate.
@@ -3571,6 +3568,7 @@ done_prefixes:
                                                           c->op_bytes;
        srcmem_common:
                c->src = memop;
+               memopp = &c->src;
                break;
        case SrcImmU16:
                rc = decode_imm(ctxt, &c->src, 2, false);
@@ -3667,6 +3665,7 @@ done_prefixes:
        case DstMem:
        case DstMem64:
                c->dst = memop;
+               memopp = &c->dst;
                if ((c->d & DstMask) == DstMem64)
                        c->dst.bytes = 8;
                else
@@ -3700,10 +3699,13 @@ done_prefixes:
                /* Special instructions do their own operand decoding. */
        default:
                c->dst.type = OP_NONE; /* Disable writeback. */
-               return 0;
+               break;
        }
 
 done:
+       if (memopp && memopp->type == OP_MEM && c->rip_relative)
+               memopp->addr.mem.ea += c->eip;
+
        return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
index 0972315..68c3c13 100644 (file)
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point)
        return false;
 }
 
-static void coalesce_windows(struct pci_root_info *info, int type)
+static void coalesce_windows(struct pci_root_info *info, unsigned long type)
 {
        int i, j;
        struct resource *res1, *res2;
index 8214724..fe00830 100644 (file)
@@ -333,6 +333,7 @@ static int xen_register_pirq(u32 gsi, int triggering)
        struct physdev_map_pirq map_irq;
        int shareable = 0;
        char *name;
+       bool gsi_override = false;
 
        if (!xen_pv_domain())
                return -1;
@@ -349,11 +350,32 @@ static int xen_register_pirq(u32 gsi, int triggering)
        if (pirq < 0)
                goto out;
 
-       irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
+       /* Before we bind the GSI to a Linux IRQ, check whether
+        * we need to override it with bus_irq (IRQ) value. Usually for
+        * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
+        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
+        * but there are oddballs where the IRQ != GSI:
+        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
+        * which ends up being: gsi_to_irq[9] == 20
+        * (which is what acpi_gsi_to_irq ends up calling when starting
+        * the ACPI interpreter and keels over since IRQ 9 has not been
+        * setup as we had setup IRQ 20 for it).
+        */
+       if (gsi == acpi_sci_override_gsi) {
+               /* Check whether the GSI != IRQ */
+               acpi_gsi_to_irq(gsi, &irq);
+               if (irq != gsi)
+                       /* Bugger, we MUST have that IRQ. */
+                       gsi_override = true;
+       }
+       if (gsi_override)
+               irq = xen_bind_pirq_gsi_to_irq(irq, pirq, shareable, name);
+       else
+               irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
        if (irq < 0)
                goto out;
 
-       printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d\n", pirq, irq);
+       printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
 
        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_GSI;
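
The comment block added above carries the whole rationale: an ACPI interrupt source override can map the SCI's bus IRQ to a different GSI, and the PIRQ must then be bound under the IRQ number so that acpi_gsi_to_irq() later finds it. A condensed sketch of just that decision, using the calls from the hunk with error handling, surrounding setup, and exact types assumed:

        static int bind_sci_aware(u32 gsi, int pirq, int shareable, char *name)
        {
                int irq = gsi;

                /* Only the SCI override case is special-cased by the patch. */
                if (gsi == acpi_sci_override_gsi)
                        acpi_gsi_to_irq(gsi, &irq);  /* may yield IRQ != GSI */

                /* Bind under the (possibly overridden) IRQ number. */
                return xen_bind_pirq_gsi_to_irq(irq, pirq, shareable, name);
        }
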
index 673e968..0ccccb6 100644 (file)
@@ -1232,7 +1232,11 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
        struct {
                struct mmuext_op op;
+#ifdef CONFIG_SMP
                DECLARE_BITMAP(mask, num_processors);
+#else
+               DECLARE_BITMAP(mask, NR_CPUS);
+#endif
        } *args;
        struct multicall_space mcs;
 
index a62be8d..3689f83 100644 (file)
@@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q)
 
        bio_list_init(&bio_list_on_stack);
 
-       throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u",
+       throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
                        total_nr_queued(td), td->nr_queued[READ],
                        td->nr_queued[WRITE]);
 
@@ -1204,7 +1204,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
        }
 
 queue_bio:
-       throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
+       throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
                        " iodisp=%u iops=%u queued=%d/%d",
                        rw == READ ? 'R' : 'W',
                        tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
index 3c7b537..f379943 100644 (file)
@@ -988,9 +988,10 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
-       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
-                       " sect=%u", used_sl, cfqq->slice_dispatch, charge,
-                       iops_mode(cfqd), cfqq->nr_sectors);
+       cfq_log_cfqq(cfqq->cfqd, cfqq,
+                    "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
+                    used_sl, cfqq->slice_dispatch, charge,
+                    iops_mode(cfqd), cfqq->nr_sectors);
        cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
                                          unaccounted_sl);
        cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
@@ -2023,8 +2024,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         */
        if (sample_valid(cic->ttime_samples) &&
            (cfqq->slice_end - jiffies < cic->ttime_mean)) {
-               cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
-                               cic->ttime_mean);
+               cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
+                            cic->ttime_mean);
                return;
        }
 
@@ -2772,8 +2773,11 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
        smp_wmb();
        cic->key = cfqd_dead_key(cfqd);
 
-       if (ioc->ioc_data == cic)
+       if (rcu_dereference(ioc->ioc_data) == cic) {
+               spin_lock(&ioc->lock);
                rcu_assign_pointer(ioc->ioc_data, NULL);
+               spin_unlock(&ioc->lock);
+       }
 
        if (cic->cfqq[BLK_RW_ASYNC]) {
                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
index 95822ae..3608289 100644 (file)
@@ -1371,6 +1371,7 @@ struct disk_events {
        struct gendisk          *disk;          /* the associated disk */
        spinlock_t              lock;
 
+       struct mutex            block_mutex;    /* protects blocking */
        int                     block;          /* event blocking depth */
        unsigned int            pending;        /* events already sent out */
        unsigned int            clearing;       /* events being cleared */
@@ -1414,22 +1415,44 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
        return msecs_to_jiffies(intv_msecs);
 }
 
-static void __disk_block_events(struct gendisk *disk, bool sync)
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events().  Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
 {
        struct disk_events *ev = disk->ev;
        unsigned long flags;
        bool cancel;
 
+       if (!ev)
+               return;
+
+       /*
+        * Outer mutex ensures that the first blocker completes canceling
+        * the event work before further blockers are allowed to finish.
+        */
+       mutex_lock(&ev->block_mutex);
+
        spin_lock_irqsave(&ev->lock, flags);
        cancel = !ev->block++;
        spin_unlock_irqrestore(&ev->lock, flags);
 
-       if (cancel) {
-               if (sync)
-                       cancel_delayed_work_sync(&disk->ev->dwork);
-               else
-                       cancel_delayed_work(&disk->ev->dwork);
-       }
+       if (cancel)
+               cancel_delayed_work_sync(&disk->ev->dwork);
+
+       mutex_unlock(&ev->block_mutex);
 }
 
 static void __disk_unblock_events(struct gendisk *disk, bool check_now)
@@ -1460,27 +1483,6 @@ out_unlock:
        spin_unlock_irqrestore(&ev->lock, flags);
 }
 
-/**
- * disk_block_events - block and flush disk event checking
- * @disk: disk to block events for
- *
- * On return from this function, it is guaranteed that event checking
- * isn't in progress and won't happen until unblocked by
- * disk_unblock_events().  Events blocking is counted and the actual
- * unblocking happens after the matching number of unblocks are done.
- *
- * Note that this intentionally does not block event checking from
- * disk_clear_events().
- *
- * CONTEXT:
- * Might sleep.
- */
-void disk_block_events(struct gendisk *disk)
-{
-       if (disk->ev)
-               __disk_block_events(disk, true);
-}
-
 /**
  * disk_unblock_events - unblock disk event checking
  * @disk: disk to unblock events for
@@ -1508,10 +1510,18 @@ void disk_unblock_events(struct gendisk *disk)
  */
 void disk_check_events(struct gendisk *disk)
 {
-       if (disk->ev) {
-               __disk_block_events(disk, false);
-               __disk_unblock_events(disk, true);
+       struct disk_events *ev = disk->ev;
+       unsigned long flags;
+
+       if (!ev)
+               return;
+
+       spin_lock_irqsave(&ev->lock, flags);
+       if (!ev->block) {
+               cancel_delayed_work(&ev->dwork);
+               queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
        }
+       spin_unlock_irqrestore(&ev->lock, flags);
 }
 EXPORT_SYMBOL_GPL(disk_check_events);
 
@@ -1546,7 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
        spin_unlock_irq(&ev->lock);
 
        /* uncondtionally schedule event check and wait for it to finish */
-       __disk_block_events(disk, true);
+       disk_block_events(disk);
        queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
        flush_delayed_work(&ev->dwork);
        __disk_unblock_events(disk, false);
@@ -1664,7 +1674,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
        if (intv < 0 && intv != -1)
                return -EINVAL;
 
-       __disk_block_events(disk, true);
+       disk_block_events(disk);
        disk->ev->poll_msecs = intv;
        __disk_unblock_events(disk, true);
 
@@ -1750,6 +1760,7 @@ static void disk_add_events(struct gendisk *disk)
        INIT_LIST_HEAD(&ev->node);
        ev->disk = disk;
        spin_lock_init(&ev->lock);
+       mutex_init(&ev->block_mutex);
        ev->block = 1;
        ev->poll_msecs = -1;
        INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
@@ -1770,7 +1781,7 @@ static void disk_del_events(struct gendisk *disk)
        if (!disk->ev)
                return;
 
-       __disk_block_events(disk, true);
+       disk_block_events(disk);
 
        mutex_lock(&disk_events_mutex);
        list_del_init(&disk->ev->node);
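
The relocated kerneldoc above states the contract the rework relies on: blocking is counted, the first blocker synchronously cancels the polling work, and every block must be paired with an unblock. A minimal usage sketch of that pairing, in the style of the poll-interval store path shown above (illustrative only):

        /* Quiesce event checking around a state update, then re-arm it. */
        static void quiesce_and_update(struct gendisk *disk, long intv)
        {
                disk_block_events(disk);      /* counted; flushes the work item */
                disk->ev->poll_msecs = intv;  /* checking cannot run here */
                disk_unblock_events(disk);    /* matching unblock re-arms checking */
        }
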
index b5ccae2..b0165ec 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/net.h>
-#include <linux/slab.h>
 
 #define DEFLATE_DEF_LEVEL              Z_DEFAULT_COMPRESSION
 #define DEFLATE_DEF_WINBITS            11
@@ -73,7 +72,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
        int ret = 0;
        struct z_stream_s *stream = &ctx->decomp_stream;
 
-       stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+       stream->workspace = vzalloc(zlib_inflate_workspacesize());
        if (!stream->workspace) {
                ret = -ENOMEM;
                goto out;
@@ -86,7 +85,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
 out:
        return ret;
 out_free:
-       kfree(stream->workspace);
+       vfree(stream->workspace);
        goto out;
 }
 
@@ -99,7 +98,7 @@ static void deflate_comp_exit(struct deflate_ctx *ctx)
 static void deflate_decomp_exit(struct deflate_ctx *ctx)
 {
        zlib_inflateEnd(&ctx->decomp_stream);
-       kfree(ctx->decomp_stream.workspace);
+       vfree(ctx->decomp_stream.workspace);
 }
 
 static int deflate_init(struct crypto_tfm *tfm)
index d11d761..06b62e5 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/net.h>
-#include <linux/slab.h>
 
 #include <crypto/internal/compress.h>
 
@@ -60,7 +59,7 @@ static void zlib_decomp_exit(struct zlib_ctx *ctx)
 
        if (stream->workspace) {
                zlib_inflateEnd(stream);
-               kfree(stream->workspace);
+               vfree(stream->workspace);
                stream->workspace = NULL;
        }
 }
@@ -228,13 +227,13 @@ static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params,
                                 ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS])
                                 : DEF_WBITS;
 
-       stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+       stream->workspace = vzalloc(zlib_inflate_workspacesize());
        if (!stream->workspace)
                return -ENOMEM;
 
        ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
        if (ret != Z_OK) {
-               kfree(stream->workspace);
+               vfree(stream->workspace);
                stream->workspace = NULL;
                return -EINVAL;
        }
index d38c40f..41223c7 100644 (file)
@@ -452,7 +452,7 @@ void ahci_save_initial_config(struct device *dev,
        }
 
        if (mask_port_map) {
-               dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
+               dev_printk(KERN_WARNING, dev, "masking port_map 0x%x -> 0x%x\n",
                           port_map,
                           port_map & mask_port_map);
                port_map &= mask_port_map;
index 736bee5..000d03a 100644 (file)
@@ -4143,9 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         * Devices which choke on SETXFER.  Applies only if both the
         * device and controller are SATA.
         */
-       { "PIONEER DVD-RW  DVRTD08",    "1.00", ATA_HORKAGE_NOSETXFER },
-       { "PIONEER DVD-RW  DVR-212D",   "1.28", ATA_HORKAGE_NOSETXFER },
-       { "PIONEER DVD-RW  DVR-216D",   "1.08", ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVRTD08",    NULL,   ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVR-212D",   NULL,   ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
        /* End Marker */
        { }
index d51f979..927f968 100644 (file)
@@ -3797,6 +3797,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
  */
 int ata_sas_port_start(struct ata_port *ap)
 {
+       /*
+        * the port is marked as frozen at allocation time, but if we don't
+        * have new eh, we won't thaw it
+        */
+       if (!ap->ops->error_handler)
+               ap->pflags &= ~ATA_PFLAG_FROZEN;
        return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_start);
index 75a6a0c..5d7f58a 100644 (file)
@@ -161,6 +161,9 @@ static const struct pci_device_id marvell_pci_tbl[] = {
        { PCI_DEVICE(0x11AB, 0x6121), },
        { PCI_DEVICE(0x11AB, 0x6123), },
        { PCI_DEVICE(0x11AB, 0x6145), },
+       { PCI_DEVICE(0x1B4B, 0x91A0), },
+       { PCI_DEVICE(0x1B4B, 0x91A4), },
+
        { }     /* terminate list */
 };
 
index 1c4b3aa..dc88a39 100644 (file)
@@ -389,7 +389,7 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf)
 /*
  * Function: get_burst_length_encode
  * arguments: datalength: length in bytes of data
- * returns value to be programmed in register corrresponding to data length
+ * returns value to be programmed in register corresponding to data length
  * This value is effectively the log(base 2) of the length
  */
 static  int get_burst_length_encode(int datalength)
index 1c291af..6040717 100644 (file)
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
  *
  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  */
-struct platform_device *__init_or_module platform_device_register_resndata(
+struct platform_device *platform_device_register_resndata(
                struct device *parent,
                const char *name, int id,
                const struct resource *res, unsigned int num,
index eaa8a85..ad367c4 100644 (file)
@@ -387,7 +387,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
        clknb = container_of(nb, struct pm_clk_notifier_block, nb);
 
        switch (action) {
-       case BUS_NOTIFY_ADD_DEVICE:
+       case BUS_NOTIFY_BIND_DRIVER:
                if (clknb->con_ids[0]) {
                        for (con_id = clknb->con_ids; *con_id; con_id++)
                                enable_clock(dev, *con_id);
@@ -395,7 +395,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
                        enable_clock(dev, NULL);
                }
                break;
-       case BUS_NOTIFY_DEL_DEVICE:
+       case BUS_NOTIFY_UNBOUND_DRIVER:
                if (clknb->con_ids[0]) {
                        for (con_id = clknb->con_ids; *con_id; con_id++)
                                disable_clock(dev, *con_id);
index aa63202..06f09bf 100644 (file)
@@ -57,7 +57,8 @@ static int async_error;
  */
 void device_pm_init(struct device *dev)
 {
-       dev->power.in_suspend = false;
+       dev->power.is_prepared = false;
+       dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
-       if (dev->parent && dev->parent->power.in_suspend)
+       if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
@@ -511,7 +512,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        dpm_wait(dev->parent, async);
        device_lock(dev);
 
-       dev->power.in_suspend = false;
+       /*
+        * This is a small lie: we allow new children to be added below a
+        * resumed device even though it has not gone through ->complete() yet.
+        */
+       dev->power.is_prepared = false;
+
+       if (!dev->power.is_suspended)
+               goto Unlock;
 
        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "power domain ");
@@ -548,6 +556,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        }
 
  End:
+       dev->power.is_suspended = false;
+
+ Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);
 
@@ -670,7 +681,7 @@ void dpm_complete(pm_message_t state)
                struct device *dev = to_device(dpm_prepared_list.prev);
 
                get_device(dev);
-               dev->power.in_suspend = false;
+               dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);
 
@@ -835,11 +846,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        device_lock(dev);
 
        if (async_error)
-               goto End;
+               goto Unlock;
 
        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
-               goto End;
+               goto Unlock;
        }
 
        if (dev->pwr_domain) {
@@ -877,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        }
 
  End:
+       dev->power.is_suspended = !error;
+
+ Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);
 
@@ -1042,7 +1056,7 @@ int dpm_prepare(pm_message_t state)
                        put_device(dev);
                        break;
                }
-               dev->power.in_suspend = true;
+               dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
index 219d88a..dde6a0f 100644 (file)
@@ -139,6 +139,7 @@ static int cn_call_callback(struct sk_buff *skb)
        spin_unlock_bh(&dev->cbdev->queue_lock);
 
        if (cbq != NULL) {
+               err = 0;
                cbq->callback(msg, nsp);
                kfree_skb(skb);
                cn_queue_release_callback(cbq);
index d0e65d6..676d957 100644 (file)
@@ -238,9 +238,9 @@ static int build_sh_desc_ipsec(struct caam_ctx *ctx)
 
        /* build shared descriptor for this session */
        sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
-                         keys_fit_inline ?
-                         ctx->split_key_pad_len + ctx->enckeylen :
-                         CAAM_PTR_SZ * 2, GFP_DMA | GFP_KERNEL);
+                         (keys_fit_inline ?
+                          ctx->split_key_pad_len + ctx->enckeylen :
+                          CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
        if (!sh_desc) {
                dev_err(jrdev, "could not allocate shared descriptor\n");
                return -ENOMEM;
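
The added parentheses matter because the conditional operator binds more loosely than '+': without them the size expression parses as (CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN + keys_fit_inline) ? A : B, so the base descriptor size was dropped from the allocation. A standalone illustration of the precedence (the numbers and names are made up for the demo):

	#include <stdio.h>

	int main(void)
	{
		int base = 100, keys_fit = 1, inline_sz = 40, ptr_sz = 8;

		/* parses as (base + keys_fit) ? inline_sz : ptr_sz  ->  40, base is lost */
		int broken = base + keys_fit ? inline_sz : ptr_sz;
		/* what the driver actually wants: 100 + 40 = 140 */
		int fixed  = base + (keys_fit ? inline_sz : ptr_sz);

		printf("broken=%d fixed=%d\n", broken, fixed);	/* broken=40 fixed=140 */
		return 0;
	}
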
index 87096b6..2f21b0b 100644 (file)
@@ -13,6 +13,7 @@ menu "Google Firmware Drivers"
 config GOOGLE_SMI
        tristate "SMI interface for Google platforms"
        depends on ACPI && DMI
+       select EFI
        select EFI_VARS
        help
          Say Y here if you want to enable SMI callbacks for Google
index 74e4ff5..4012fe4 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
 #include "drmP.h"
 
 /** @file drm_gem.c
index 4d46441..0a893f7 100644 (file)
@@ -1207,13 +1207,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
        if (ret)
                return ret;
 
-       seq_printf(m, "power context ");
-       describe_obj(m, dev_priv->pwrctx);
-       seq_printf(m, "\n");
+       if (dev_priv->pwrctx) {
+               seq_printf(m, "power context ");
+               describe_obj(m, dev_priv->pwrctx);
+               seq_printf(m, "\n");
+       }
 
-       seq_printf(m, "render context ");
-       describe_obj(m, dev_priv->renderctx);
-       seq_printf(m, "\n");
+       if (dev_priv->renderctx) {
+               seq_printf(m, "render context ");
+               describe_obj(m, dev_priv->renderctx);
+               seq_printf(m, "\n");
+       }
 
        mutex_unlock(&dev->mode_config.mutex);
 
index 0239e99..e178702 100644 (file)
@@ -1266,30 +1266,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_modeset_gem_init(dev);
 
-       if (IS_IVYBRIDGE(dev)) {
-               /* Share pre & uninstall handlers with ILK/SNB */
-               dev->driver->irq_handler = ivybridge_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
-               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ivybridge_enable_vblank;
-               dev->driver->disable_vblank = ivybridge_disable_vblank;
-       } else if (HAS_PCH_SPLIT(dev)) {
-               dev->driver->irq_handler = ironlake_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
-               dev->driver->irq_postinstall = ironlake_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ironlake_enable_vblank;
-               dev->driver->disable_vblank = ironlake_disable_vblank;
-       } else {
-               dev->driver->irq_preinstall = i915_driver_irq_preinstall;
-               dev->driver->irq_postinstall = i915_driver_irq_postinstall;
-               dev->driver->irq_uninstall = i915_driver_irq_uninstall;
-               dev->driver->irq_handler = i915_driver_irq_handler;
-               dev->driver->enable_vblank = i915_enable_vblank;
-               dev->driver->disable_vblank = i915_disable_vblank;
-       }
-
        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem;
@@ -2017,12 +1993,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        /* enable GEM by default */
        dev_priv->has_gem = 1;
 
-       dev->driver->get_vblank_counter = i915_get_vblank_counter;
-       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-       if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
-               dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
-               dev->driver->get_vblank_counter = gm45_get_vblank_counter;
-       }
+       intel_irq_init(dev);
 
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
@@ -2182,9 +2153,8 @@ int i915_driver_unload(struct drm_device *dev)
                /* Flush any outstanding unpin_work. */
                flush_workqueue(dev_priv->wq);
 
-               i915_gem_free_all_phys_object(dev);
-
                mutex_lock(&dev->struct_mutex);
+               i915_gem_free_all_phys_object(dev);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
                if (I915_HAS_FBC(dev) && i915_powersave)
index 0defd42..013d304 100644 (file)
@@ -579,6 +579,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
        } else switch (INTEL_INFO(dev)->gen) {
        case 6:
                ret = gen6_do_reset(dev, flags);
+               /* The reset clears forcewake; if anyone still holds a reference, re-take it */
+               if (atomic_read(&dev_priv->forcewake_count))
+                       __gen6_gt_force_wake_get(dev_priv);
                break;
        case 5:
                ret = ironlake_do_reset(dev, flags);
@@ -762,14 +765,6 @@ static struct drm_driver driver = {
        .resume = i915_resume,
 
        .device_is_agp = i915_driver_device_is_agp,
-       .enable_vblank = i915_enable_vblank,
-       .disable_vblank = i915_disable_vblank,
-       .get_vblank_timestamp = i915_get_vblank_timestamp,
-       .get_scanout_position = i915_get_crtc_scanoutpos,
-       .irq_preinstall = i915_driver_irq_preinstall,
-       .irq_postinstall = i915_driver_irq_postinstall,
-       .irq_uninstall = i915_driver_irq_uninstall,
-       .irq_handler = i915_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
        .master_create = i915_master_create,
        .master_destroy = i915_master_destroy,
index f63ee16..f245c58 100644 (file)
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
        void (*fdi_link_train)(struct drm_crtc *crtc);
        void (*init_clock_gating)(struct drm_device *dev);
        void (*init_pch_clock_gating)(struct drm_device *dev);
+       int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+                         struct drm_framebuffer *fb,
+                         struct drm_i915_gem_object *obj);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@ -994,8 +997,6 @@ extern unsigned int i915_enable_fbc;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
-extern void i915_save_display(struct drm_device *dev);
-extern void i915_restore_display(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
@@ -1030,33 +1031,12 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern int i915_driver_irq_postinstall(struct drm_device *dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
-
-extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
-extern void ironlake_irq_preinstall(struct drm_device *dev);
-extern int ironlake_irq_postinstall(struct drm_device *dev);
-extern void ironlake_irq_uninstall(struct drm_device *dev);
-
-extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
-extern void ivybridge_irq_preinstall(struct drm_device *dev);
-extern int ivybridge_irq_postinstall(struct drm_device *dev);
-extern void ivybridge_irq_uninstall(struct drm_device *dev);
+extern void intel_irq_init(struct drm_device *dev);
 
 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
-extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
-extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
-extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 
@@ -1067,13 +1047,6 @@ void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void intel_enable_asle (struct drm_device *dev);
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
-                             int *max_error,
-                             struct timeval *vblank_time,
-                             unsigned flags);
-
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-                            int *vpos, int *hpos);
 
 #ifdef CONFIG_DEBUG_FS
 extern void i915_destroy_error_state(struct drm_device *dev);
index 94c84d7..5c0d124 100644 (file)
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
@@ -1219,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                ret = i915_gem_object_bind_to_gtt(obj, 0, true);
                if (ret)
                        goto unlock;
-       }
 
-       ret = i915_gem_object_set_to_gtt_domain(obj, write);
-       if (ret)
-               goto unlock;
+               ret = i915_gem_object_set_to_gtt_domain(obj, write);
+               if (ret)
+                       goto unlock;
+       }
 
        if (obj->tiling_mode == I915_TILING_NONE)
                ret = i915_gem_object_put_fence(obj);
@@ -1558,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 
        inode = obj->base.filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
+       gfpmask |= mapping_gfp_mask(mapping);
+
        for (i = 0; i < page_count; i++) {
-               page = read_cache_page_gfp(mapping, i,
-                                          GFP_HIGHUSER |
-                                          __GFP_COLD |
-                                          __GFP_RECLAIMABLE |
-                                          gfpmask);
+               page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
 
@@ -1701,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
        /* Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
-        * backing pages, *now*. Here we mirror the actions taken
-        * when by shmem_delete_inode() to release the backing store.
+        * backing pages, *now*.
         */
        inode = obj->base.filp->f_path.dentry->d_inode;
-       truncate_inode_pages(inode->i_mapping, 0);
-       if (inode->i_op->truncate_range)
-               inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+       shmem_truncate_range(inode, 0, (loff_t)-1);
 
        obj->madv = __I915_MADV_PURGED;
 }
@@ -2080,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
                if (!ier) {
                        DRM_ERROR("something (likely vbetool) disabled "
                                  "interrupts, re-enabling\n");
-                       i915_driver_irq_preinstall(ring->dev);
-                       i915_driver_irq_postinstall(ring->dev);
+                       ring->dev->driver->irq_preinstall(ring->dev);
+                       ring->dev->driver->irq_postinstall(ring->dev);
                }
 
                trace_i915_gem_request_wait_begin(ring, seqno);
@@ -2926,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
         */
        wmb();
 
-       i915_gem_release_mmap(obj);
-
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
@@ -3567,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
+       struct address_space *mapping;
 
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
@@ -3577,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                return NULL;
        }
 
+       mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+       mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
        i915_gem_info_add_obj(dev_priv, size);
 
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3952,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 
        page_count = obj->base.size / PAGE_SIZE;
        for (i = 0; i < page_count; i++) {
-               struct page *page = read_cache_page_gfp(mapping, i,
-                                                       GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               struct page *page = shmem_read_mapping_page(mapping, i);
                if (!IS_ERR(page)) {
                        char *dst = kmap_atomic(page);
                        memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4014,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                struct page *page;
                char *dst, *src;
 
-               page = read_cache_page_gfp(mapping, i,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
index 20a4cc5..4934cf8 100644 (file)
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);
 
-       /* blow away mappings if mapped through GTT */
-       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
-               i915_gem_release_mmap(obj);
-
        if (obj->base.pending_write_domain)
                cd->flips |= atomic_read(&obj->pending_flip);
 
index 9e34a1a..3b03f85 100644 (file)
@@ -152,7 +152,7 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
@@ -184,7 +184,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
        return (high1 << 8) | low;
 }
 
-u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);
@@ -198,7 +198,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
        return I915_READ(reg);
 }
 
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -264,7 +264,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
        return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
@@ -462,7 +462,7 @@ static void pch_irq_handler(struct drm_device *dev)
                DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
 }
 
-irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -550,7 +550,7 @@ done:
        return ret;
 }
 
-irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1209,7 +1209,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
        }
 }
 
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1454,7 +1454,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-int i915_enable_vblank(struct drm_device *dev, int pipe)
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1478,7 +1478,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1494,7 +1494,7 @@ int ironlake_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1513,7 +1513,7 @@ int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1529,7 +1529,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1540,7 +1540,7 @@ void ironlake_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1728,7 +1728,7 @@ repeat:
 
 /* drm_dma.h hooks
 */
-void ironlake_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -1740,7 +1740,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
                INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
 
        I915_WRITE(HWSTAM, 0xeffe);
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
                /* Workaround stalls observed on Sandy Bridge GPUs by
                 * making the blitter command streamer generate a
                 * write to the Hardware Status Page for
@@ -1749,6 +1749,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
                 * happens.
                 */
                I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+               I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
        }
 
        /* XXX hotplug from PCH */
@@ -1768,7 +1769,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
        POSTING_READ(SDEIER);
 }
 
-int ironlake_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
@@ -1840,7 +1841,7 @@ int ironlake_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ivybridge_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
@@ -1890,7 +1891,7 @@ int ivybridge_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-void i915_driver_irq_preinstall(struct drm_device * dev)
+static void i915_driver_irq_preinstall(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
@@ -1917,7 +1918,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
  * Must be called after intel_modeset_init or hotplug interrupts won't be
  * enabled correctly.
  */
-int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i915_driver_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
@@ -1993,7 +1994,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-void ironlake_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -2013,7 +2014,7 @@ void ironlake_irq_uninstall(struct drm_device *dev)
        I915_WRITE(GTIIR, I915_READ(GTIIR));
 }
 
-void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i915_driver_irq_uninstall(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
@@ -2039,3 +2040,41 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
 }
+
+void intel_irq_init(struct drm_device *dev)
+{
+       dev->driver->get_vblank_counter = i915_get_vblank_counter;
+       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+       if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+               dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+               dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+       }
+
+       dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+       dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+       if (IS_IVYBRIDGE(dev)) {
+               /* Share pre & uninstall handlers with ILK/SNB */
+               dev->driver->irq_handler = ivybridge_irq_handler;
+               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+               dev->driver->irq_uninstall = ironlake_irq_uninstall;
+               dev->driver->enable_vblank = ivybridge_enable_vblank;
+               dev->driver->disable_vblank = ivybridge_disable_vblank;
+       } else if (HAS_PCH_SPLIT(dev)) {
+               dev->driver->irq_handler = ironlake_irq_handler;
+               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_postinstall = ironlake_irq_postinstall;
+               dev->driver->irq_uninstall = ironlake_irq_uninstall;
+               dev->driver->enable_vblank = ironlake_enable_vblank;
+               dev->driver->disable_vblank = ironlake_disable_vblank;
+       } else {
+               dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+               dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+               dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+               dev->driver->irq_handler = i915_driver_irq_handler;
+               dev->driver->enable_vblank = i915_enable_vblank;
+               dev->driver->disable_vblank = i915_disable_vblank;
+       }
+}
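
With the per-generation hooks now assigned in one place, the i915_dma.c hunks above reduce the load path to calling intel_irq_init() before drm_irq_install(); a schematic of that ordering:

	intel_irq_init(dev);		/* fill in dev->driver->irq_* and vblank hooks */
	ret = drm_irq_install(dev);	/* registers dev->driver->irq_handler for the device's IRQ */
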
index 2f967af..5d5def7 100644 (file)
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE           0
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR                   (1 << 3)
 
+#define GEN6_BSD_HWSTAM                        0x12098
 #define GEN6_BSD_IMR                   0x120a8
 #define   GEN6_BSD_USER_INTERRUPT      (1 << 12)
 
index 60a94d2..5257cfc 100644 (file)
@@ -597,7 +597,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        return;
 }
 
-void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -689,7 +689,7 @@ void i915_save_display(struct drm_device *dev)
        i915_save_vga(dev);
 }
 
-void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -780,6 +780,7 @@ void i915_restore_display(struct drm_device *dev)
                I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
        else
                I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+
        I915_WRITE(VGA0, dev_priv->saveVGA0);
        I915_WRITE(VGA1, dev_priv->saveVGA1);
        I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -796,6 +797,8 @@ int i915_save_state(struct drm_device *dev)
 
        pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
+       mutex_lock(&dev->struct_mutex);
+
        /* Hardware status page */
        dev_priv->saveHWS = I915_READ(HWS_PGA);
 
@@ -835,6 +838,8 @@ int i915_save_state(struct drm_device *dev)
        for (i = 0; i < 3; i++)
                dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -845,6 +850,8 @@ int i915_restore_state(struct drm_device *dev)
 
        pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
+       mutex_lock(&dev->struct_mutex);
+
        /* Hardware status page */
        I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 
@@ -862,6 +869,7 @@ int i915_restore_state(struct drm_device *dev)
                I915_WRITE(IER, dev_priv->saveIER);
                I915_WRITE(IMR, dev_priv->saveIMR);
        }
+       mutex_unlock(&dev->struct_mutex);
 
        intel_init_clock_gating(dev);
 
@@ -873,6 +881,8 @@ int i915_restore_state(struct drm_device *dev)
        if (IS_GEN6(dev))
                gen6_enable_rps(dev_priv);
 
+       mutex_lock(&dev->struct_mutex);
+
        /* Cache mode state */
        I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
 
@@ -886,6 +896,8 @@ int i915_restore_state(struct drm_device *dev)
        for (i = 0; i < 3; i++)
                I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
 
+       mutex_unlock(&dev->struct_mutex);
+
        intel_i2c_reset(dev);
 
        return 0;
index 81a9059..21b6f93 100644 (file)
@@ -4687,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 
        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
+       intel_enable_plane(dev_priv, plane, pipe);
 
        ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
@@ -5217,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
-       if (!HAS_PCH_SPLIT(dev))
-               intel_enable_plane(dev_priv, plane, pipe);
 
        ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
@@ -6262,6 +6261,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+static int intel_gen2_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       unsigned long offset;
+       u32 flip_mask;
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+       if (ret)
+               goto out;
+
+       /* Offset into the new buffer for cases of shared fbs between CRTCs */
+       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+       ret = BEGIN_LP_RING(6);
+       if (ret)
+               goto out;
+
+       /* Can't queue multiple flips, so wait for the previous
+        * one to finish before executing the next.
+        */
+       if (intel_crtc->plane)
+               flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+       else
+               flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+       OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+       OUT_RING(MI_NOOP);
+       OUT_RING(MI_DISPLAY_FLIP |
+                MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+       OUT_RING(fb->pitch);
+       OUT_RING(obj->gtt_offset + offset);
+       OUT_RING(MI_NOOP);
+       ADVANCE_LP_RING();
+out:
+       return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       unsigned long offset;
+       u32 flip_mask;
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+       if (ret)
+               goto out;
+
+       /* Offset into the new buffer for cases of shared fbs between CRTCs */
+       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+       ret = BEGIN_LP_RING(6);
+       if (ret)
+               goto out;
+
+       if (intel_crtc->plane)
+               flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+       else
+               flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+       OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+       OUT_RING(MI_NOOP);
+       OUT_RING(MI_DISPLAY_FLIP_I915 |
+                MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+       OUT_RING(fb->pitch);
+       OUT_RING(obj->gtt_offset + offset);
+       OUT_RING(MI_NOOP);
+
+       ADVANCE_LP_RING();
+out:
+       return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       uint32_t pf, pipesrc;
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+       if (ret)
+               goto out;
+
+       ret = BEGIN_LP_RING(4);
+       if (ret)
+               goto out;
+
+       /* i965+ uses the linear or tiled offsets from the
+        * Display Registers (which do not change across a page-flip)
+        * so we need only reprogram the base address.
+        */
+       OUT_RING(MI_DISPLAY_FLIP |
+                MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+       OUT_RING(fb->pitch);
+       OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+       /* XXX Enabling the panel-fitter across page-flip is so far
+        * untested on non-native modes, so ignore it for now.
+        * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+        */
+       pf = 0;
+       pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+       OUT_RING(pf | pipesrc);
+       ADVANCE_LP_RING();
+out:
+       return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       uint32_t pf, pipesrc;
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+       if (ret)
+               goto out;
+
+       ret = BEGIN_LP_RING(4);
+       if (ret)
+               goto out;
+
+       OUT_RING(MI_DISPLAY_FLIP |
+                MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+       OUT_RING(fb->pitch | obj->tiling_mode);
+       OUT_RING(obj->gtt_offset);
+
+       pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+       pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+       OUT_RING(pf | pipesrc);
+       ADVANCE_LP_RING();
+out:
+       return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued.  Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+       if (ret)
+               goto out;
+
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               goto out;
+
+       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+       intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+       intel_ring_emit(ring, (obj->gtt_offset));
+       intel_ring_emit(ring, (MI_NOOP));
+       intel_ring_advance(ring);
+out:
+       return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+                                   struct drm_crtc *crtc,
+                                   struct drm_framebuffer *fb,
+                                   struct drm_i915_gem_object *obj)
+{
+       return -ENODEV;
+}
+
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event)
@@ -6272,9 +6462,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct drm_i915_gem_object *obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
-       unsigned long flags, offset;
-       int pipe = intel_crtc->pipe;
-       u32 pf, pipesrc;
+       unsigned long flags;
        int ret;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6303,9 +6491,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        obj = intel_fb->obj;
 
        mutex_lock(&dev->struct_mutex);
-       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
-       if (ret)
-               goto cleanup_work;
 
        /* Reference the objects for the scheduled work. */
        drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6317,91 +6502,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        if (ret)
                goto cleanup_objs;
 
-       if (IS_GEN3(dev) || IS_GEN2(dev)) {
-               u32 flip_mask;
-
-               /* Can't queue multiple flips, so wait for the previous
-                * one to finish before executing the next.
-                */
-               ret = BEGIN_LP_RING(2);
-               if (ret)
-                       goto cleanup_objs;
-
-               if (intel_crtc->plane)
-                       flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-               else
-                       flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-               OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-               OUT_RING(MI_NOOP);
-               ADVANCE_LP_RING();
-       }
-
        work->pending_flip_obj = obj;
 
        work->enable_stall_check = true;
 
-       /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
-
-       ret = BEGIN_LP_RING(4);
-       if (ret)
-               goto cleanup_objs;
-
        /* Block clients from rendering to the new back buffer until
         * the flip occurs and the object is no longer visible.
         */
        atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 
-       switch (INTEL_INFO(dev)->gen) {
-       case 2:
-               OUT_RING(MI_DISPLAY_FLIP |
-                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-               OUT_RING(fb->pitch);
-               OUT_RING(obj->gtt_offset + offset);
-               OUT_RING(MI_NOOP);
-               break;
-
-       case 3:
-               OUT_RING(MI_DISPLAY_FLIP_I915 |
-                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-               OUT_RING(fb->pitch);
-               OUT_RING(obj->gtt_offset + offset);
-               OUT_RING(MI_NOOP);
-               break;
-
-       case 4:
-       case 5:
-               /* i965+ uses the linear or tiled offsets from the
-                * Display Registers (which do not change across a page-flip)
-                * so we need only reprogram the base address.
-                */
-               OUT_RING(MI_DISPLAY_FLIP |
-                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-               OUT_RING(fb->pitch);
-               OUT_RING(obj->gtt_offset | obj->tiling_mode);
-
-               /* XXX Enabling the panel-fitter across page-flip is so far
-                * untested on non-native modes, so ignore it for now.
-                * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
-                */
-               pf = 0;
-               pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
-               OUT_RING(pf | pipesrc);
-               break;
-
-       case 6:
-       case 7:
-               OUT_RING(MI_DISPLAY_FLIP |
-                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-               OUT_RING(fb->pitch | obj->tiling_mode);
-               OUT_RING(obj->gtt_offset);
-
-               pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
-               pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
-               OUT_RING(pf | pipesrc);
-               break;
-       }
-       ADVANCE_LP_RING();
+       ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+       if (ret)
+               goto cleanup_pending;
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -6409,10 +6521,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        return 0;
 
+cleanup_pending:
+       atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 cleanup_objs:
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
-cleanup_work:
        mutex_unlock(&dev->struct_mutex);
 
        spin_lock_irqsave(&dev->event_lock, flags);
@@ -7657,6 +7770,31 @@ static void intel_init_display(struct drm_device *dev)
                else
                        dev_priv->display.get_fifo_size = i830_get_fifo_size;
        }
+
+       /* Default just returns -ENODEV to indicate unsupported */
+       dev_priv->display.queue_flip = intel_default_queue_flip;
+
+       switch (INTEL_INFO(dev)->gen) {
+       case 2:
+               dev_priv->display.queue_flip = intel_gen2_queue_flip;
+               break;
+
+       case 3:
+               dev_priv->display.queue_flip = intel_gen3_queue_flip;
+               break;
+
+       case 4:
+       case 5:
+               dev_priv->display.queue_flip = intel_gen4_queue_flip;
+               break;
+
+       case 6:
+               dev_priv->display.queue_flip = intel_gen6_queue_flip;
+               break;
+       case 7:
+               dev_priv->display.queue_flip = intel_gen7_queue_flip;
+               break;
+       }
 }
 
 /*
index a670c00..9e2959b 100644 (file)
@@ -1409,6 +1409,11 @@ void intel_setup_overlay(struct drm_device *dev)
        overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
        if (!overlay)
                return;
+
+       mutex_lock(&dev->struct_mutex);
+       if (WARN_ON(dev_priv->overlay))
+               goto out_free;
+
        overlay->dev = dev;
 
        reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
@@ -1448,7 +1453,7 @@ void intel_setup_overlay(struct drm_device *dev)
 
        regs = intel_overlay_map_regs(overlay);
        if (!regs)
-               goto out_free_bo;
+               goto out_unpin_bo;
 
        memset(regs, 0, sizeof(struct overlay_registers));
        update_polyphase_filter(regs);
@@ -1457,14 +1462,17 @@ void intel_setup_overlay(struct drm_device *dev)
        intel_overlay_unmap_regs(overlay, regs);
 
        dev_priv->overlay = overlay;
+       mutex_unlock(&dev->struct_mutex);
        DRM_INFO("initialized overlay support\n");
        return;
 
 out_unpin_bo:
-       i915_gem_object_unpin(reg_bo);
+       if (!OVERLAY_NEEDS_PHYSICAL(dev))
+               i915_gem_object_unpin(reg_bo);
 out_free_bo:
        drm_gem_object_unreference(&reg_bo->base);
 out_free:
+       mutex_unlock(&dev->struct_mutex);
        kfree(overlay);
        return;
 }
index 144f79a..731acea 100644 (file)
@@ -371,7 +371,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->vram.flags_valid        = nv50_vram_flags_valid;
                break;
        case 0xC0:
-       case 0xD0:
                engine->instmem.init            = nvc0_instmem_init;
                engine->instmem.takedown        = nvc0_instmem_takedown;
                engine->instmem.suspend         = nvc0_instmem_suspend;
@@ -923,7 +922,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
                dev_priv->card_type = NV_50;
                break;
        case 0xc0:
-       case 0xd0:
                dev_priv->card_type = NV_C0;
                break;
        default:
index 445af79..e8a5ffb 100644 (file)
@@ -2013,9 +2013,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.tile_config |= (3 << 0);
                break;
        }
-       /* num banks is 8 on all fusion asics */
+       /* num banks is 8 on all fusion asics; the field is encoded: 0 = 4, 1 = 8, 2 = 16 */
        if (rdev->flags & RADEON_IS_IGP)
-               rdev->config.evergreen.tile_config |= 8 << 4;
+               rdev->config.evergreen.tile_config |= 1 << 4;
        else
                rdev->config.evergreen.tile_config |=
                        ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
@@ -2248,7 +2248,10 @@ int evergreen_mc_init(struct radeon_device *rdev)
 
        /* Get VRAM informations */
        rdev->mc.vram_is_ddr = true;
-       tmp = RREG32(MC_ARB_RAMCFG);
+       if (rdev->flags & RADEON_IS_IGP)
+               tmp = RREG32(FUS_MC_ARB_RAMCFG);
+       else
+               tmp = RREG32(MC_ARB_RAMCFG);
        if (tmp & CHANSIZE_OVERRIDE) {
                chansize = 16;
        } else if (tmp & CHANSIZE_MASK) {
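
The tile_config change in the first evergreen hunk reflects that the bank field is an encoded exponent rather than a raw count, so 8 banks must be stored as 1. A small standalone sketch of that encoding, using a hypothetical helper name:

	/* Hypothetical helper: map a physical bank count to the 2-bit
	 * tile_config field (0 = 4 banks, 1 = 8 banks, 2 = 16 banks). */
	static inline unsigned int banks_to_tile_config(unsigned int num_banks)
	{
		switch (num_banks) {
		case 4:  return 0;
		case 8:  return 1;
		case 16: return 2;
		default: return 0;	/* fall back to the 4-bank encoding */
		}
	}

	/* usage: tile_config |= banks_to_tile_config(8) << 4;   -> writes 1 into the field */
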
index 9736746..4672869 100644 (file)
 #define        CGTS_USER_TCC_DISABLE                           0x914C
 #define                TCC_DISABLE_MASK                                0xFFFF0000
 #define                TCC_DISABLE_SHIFT                               16
-#define        CGTS_SM_CTRL_REG                                0x915C
+#define        CGTS_SM_CTRL_REG                                0x9150
 #define                OVERRIDE                                (1 << 21)
 
 #define        TA_CNTL_AUX                                     0x9508
index 27f4557..ef0e0e0 100644 (file)
@@ -179,6 +179,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
 void rs690_pm_info(struct radeon_device *rdev);
 extern int rv6xx_get_temp(struct radeon_device *rdev);
 extern int rv770_get_temp(struct radeon_device *rdev);
index 1e725d9..bf2b615 100644 (file)
@@ -2320,6 +2320,14 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
                        le16_to_cpu(clock_info->r600.usVDDC);
        }
 
+       /* patch up vddc when the table holds the 0xff01 placeholder (use board max VDDC) */
+       if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
+               u16 vddc;
+
+               if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
+                       rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+       }
+
        if (rdev->flags & RADEON_IS_IGP) {
                /* skip invalid modes */
                if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
@@ -2630,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+int radeon_atom_get_max_vddc(struct radeon_device *rdev,
+                            u16 *voltage)
+{
+       union set_voltage args;
+       int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+       u8 frev, crev;
+
+       if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+               return -EINVAL;
+
+       switch (crev) {
+       case 1:
+               return -EINVAL;
+       case 2:
+               args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+               args.v2.ucVoltageMode = 0;
+               args.v2.usVoltageLevel = 0;
+
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+               *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+               break;
+       default:
+               DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+               return -EINVAL;
+       }
 
+       return 0;
+}
 
 void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
 {
index 1aba85c..3fc5fa1 100644 (file)
@@ -104,7 +104,7 @@ static bool radeon_read_bios(struct radeon_device *rdev)
 static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 {
        int ret;
-       int size = 64 * 1024;
+       int size = 256 * 1024;
        int i;
 
        if (!radeon_atrm_supported(rdev->pdev))
index 6f508ff..8bb347d 100644 (file)
@@ -575,6 +575,12 @@ static void rv770_program_channel_remap(struct radeon_device *rdev)
        else
                tcp_chan_steer = 0x00fac688;
 
+       /* RV770 CE has special chremap setup */
+       if (rdev->pdev->device == 0x944e) {
+               tcp_chan_steer = 0x00b08b08;
+               mc_shared_chremap = 0x00b08b08;
+       }
+
        WREG32(TCP_CHAN_STEER, tcp_chan_steer);
        WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
 }
index 90e23e0..58c271e 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
 #include <linux/file.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
@@ -484,7 +485,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
 
        for (i = 0; i < ttm->num_pages; ++i) {
-               from_page = read_mapping_page(swap_space, i, NULL);
+               from_page = shmem_read_mapping_page(swap_space, i);
                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
@@ -557,7 +558,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
-               to_page = read_mapping_page(swap_space, i, NULL);
+               to_page = shmem_read_mapping_page(swap_space, i);
                if (unlikely(IS_ERR(to_page))) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
index f7440e8..6f3289a 100644 (file)
@@ -1423,6 +1423,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
index aecb5a4..a756ee6 100644 (file)
 
 #define USB_VENDOR_ID_LUMIO            0x202e
 #define USB_DEVICE_ID_CRYSTALTOUCH     0x0006
+#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL        0x0007
 
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
index 0b2dcd0..62cac4d 100644 (file)
@@ -271,6 +271,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        }
                        return 1;
                case HID_DG_CONTACTID:
+                       if (!td->maxcontacts)
+                               td->maxcontacts = MT_DEFAULT_MAXCONTACT;
                        input_mt_init_slots(hi->input, td->maxcontacts);
                        td->last_slot_field = usage->hid;
                        td->last_field_index = field->index;
@@ -547,9 +549,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (ret)
                goto fail;
 
-       if (!td->maxcontacts)
-               td->maxcontacts = MT_DEFAULT_MAXCONTACT;
-
        td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
                                GFP_KERNEL);
        if (!td->slots) {
@@ -677,6 +676,9 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
                HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
                        USB_DEVICE_ID_CRYSTALTOUCH) },
+       { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+               HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
+                       USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
 
        /* MosArt panels */
        { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -707,10 +709,10 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
                        USB_DEVICE_ID_MTP)},
        { .driver_data = MT_CLS_CONFIDENCE,
-               HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+               HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
                        USB_DEVICE_ID_MTP_STM)},
        { .driver_data = MT_CLS_CONFIDENCE,
-               HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+               HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
                        USB_DEVICE_ID_MTP_SITRONIX)},
 
        /* Touch International panels */
index 16db83c..5f888f7 100644 (file)
@@ -333,7 +333,7 @@ config SENSORS_F71882FG
            F71858FG
            F71862FG
            F71863FG
-           F71869F/E
+           F71869F/E/A
            F71882FG
            F71883FG
            F71889FG/ED/A
index c2ee204..b9b7caf 100644 (file)
@@ -32,6 +32,7 @@ static int adm1275_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        int config;
+       int ret;
        struct pmbus_driver_info *info;
 
        if (!i2c_check_functionality(client->adapter,
@@ -43,8 +44,10 @@ static int adm1275_probe(struct i2c_client *client,
                return -ENOMEM;
 
        config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
-       if (config < 0)
-               return config;
+       if (config < 0) {
+               ret = config;
+               goto err_mem;
+       }
 
        info->pages = 1;
        info->direct[PSC_VOLTAGE_IN] = true;
@@ -76,7 +79,14 @@ static int adm1275_probe(struct i2c_client *client,
        else
                info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
 
-       return pmbus_do_probe(client, id, info);
+       ret = pmbus_do_probe(client, id, info);
+       if (ret)
+               goto err_mem;
+       return 0;
+
+err_mem:
+       kfree(info);
+       return ret;
 }
 
 static int adm1275_remove(struct i2c_client *client)
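
The hunk above converts adm1275_probe() to a single error-unwind path so the pmbus_driver_info allocation is not leaked when the config read or pmbus_do_probe() fails. A minimal sketch of the pattern, with hypothetical names and a hypothetical register address:

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/slab.h>

static int example_probe(struct i2c_client *client)
{
        void *info;
        int ret;

        info = kzalloc(64, GFP_KERNEL);         /* size is illustrative */
        if (!info)
                return -ENOMEM;

        ret = i2c_smbus_read_byte_data(client, 0x00);   /* hypothetical reg */
        if (ret < 0)
                goto err_mem;           /* every later failure frees info */

        return 0;

err_mem:
        kfree(info);
        return ret;
}
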
index e0ef323..0064432 100644 (file)
@@ -78,8 +78,9 @@ static u16 emc6w201_read16(struct i2c_client *client, u8 reg)
 
        lsb = i2c_smbus_read_byte_data(client, reg);
        msb = i2c_smbus_read_byte_data(client, reg + 1);
-       if (lsb < 0 || msb < 0) {
-               dev_err(&client->dev, "16-bit read failed at 0x%02x\n", reg);
+       if (unlikely(lsb < 0 || msb < 0)) {
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       16, "read", reg);
                return 0xFFFF;  /* Arbitrary value */
        }
 
@@ -95,10 +96,39 @@ static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val)
        int err;
 
        err = i2c_smbus_write_byte_data(client, reg, val & 0xff);
-       if (!err)
+       if (likely(!err))
                err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
-       if (err < 0)
-               dev_err(&client->dev, "16-bit write failed at 0x%02x\n", reg);
+       if (unlikely(err < 0))
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       16, "write", reg);
+
+       return err;
+}
+
+/* Read 8-bit value from register */
+static u8 emc6w201_read8(struct i2c_client *client, u8 reg)
+{
+       int val;
+
+       val = i2c_smbus_read_byte_data(client, reg);
+       if (unlikely(val < 0)) {
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       8, "read", reg);
+               return 0x00;    /* Arbitrary value */
+       }
+
+       return val;
+}
+
+/* Write 8-bit value to register */
+static int emc6w201_write8(struct i2c_client *client, u8 reg, u8 val)
+{
+       int err;
+
+       err = i2c_smbus_write_byte_data(client, reg, val);
+       if (unlikely(err < 0))
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       8, "write", reg);
 
        return err;
 }
@@ -114,25 +144,25 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
        if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
                for (nr = 0; nr < 6; nr++) {
                        data->in[input][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_IN(nr));
                        data->in[min][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_IN_LOW(nr));
                        data->in[max][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_IN_HIGH(nr));
                }
 
                for (nr = 0; nr < 6; nr++) {
                        data->temp[input][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_TEMP(nr));
                        data->temp[min][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_TEMP_LOW(nr));
                        data->temp[max][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_TEMP_HIGH(nr));
                }
 
@@ -192,7 +222,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
 
        mutex_lock(&data->update_lock);
        data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
-       err = i2c_smbus_write_byte_data(client, reg, data->in[sf][nr]);
+       err = emc6w201_write8(client, reg, data->in[sf][nr]);
        mutex_unlock(&data->update_lock);
 
        return err < 0 ? err : count;
@@ -229,7 +259,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
 
        mutex_lock(&data->update_lock);
        data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
-       err = i2c_smbus_write_byte_data(client, reg, data->temp[sf][nr]);
+       err = emc6w201_write8(client, reg, data->temp[sf][nr]);
        mutex_unlock(&data->update_lock);
 
        return err < 0 ? err : count;
@@ -444,7 +474,7 @@ static int emc6w201_detect(struct i2c_client *client,
 
        /* Check configuration */
        config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG);
-       if ((config & 0xF4) != 0x04)
+       if (config < 0 || (config & 0xF4) != 0x04)
                return -ENODEV;
        if (!(config & 0x01)) {
                dev_err(&client->dev, "Monitoring not enabled\n");
index a4a94a0..2d96ed2 100644 (file)
@@ -52,6 +52,7 @@
 #define SIO_F71858_ID          0x0507  /* Chipset ID */
 #define SIO_F71862_ID          0x0601  /* Chipset ID */
 #define SIO_F71869_ID          0x0814  /* Chipset ID */
+#define SIO_F71869A_ID         0x1007  /* Chipset ID */
 #define SIO_F71882_ID          0x0541  /* Chipset ID */
 #define SIO_F71889_ID          0x0723  /* Chipset ID */
 #define SIO_F71889E_ID         0x0909  /* Chipset ID */
@@ -108,8 +109,8 @@ static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
-            f71889ed, f71889a, f8000, f81865f };
+enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a, f71882fg,
+            f71889fg, f71889ed, f71889a, f8000, f81865f };
 
 static const char *f71882fg_names[] = {
        "f71808e",
@@ -117,6 +118,7 @@ static const char *f71882fg_names[] = {
        "f71858fg",
        "f71862fg",
        "f71869", /* Both f71869f and f71869e, reg. compatible and same id */
+       "f71869a",
        "f71882fg",
        "f71889fg", /* f81801u too, same id */
        "f71889ed",
@@ -131,6 +133,7 @@ static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
        [f71858fg]      = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
        [f71862fg]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71869]        = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+       [f71869a]       = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71882fg]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71889fg]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71889ed]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
@@ -145,6 +148,7 @@ static const char f71882fg_has_in1_alarm[] = {
        [f71858fg]      = 0,
        [f71862fg]      = 0,
        [f71869]        = 0,
+       [f71869a]       = 0,
        [f71882fg]      = 1,
        [f71889fg]      = 1,
        [f71889ed]      = 1,
@@ -159,6 +163,7 @@ static const char f71882fg_fan_has_beep[] = {
        [f71858fg]      = 0,
        [f71862fg]      = 1,
        [f71869]        = 1,
+       [f71869a]       = 1,
        [f71882fg]      = 1,
        [f71889fg]      = 1,
        [f71889ed]      = 1,
@@ -173,6 +178,7 @@ static const char f71882fg_nr_fans[] = {
        [f71858fg]      = 3,
        [f71862fg]      = 3,
        [f71869]        = 3,
+       [f71869a]       = 3,
        [f71882fg]      = 4,
        [f71889fg]      = 3,
        [f71889ed]      = 3,
@@ -187,6 +193,7 @@ static const char f71882fg_temp_has_beep[] = {
        [f71858fg]      = 0,
        [f71862fg]      = 1,
        [f71869]        = 1,
+       [f71869a]       = 1,
        [f71882fg]      = 1,
        [f71889fg]      = 1,
        [f71889ed]      = 1,
@@ -201,6 +208,7 @@ static const char f71882fg_nr_temps[] = {
        [f71858fg]      = 3,
        [f71862fg]      = 3,
        [f71869]        = 3,
+       [f71869a]       = 3,
        [f71882fg]      = 3,
        [f71889fg]      = 3,
        [f71889ed]      = 3,
@@ -2243,6 +2251,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
                case f71808e:
                case f71808a:
                case f71869:
+               case f71869a:
                        /* These always have signed auto point temps */
                        data->auto_point_temp_signed = 1;
                        /* Fall through to select correct fan/pwm reg bank! */
@@ -2305,6 +2314,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
                case f71808e:
                case f71808a:
                case f71869:
+               case f71869a:
                case f71889fg:
                case f71889ed:
                case f71889a:
@@ -2528,6 +2538,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
        case SIO_F71869_ID:
                sio_data->type = f71869;
                break;
+       case SIO_F71869A_ID:
+               sio_data->type = f71869a;
+               break;
        case SIO_F71882_ID:
                sio_data->type = f71882fg;
                break;
@@ -2662,7 +2675,7 @@ static void __exit f71882fg_exit(void)
 }
 
 MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans Edgington, Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans Edgington, Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
 
 module_init(f71882fg_init);
index 2582bfe..c8195a0 100644 (file)
@@ -202,7 +202,7 @@ static struct vrm_model vrm_models[] = {
 
        {X86_VENDOR_CENTAUR, 0x6, 0x7, ANY, 85},        /* Eden ESP/Ezra */
        {X86_VENDOR_CENTAUR, 0x6, 0x8, 0x7, 85},        /* Ezra T */
-       {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85},        /* Nemiah */
+       {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85},        /* Nehemiah */
        {X86_VENDOR_CENTAUR, 0x6, 0x9, ANY, 17},        /* C3-M, Eden-N */
        {X86_VENDOR_CENTAUR, 0x6, 0xA, 0x7, 0},         /* No information */
        {X86_VENDOR_CENTAUR, 0x6, 0xA, ANY, 13},        /* C7, Esther */
index 98e2e28..931d940 100644 (file)
@@ -47,12 +47,14 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
        if (info->func[0]
            && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
                info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
-       if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+       if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_12) &&
+           pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
                info->func[0] |= PMBUS_HAVE_FAN12;
                if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
                        info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
        }
-       if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+       if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_34) &&
+           pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
                info->func[0] |= PMBUS_HAVE_FAN34;
                if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
                        info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
@@ -63,6 +65,10 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
                                              PMBUS_STATUS_TEMPERATURE))
                        info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
        }
+       if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_2))
+               info->func[0] |= PMBUS_HAVE_TEMP2;
+       if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_3))
+               info->func[0] |= PMBUS_HAVE_TEMP3;
 
        /* Sensors detected on all pages */
        for (page = 0; page < info->pages; page++) {
index 354770e..744672c 100644 (file)
@@ -1430,14 +1430,9 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
 
-       /*
-        * Bail out if status register or PMBus revision register
-        * does not exist.
-        */
-       if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
-           || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
-               dev_err(&client->dev,
-                       "Status or revision register not found\n");
+       /* Bail out if PMBus status register does not exist. */
+       if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
+               dev_err(&client->dev, "PMBus status register not found\n");
                ret = -ENODEV;
                goto out_data;
        }
index 020c872..3494a4c 100644 (file)
@@ -887,7 +887,7 @@ static void __exit sch5627_exit(void)
 }
 
 MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
 
 module_init(sch5627_init);
index dd39c1e..26c352a 100644 (file)
@@ -234,7 +234,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
 
        if (taos->state != TAOS_STATE_IDLE) {
                err = -ENODEV;
-               dev_dbg(&serio->dev, "TAOS EVM reset failed (state=%d, "
+               dev_err(&serio->dev, "TAOS EVM reset failed (state=%d, "
                        "pos=%d)\n", taos->state, taos->pos);
                goto exit_close;
        }
@@ -255,7 +255,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
                                         msecs_to_jiffies(250));
        if (taos->state != TAOS_STATE_IDLE) {
                err = -ENODEV;
-               dev_err(&adapter->dev, "Echo off failed "
+               dev_err(&serio->dev, "TAOS EVM echo off failed "
                        "(state=%d)\n", taos->state);
                goto exit_close;
        }
@@ -263,7 +263,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
        err = i2c_add_adapter(adapter);
        if (err)
                goto exit_close;
-       dev_dbg(&serio->dev, "Connected to TAOS EVM\n");
+       dev_info(&serio->dev, "Connected to TAOS EVM\n");
 
        taos->client = taos_instantiate_device(adapter);
        return 0;
@@ -288,7 +288,7 @@ static void taos_disconnect(struct serio *serio)
        serio_set_drvdata(serio, NULL);
        kfree(taos);
 
-       dev_dbg(&serio->dev, "Disconnected from TAOS EVM\n");
+       dev_info(&serio->dev, "Disconnected from TAOS EVM\n");
 }
 
 static struct serio_device_id taos_serio_ids[] = {
index 54e1ce7..6f89536 100644 (file)
@@ -201,10 +201,11 @@ static int pca954x_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, data);
 
-       /* Read the mux register at addr to verify
-        * that the mux is in fact present.
+       /* Write the mux register at addr to verify
+        * that the mux is in fact present. This also
+        * initializes the mux to disconnected state.
         */
-       if (i2c_smbus_read_byte(client) < 0) {
+       if (i2c_smbus_write_byte(client, 0) < 0) {
                dev_warn(&client->dev, "probe failed\n");
                goto exit_free;
        }
index f62f52f..fc0f2bd 100644 (file)
@@ -3641,7 +3641,8 @@ static struct kobj_type cm_port_obj_type = {
 
 static char *cm_devnode(struct device *dev, mode_t *mode)
 {
-       *mode = 0666;
+       if (mode)
+               *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
 }
 
index e49a85f..56898b6 100644 (file)
@@ -826,7 +826,8 @@ static void ib_uverbs_remove_one(struct ib_device *device)
 
 static char *uverbs_devnode(struct device *dev, mode_t *mode)
 {
-       *mode = 0666;
+       if (mode)
+               *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
 }
 
index f660cd0..31fb440 100644 (file)
@@ -1463,9 +1463,9 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
        struct c4iw_qp_attributes attrs;
        int disconnect = 1;
        int release = 0;
-       int abort = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(hdr);
+       int ret;
 
        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1501,10 +1501,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
                start_ep_timer(ep);
                __state_set(&ep->com, CLOSING);
                attrs.next_state = C4IW_QP_STATE_CLOSING;
-               abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+               ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-               peer_close_upcall(ep);
-               disconnect = 1;
+               if (ret != -ECONNRESET) {
+                       peer_close_upcall(ep);
+                       disconnect = 1;
+               }
                break;
        case ABORTING:
                disconnect = 0;
@@ -2109,15 +2111,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
                break;
        }
 
-       mutex_unlock(&ep->com.mutex);
        if (close) {
-               if (abrupt)
-                       ret = abort_connection(ep, NULL, gfp);
-               else
+               if (abrupt) {
+                       close_complete_upcall(ep);
+                       ret = send_abort(ep, NULL, gfp);
+               } else
                        ret = send_halfclose(ep, gfp);
                if (ret)
                        fatal = 1;
        }
+       mutex_unlock(&ep->com.mutex);
        if (fatal)
                release_ep_resources(ep);
        return ret;
@@ -2301,6 +2304,31 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
        return 0;
 }
 
+static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+       struct cpl_abort_req_rss *req = cplhdr(skb);
+       struct c4iw_ep *ep;
+       struct tid_info *t = dev->rdev.lldi.tids;
+       unsigned int tid = GET_TID(req);
+
+       ep = lookup_tid(t, tid);
+       if (is_neg_adv_abort(req->status)) {
+               PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+                    ep->hwtid);
+               kfree_skb(skb);
+               return 0;
+       }
+       PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+            ep->com.state);
+
+       /*
+        * Wake up any threads in rdma_init() or rdma_fini().
+        */
+       c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+       sched(dev, skb);
+       return 0;
+}
+
 /*
  * Most upcalls from the T4 Core go to sched() to
  * schedule the processing on a work queue.
@@ -2317,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
        [CPL_PASS_ESTABLISH] = sched,
        [CPL_PEER_CLOSE] = sched,
        [CPL_CLOSE_CON_RPL] = sched,
-       [CPL_ABORT_REQ_RSS] = sched,
+       [CPL_ABORT_REQ_RSS] = peer_abort_intr,
        [CPL_RDMA_TERMINATE] = sched,
        [CPL_FW4_ACK] = sched,
        [CPL_SET_TCB_RPL] = set_tcb_rpl,
index 8d8f8ad..1720dc7 100644 (file)
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
        if (ucontext) {
                memsize = roundup(memsize, PAGE_SIZE);
                hwentries = memsize / sizeof *chp->cq.queue;
+               while (hwentries > T4_MAX_IQ_SIZE) {
+                       memsize -= PAGE_SIZE;
+                       hwentries = memsize / sizeof *chp->cq.queue;
+               }
        }
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
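
The hunk above caps the user-visible CQ depth: after memsize is rounded up to a page, the entry count can exceed the hardware limit, so the allocation is shrunk one page at a time until it fits. A small sketch of that arithmetic, with hypothetical parameter names standing in for T4_MAX_IQ_SIZE and sizeof(*chp->cq.queue):

#include <linux/mm.h>

static unsigned long clamp_hwentries(unsigned long memsize,
                                     unsigned long entry_size,
                                     unsigned long max_entries)
{
        unsigned long hwentries = memsize / entry_size;

        /* Assumes memsize is page-aligned and at least one page. */
        while (hwentries > max_entries) {
                memsize -= PAGE_SIZE;
                hwentries = memsize / entry_size;
        }
        return hwentries;
}
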
index 273ffe4..0347eed 100644 (file)
@@ -625,7 +625,7 @@ pbl_done:
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;
-       mhp->attr.len = (u32) length;
+       mhp->attr.len = length;
 
        err = register_mem(rhp, php, mhp, shift);
        if (err)
index 3b773b0..a41578e 100644 (file)
@@ -1207,11 +1207,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                                c4iw_get_ep(&qhp->ep->com);
                        }
                        ret = rdma_fini(rhp, qhp, ep);
-                       if (ret) {
-                               if (internal)
-                                       c4iw_get_ep(&qhp->ep->com);
+                       if (ret)
                                goto err;
-                       }
                        break;
                case C4IW_QP_STATE_TERMINATE:
                        set_state(qhp, C4IW_QP_STATE_TERMINATE);
index 9f53e68..8ec5237 100644 (file)
@@ -469,6 +469,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
 #define IB_7322_LT_STATE_CFGENH          0x10
 #define IB_7322_LT_STATE_CFGTEST         0x11
+#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
+#define IB_7322_LT_STATE_CFGWAITENH      0x13
 
 /* link state machine states from IBC */
 #define IB_7322_L_STATE_DOWN             0x0
@@ -498,8 +500,10 @@ static const u8 qib_7322_physportstate[0x20] = {
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
        [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+       [IB_7322_LT_STATE_CFGWAITRMTTEST] =
+               IB_PHYSPORTSTATE_CFG_TRAIN,
+       [IB_7322_LT_STATE_CFGWAITENH] =
+               IB_PHYSPORTSTATE_CFG_WAIT_ENH,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -1692,7 +1696,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
                break;
        }
 
-       if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+       if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
+             ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
+            ibclt == IB_7322_LT_STATE_LINKUP) &&
            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
                force_h1(ppd);
                ppd->cpspec->qdr_reforce = 1;
@@ -7301,12 +7307,17 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
 {
        u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
-       printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
-               ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
-       if (enable)
+       u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
+
+       if (enable && !state) {
+               printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+                       ppd->dd->unit, ppd->port);
                data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
-       else
+       } else if (!enable && state) {
+               printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+                       ppd->dd->unit, ppd->port);
                data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+       }
        qib_write_kreg_port(ppd, krp_serdesctrl, data);
 }
 
index a693c56..6ae57d2 100644 (file)
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
         * states, or if it transitions from any of the up (INIT or better)
         * states into any of the down states (except link recovery), then
         * call the chip-specific code to take appropriate actions.
+        *
+        * ppd->lflags could be 0 if this is the first time the interrupt
+        * handler has been called but the link is already up.
         */
-       if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+       if (lstate >= IB_PORT_INIT &&
+           (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
            ltstate == IB_PHYSPORTSTATE_LINKUP) {
                /* transitioned to UP */
                if (dd->f_ib_updown(ppd, 1, ibcs))
index c0cff64..cc1dc48 100644 (file)
@@ -593,7 +593,7 @@ static void lp5521_unregister_sysfs(struct i2c_client *client)
                                &lp5521_led_attribute_group);
 }
 
-static int __init lp5521_init_led(struct lp5521_led *led,
+static int __devinit lp5521_init_led(struct lp5521_led *led,
                                struct i2c_client *client,
                                int chan, struct lp5521_platform_data *pdata)
 {
@@ -637,7 +637,7 @@ static int __init lp5521_init_led(struct lp5521_led *led,
        return 0;
 }
 
-static int lp5521_probe(struct i2c_client *client,
+static int __devinit lp5521_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct lp5521_chip              *chip;
index e19fed2..5971e30 100644 (file)
@@ -826,7 +826,7 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
        return 0;
 }
 
-static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
                           int chan, struct lp5523_platform_data *pdata)
 {
        char name[32];
@@ -872,7 +872,7 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
 
 static struct i2c_driver lp5523_driver;
 
-static int lp5523_probe(struct i2c_client *client,
+static int __devinit lp5523_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct lp5523_chip              *chip;
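
The lp5521/lp5523 hunks above retag helpers reached from probe() as __devinit rather than __init: probing can happen after .init.text has been discarded (for example on a late or hot-plugged device), so an __init callee would run from freed memory. A minimal sketch with hypothetical names:

#include <linux/i2c.h>
#include <linux/init.h>

static int __devinit example_init_led(void)
{
        return 0;
}

static int __devinit example_probe(struct i2c_client *client,
                                   const struct i2c_device_id *id)
{
        /* Safe: both caller and callee live in .devinit.text. */
        return example_init_led();
}
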
index 4332fc2..91e31e2 100644 (file)
@@ -7088,6 +7088,7 @@ static int remove_and_add_spares(mddev_t *mddev)
                list_for_each_entry(rdev, &mddev->disks, same_set) {
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(In_sync, &rdev->flags) &&
+                           !test_bit(Faulty, &rdev->flags) &&
                            !test_bit(Blocked, &rdev->flags))
                                spares++;
                        if (rdev->raid_disk < 0
index d019746..2a40d0e 100644 (file)
@@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
 
 static inline bool needs_unaligned_copy(const void *ptr)
 {
-#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        return false;
 #else
        return ((ptr - NULL) & 3) != 0;
index 668d41e..df03dd3 100644 (file)
@@ -270,7 +270,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
        return IOC4_VARIANT_PCI_RT;
 }
 
-static void __devinit
+static void
 ioc4_load_modules(struct work_struct *work)
 {
        request_module("sgiioc4");
index 81d7fa4..150cd70 100644 (file)
@@ -120,6 +120,7 @@ static int recur_count = REC_NUM_DEFAULT;
 static enum cname cpoint = CN_INVALID;
 static enum ctype cptype = CT_NONE;
 static int count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(count_lock);
 
 module_param(recur_count, int, 0644);
 MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -230,11 +231,14 @@ static const char *cp_name_to_str(enum cname name)
 static int lkdtm_parse_commandline(void)
 {
        int i;
+       unsigned long flags;
 
        if (cpoint_count < 1 || recur_count < 1)
                return -EINVAL;
 
+       spin_lock_irqsave(&count_lock, flags);
        count = cpoint_count;
+       spin_unlock_irqrestore(&count_lock, flags);
 
        /* No special parameters */
        if (!cpoint_type && !cpoint_name)
@@ -349,6 +353,9 @@ static void lkdtm_do_action(enum ctype which)
 
 static void lkdtm_handler(void)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&count_lock, flags);
        count--;
        printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
                        cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
@@ -357,6 +364,7 @@ static void lkdtm_handler(void)
                lkdtm_do_action(cptype);
                count = cpoint_count;
        }
+       spin_unlock_irqrestore(&count_lock, flags);
 }
 
 static int lkdtm_register_cpoint(enum cname which)
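
The lkdtm hunks above put the shared crash counter under a spinlock, since lkdtm_handler() can fire from interrupt context while the count is being reconfigured. A minimal sketch of the spin_lock_irqsave() pattern:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(count_lock);
static int count;

static void hit_crash_point(int reset_to)
{
        unsigned long flags;

        /* IRQ-safe: this may be called from interrupt context. */
        spin_lock_irqsave(&count_lock, flags);
        if (--count == 0)
                count = reset_to;       /* re-arm after triggering */
        spin_unlock_irqrestore(&count_lock, flags);
}
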
index bb6f925..374dfcf 100644 (file)
@@ -317,7 +317,8 @@ EXPORT_SYMBOL_GPL(pti_request_masterchannel);
  *                             a master, channel ID address
  *                             used to write to PTI HW.
  *
- * @mc: master, channel apeture ID address to be released.
+ * @mc: master, channel aperture ID address to be released.  This
+ *      will de-allocate the structure via kfree().
  */
 void pti_release_masterchannel(struct pti_masterchannel *mc)
 {
@@ -475,8 +476,10 @@ static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
                else
                        pti_tty_data->mc = pti_request_masterchannel(2);
 
-               if (pti_tty_data->mc == NULL)
+               if (pti_tty_data->mc == NULL) {
+                       kfree(pti_tty_data);
                        return -ENXIO;
+               }
                tty->driver_data = pti_tty_data;
        }
 
@@ -495,7 +498,7 @@ static void pti_tty_cleanup(struct tty_struct *tty)
        if (pti_tty_data == NULL)
                return;
        pti_release_masterchannel(pti_tty_data->mc);
-       kfree(tty->driver_data);
+       kfree(pti_tty_data);
        tty->driver_data = NULL;
 }
 
@@ -581,7 +584,7 @@ static int pti_char_open(struct inode *inode, struct file *filp)
 static int pti_char_release(struct inode *inode, struct file *filp)
 {
        pti_release_masterchannel(filp->private_data);
-       kfree(filp->private_data);
+       filp->private_data = NULL;
        return 0;
 }
 
index f91f82e..54c91ff 100644 (file)
@@ -605,7 +605,7 @@ long st_unregister(struct st_proto_s *proto)
        pr_debug("%s: %d ", __func__, proto->chnl_id);
 
        st_kim_ref(&st_gdata, 0);
-       if (proto->chnl_id >= ST_MAX_CHANNELS) {
+       if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) {
                pr_err(" chnl_id %d not supported", proto->chnl_id);
                return -EPROTONOSUPPORT;
        }
index 5da93ee..38fd2f0 100644 (file)
@@ -245,9 +245,9 @@ void skip_change_remote_baud(unsigned char **ptr, long *len)
                pr_err("invalid action after change remote baud command");
        } else {
                *ptr = *ptr + sizeof(struct bts_action) +
-                       ((struct bts_action *)nxt_action)->size;
+                       ((struct bts_action *)cur_action)->size;
                *len = *len - (sizeof(struct bts_action) +
-                               ((struct bts_action *)nxt_action)->size);
+                               ((struct bts_action *)cur_action)->size);
                /* warn user on not commenting these in firmware */
                pr_warn("skipping the wait event of change remote baud");
        }
@@ -604,6 +604,10 @@ void st_kim_ref(struct st_data_s **core_data, int id)
        struct kim_data_s       *kim_gdata;
        /* get kim_gdata reference from platform device */
        pdev = st_get_plat_device(id);
+       if (!pdev) {
+               *core_data = NULL;
+               return;
+       }
        kim_gdata = dev_get_drvdata(&pdev->dev);
        *core_data = kim_gdata->core_data;
 }
index 71da564..f85e422 100644 (file)
@@ -1024,7 +1024,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        INIT_LIST_HEAD(&md->part);
        md->usage = 1;
 
-       ret = mmc_init_queue(&md->queue, card, &md->lock);
+       ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
        if (ret)
                goto err_putdisk;
 
@@ -1297,6 +1297,9 @@ static void mmc_blk_remove(struct mmc_card *card)
        struct mmc_blk_data *md = mmc_get_drvdata(card);
 
        mmc_blk_remove_parts(card, md);
+       mmc_claim_host(card->host);
+       mmc_blk_part_switch(card, md);
+       mmc_release_host(card->host);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
 }
index c07322c..6413afa 100644 (file)
@@ -106,10 +106,12 @@ static void mmc_request(struct request_queue *q)
  * @mq: mmc queue
  * @card: mmc card to attach this queue
  * @lock: queue lock
+ * @subname: partition subname
  *
  * Initialise a MMC card request queue.
  */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+                  spinlock_t *lock, const char *subname)
 {
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
@@ -133,12 +135,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                mq->queue->limits.max_discard_sectors = UINT_MAX;
                if (card->erased_byte == 0)
                        mq->queue->limits.discard_zeroes_data = 1;
-               if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
-                       mq->queue->limits.discard_granularity =
-                                                       card->erase_size << 9;
-                       mq->queue->limits.discard_alignment =
-                                                       card->erase_size << 9;
-               }
+               mq->queue->limits.discard_granularity = card->pref_erase << 9;
                if (mmc_can_secure_erase_trim(card))
                        queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
                                                mq->queue);
@@ -209,8 +206,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
        sema_init(&mq->thread_sem, 1);
 
-       mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
-               host->index);
+       mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+               host->index, subname ? subname : "");
 
        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
index 64e66e0..6223ef8 100644 (file)
@@ -19,7 +19,8 @@ struct mmc_queue {
        unsigned int            bounce_sg_len;
 };
 
-extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
+                         const char *);
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
index 68091dd..7843efe 100644 (file)
@@ -1245,7 +1245,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
                 */
                timeout_clks <<= 1;
                timeout_us += (timeout_clks * 1000) /
-                             (card->host->ios.clock / 1000);
+                             (mmc_host_clk_rate(card->host) / 1000);
 
                erase_timeout = timeout_us / 1000;
 
index 4d0c15b..262fff0 100644 (file)
@@ -691,15 +691,54 @@ static int mmc_sdio_resume(struct mmc_host *host)
 static int mmc_sdio_power_restore(struct mmc_host *host)
 {
        int ret;
+       u32 ocr;
 
        BUG_ON(!host);
        BUG_ON(!host->card);
 
        mmc_claim_host(host);
+
+       /*
+        * Reset the card by performing the same steps that are taken by
+        * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
+        *
+        * sdio_reset() is technically not needed. Having just powered up the
+        * hardware, it should already be in reset state. However, some
+        * platforms (such as SD8686 on OLPC) do not instantly cut power,
+        * meaning that a reset is required when restoring power soon after
+        * powering off. It is harmless in other cases.
+        *
+        * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
+        * is not necessary for non-removable cards. However, it is required
+        * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
+        * harmless in other situations.
+        *
+        * With these steps taken, mmc_select_voltage() is also required to
+        * restore the correct voltage setting of the card.
+        */
+       sdio_reset(host);
+       mmc_go_idle(host);
+       mmc_send_if_cond(host, host->ocr_avail);
+
+       ret = mmc_send_io_op_cond(host, 0, &ocr);
+       if (ret)
+               goto out;
+
+       if (host->ocr_avail_sdio)
+               host->ocr_avail = host->ocr_avail_sdio;
+
+       host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
+       if (!host->ocr) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        ret = mmc_sdio_init_card(host, host->ocr, host->card,
                                mmc_card_keep_power(host));
        if (!ret && host->sdio_irqs)
                mmc_signal_sdio_irq(host);
+
+out:
        mmc_release_host(host);
 
        return ret;
index d29b9c3..d2565df 100644 (file)
@@ -189,7 +189,7 @@ static int sdio_bus_remove(struct device *dev)
 
        /* Then undo the runtime PM settings in sdio_bus_probe() */
        if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
-               pm_runtime_put_noidle(dev);
+               pm_runtime_put_sync(dev);
 
 out:
        return ret;
index e2aecb7..ab66f24 100644 (file)
 #include <linux/mmc/core.h>
 #include <linux/mmc/host.h>
 
+/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
+#ifndef NO_IRQ
+#define NO_IRQ 0
+#endif
+
 MODULE_LICENSE("GPL");
 
 enum {
index 5b2e215..dedf3da 100644 (file)
@@ -429,7 +429,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
                                return -EINVAL;
                        }
                }
-               mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg);
 
                /* Allow an aux regulator */
                reg = regulator_get(host->dev, "vmmc_aux");
@@ -962,7 +961,8 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
        spin_unlock(&host->irq_lock);
 
        if (host->use_dma && dma_ch != -1) {
-               dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
+               dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
+                       host->data->sg_len,
                        omap_hsmmc_get_dma_dir(host, host->data));
                omap_free_dma(dma_ch);
        }
@@ -1346,7 +1346,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
                return;
        }
 
-       dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
+       dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                omap_hsmmc_get_dma_dir(host, data));
 
        req_in_progress = host->req_in_progress;
index b365429..ce500f0 100644 (file)
@@ -92,7 +92,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
                mmc_data->ocr_mask = p->tmio_ocr_mask;
                mmc_data->capabilities |= p->tmio_caps;
 
-               if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
+               if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
                        priv->param_tx.slave_id = p->dma_slave_tx;
                        priv->param_rx.slave_id = p->dma_slave_rx;
                        priv->dma_priv.chan_priv_tx = &priv->param_tx;
@@ -165,13 +165,14 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
 
        p->pdata = NULL;
 
+       tmio_mmc_host_remove(host);
+
        for (i = 0; i < 3; i++) {
                irq = platform_get_irq(pdev, i);
                if (irq >= 0)
                        free_irq(irq, host);
        }
 
-       tmio_mmc_host_remove(host);
        clk_disable(priv->clk);
        clk_put(priv->clk);
        kfree(priv);
index ad6347b..0b09e82 100644 (file)
@@ -824,8 +824,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct tmio_mmc_data *pdata = host->pdata;
 
-       return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
-               !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
+       return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+                (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
 }
 
 static int tmio_mmc_get_cd(struct mmc_host *mmc)
index cbb0330..d4455ff 100644 (file)
@@ -2096,7 +2096,7 @@ static struct mmc_host_ops vub300_mmc_ops = {
 static int vub300_probe(struct usb_interface *interface,
                        const struct usb_device_id *id)
 {                              /* NOT irq */
-       struct vub300_mmc_host *vub300 = NULL;
+       struct vub300_mmc_host *vub300;
        struct usb_host_interface *iface_desc;
        struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
        int i;
@@ -2118,23 +2118,20 @@ static int vub300_probe(struct usb_interface *interface,
        command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!command_out_urb) {
                retval = -ENOMEM;
-               dev_err(&vub300->udev->dev,
-                       "not enough memory for the command_out_urb\n");
+               dev_err(&udev->dev, "not enough memory for command_out_urb\n");
                goto error0;
        }
        command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!command_res_urb) {
                retval = -ENOMEM;
-               dev_err(&vub300->udev->dev,
-                       "not enough memory for the command_res_urb\n");
+               dev_err(&udev->dev, "not enough memory for command_res_urb\n");
                goto error1;
        }
        /* this also allocates memory for our VUB300 mmc host device */
        mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
        if (!mmc) {
                retval = -ENOMEM;
-               dev_err(&vub300->udev->dev,
-                       "not enough memory for the mmc_host\n");
+               dev_err(&udev->dev, "not enough memory for the mmc_host\n");
                goto error4;
        }
        /* MMC core transfer sizes tunable parameters */
index 0bb254c..33d8aad 100644 (file)
@@ -339,9 +339,9 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                                    (FIR_OP_UA  << FIR_OP1_SHIFT) |
                                    (FIR_OP_RBW << FIR_OP2_SHIFT));
                out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
-               /* 5 bytes for manuf, device and exts */
-               out_be32(&lbc->fbcr, 5);
-               elbc_fcm_ctrl->read_bytes = 5;
+               /* nand_get_flash_type() reads 8 bytes of the ID string */
+               out_be32(&lbc->fbcr, 8);
+               elbc_fcm_ctrl->read_bytes = 8;
                elbc_fcm_ctrl->use_mdr = 1;
                elbc_fcm_ctrl->mdr = 0;
 
index 19f04a3..93359fa 100644 (file)
@@ -3416,7 +3416,8 @@ config NETCONSOLE
 
 config NETCONSOLE_DYNAMIC
        bool "Dynamic reconfiguration of logging targets"
-       depends on NETCONSOLE && SYSFS && CONFIGFS_FS
+       depends on NETCONSOLE && SYSFS && CONFIGFS_FS && \
+                       !(NETCONSOLE=y && CONFIGFS_FS=m)
        help
          This option enables the ability to dynamically reconfigure target
          parameters (interface, IP addresses, port numbers, MAC addresses)
index 4b70311..74be989 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/zlib.h>
 #include <linux/io.h>
 #include <linux/stringify.h>
+#include <linux/vmalloc.h>
 
 #define BNX2X_MAIN
 #include "bnx2x.h"
@@ -4537,8 +4538,7 @@ static int bnx2x_gunzip_init(struct bnx2x *bp)
        if (bp->strm  == NULL)
                goto gunzip_nomem2;
 
-       bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
-                                     GFP_KERNEL);
+       bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
        if (bp->strm->workspace == NULL)
                goto gunzip_nomem3;
 
@@ -4562,7 +4562,7 @@ gunzip_nomem1:
 static void bnx2x_gunzip_end(struct bnx2x *bp)
 {
        if (bp->strm) {
-               kfree(bp->strm->workspace);
+               vfree(bp->strm->workspace);
                kfree(bp->strm);
                bp->strm = NULL;
        }
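
The bnx2x hunks above (and the similar z_decomp_alloc() hunk below) switch the zlib inflate workspace to vmalloc(): zlib_inflate_workspacesize() is tens of kilobytes, large enough that a physically contiguous kmalloc() can fail on a fragmented system, while vmalloc() only needs virtually contiguous pages. Minimal allocation pattern:

#include <linux/vmalloc.h>
#include <linux/zlib.h>

static void *alloc_inflate_workspace(void)
{
        /* May return NULL; callers must check. */
        return vmalloc(zlib_inflate_workspacesize());
}

static void free_inflate_workspace(void *workspace)
{
        vfree(workspace);
}
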
index 1d699e3..754df5e 100644 (file)
@@ -36,7 +36,7 @@ config CAN_SLCAN
 config CAN_DEV
        tristate "Platform CAN drivers with Netlink support"
        depends on CAN
-       default Y
+       default y
        ---help---
          Enables the common framework for platform CAN drivers with Netlink
          support. This is the standard library for CAN drivers.
@@ -45,7 +45,7 @@ config CAN_DEV
 config CAN_CALC_BITTIMING
        bool "CAN bit-timing calculation"
        depends on CAN_DEV
-       default Y
+       default y
        ---help---
          If enabled, CAN bit-timing parameters will be calculated for the
          bit-rate specified via Netlink argument "bitrate" when the device
index 3f562ba..76bf589 100644 (file)
@@ -2026,7 +2026,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else
                skb_checksum_none_assert(skb);
-       skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
+       skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
 
        if (unlikely(p->vlan_valid)) {
                struct vlan_group *grp = pi->vlan_grp;
@@ -2145,7 +2145,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
        if (!complete)
                return;
 
-       skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
+       skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
 
        if (unlikely(cpl->vlan_valid)) {
                struct vlan_group *grp = pi->vlan_grp;
index 31e9407..1dbdf82 100644 (file)
@@ -305,7 +305,7 @@ static void z_decomp_free(void *arg)
 
        if (state) {
                zlib_inflateEnd(&state->strm);
-               kfree(state->strm.workspace);
+               vfree(state->strm.workspace);
                kfree(state);
        }
 }
@@ -345,8 +345,7 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
 
        state->w_size         = w_size;
        state->strm.next_out  = NULL;
-       state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
-                                       GFP_KERNEL|__GFP_REPEAT);
+       state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
        if (state->strm.workspace == NULL)
                goto out_free;
 
index 05d8178..5990621 100644 (file)
@@ -742,7 +742,7 @@ static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
        msleep(2);
        for (i = 0; i < 5; i++) {
                udelay(100);
-               if (!(RTL_R32(ERIDR) & ERIAR_FLAG))
+               if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
                        break;
        }
 
index 77c5092..5d3436d 100644 (file)
@@ -378,7 +378,7 @@ static int rionet_close(struct net_device *ndev)
 
 static void rionet_remove(struct rio_dev *rdev)
 {
-       struct net_device *ndev = NULL;
+       struct net_device *ndev = rio_get_drvdata(rdev);
        struct rionet_peer *peer, *tmp;
 
        free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
@@ -433,22 +433,12 @@ static const struct net_device_ops rionet_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
 };
 
-static int rionet_setup_netdev(struct rio_mport *mport)
+static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
 {
        int rc = 0;
-       struct net_device *ndev = NULL;
        struct rionet_private *rnet;
        u16 device_id;
 
-       /* Allocate our net_device structure */
-       ndev = alloc_etherdev(sizeof(struct rionet_private));
-       if (ndev == NULL) {
-               printk(KERN_INFO "%s: could not allocate ethernet device.\n",
-                      DRV_NAME);
-               rc = -ENOMEM;
-               goto out;
-       }
-
        rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
                        mport->sys_size ? __fls(sizeof(void *)) + 4 : 0);
        if (!rionet_active) {
@@ -504,11 +494,21 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
        int rc = -ENODEV;
        u32 lpef, lsrc_ops, ldst_ops;
        struct rionet_peer *peer;
+       struct net_device *ndev = NULL;
 
        /* If local device is not rionet capable, give up quickly */
        if (!rionet_capable)
                goto out;
 
+       /* Allocate our net_device structure */
+       ndev = alloc_etherdev(sizeof(struct rionet_private));
+       if (ndev == NULL) {
+               printk(KERN_INFO "%s: could not allocate ethernet device.\n",
+                      DRV_NAME);
+               rc = -ENOMEM;
+               goto out;
+       }
+
        /*
         * First time through, make sure local device is rionet
         * capable, setup netdev,  and set flags so this is skipped
@@ -529,7 +529,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
                        goto out;
                }
 
-               rc = rionet_setup_netdev(rdev->net->hport);
+               rc = rionet_setup_netdev(rdev->net->hport, ndev);
                rionet_check = 1;
        }
 
@@ -546,6 +546,8 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
                list_add_tail(&peer->node, &rionet_peers);
        }
 
+       rio_set_drvdata(rdev, ndev);
+
       out:
        return rc;
 }
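
The rionet hunks above pair probe and remove through driver data: probe() now allocates the net_device and stores it with rio_set_drvdata(), so remove() can retrieve it with rio_get_drvdata() instead of starting from a NULL pointer. A stripped-down sketch of the pairing (illustrative bodies only):

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rio_drv.h>

static int example_probe(struct rio_dev *rdev)
{
        struct net_device *ndev = alloc_etherdev(0);

        if (!ndev)
                return -ENOMEM;
        rio_set_drvdata(rdev, ndev);    /* remembered for remove() */
        return 0;
}

static void example_remove(struct rio_dev *rdev)
{
        struct net_device *ndev = rio_get_drvdata(rdev);

        free_netdev(ndev);
}
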
index d965fb1..a9b6c63 100644 (file)
@@ -100,34 +100,42 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
 static int
 kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
 {
-       char init_msg_1[] =
+       const static char init_msg_1[] =
                { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
                0x00, 0x00 };
-       char init_msg_2[] =
+       const static char init_msg_2[] =
                { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
                0x00, 0x00 };
-       char receive_buf[28];
+       const static int buflen = 28;
+       char *usb_buf;
        int status;
 
-       status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1)
-               / sizeof(init_msg_1[0]), receive_buf, 24);
+       usb_buf = kmalloc(buflen, GFP_DMA | GFP_KERNEL);
+       if (!usb_buf)
+               return -ENOMEM;
+
+       memcpy(usb_buf, init_msg_1, 12);
+       status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1)
+               / sizeof(init_msg_1[0]), usb_buf, 24);
        if (status != 0)
                return status;
 
-       status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2)
-               / sizeof(init_msg_2[0]), receive_buf, 28);
+       memcpy(usb_buf, init_msg_2, 12);
+       status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2)
+               / sizeof(init_msg_2[0]), usb_buf, 28);
        if (status != 0)
                return status;
 
-       memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN);
+       memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
 
+       kfree(usb_buf);
        return status;
 }
 
 static int
 kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
 {
-       u8 status;
+       int status;
        u8 ethernet_addr[ETH_ALEN];
 
        /* Don't bind to AT command interface */
@@ -190,7 +198,8 @@ kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
        dev_kfree_skb_any(skb);
        skb = skb2;
 
-       done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
+done:
+       header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
        ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
        ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
 
@@ -201,9 +210,8 @@ kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
        header_start[0] = 0x57;
        header_start[1] = 0x44;
        content_len = skb->len - KALMIA_HEADER_LENGTH;
-       header_start[2] = (content_len & 0xff); /* low byte */
-       header_start[3] = (content_len >> 8); /* high byte */
 
+       put_unaligned_le16(content_len, &header_start[2]);
        header_start[4] = ether_type_1;
        header_start[5] = ether_type_2;
 
@@ -231,13 +239,13 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
         * Our task here is to strip off framing, leaving skb with one
         * data frame for the usbnet framework code to process.
         */
-       const u8 HEADER_END_OF_USB_PACKET[] =
+       const static u8 HEADER_END_OF_USB_PACKET[] =
                { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
-       const u8 EXPECTED_UNKNOWN_HEADER_1[] =
+       const static u8 EXPECTED_UNKNOWN_HEADER_1[] =
                { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
-       const u8 EXPECTED_UNKNOWN_HEADER_2[] =
+       const static u8 EXPECTED_UNKNOWN_HEADER_2[] =
                { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
-       u8 i = 0;
+       int i = 0;
 
        /* incomplete header? */
        if (skb->len < KALMIA_HEADER_LENGTH)
@@ -285,7 +293,7 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
                /* subtract start header and end header */
                usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
-               ether_packet_length = header_start[2] + (header_start[3] << 8);
+               ether_packet_length = get_unaligned_le16(&header_start[2]);
                skb_pull(skb, KALMIA_HEADER_LENGTH);
 
                /* Some small packets misses end marker */
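
The kalmia hunks above replace the open-coded byte shifts with the unaligned little-endian accessors: the 16-bit length field sits at offset 2 of the framing header and may not be naturally aligned inside the skb. A minimal sketch, with hdr standing in for the header buffer:

#include <asm/unaligned.h>
#include <linux/types.h>

static void set_content_len(u8 *hdr, u16 content_len)
{
        put_unaligned_le16(content_len, &hdr[2]);
}

static u16 get_content_len(const u8 *hdr)
{
        return get_unaligned_le16(&hdr[2]);
}
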
index 241756e..1a2234c 100644 (file)
@@ -331,17 +331,7 @@ static const struct usb_device_id  products [] = {
        ZAURUS_MASTER_INTERFACE,
        .driver_info = ZAURUS_PXA_INFO,
 },
-
-
-/* At least some of the newest PXA units have very different lies about
- * their standards support:  they claim to be cell phones offering
- * direct access to their radios!  (No, they don't conform to CDC MDLM.)
- */
 {
-       USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
-                       USB_CDC_PROTO_NONE),
-       .driver_info = (unsigned long) &bogus_mdlm_info,
-}, {
        /* Motorola MOTOMAGX phones */
        USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
index 9f8ccae..254b64b 100644 (file)
@@ -1624,6 +1624,16 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
        pci_read_config_byte(pdev, 0x8, &revisionid);
        pci_read_config_word(pdev, 0x3C, &irqline);
 
+       /* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
+        * r8192e_pci, and RTL8192SE, which uses this driver. If the
+        * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
+        * the correct driver is r8192e_pci, thus this routine should
+        * return false.
+        */
+       if (deviceid == RTL_PCI_8192SE_DID &&
+           revisionid == RTL_PCI_REVISION_ID_8192PCIE)
+               return false;
+
        if (deviceid == RTL_PCI_8192_DID ||
            deviceid == RTL_PCI_0044_DID ||
            deviceid == RTL_PCI_0047_DID ||
@@ -1856,7 +1866,8 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
        pci_write_config_byte(pdev, 0x04, 0x07);
 
        /* find adapter */
-       _rtl_pci_find_adapter(pdev, hw);
+       if (!_rtl_pci_find_adapter(pdev, hw))
+               goto fail3;
 
        /* Init IO handler */
        _rtl_pci_io_handler_init(&pdev->dev, hw);
index bee7c14..092e342 100644 (file)
@@ -53,6 +53,8 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
 static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
+       const struct firmware *firmware;
+       int err;
 
        rtlpriv->dm.dm_initialgain_enable = 1;
        rtlpriv->dm.dm_flag = 0;
@@ -64,6 +66,24 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
                         ("Can't alloc buffer for fw.\n"));
                return 1;
        }
+       /* request fw */
+       err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
+                       rtlpriv->io.dev);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Failed to request firmware!\n"));
+               return 1;
+       }
+       if (firmware->size > 0x4000) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Firmware is too big!\n"));
+               release_firmware(firmware);
+               return 1;
+       }
+       memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+       rtlpriv->rtlhal.fwsize = firmware->size;
+       release_firmware(firmware);
+
        return 0;
 }
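
The added code follows the usual synchronous firmware-loading pattern: request, bound-check the size, copy into a preallocated buffer, release. A minimal stand-alone sketch of that pattern (the helper name and error code are illustrative; the 0x4000 cap comes from the hunk above):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/firmware.h>
    #include <linux/string.h>

    static int demo_load_fw(struct device *dev, const char *name,
                            void *buf, size_t buf_size)
    {
            const struct firmware *fw;
            int err;

            err = request_firmware(&fw, name, dev);
            if (err)
                    return err;             /* firmware not found, etc. */
            if (fw->size > buf_size) {      /* e.g. 0x4000 in the hunk above */
                    release_firmware(fw);
                    return -EINVAL;
            }
            memcpy(buf, fw->data, fw->size);
            release_firmware(fw);
            return 0;
    }
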
 
index 135df16..46767c5 100644 (file)
@@ -624,7 +624,7 @@ static int pci_pm_prepare(struct device *dev)
         * system from the sleep state, we'll have to prevent it from signaling
         * wake-up.
         */
-       pm_runtime_resume(dev);
+       pm_runtime_get_sync(dev);
 
        if (drv && drv->pm && drv->pm->prepare)
                error = drv->pm->prepare(dev);
@@ -638,6 +638,8 @@ static void pci_pm_complete(struct device *dev)
 
        if (drv && drv->pm && drv->pm->complete)
                drv->pm->complete(dev);
+
+       pm_runtime_put_sync(dev);
 }
 
 #else /* !CONFIG_PM_SLEEP */
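
The change above pairs pm_runtime_get_sync() in the prepare callback with pm_runtime_put_sync() in complete, so the device cannot runtime-suspend in between. A minimal sketch of the same pairing in a generic driver (the callback names are hypothetical):

    #include <linux/pm_runtime.h>

    static int demo_prepare(struct device *dev)
    {
            /* take a usage reference and resume the device if it is suspended */
            pm_runtime_get_sync(dev);
            return 0;
    }

    static void demo_complete(struct device *dev)
    {
            /* drop the reference taken in demo_prepare() */
            pm_runtime_put_sync(dev);
    }
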
index 5f10c23..2c5b9b9 100644 (file)
@@ -3284,7 +3284,7 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
  * @dev: the PCI device
  * @decode: true = enable decoding, false = disable decoding
  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
- * @change_bridge_flags: traverse ancestors and change bridges
+ * @flags: traverse ancestors and change bridges
  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
  */
 int pci_set_vga_state(struct pci_dev *dev, bool decode,
index 48849ff..bafb3c3 100644 (file)
@@ -168,7 +168,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
                if (type == pci_bar_io) {
                        l &= PCI_BASE_ADDRESS_IO_MASK;
-                       mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
+                       mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
                } else {
                        l &= PCI_BASE_ADDRESS_MEM_MASK;
                        mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
index e8a1406..02145e9 100644 (file)
@@ -2761,6 +2761,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
 #endif /*CONFIG_MMC_RICOH_MMC*/
 
 #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
index 81af2b3..69ae2fd 100644 (file)
@@ -48,9 +48,6 @@ static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 {
        int ret;
 
-       if (platform_scoop_config->pcmcia_init)
-               platform_scoop_config->pcmcia_init();
-
        /* Register interrupts */
        if (SCOOP_DEV[skt->nr].cd_irq >= 0) {
                struct pcmcia_irqs cd_irq;
index b829e65..57ddb96 100644 (file)
@@ -55,10 +55,6 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
                }
                skt->socket.pci_irq = IRQ_GPIO(GPIO_PRDY);
                break;
-
-#ifndef CONFIG_MACH_TRIZEPS_CONXS
-       case 1:
-#endif
        default:
                break;
        }
index 4724ba3..b2005b4 100644 (file)
@@ -149,6 +149,7 @@ static const struct i2c_device_id ds1307_id[] = {
        { "ds1340", ds_1340 },
        { "ds3231", ds_3231 },
        { "m41t00", m41t00 },
+       { "pt7c4338", ds_1307 },
        { "rx8025", rx_8025 },
        { }
 };
index b8bc862..efd6066 100644 (file)
@@ -78,7 +78,6 @@ struct vt8500_rtc {
        void __iomem            *regbase;
        struct resource         *res;
        int                     irq_alarm;
-       int                     irq_hz;
        struct rtc_device       *rtc;
        spinlock_t              lock;           /* Protects this structure */
 };
@@ -100,10 +99,6 @@ static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
        if (isr & 1)
                events |= RTC_AF | RTC_IRQF;
 
-       /* Only second/minute interrupts are supported */
-       if (isr & 2)
-               events |= RTC_UF | RTC_IRQF;
-
        rtc_update_irq(vt8500_rtc->rtc, 1, events);
 
        return IRQ_HANDLED;
@@ -199,27 +194,12 @@ static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
-{
-       struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
-       unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
-
-       if (enabled)
-               tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
-       else
-               tmp &= ~VT8500_RTC_CR_SM_ENABLE;
-
-       writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
-       return 0;
-}
-
 static const struct rtc_class_ops vt8500_rtc_ops = {
        .read_time = vt8500_rtc_read_time,
        .set_time = vt8500_rtc_set_time,
        .read_alarm = vt8500_rtc_read_alarm,
        .set_alarm = vt8500_rtc_set_alarm,
        .alarm_irq_enable = vt8500_alarm_irq_enable,
-       .update_irq_enable = vt8500_update_irq_enable,
 };
 
 static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
@@ -248,13 +228,6 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
                goto err_free;
        }
 
-       vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
-       if (vt8500_rtc->irq_hz < 0) {
-               dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
-               ret = -ENXIO;
-               goto err_free;
-       }
-
        vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
                                             resource_size(vt8500_rtc->res),
                                             "vt8500-rtc");
@@ -272,9 +245,8 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
                goto err_release;
        }
 
-       /* Enable the second/minute interrupt generation and enable RTC */
-       writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H
-               | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
+       /* Enable RTC and set it to 24-hour mode */
+       writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
               vt8500_rtc->regbase + VT8500_RTC_CR);
 
        vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
@@ -286,26 +258,16 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
                goto err_unmap;
        }
 
-       ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
-                         "rtc 1Hz", vt8500_rtc);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "can't get irq %i, err %d\n",
-                       vt8500_rtc->irq_hz, ret);
-               goto err_unreg;
-       }
-
        ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
                          "rtc alarm", vt8500_rtc);
        if (ret < 0) {
                dev_err(&pdev->dev, "can't get irq %i, err %d\n",
                        vt8500_rtc->irq_alarm, ret);
-               goto err_free_hz;
+               goto err_unreg;
        }
 
        return 0;
 
-err_free_hz:
-       free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
 err_unreg:
        rtc_device_unregister(vt8500_rtc->rtc);
 err_unmap:
@@ -323,7 +285,6 @@ static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
        struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
 
        free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
-       free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
 
        rtc_device_unregister(vt8500_rtc->rtc);
 
index 4a1f029..8d9dae8 100644 (file)
@@ -830,6 +830,19 @@ config SCSI_GDTH
          To compile this driver as a module, choose M here: the
          module will be called gdth.
 
+config SCSI_ISCI
+       tristate "Intel(R) C600 Series Chipset SAS Controller"
+       depends on PCI && SCSI
+       depends on X86
+       # (temporary): known alpha quality driver
+       depends on EXPERIMENTAL
+       select SCSI_SAS_LIBSAS
+       ---help---
+         This driver supports the 6Gb/s SAS capabilities of the storage
+         control unit found in the Intel(R) C600 series chipset.
+
+         The experimental tag will be removed after the driver exits alpha.
+
 config SCSI_GENERIC_NCR5380
        tristate "Generic NCR5380/53c400 SCSI PIO support"
        depends on ISA && SCSI
index 7ad0b8a..3c08f53 100644 (file)
@@ -73,6 +73,7 @@ obj-$(CONFIG_SCSI_AACRAID)    += aacraid/
 obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
 obj-$(CONFIG_SCSI_AIC94XX)     += aic94xx/
 obj-$(CONFIG_SCSI_PM8001)      += pm8001/
+obj-$(CONFIG_SCSI_ISCI)                += isci/
 obj-$(CONFIG_SCSI_IPS)         += ips.o
 obj-$(CONFIG_SCSI_FD_MCS)      += fd_mcs.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
index c6c0434..6bba23a 100644 (file)
@@ -1037,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp)
        unsigned char sense_key;
        unsigned char asc;      /* additional sense code */
        unsigned char ascq;     /* additional sense code qualifier */
+       unsigned long sense_data_size;
 
        ei = cp->err_info;
        cmd = (struct scsi_cmnd *) cp->scsi_cmd;
@@ -1051,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp)
        cmd->result |= ei->ScsiStatus;
 
        /* copy the sense data whether we need to or not. */
-       memcpy(cmd->sense_buffer, ei->SenseInfo,
-               ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
-                       SCSI_SENSE_BUFFERSIZE :
-                       ei->SenseLen);
+       if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+               sense_data_size = SCSI_SENSE_BUFFERSIZE;
+       else
+               sense_data_size = sizeof(ei->SenseInfo);
+       if (ei->SenseLen < sense_data_size)
+               sense_data_size = ei->SenseLen;
+
+       memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
        scsi_set_resid(cmd, ei->ResidualCnt);
 
        if (ei->CommandStatus == 0) {
@@ -2580,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                c->SG[0].Ext = 0; /* we are not chaining*/
        }
        hpsa_scsi_do_simple_cmd_core(h, c);
-       hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+       if (iocommand.buf_size > 0)
+               hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
        check_ioctl_unit_attention(h, c);
 
        /* Copy the error information out */
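
The sense-data copy above clamps the length to the smallest of the driver's sense buffer, the adapter's SenseInfo field, and the reported SenseLen. The same clamp could be written with the kernel's min_t() helper; a sketch only, not the driver's code:

    sense_data_size = min_t(unsigned long, sizeof(ei->SenseInfo),
                            SCSI_SENSE_BUFFERSIZE);
    sense_data_size = min_t(unsigned long, sense_data_size, ei->SenseLen);
    memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
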
index b765061..bdfa223 100644 (file)
@@ -4306,8 +4306,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
                spin_lock_irqsave(vhost->host->host_lock, flags);
                if (rc == H_CLOSED)
                        vio_enable_interrupts(to_vio_dev(vhost->dev));
-               else if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
-                        (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+               if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+                   (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
                        dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
                }
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644 (file)
index 0000000..3359e10
--- /dev/null
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SCSI_ISCI) += isci.o
+isci-objs := init.o phy.o request.o \
+            remote_device.o port.o \
+            host.o task.o probe_roms.o \
+            remote_node_context.o \
+            remote_node_table.o \
+            unsolicited_frame_control.o \
+            port_config.o \
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
new file mode 100644 (file)
index 0000000..5f54461
--- /dev/null
@@ -0,0 +1,19 @@
+# Makefile for create_fw
+#
+CC=gcc
+CFLAGS=-c -Wall -O2 -g
+LDFLAGS=
+SOURCES=create_fw.c
+OBJECTS=$(SOURCES:.c=.o)
+EXECUTABLE=create_fw
+
+all: $(SOURCES) $(EXECUTABLE)
+
+$(EXECUTABLE): $(OBJECTS)
+       $(CC) $(LDFLAGS) $(OBJECTS) -o $@
+
+.c.o:
+       $(CC) $(CFLAGS) $< -o $@
+
+clean:
+       rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
new file mode 100644 (file)
index 0000000..8056d2b
--- /dev/null
@@ -0,0 +1,36 @@
+This defines the temporary binary blob we pass to the SCU driver to
+emulate the binary firmware that we will eventually be able to access
+via NVRAM on the SCU controller.
+
+The current size of the binary blob is expected to be 149 bytes or larger.
+
+Header Types:
+0x1: Phy Masks
+0x2: Phy Gens
+0x3: SAS Addrs
+0xff: End of Data
+
+ID string - u8[12]: "#SCU MAGIC#\0"
+Version - u8: 1
+SubVersion - u8: 0
+
+Header Type - u8: 0x1
+Size - u8: 8
+Phy Mask - u32[8]
+
+Header Type - u8: 0x2
+Size - u8: 8
+Phy Gen - u32[8]
+
+Header Type - u8: 0x3
+Size - u8: 8
+Sas Addr - u64[8]
+
+Header Type - u8: 0xf
+
+
+==============================================================================
+
+Place isci_firmware.bin in /lib/firmware
+Be sure to recreate the initramfs image to include the firmware.
+
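One possible C rendering of the layout described above (illustrative only; the struct and field names are hypothetical and do not appear in the driver):

    struct fw_blob_header {         /* start of isci_firmware.bin */
            u8 id[12];              /* "#SCU MAGIC#\0" */
            u8 version;             /* 1 */
            u8 sub_version;         /* 0 */
    };

    struct fw_blob_entry {
            u8 type;                /* 0x1 phy masks, 0x2 phy gens, 0x3 SAS addrs */
            u8 size;                /* element count, 8 for each table above */
            /* followed by 'size' u32s (types 0x1/0x2) or u64s (type 0x3) */
    };
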
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
new file mode 100644 (file)
index 0000000..c7a2887
--- /dev/null
@@ -0,0 +1,99 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <asm/types.h>
+#include <strings.h>
+#include <stdint.h>
+
+#include "create_fw.h"
+#include "../probe_roms.h"
+
+int write_blob(struct isci_orom *isci_orom)
+{
+       FILE *fd;
+       int err;
+       size_t count;
+
+       fd = fopen(blob_name, "w+");
+       if (!fd) {
+               perror("Open file for write failed");
+               return -EIO;
+       }
+
+       count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
+       if (count != 1) {
+               perror("Write data failed");
+               fclose(fd);
+               return -EIO;
+       }
+
+       fclose(fd);
+
+       return 0;
+}
+
+void set_binary_values(struct isci_orom *isci_orom)
+{
+       int ctrl_idx, phy_idx, port_idx;
+
+       /* setting OROM signature */
+       strncpy(isci_orom->hdr.signature, sig, strlen(sig));
+       isci_orom->hdr.version = version;
+       isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
+       isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
+       isci_orom->hdr.num_elements = num_elements;
+
+       for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
+               isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
+               isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
+                       max_num_concurrent_dev_spin_up;
+               isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
+                       enable_ssc;
+
+               for (port_idx = 0; port_idx < 4; port_idx++)
+                       isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
+                               phy_mask[ctrl_idx][port_idx];
+
+               for (phy_idx = 0; phy_idx < 4; phy_idx++) {
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
+                               (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
+                               (__u32)(sas_addr[ctrl_idx][phy_idx]);
+
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
+                               afe_tx_amp_control0;
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
+                               afe_tx_amp_control1;
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
+                               afe_tx_amp_control2;
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
+                               afe_tx_amp_control3;
+               }
+       }
+}
+
+int main(void)
+{
+       int err;
+       struct isci_orom *isci_orom;
+
+       isci_orom = malloc(sizeof(struct isci_orom));
+       memset(isci_orom, 0, sizeof(struct isci_orom));
+
+       set_binary_values(isci_orom);
+
+       err = write_blob(isci_orom);
+       if (err < 0) {
+               free(isci_orom);
+               return err;
+       }
+
+       free(isci_orom);
+       return 0;
+}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
new file mode 100644 (file)
index 0000000..5f29882
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef _CREATE_FW_H_
+#define _CREATE_FW_H_
+#include "../probe_roms.h"
+
+
+/* we are configuring for 2 SCUs */
+static const int num_elements = 2;
+
+/*
+ * For all defined arrays:
+ * elements 0-3 are for SCU0, ports 0-3
+ * elements 4-7 are for SCU1, ports 0-3
+ *
+ * valid configurations for one SCU are:
+ *  P0  P1  P2  P3
+ * ----------------
+ * 0xF,0x0,0x0,0x0 # 1 x4 port
+ * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
+ *                 # ports
+ * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
+ *                 # port
+ * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
+ * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
+ *
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value assigned to UNINIT_PARAM (255).
+ */
+
+/* discovery mode type (port auto config mode by default ) */
+
+/*
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value "0000000000000000". SAS address of zero's is
+ * considered invalid and will not be used.
+ */
+#ifdef MPC
+static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
+                                    {1, 2, 4, 8} };
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
+                                                    0x5FCFFFFFF0000002ULL,
+                                                    0x5FCFFFFFF0000003ULL,
+                                                    0x5FCFFFFFF0000004ULL },
+                                                  { 0x5FCFFFFFF0000005ULL,
+                                                    0x5FCFFFFFF0000006ULL,
+                                                    0x5FCFFFFFF0000007ULL,
+                                                    0x5FCFFFFFF0000008ULL } };
+#else  /* APC (default) */
+static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4];
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
+                                                    0x5FCFFFFF00000001ULL,
+                                                    0x5FCFFFFF00000001ULL,
+                                                    0x5FCFFFFF00000001ULL },
+                                                  { 0x5FCFFFFF00000002ULL,
+                                                    0x5FCFFFFF00000002ULL,
+                                                    0x5FCFFFFF00000002ULL,
+                                                    0x5FCFFFFF00000002ULL } };
+#endif
+
+/* Maximum number of concurrent device spin up */
+static const int max_num_concurrent_dev_spin_up = 1;
+
+/* enable of ssc operation */
+static const int enable_ssc;
+
+/* AFE_TX_AMP_CONTROL */
+static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
+static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
+static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
+static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
+
+static const char blob_name[] = "isci_firmware.bin";
+static const char sig[] = "ISCUOEMB";
+static const unsigned char version = 0x10;
+
+#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644 (file)
index 0000000..26072f1
--- /dev/null
@@ -0,0 +1,2751 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <scsi/sas.h>
+#include "host.h"
+#include "isci.h"
+#include "port.h"
+#include "host.h"
+#include "probe_roms.h"
+#include "remote_device.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "registers.h"
+#include "scu_remote_node_context.h"
+#include "scu_task_context.h"
+
+#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200
+
+#define smu_max_ports(dcc_value) \
+       (\
+               (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+                >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
+       )
+
+#define smu_max_task_contexts(dcc_value)       \
+       (\
+               (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+                >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
+       )
+
+#define smu_max_rncs(dcc_value) \
+       (\
+               (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+                >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
+       )
+
+#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100
+
+/**
+ * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL -
+ *
+ * The number of milliseconds to wait while a given phy is consuming power
+ * before allowing another set of phys to consume power. Ultimately, this will
+ * be specified by OEM parameter.
+ */
+#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
+
+/**
+ * NORMALIZE_PUT_POINTER() -
+ *
+ * This macro will normalize the completion queue put pointer so its value can
+ * be used as an array index.
+ */
+#define NORMALIZE_PUT_POINTER(x) \
+       ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
+
+
+/**
+ * NORMALIZE_EVENT_POINTER() -
+ *
+ * This macro will normalize the completion queue event entry so its value can
+ * be used as an index.
+ */
+#define NORMALIZE_EVENT_POINTER(x) \
+       (\
+               ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
+               >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
+       )
+
+/**
+ * NORMALIZE_GET_POINTER() -
+ *
+ * This macro will normalize the completion queue get pointer so its value can
+ * be used as an index into an array
+ */
+#define NORMALIZE_GET_POINTER(x) \
+       ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
+
+/**
+ * NORMALIZE_GET_POINTER_CYCLE_BIT() -
+ *
+ * This macro will normalize the completion queue cycle pointer so it matches
+ * the completion queue cycle bit
+ */
+#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
+       ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
+
+/**
+ * COMPLETION_QUEUE_CYCLE_BIT() -
+ *
+ * This macro will return the cycle bit of the completion queue entry
+ */
+#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
+
+/* Init the state machine and call the state entry function (if any) */
+void sci_init_sm(struct sci_base_state_machine *sm,
+                const struct sci_base_state *state_table, u32 initial_state)
+{
+       sci_state_transition_t handler;
+
+       sm->initial_state_id    = initial_state;
+       sm->previous_state_id   = initial_state;
+       sm->current_state_id    = initial_state;
+       sm->state_table         = state_table;
+
+       handler = sm->state_table[initial_state].enter_state;
+       if (handler)
+               handler(sm);
+}
+
+/* Call the state exit fn, update the current state, call the state entry fn */
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
+{
+       sci_state_transition_t handler;
+
+       handler = sm->state_table[sm->current_state_id].exit_state;
+       if (handler)
+               handler(sm);
+
+       sm->previous_state_id = sm->current_state_id;
+       sm->current_state_id = next_state;
+
+       handler = sm->state_table[sm->current_state_id].enter_state;
+       if (handler)
+               handler(sm);
+}
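
A minimal usage sketch of these two helpers; the state IDs, table, and handler below are made up for illustration, and only sci_init_sm()/sci_change_state() and the enter_state/exit_state hooks come from the code above:

    enum { DEMO_IDLE, DEMO_BUSY, DEMO_STATE_COUNT };

    static void demo_busy_enter(struct sci_base_state_machine *sm)
    {
            /* runs every time the machine transitions into DEMO_BUSY */
    }

    static const struct sci_base_state demo_states[DEMO_STATE_COUNT] = {
            [DEMO_IDLE] = { },
            [DEMO_BUSY] = { .enter_state = demo_busy_enter },
    };

    static struct sci_base_state_machine demo_sm;

    static void demo_run(void)
    {
            /* current state becomes DEMO_IDLE; its enter hook (none) runs */
            sci_init_sm(&demo_sm, demo_states, DEMO_IDLE);
            /* DEMO_IDLE's exit hook (none) runs, then demo_busy_enter() */
            sci_change_state(&demo_sm, DEMO_BUSY);
    }
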
+
+static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
+{
+       u32 get_value = ihost->completion_queue_get;
+       u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
+
+       if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
+           COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
+               return true;
+
+       return false;
+}
+
+static bool sci_controller_isr(struct isci_host *ihost)
+{
+       if (sci_controller_completion_queue_has_entries(ihost)) {
+               return true;
+       } else {
+               /*
+                * We have a spurious interrupt; it could be that we have already
+                * emptied the completion queue from a previous interrupt. */
+               writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+               /*
+                * There is a race in the hardware that could cause us not to be notified
+                * of an interrupt completion if we do not take this step.  We will mask
+                * then unmask the interrupts so that if another interrupt is pending
+                * after the interrupt source is cleared, we get the next interrupt message. */
+               writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+               writel(0, &ihost->smu_registers->interrupt_mask);
+       }
+
+       return false;
+}
+
+irqreturn_t isci_msix_isr(int vec, void *data)
+{
+       struct isci_host *ihost = data;
+
+       if (sci_controller_isr(ihost))
+               tasklet_schedule(&ihost->completion_tasklet);
+
+       return IRQ_HANDLED;
+}
+
+static bool sci_controller_error_isr(struct isci_host *ihost)
+{
+       u32 interrupt_status;
+
+       interrupt_status =
+               readl(&ihost->smu_registers->interrupt_status);
+       interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
+
+       if (interrupt_status != 0) {
+               /*
+                * There is an error interrupt pending, so let it through and handle
+                * it in the callback. */
+               return true;
+       }
+
+       /*
+        * There is a race in the hardware that could cause us not to be notified
+        * of an interrupt completion if we do not take this step.  We will mask
+        * then unmask the error interrupts so if there was another interrupt
+        * pending we will be notified.
+        * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
+       writel(0xff, &ihost->smu_registers->interrupt_mask);
+       writel(0, &ihost->smu_registers->interrupt_mask);
+
+       return false;
+}
+
+static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
+{
+       u32 index = SCU_GET_COMPLETION_INDEX(ent);
+       struct isci_request *ireq = ihost->reqs[index];
+
+       /* Make sure that we really want to process this IO request */
+       if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
+           ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
+           ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
+               /* Yep, this is a valid io request; pass it along to the
+                * io request handler
+                */
+               sci_io_request_tc_completion(ireq, ent);
+}
+
+static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
+{
+       u32 index;
+       struct isci_request *ireq;
+       struct isci_remote_device *idev;
+
+       index = SCU_GET_COMPLETION_INDEX(ent);
+
+       switch (scu_get_command_request_type(ent)) {
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
+               ireq = ihost->reqs[index];
+               dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
+                        __func__, ent, ireq);
+               /* @todo For a post TC operation we need to fail the IO
+                * request
+                */
+               break;
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
+               idev = ihost->device_table[index];
+               dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
+                        __func__, ent, idev);
+               /* @todo For a port RNC operation we need to fail the
+                * device
+                */
+               break;
+       default:
+               dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
+                        __func__, ent);
+               break;
+       }
+}
+
+static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
+{
+       u32 index;
+       u32 frame_index;
+
+       struct scu_unsolicited_frame_header *frame_header;
+       struct isci_phy *iphy;
+       struct isci_remote_device *idev;
+
+       enum sci_status result = SCI_FAILURE;
+
+       frame_index = SCU_GET_FRAME_INDEX(ent);
+
+       frame_header = ihost->uf_control.buffers.array[frame_index].header;
+       ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
+
+       if (SCU_GET_FRAME_ERROR(ent)) {
+               /*
+                * / @todo If the IAF frame or SIGNATURE FIS frame has an error, will
+                * /       this cause a problem? We expect the phy initialization will
+                * /       fail if there is an error in the frame. */
+               sci_controller_release_frame(ihost, frame_index);
+               return;
+       }
+
+       if (frame_header->is_address_frame) {
+               index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+               iphy = &ihost->phys[index];
+               result = sci_phy_frame_handler(iphy, frame_index);
+       } else {
+
+               index = SCU_GET_COMPLETION_INDEX(ent);
+
+               if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+                       /*
+                        * This is a signature fis or a frame from a direct attached SATA
+                        * device that has not yet been created.  In either case forward
+                        * the frame to the PE and let it take care of the frame data. */
+                       index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+                       iphy = &ihost->phys[index];
+                       result = sci_phy_frame_handler(iphy, frame_index);
+               } else {
+                       if (index < ihost->remote_node_entries)
+                               idev = ihost->device_table[index];
+                       else
+                               idev = NULL;
+
+                       if (idev != NULL)
+                               result = sci_remote_device_frame_handler(idev, frame_index);
+                       else
+                               sci_controller_release_frame(ihost, frame_index);
+               }
+       }
+
+       if (result != SCI_SUCCESS) {
+               /*
+                * / @todo Is there any reason to report some additional error message
+                * /       when we get this failure notification? */
+       }
+}
+
+static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
+{
+       struct isci_remote_device *idev;
+       struct isci_request *ireq;
+       struct isci_phy *iphy;
+       u32 index;
+
+       index = SCU_GET_COMPLETION_INDEX(ent);
+
+       switch (scu_get_event_type(ent)) {
+       case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
+               /* / @todo The driver did something wrong and we need to fix the condition. */
+               dev_err(&ihost->pdev->dev,
+                       "%s: SCIC Controller 0x%p received SMU command error "
+                       "0x%x\n",
+                       __func__,
+                       ihost,
+                       ent);
+               break;
+
+       case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
+       case SCU_EVENT_TYPE_SMU_ERROR:
+       case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
+               /*
+                * / @todo This is a hardware failure and it's likely that we want to
+                * /       reset the controller. */
+               dev_err(&ihost->pdev->dev,
+                       "%s: SCIC Controller 0x%p received fatal controller "
+                       "event  0x%x\n",
+                       __func__,
+                       ihost,
+                       ent);
+               break;
+
+       case SCU_EVENT_TYPE_TRANSPORT_ERROR:
+               ireq = ihost->reqs[index];
+               sci_io_request_event_handler(ireq, ent);
+               break;
+
+       case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+               switch (scu_get_event_specifier(ent)) {
+               case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
+               case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
+                       ireq = ihost->reqs[index];
+                       if (ireq != NULL)
+                               sci_io_request_event_handler(ireq, ent);
+                       else
+                               dev_warn(&ihost->pdev->dev,
+                                        "%s: SCIC Controller 0x%p received "
+                                        "event 0x%x for io request object "
+                                        "that doesn't exist.\n",
+                                        __func__,
+                                        ihost,
+                                        ent);
+
+                       break;
+
+               case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
+                       idev = ihost->device_table[index];
+                       if (idev != NULL)
+                               sci_remote_device_event_handler(idev, ent);
+                       else
+                               dev_warn(&ihost->pdev->dev,
+                                        "%s: SCIC Controller 0x%p received "
+                                        "event 0x%x for remote device object "
+                                        "that doesn't exist.\n",
+                                        __func__,
+                                        ihost,
+                                        ent);
+
+                       break;
+               }
+               break;
+
+       case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+       /*
+        * direct the broadcast change event to the phy first and then let
+        * the phy redirect the broadcast change to the port object */
+       case SCU_EVENT_TYPE_ERR_CNT_EVENT:
+       /*
+        * direct error counter event to the phy object since that is where
+        * we get the event notification.  This is a type 4 event. */
+       case SCU_EVENT_TYPE_OSSP_EVENT:
+               index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+               iphy = &ihost->phys[index];
+               sci_phy_event_handler(iphy, ent);
+               break;
+
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+       case SCU_EVENT_TYPE_RNC_OPS_MISC:
+               if (index < ihost->remote_node_entries) {
+                       idev = ihost->device_table[index];
+
+                       if (idev != NULL)
+                               sci_remote_device_event_handler(idev, ent);
+               } else
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC Controller 0x%p received event 0x%x "
+                               "for remote device object 0x%0x that doesn't "
+                               "exist.\n",
+                               __func__,
+                               ihost,
+                               ent,
+                               index);
+
+               break;
+
+       default:
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC Controller received unknown event code %x\n",
+                        __func__,
+                        ent);
+               break;
+       }
+}
+
+static void sci_controller_process_completions(struct isci_host *ihost)
+{
+       u32 completion_count = 0;
+       u32 ent;
+       u32 get_index;
+       u32 get_cycle;
+       u32 event_get;
+       u32 event_cycle;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: completion queue beginning get:0x%08x\n",
+               __func__,
+               ihost->completion_queue_get);
+
+       /* Get the component parts of the completion queue */
+       get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
+       get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
+
+       event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
+       event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
+
+       while (
+               NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
+               == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
+               ) {
+               completion_count++;
+
+               ent = ihost->completion_queue[get_index];
+
+               /* increment the get pointer and check for rollover to toggle the cycle bit */
+               get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
+                            (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
+               get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
+
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: completion queue entry:0x%08x\n",
+                       __func__,
+                       ent);
+
+               switch (SCU_GET_COMPLETION_TYPE(ent)) {
+               case SCU_COMPLETION_TYPE_TASK:
+                       sci_controller_task_completion(ihost, ent);
+                       break;
+
+               case SCU_COMPLETION_TYPE_SDMA:
+                       sci_controller_sdma_completion(ihost, ent);
+                       break;
+
+               case SCU_COMPLETION_TYPE_UFI:
+                       sci_controller_unsolicited_frame(ihost, ent);
+                       break;
+
+               case SCU_COMPLETION_TYPE_EVENT:
+               case SCU_COMPLETION_TYPE_NOTIFY: {
+                       event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
+                                      (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
+                       event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
+
+                       sci_controller_event_completion(ihost, ent);
+                       break;
+               }
+               default:
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: SCIC Controller received unknown "
+                                "completion type %x\n",
+                                __func__,
+                                ent);
+                       break;
+               }
+       }
+
+       /* Update the get register if we completed one or more entries */
+       if (completion_count > 0) {
+               ihost->completion_queue_get =
+                       SMU_CQGR_GEN_BIT(ENABLE) |
+                       SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
+                       event_cycle |
+                       SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
+                       get_cycle |
+                       SMU_CQGR_GEN_VAL(POINTER, get_index);
+
+               writel(ihost->completion_queue_get,
+                      &ihost->smu_registers->completion_queue_get);
+
+       }
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: completion queue ending get:0x%08x\n",
+               __func__,
+               ihost->completion_queue_get);
+
+}
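
The loop above consumes a power-of-two ring shared with hardware, where bit 31 of each entry carries the producer's cycle bit and an entry is only valid while that bit matches the consumer's expected value. A stripped-down, self-contained sketch of just that mechanism (not the driver's code; the SMU get register and the separate event pointer are omitted):

    #include <linux/types.h>

    #define RING_ENTRIES 512                /* must be a power of two */

    struct demo_ring {
            u32 entries[RING_ENTRIES];      /* bit 31 carries the producer's cycle bit */
            u32 get;                        /* consumer index */
            u32 cycle;                      /* bit 31 value the consumer expects next */
    };

    /* Consume one entry if the producer has written it; mirrors the
     * cycle-bit comparison done with NORMALIZE_GET_POINTER_CYCLE_BIT()
     * and COMPLETION_QUEUE_CYCLE_BIT() above. */
    static bool demo_ring_consume(struct demo_ring *r, u32 *out)
    {
            u32 ent = r->entries[r->get];

            if ((ent & 0x80000000) != r->cycle)
                    return false;           /* not written yet this lap */

            *out = ent;
            r->get = (r->get + 1) & (RING_ENTRIES - 1);
            if (r->get == 0)
                    r->cycle ^= 0x80000000; /* wrapped: expect the opposite bit */
            return true;
    }
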
+
+static void sci_controller_error_handler(struct isci_host *ihost)
+{
+       u32 interrupt_status;
+
+       interrupt_status =
+               readl(&ihost->smu_registers->interrupt_status);
+
+       if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
+           sci_controller_completion_queue_has_entries(ihost)) {
+
+               sci_controller_process_completions(ihost);
+               writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
+       } else {
+               dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
+                       interrupt_status);
+
+               sci_change_state(&ihost->sm, SCIC_FAILED);
+
+               return;
+       }
+
+       /* If we don't process any completions, I am not sure that we want to do this.
+        * We are in the middle of a hardware fault and should probably be reset.
+        */
+       writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+irqreturn_t isci_intx_isr(int vec, void *data)
+{
+       irqreturn_t ret = IRQ_NONE;
+       struct isci_host *ihost = data;
+
+       if (sci_controller_isr(ihost)) {
+               writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+               tasklet_schedule(&ihost->completion_tasklet);
+               ret = IRQ_HANDLED;
+       } else if (sci_controller_error_isr(ihost)) {
+               spin_lock(&ihost->scic_lock);
+               sci_controller_error_handler(ihost);
+               spin_unlock(&ihost->scic_lock);
+               ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
+
+irqreturn_t isci_error_isr(int vec, void *data)
+{
+       struct isci_host *ihost = data;
+
+       if (sci_controller_error_isr(ihost))
+               sci_controller_error_handler(ihost);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * isci_host_start_complete() - This function is called by the core library,
+ *    through the ISCI Module, to indicate controller start status.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @completion_status: This parameter specifies the completion status from the
+ *    core library.
+ *
+ */
+static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+       if (completion_status != SCI_SUCCESS)
+               dev_info(&ihost->pdev->dev,
+                       "controller start timed out, continuing...\n");
+       isci_host_change_state(ihost, isci_ready);
+       clear_bit(IHOST_START_PENDING, &ihost->flags);
+       wake_up(&ihost->eventq);
+}
+
+int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+       struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
+       if (test_bit(IHOST_START_PENDING, &ihost->flags))
+               return 0;
+
+       /* todo: use sas_flush_discovery once it is upstream */
+       scsi_flush_work(shost);
+
+       scsi_flush_work(shost);
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: ihost->status = %d, time = %ld\n",
+                __func__, isci_host_get_state(ihost), time);
+
+       return 1;
+
+}
+
+/**
+ * sci_controller_get_suggested_start_timeout() - This method returns the
+ *    suggested sci_controller_start() timeout amount.  The user is free to
+ *    use any timeout value, but this method provides the suggested minimum
+ *    start timeout value.  The returned value is based upon empirical
+ *    information determined as a result of interoperability testing.
+ * @controller: the handle to the controller object for which to return the
+ *    suggested start timeout.
+ *
+ * This method returns the number of milliseconds for the suggested start
+ * operation timeout.
+ */
+static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
+{
+       /* Validate the user supplied parameters. */
+       if (!ihost)
+               return 0;
+
+       /*
+        * The suggested minimum timeout value for a controller start operation:
+        *
+        *     Signature FIS Timeout
+        *   + Phy Start Timeout
+        *   + Number of Phy Spin Up Intervals
+        *   ---------------------------------
+        *   Number of milliseconds for the controller start operation.
+        *
+        * NOTE: The number of phy spin up intervals will be equivalent
+        *       to the number of phys divided by the number of phys allowed
+        *       per interval - 1 (once OEM parameters are supported).
+        *       Currently we assume only 1 phy per interval. */
+
+       return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+               + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
+               + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+}
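
As a worked example of the comment above, assuming SCI_MAX_PHYS is 4 (an assumption; that constant is not defined in this hunk) and writing T_sig for SCIC_SDS_SIGNATURE_FIS_TIMEOUT (also not shown here), the suggested start timeout works out to:

    T_sig + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
          + (SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL
    = T_sig + 100 + 3 * 500
    = T_sig + 1600 milliseconds
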
+
+static void sci_controller_enable_interrupts(struct isci_host *ihost)
+{
+       BUG_ON(ihost->smu_registers == NULL);
+       writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+void sci_controller_disable_interrupts(struct isci_host *ihost)
+{
+       BUG_ON(ihost->smu_registers == NULL);
+       writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+}
+
+static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
+{
+       u32 port_task_scheduler_value;
+
+       port_task_scheduler_value =
+               readl(&ihost->scu_registers->peg0.ptsg.control);
+       port_task_scheduler_value |=
+               (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
+                SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
+       writel(port_task_scheduler_value,
+              &ihost->scu_registers->peg0.ptsg.control);
+}
+
+static void sci_controller_assign_task_entries(struct isci_host *ihost)
+{
+       u32 task_assignment;
+
+       /*
+        * Assign all the TCs to function 0
+        * TODO: Do we actually need to read this register to write it back?
+        */
+
+       task_assignment =
+               readl(&ihost->smu_registers->task_context_assignment[0]);
+
+       task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
+               (SMU_TCA_GEN_VAL(ENDING,  ihost->task_context_entries - 1)) |
+               (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
+
+       writel(task_assignment,
+               &ihost->smu_registers->task_context_assignment[0]);
+
+}
+
+static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
+{
+       u32 index;
+       u32 completion_queue_control_value;
+       u32 completion_queue_get_value;
+       u32 completion_queue_put_value;
+
+       ihost->completion_queue_get = 0;
+
+       completion_queue_control_value =
+               (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
+                SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
+
+       writel(completion_queue_control_value,
+              &ihost->smu_registers->completion_queue_control);
+
+
+       /* Set the completion queue get pointer and enable the queue */
+       completion_queue_get_value = (
+               (SMU_CQGR_GEN_VAL(POINTER, 0))
+               | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
+               | (SMU_CQGR_GEN_BIT(ENABLE))
+               | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
+               );
+
+       writel(completion_queue_get_value,
+              &ihost->smu_registers->completion_queue_get);
+
+       /* Set the completion queue put pointer */
+       completion_queue_put_value = (
+               (SMU_CQPR_GEN_VAL(POINTER, 0))
+               | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
+               );
+
+       writel(completion_queue_put_value,
+              &ihost->smu_registers->completion_queue_put);
+
+       /* Initialize the cycle bit of the completion queue entries */
+       for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
+               /*
+                * If get.cycle_bit != completion_queue.cycle_bit
+                * its not a valid completion queue entry
+                * so at system start all entries are invalid */
+               ihost->completion_queue[index] = 0x80000000;
+       }
+}
+
+static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
+{
+       u32 frame_queue_control_value;
+       u32 frame_queue_get_value;
+       u32 frame_queue_put_value;
+
+       /* Write the queue size */
+       frame_queue_control_value =
+               SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
+
+       writel(frame_queue_control_value,
+              &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
+
+       /* Setup the get pointer for the unsolicited frame queue */
+       frame_queue_get_value = (
+               SCU_UFQGP_GEN_VAL(POINTER, 0)
+               |  SCU_UFQGP_GEN_BIT(ENABLE_BIT)
+               );
+
+       writel(frame_queue_get_value,
+              &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+       /* Setup the put pointer for the unsolicited frame queue */
+       frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
+       writel(frame_queue_put_value,
+              &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
+}
+
+static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+{
+       if (ihost->sm.current_state_id == SCIC_STARTING) {
+               /*
+                * We move into the ready state, because some of the phys/ports
+                * may be up and operational.
+                */
+               sci_change_state(&ihost->sm, SCIC_READY);
+
+               isci_host_start_complete(ihost, status);
+       }
+}
+
+static bool is_phy_starting(struct isci_phy *iphy)
+{
+       enum sci_phy_states state;
+
+       state = iphy->sm.current_state_id;
+       switch (state) {
+       case SCI_PHY_STARTING:
+       case SCI_PHY_SUB_INITIAL:
+       case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_IAF_UF:
+       case SCI_PHY_SUB_AWAIT_SAS_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+       case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+       case SCI_PHY_SUB_FINAL:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/**
+ * sci_controller_start_next_phy - start phy
+ * @scic: controller
+ *
+ * If all the phys have been started, then attempt to transition the
+ * controller to the READY state and inform the user
+ * (sci_cb_controller_start_complete()).
+ */
+static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
+{
+       struct sci_oem_params *oem = &ihost->oem_parameters;
+       struct isci_phy *iphy;
+       enum sci_status status;
+
+       status = SCI_SUCCESS;
+
+       if (ihost->phy_startup_timer_pending)
+               return status;
+
+       if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
+               bool is_controller_start_complete = true;
+               u32 state;
+               u8 index;
+
+               for (index = 0; index < SCI_MAX_PHYS; index++) {
+                       iphy = &ihost->phys[index];
+                       state = iphy->sm.current_state_id;
+
+                       if (!phy_get_non_dummy_port(iphy))
+                               continue;
+
+                       /* The controller start operation is complete iff:
+                        * - all links have been given an opportunity to start
+                        * - have no indication of a connected device
+                        * - have an indication of a connected device and it has
+                        *   finished the link training process.
+                        */
+                       if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+                           (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+                           (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+                               is_controller_start_complete = false;
+                               break;
+                       }
+               }
+
+               /*
+                * The controller has successfully finished the start process.
+                * Inform the SCI Core user and transition to the READY state. */
+               if (is_controller_start_complete == true) {
+                       sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+                       sci_del_timer(&ihost->phy_timer);
+                       ihost->phy_startup_timer_pending = false;
+               }
+       } else {
+               iphy = &ihost->phys[ihost->next_phy_to_start];
+
+               if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+                       if (phy_get_non_dummy_port(iphy) == NULL) {
+                               ihost->next_phy_to_start++;
+
+                               /* Caution: recursion ahead, be forewarned.
+                                *
+                                * The PHY was never added to a PORT in MPC mode,
+                                * so start the next phy in sequence.  This phy
+                                * will never go link up and will not draw power;
+                                * the OEM parameters either configured the phy
+                                * incorrectly for the PORT or it was never
+                                * assigned to a PORT.
+                                */
+                               return sci_controller_start_next_phy(ihost);
+                       }
+               }
+
+               status = sci_phy_start(iphy);
+
+               if (status == SCI_SUCCESS) {
+                       sci_mod_timer(&ihost->phy_timer,
+                                     SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
+                       ihost->phy_startup_timer_pending = true;
+               } else {
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: Controller start operation failed "
+                                "to start phy %d because of status "
+                                "%d.\n",
+                                __func__,
+                                ihost->phys[ihost->next_phy_to_start].phy_index,
+                                status);
+               }
+
+               ihost->next_phy_to_start++;
+       }
+
+       return status;
+}
+
+static void phy_startup_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
+       unsigned long flags;
+       enum sci_status status;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       ihost->phy_startup_timer_pending = false;
+
+       do {
+               status = sci_controller_start_next_phy(ihost);
+       } while (status != SCI_SUCCESS);
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
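+
+/* A minimal sketch of the phy startup sequencing implemented above, assuming
+ * all SCI_MAX_PHYS phys are assigned to ports and come up normally:
+ *
+ *   sci_controller_start()
+ *     -> sci_controller_start_next_phy()   start phy 0, arm ihost->phy_timer
+ *   sci_controller_link_up()  (phy 0)
+ *     -> sci_controller_start_next_phy()   start phy 1, re-arm the timer
+ *   ... repeat until next_phy_to_start reaches SCI_MAX_PHYS ...
+ *   sci_controller_start_next_phy()
+ *     -> sci_controller_transition_to_ready(ihost, SCI_SUCCESS)
+ *
+ * If a started phy never reports link up, phy_startup_timeout() fires and
+ * keeps calling sci_controller_start_next_phy() for the remaining phys.
+ */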
+
+static u16 isci_tci_active(struct isci_host *ihost)
+{
+       return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+static enum sci_status sci_controller_start(struct isci_host *ihost,
+                                            u32 timeout)
+{
+       enum sci_status result;
+       u16 index;
+
+       if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller start operation requested in "
+                        "invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       /* Build the TCi free pool */
+       BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
+       ihost->tci_head = 0;
+       ihost->tci_tail = 0;
+       for (index = 0; index < ihost->task_context_entries; index++)
+               isci_tci_free(ihost, index);
+
+       /* Build the RNi free pool */
+       sci_remote_node_table_initialize(&ihost->available_remote_nodes,
+                                        ihost->remote_node_entries);
+
+       /*
+        * Before anything else lets make sure we will not be
+        * interrupted by the hardware.
+        */
+       sci_controller_disable_interrupts(ihost);
+
+       /* Enable the port task scheduler */
+       sci_controller_enable_port_task_scheduler(ihost);
+
+       /* Assign all the task entries to ihost physical function */
+       sci_controller_assign_task_entries(ihost);
+
+       /* Now initialize the completion queue */
+       sci_controller_initialize_completion_queue(ihost);
+
+       /* Initialize the unsolicited frame queue for use */
+       sci_controller_initialize_unsolicited_frame_queue(ihost);
+
+       /* Start all of the ports on this controller */
+       for (index = 0; index < ihost->logical_port_entries; index++) {
+               struct isci_port *iport = &ihost->ports[index];
+
+               result = sci_port_start(iport);
+               if (result)
+                       return result;
+       }
+
+       sci_controller_start_next_phy(ihost);
+
+       sci_mod_timer(&ihost->timer, timeout);
+
+       sci_change_state(&ihost->sm, SCIC_STARTING);
+
+       return SCI_SUCCESS;
+}
+
+void isci_host_scan_start(struct Scsi_Host *shost)
+{
+       struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+       unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
+
+       set_bit(IHOST_START_PENDING, &ihost->flags);
+
+       spin_lock_irq(&ihost->scic_lock);
+       sci_controller_start(ihost, tmo);
+       sci_controller_enable_interrupts(ihost);
+       spin_unlock_irq(&ihost->scic_lock);
+}
+
+static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+       isci_host_change_state(ihost, isci_stopped);
+       sci_controller_disable_interrupts(ihost);
+       clear_bit(IHOST_STOP_PENDING, &ihost->flags);
+       wake_up(&ihost->eventq);
+}
+
+static void sci_controller_completion_handler(struct isci_host *ihost)
+{
+       /* Empty out the completion queue */
+       if (sci_controller_completion_queue_has_entries(ihost))
+               sci_controller_process_completions(ihost);
+
+       /* Clear the interrupt and enable all interrupts again */
+       writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+       /* Could we write the value of SMU_ISR_COMPLETION? */
+       writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+       writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+/**
+ * isci_host_completion_routine() - This function is the delayed service
+ *    routine that calls the sci core library's completion handler. It's
+ *    scheduled as a tasklet from the interrupt service routine when
+ *    interrupts are in use, or set as the timeout function in polled mode.
+ * @data: This parameter specifies the ISCI host object
+ *
+ */
+static void isci_host_completion_routine(unsigned long data)
+{
+       struct isci_host *ihost = (struct isci_host *)data;
+       struct list_head    completed_request_list;
+       struct list_head    errored_request_list;
+       struct list_head    *current_position;
+       struct list_head    *next_position;
+       struct isci_request *request;
+       struct isci_request *next_request;
+       struct sas_task     *task;
+
+       INIT_LIST_HEAD(&completed_request_list);
+       INIT_LIST_HEAD(&errored_request_list);
+
+       spin_lock_irq(&ihost->scic_lock);
+
+       sci_controller_completion_handler(ihost);
+
+       /* Take the lists of completed I/Os from the host. */
+
+       list_splice_init(&ihost->requests_to_complete,
+                        &completed_request_list);
+
+       /* Take the list of errored I/Os from the host. */
+       list_splice_init(&ihost->requests_to_errorback,
+                        &errored_request_list);
+
+       spin_unlock_irq(&ihost->scic_lock);
+
+       /* Process any completions in the lists. */
+       list_for_each_safe(current_position, next_position,
+                          &completed_request_list) {
+
+               request = list_entry(current_position, struct isci_request,
+                                    completed_node);
+               task = isci_request_access_task(request);
+
+               /* Normal notification (task_done) */
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: Normal - request/task = %p/%p\n",
+                       __func__,
+                       request,
+                       task);
+
+               /* Return the task to libsas */
+               if (task != NULL) {
+
+                       task->lldd_task = NULL;
+                       if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+                               /* If the task is already in the abort path,
+                                * the task_done callback cannot be called.
+                                */
+                               task->task_done(task);
+                       }
+               }
+
+               spin_lock_irq(&ihost->scic_lock);
+               isci_free_tag(ihost, request->io_tag);
+               spin_unlock_irq(&ihost->scic_lock);
+       }
+       list_for_each_entry_safe(request, next_request, &errored_request_list,
+                                completed_node) {
+
+               task = isci_request_access_task(request);
+
+               /* Use sas_task_abort */
+               dev_warn(&ihost->pdev->dev,
+                        "%s: Error - request/task = %p/%p\n",
+                        __func__,
+                        request,
+                        task);
+
+               if (task != NULL) {
+
+                       /* Put the task into the abort path if it's not there
+                        * already.
+                        */
+                       if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
+                               sas_task_abort(task);
+
+               } else {
+                       /* This is a case where the request has completed with a
+                        * status such that it needed further target servicing,
+                        * but the sas_task reference has already been removed
+                        * from the request.  Since it was errored, it was not
+                        * being aborted, so there is nothing to do except free
+                        * it.
+                        */
+
+                       spin_lock_irq(&ihost->scic_lock);
+                       /* Remove the request from the remote device's list
+                        * of pending requests.
+                        */
+                       list_del_init(&request->dev_node);
+                       isci_free_tag(ihost, request->io_tag);
+                       spin_unlock_irq(&ihost->scic_lock);
+               }
+       }
+
+}
+
+/**
+ * sci_controller_stop() - This method will stop an individual controller
+ *    object. This method will invoke the associated user callback upon
+ *    completion.  The completion callback is called when the following
+ *    conditions are met:
+ *    - the method return status is SCI_SUCCESS.
+ *    - the controller has been quiesced.
+ *    This method will ensure that all IO requests are quiesced, phys are
+ *    stopped, and all additional operations by the hardware are halted.
+ * @ihost: the handle to the controller object to stop.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    stop operation should complete.
+ *
+ * The controller must be in the STARTED or STOPPED state. Indicate if the
+ * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
+ * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
+ * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
+ * controller is not either in the STARTED or STOPPED states.
+ */
+static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
+{
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller stop operation requested in "
+                        "invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_mod_timer(&ihost->timer, timeout);
+       sci_change_state(&ihost->sm, SCIC_STOPPING);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_reset() - This method will reset the supplied core
+ *    controller regardless of the state of said controller.  This operation is
+ *    considered destructive.  In other words, all current operations are wiped
+ *    out.  No IO completions for outstanding devices occur.  Outstanding IO
+ *    requests are not aborted or completed at the actual remote device.
+ * @ihost: the handle to the controller object to reset.
+ *
+ * Indicate if the controller reset method succeeded or failed in some way.
+ * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
+ * the controller reset operation is unable to complete.
+ */
+static enum sci_status sci_controller_reset(struct isci_host *ihost)
+{
+       switch (ihost->sm.current_state_id) {
+       case SCIC_RESET:
+       case SCIC_READY:
+       case SCIC_STOPPED:
+       case SCIC_FAILED:
+               /*
+                * The reset operation is not a graceful cleanup, just
+                * perform the state transition.
+                */
+               sci_change_state(&ihost->sm, SCIC_RESETTING);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller reset operation requested in "
+                        "invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+void isci_host_deinit(struct isci_host *ihost)
+{
+       int i;
+
+       isci_host_change_state(ihost, isci_stopping);
+       for (i = 0; i < SCI_MAX_PORTS; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+               struct isci_remote_device *idev, *d;
+
+               list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
+                       if (test_bit(IDEV_ALLOCATED, &idev->flags))
+                               isci_remote_device_stop(ihost, idev);
+               }
+       }
+
+       set_bit(IHOST_STOP_PENDING, &ihost->flags);
+
+       spin_lock_irq(&ihost->scic_lock);
+       sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
+       spin_unlock_irq(&ihost->scic_lock);
+
+       wait_for_stop(ihost);
+       sci_controller_reset(ihost);
+
+       /* Cancel any/all outstanding port timers */
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+               del_timer_sync(&iport->timer.timer);
+       }
+
+       /* Cancel any/all outstanding phy timers */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               struct isci_phy *iphy = &ihost->phys[i];
+               del_timer_sync(&iphy->sata_timer.timer);
+       }
+
+       del_timer_sync(&ihost->port_agent.timer.timer);
+
+       del_timer_sync(&ihost->power_control.timer.timer);
+
+       del_timer_sync(&ihost->timer.timer);
+
+       del_timer_sync(&ihost->phy_timer.timer);
+}
+
+static void __iomem *scu_base(struct isci_host *isci_host)
+{
+       struct pci_dev *pdev = isci_host->pdev;
+       int id = isci_host->id;
+
+       return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
+}
+
+static void __iomem *smu_base(struct isci_host *isci_host)
+{
+       struct pci_dev *pdev = isci_host->pdev;
+       int id = isci_host->id;
+
+       return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
+}
+
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+       int i;
+
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               struct sci_phy_user_params *u_phy = &u->phys[i];
+
+               u_phy->max_speed_generation = phy_gen;
+
+               /* we are not exporting these for now */
+               u_phy->align_insertion_frequency = 0x7f;
+               u_phy->in_connection_align_insertion_frequency = 0xff;
+               u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+       }
+
+       u->stp_inactivity_timeout = stp_inactive_to;
+       u->ssp_inactivity_timeout = ssp_inactive_to;
+       u->stp_max_occupancy_timeout = stp_max_occ_to;
+       u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+       u->no_outbound_task_timeout = no_outbound_task_to;
+       u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+}
+
+static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       sci_del_timer(&ihost->timer);
+}
+
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
+#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
+#define INTERRUPT_COALESCE_NUMBER_MAX                        256
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
+
+/**
+ * sci_controller_set_interrupt_coalescence() - This method allows the user to
+ *    configure the interrupt coalescence.
+ * @ihost: This parameter represents the handle to the controller object
+ *    for which its interrupt coalesce register is overridden.
+ * @coalesce_number: Used to control the number of entries in the Completion
+ *    Queue before an interrupt is generated. If the number of entries exceeds
+ *    this number, an interrupt will be generated. The valid range of the input
+ *    is [0, 256]. A setting of 0 results in coalescing being disabled.
+ * @coalesce_timeout: Timeout value in microseconds. The valid range of the
+ *    input is [0, 2700000]. A setting of 0 is allowed and results in no
+ *    interrupt coalescing timeout.
+ *
+ * Indicate if the user successfully set the interrupt coalesce parameters.
+ * SCI_SUCCESS The user successfully updated the interrupt coalescence.
+ * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
+ */
+static enum sci_status
+sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
+                                        u32 coalesce_number,
+                                        u32 coalesce_timeout)
+{
+       u8 timeout_encode = 0;
+       u32 min = 0;
+       u32 max = 0;
+
+       /* Check if the input parameters fall in the range. */
+       if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
+               return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+       /*
+        *  Defined encoding for interrupt coalescing timeout:
+        *              Value   Min      Max     Units
+        *              -----   ---      ---     -----
+        *              0       -        -       Disabled
+        *              1       13.3     20.0    ns
+        *              2       26.7     40.0
+        *              3       53.3     80.0
+        *              4       106.7    160.0
+        *              5       213.3    320.0
+        *              6       426.7    640.0
+        *              7       853.3    1280.0
+        *              8       1.7      2.6     us
+        *              9       3.4      5.1
+        *              10      6.8      10.2
+        *              11      13.7     20.5
+        *              12      27.3     41.0
+        *              13      54.6     81.9
+        *              14      109.2    163.8
+        *              15      218.5    327.7
+        *              16      436.9    655.4
+        *              17      873.8    1310.7
+        *              18      1.7      2.6     ms
+        *              19      3.5      5.2
+        *              20      7.0      10.5
+        *              21      14.0     21.0
+        *              22      28.0     41.9
+        *              23      55.9     83.9
+        *              24      111.8    167.8
+        *              25      223.7    335.5
+        *              26      447.4    671.1
+        *              27      894.8    1342.2
+        *              28      1.8      2.7     s
+        *              Others Undefined */
+
+       /*
+        * Use the table above to decide the encode of interrupt coalescing timeout
+        * value for register writing. */
+       if (coalesce_timeout == 0)
+               timeout_encode = 0;
+       else {
+               /* make the timeout value in unit of (10 ns). */
+               coalesce_timeout = coalesce_timeout * 100;
+               min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
+               max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
+
+               /* get the encode of timeout for register writing. */
+               for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
+                     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
+                     timeout_encode++) {
+                       if (min <= coalesce_timeout &&  max > coalesce_timeout)
+                               break;
+                       else if (coalesce_timeout >= max && coalesce_timeout < min * 2
+                                && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
+                               if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
+                                       break;
+                               else {
+                                       timeout_encode++;
+                                       break;
+                               }
+                       } else {
+                               max = max * 2;
+                               min = min * 2;
+                       }
+               }
+
+               if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
+                       /* the value is out of range. */
+                       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+       }
+
+       writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
+              SMU_ICC_GEN_VAL(TIMER, timeout_encode),
+              &ihost->smu_registers->interrupt_coalesce_control);
+
+
+       ihost->interrupt_coalesce_number = (u16)coalesce_number;
+       ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
+
+       return SCI_SUCCESS;
+}
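+
+/* Illustrative walk-through of the timeout encoding loop above (arithmetic
+ * only, assuming a requested coalesce_timeout of 30 microseconds):
+ *
+ *   coalesce_timeout = 30 * 100 = 3000     (units of 10 ns)
+ *   encode  7: min =   85, max =  128      3000 not in [85, 128)
+ *   encode  8: min =  170, max =  256      ... min and max double each pass
+ *   encode 12: min = 2720, max = 4096      2720 <= 3000 < 4096, so break
+ *
+ * Encode 12 corresponds to the 27.3 - 41.0 us row of the table above, which
+ * contains the requested 30 us.  The 250 us default programmed in
+ * sci_controller_ready_state_enter() below resolves to encode 15
+ * (218.5 - 327.7 us) by the same process.
+ */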
+
+
+static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       /* set the default interrupt coalescence number and timeout value. */
+       sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+}
+
+static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       /* disable interrupt coalescence. */
+       sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+       u32 index;
+       enum sci_status status;
+       enum sci_status phy_status;
+
+       status = SCI_SUCCESS;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               phy_status = sci_phy_stop(&ihost->phys[index]);
+
+               if (phy_status != SCI_SUCCESS &&
+                   phy_status != SCI_FAILURE_INVALID_STATE) {
+                       status = SCI_FAILURE;
+
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: Controller stop operation failed to stop "
+                                "phy %d because of status %d.\n",
+                                __func__,
+                                ihost->phys[index].phy_index, phy_status);
+               }
+       }
+
+       return status;
+}
+
+static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
+{
+       u32 index;
+       enum sci_status port_status;
+       enum sci_status status = SCI_SUCCESS;
+
+       for (index = 0; index < ihost->logical_port_entries; index++) {
+               struct isci_port *iport = &ihost->ports[index];
+
+               port_status = sci_port_stop(iport);
+
+               if ((port_status != SCI_SUCCESS) &&
+                   (port_status != SCI_FAILURE_INVALID_STATE)) {
+                       status = SCI_FAILURE;
+
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: Controller stop operation failed to "
+                                "stop port %d because of status %d.\n",
+                                __func__,
+                                iport->logical_port_index,
+                                port_status);
+               }
+       }
+
+       return status;
+}
+
+static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
+{
+       u32 index;
+       enum sci_status status;
+       enum sci_status device_status;
+
+       status = SCI_SUCCESS;
+
+       for (index = 0; index < ihost->remote_node_entries; index++) {
+               if (ihost->device_table[index] != NULL) {
+                       /* @todo What timeout value do we want to provide to this request? */
+                       device_status = sci_remote_device_stop(ihost->device_table[index], 0);
+
+                       if ((device_status != SCI_SUCCESS) &&
+                           (device_status != SCI_FAILURE_INVALID_STATE)) {
+                               dev_warn(&ihost->pdev->dev,
+                                        "%s: Controller stop operation failed "
+                                        "to stop device 0x%p because of "
+                                        "status %d.\n",
+                                        __func__,
+                                        ihost->device_table[index], device_status);
+                       }
+               }
+       }
+
+       return status;
+}
+
+static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       /* Stop all of the components for this controller */
+       sci_controller_stop_phys(ihost);
+       sci_controller_stop_ports(ihost);
+       sci_controller_stop_devices(ihost);
+}
+
+static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       sci_del_timer(&ihost->timer);
+}
+
+static void sci_controller_reset_hardware(struct isci_host *ihost)
+{
+       /* Disable interrupts so we don't take any spurious interrupts */
+       sci_controller_disable_interrupts(ihost);
+
+       /* Reset the SCU */
+       writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
+
+       /* Delay for 1ms before clearing the CQP and UFQPR. */
+       udelay(1000);
+
+       /* The write to the CQGR clears the CQP */
+       writel(0x00000000, &ihost->smu_registers->completion_queue_get);
+
+       /* The write to the UFQGP clears the UFQPR */
+       writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       sci_controller_reset_hardware(ihost);
+       sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static const struct sci_base_state sci_controller_state_table[] = {
+       [SCIC_INITIAL] = {
+               .enter_state = sci_controller_initial_state_enter,
+       },
+       [SCIC_RESET] = {},
+       [SCIC_INITIALIZING] = {},
+       [SCIC_INITIALIZED] = {},
+       [SCIC_STARTING] = {
+               .exit_state  = sci_controller_starting_state_exit,
+       },
+       [SCIC_READY] = {
+               .enter_state = sci_controller_ready_state_enter,
+               .exit_state  = sci_controller_ready_state_exit,
+       },
+       [SCIC_RESETTING] = {
+               .enter_state = sci_controller_resetting_state_enter,
+       },
+       [SCIC_STOPPING] = {
+               .enter_state = sci_controller_stopping_state_enter,
+               .exit_state = sci_controller_stopping_state_exit,
+       },
+       [SCIC_STOPPED] = {},
+       [SCIC_FAILED] = {}
+};
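+
+/* The enter/exit hooks in the table above are presumably run by
+ * sci_change_state() as the controller state machine leaves and enters the
+ * corresponding states.  A sketch of the stop path in those terms:
+ *
+ *   sci_controller_stop()
+ *     -> sci_change_state(&ihost->sm, SCIC_STOPPING)
+ *          -> sci_controller_stopping_state_enter()  stops phys/ports/devices
+ *   ... devices finish stopping or controller_timeout() fires ...
+ *     -> leaving SCIC_STOPPING runs sci_controller_stopping_state_exit(),
+ *        which cancels ihost->timer.
+ */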
+
+static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
+{
+       /* these defaults are overridden by the platform / firmware */
+       u16 index;
+
+       /* Default to APC mode. */
+       ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+       /* Default to 1 concurrent device spin up. */
+       ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+
+       /* Default to no SSC operation. */
+       ihost->oem_parameters.controller.do_enable_ssc = false;
+
+       /* Initialize all of the port parameter information to narrow ports. */
+       for (index = 0; index < SCI_MAX_PORTS; index++) {
+               ihost->oem_parameters.ports[index].phy_mask = 0;
+       }
+
+       /* Initialize all of the phy parameter information. */
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               /* Default to 6G (i.e. Gen 3) for now. */
+               ihost->user_parameters.phys[index].max_speed_generation = 3;
+
+               /* the frequencies cannot be 0 */
+               ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
+               ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
+               ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
+
+               /*
+                * Previous Vitesse based expanders had an arbitration issue that
+                * is worked around by having the upper 32-bits of the SAS address
+                * be a value greater than the Vitesse company identifier.
+                * Hence, usage of 0x5FCFFFFF. */
+               ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
+               ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
+       }
+
+       ihost->user_parameters.stp_inactivity_timeout = 5;
+       ihost->user_parameters.ssp_inactivity_timeout = 5;
+       ihost->user_parameters.stp_max_occupancy_timeout = 5;
+       ihost->user_parameters.ssp_max_occupancy_timeout = 20;
+       ihost->user_parameters.no_outbound_task_timeout = 20;
+}
+
+static void controller_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
+       struct sci_base_state_machine *sm = &ihost->sm;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       if (sm->current_state_id == SCIC_STARTING)
+               sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
+       else if (sm->current_state_id == SCIC_STOPPING) {
+               sci_change_state(sm, SCIC_FAILED);
+               isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
+       } else  /* @todo Now what do we want to do in this case? */
+               dev_err(&ihost->pdev->dev,
+                       "%s: Controller timer fired when controller was not "
+                       "in a state being timed.\n",
+                       __func__);
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static enum sci_status sci_controller_construct(struct isci_host *ihost,
+                                               void __iomem *scu_base,
+                                               void __iomem *smu_base)
+{
+       u8 i;
+
+       sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
+
+       ihost->scu_registers = scu_base;
+       ihost->smu_registers = smu_base;
+
+       sci_port_configuration_agent_construct(&ihost->port_agent);
+
+       /* Construct the ports for this controller */
+       for (i = 0; i < SCI_MAX_PORTS; i++)
+               sci_port_construct(&ihost->ports[i], i, ihost);
+       sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
+
+       /* Construct the phys for this controller */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               /* Add all the PHYs to the dummy port */
+               sci_phy_construct(&ihost->phys[i],
+                                 &ihost->ports[SCI_MAX_PORTS], i);
+       }
+
+       ihost->invalid_phy_mask = 0;
+
+       sci_init_timer(&ihost->timer, controller_timeout);
+
+       /* Initialize the User and OEM parameters to default values. */
+       sci_controller_set_default_config_parameters(ihost);
+
+       return sci_controller_reset(ihost);
+}
+
+int sci_oem_parameters_validate(struct sci_oem_params *oem)
+{
+       int i;
+
+       for (i = 0; i < SCI_MAX_PORTS; i++)
+               if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
+                       return -EINVAL;
+
+       for (i = 0; i < SCI_MAX_PHYS; i++)
+               if (oem->phys[i].sas_address.high == 0 &&
+                   oem->phys[i].sas_address.low == 0)
+                       return -EINVAL;
+
+       if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
+               for (i = 0; i < SCI_MAX_PHYS; i++)
+                       if (oem->ports[i].phy_mask != 0)
+                               return -EINVAL;
+       } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+               u8 phy_mask = 0;
+
+               for (i = 0; i < SCI_MAX_PHYS; i++)
+                       phy_mask |= oem->ports[i].phy_mask;
+
+               if (phy_mask == 0)
+                       return -EINVAL;
+       } else
+               return -EINVAL;
+
+       if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+               return -EINVAL;
+
+       return 0;
+}
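+
+/* Example OEM port configuration that would pass the validation above (a
+ * sketch only; the mask values are hypothetical and assume a 4-phy
+ * controller):
+ *
+ *   MPC (manual) mode, two wide ports of two phys each:
+ *     oem->controller.mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+ *     oem->ports[0].phy_mask = 0x3;   phys 0 and 1
+ *     oem->ports[1].phy_mask = 0xc;   phys 2 and 3
+ *     oem->ports[2].phy_mask = 0;
+ *     oem->ports[3].phy_mask = 0;
+ *
+ * In APC (automatic) mode every phy_mask must instead be zero, and in either
+ * mode each phy needs a non-zero SAS address.
+ */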
+
+static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
+{
+       u32 state = ihost->sm.current_state_id;
+
+       if (state == SCIC_RESET ||
+           state == SCIC_INITIALIZING ||
+           state == SCIC_INITIALIZED) {
+
+               if (sci_oem_parameters_validate(&ihost->oem_parameters))
+                       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+static void power_control_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
+       struct isci_phy *iphy;
+       unsigned long flags;
+       u8 i;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       ihost->power_control.phys_granted_power = 0;
+
+       if (ihost->power_control.phys_waiting == 0) {
+               ihost->power_control.timer_started = false;
+               goto done;
+       }
+
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+
+               if (ihost->power_control.phys_waiting == 0)
+                       break;
+
+               iphy = ihost->power_control.requesters[i];
+               if (iphy == NULL)
+                       continue;
+
+               if (ihost->power_control.phys_granted_power >=
+                   ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+                       break;
+
+               ihost->power_control.requesters[i] = NULL;
+               ihost->power_control.phys_waiting--;
+               ihost->power_control.phys_granted_power++;
+               sci_phy_consume_power_handler(iphy);
+       }
+
+       /*
+        * It doesn't matter if the power list is empty, we need to start the
+        * timer in case another phy becomes ready.
+        */
+       sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+       ihost->power_control.timer_started = true;
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+                                              struct isci_phy *iphy)
+{
+       BUG_ON(iphy == NULL);
+
+       if (ihost->power_control.phys_granted_power <
+           ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+               ihost->power_control.phys_granted_power++;
+               sci_phy_consume_power_handler(iphy);
+
+               /*
+                * stop and start the power_control timer. When the timer fires,
+                * phys_granted_power will be reset to 0
+                */
+               if (ihost->power_control.timer_started)
+                       sci_del_timer(&ihost->power_control.timer);
+
+               sci_mod_timer(&ihost->power_control.timer,
+                                SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+               ihost->power_control.timer_started = true;
+
+       } else {
+               /* Add the phy in the waiting list */
+               ihost->power_control.requesters[iphy->phy_index] = iphy;
+               ihost->power_control.phys_waiting++;
+       }
+}
+
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+                                              struct isci_phy *iphy)
+{
+       BUG_ON(iphy == NULL);
+
+       if (ihost->power_control.requesters[iphy->phy_index])
+               ihost->power_control.phys_waiting--;
+
+       ihost->power_control.requesters[iphy->phy_index] = NULL;
+}
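+
+/* Illustrative spin-up staggering with the default
+ * max_concurrent_dev_spin_up of 1 (a sketch, assuming three phys request
+ * power at roughly the same time):
+ *
+ *   phy 0: queue_insert()  granted immediately, power_control timer armed
+ *   phy 1: queue_insert()  parked in requesters[1], phys_waiting = 1
+ *   phy 2: queue_insert()  parked in requesters[2], phys_waiting = 2
+ *   timer fires            power_control_timeout() grants phy 1
+ *   timer fires again      grants phy 2 and keeps running in case another
+ *                          phy becomes ready
+ */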
+
+#define AFE_REGISTER_WRITE_DELAY 10
+
+/* Initialize the AFE for this phy index. We need to read the AFE setup from
+ * the OEM parameters
+ */
+static void sci_controller_afe_initialization(struct isci_host *ihost)
+{
+       const struct sci_oem_params *oem = &ihost->oem_parameters;
+       struct pci_dev *pdev = ihost->pdev;
+       u32 afe_status;
+       u32 phy_id;
+
+       /* Clear DFX Status registers */
+       writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       udelay(AFE_REGISTER_WRITE_DELAY);
+
+       if (is_b0(pdev)) {
+               /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
+                * Timer, PM Stagger Timer */
+               writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       }
+
+       /* Configure bias currents to normal */
+       if (is_a2(pdev))
+               writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+       else if (is_b0(pdev) || is_c0(pdev))
+               writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+
+       udelay(AFE_REGISTER_WRITE_DELAY);
+
+       /* Enable PLL */
+       if (is_b0(pdev) || is_c0(pdev))
+               writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
+       else
+               writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+
+       udelay(AFE_REGISTER_WRITE_DELAY);
+
+       /* Wait for the PLL to lock */
+       do {
+               afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       } while ((afe_status & 0x00001000) == 0);
+
+       if (is_a2(pdev)) {
+               /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
+               writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       }
+
+       for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+               const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+
+               if (is_b0(pdev)) {
+                        /* Configure transmitter SSC parameters */
+                       writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               } else if (is_c0(pdev)) {
+                        /* Configure transmitter SSC parameters */
+                       writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /*
+                        * All defaults, except the Receive Word Alignment/Comma Detect
+                        * Enable....(0xe800) */
+                       writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               } else {
+                       /*
+                        * All defaults, except the Receive Word Alignment/Comma Detect
+                        * Enable....(0xe800) */
+                       writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               }
+
+               /*
+                * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+                * & increase TX int & ext bias 20%....(0xe85c) */
+               if (is_a2(pdev))
+                       writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+               else if (is_b0(pdev)) {
+                        /* Power down TX and RX (PWRDNTX and PWRDNRX) */
+                       writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /*
+                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+                        * & increase TX int & ext bias 20%....(0xe85c) */
+                       writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+               } else {
+                       writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /*
+                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+                        * & increase TX int & ext bias 20%....(0xe85c) */
+                       writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+               }
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               if (is_a2(pdev)) {
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               }
+
+               /*
+                * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
+                * RDD=0x0(RX Detect Enabled) ....(0xe800) */
+               writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               /* Leave DFE/FFE on */
+               if (is_a2(pdev))
+                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+               else if (is_b0(pdev)) {
+                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+               } else {
+                       writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+               }
+
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control0,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control1,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control2,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control3,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       }
+
+       /* Transfer control to the PEs */
+       writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       udelay(AFE_REGISTER_WRITE_DELAY);
+}
+
+static void sci_controller_initialize_power_control(struct isci_host *ihost)
+{
+       sci_init_timer(&ihost->power_control.timer, power_control_timeout);
+
+       memset(ihost->power_control.requesters, 0,
+              sizeof(ihost->power_control.requesters));
+
+       ihost->power_control.phys_waiting = 0;
+       ihost->power_control.phys_granted_power = 0;
+}
+
+static enum sci_status sci_controller_initialize(struct isci_host *ihost)
+{
+       struct sci_base_state_machine *sm = &ihost->sm;
+       enum sci_status result = SCI_FAILURE;
+       unsigned long i, state, val;
+
+       if (ihost->sm.current_state_id != SCIC_RESET) {
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller initialize operation requested "
+                        "in invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(sm, SCIC_INITIALIZING);
+
+       sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
+
+       ihost->next_phy_to_start = 0;
+       ihost->phy_startup_timer_pending = false;
+
+       sci_controller_initialize_power_control(ihost);
+
+       /*
+        * There is nothing to do here for B0 since we do not have to
+        * program the AFE registers.
+        * @todo The AFE settings are supposed to be correct for the B0 but
+        *       presently they seem to be wrong. */
+       sci_controller_afe_initialization(ihost);
+
+
+       /* Take the hardware out of reset */
+       writel(0, &ihost->smu_registers->soft_reset_control);
+
+       /*
+        * @todo Provide meaningful error code for hardware failure
+        * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
+       for (i = 100; i >= 1; i--) {
+               u32 status;
+
+               /* Loop until the hardware reports success */
+               udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+               status = readl(&ihost->smu_registers->control_status);
+
+               if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+                       break;
+       }
+       if (i == 0)
+               goto out;
+
+       /*
+        * Determine the actual device capacities that the
+        * hardware will support */
+       val = readl(&ihost->smu_registers->device_context_capacity);
+
+       /* Record the smaller of the two capacity values */
+       ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+       ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+       ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+       /*
+        * Make all PEs that are unassigned match up with the
+        * logical ports
+        */
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct scu_port_task_scheduler_group_registers __iomem
+                       *ptsg = &ihost->scu_registers->peg0.ptsg;
+
+               writel(i, &ptsg->protocol_engine[i]);
+       }
+
+       /* Initialize hardware PCI Relaxed ordering in DMA engines */
+       val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+       val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+       writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+       val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+       val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+       writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+       /*
+        * Initialize the PHYs before the PORTs because the PHY registers
+        * are accessed during the port initialization.
+        */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               result = sci_phy_initialize(&ihost->phys[i],
+                                           &ihost->scu_registers->peg0.pe[i].tl,
+                                           &ihost->scu_registers->peg0.pe[i].ll);
+               if (result != SCI_SUCCESS)
+                       goto out;
+       }
+
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+
+               iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
+               iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
+               iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
+       }
+
+       result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
+
+ out:
+       /* Advance the controller state machine */
+       if (result == SCI_SUCCESS)
+               state = SCIC_INITIALIZED;
+       else
+               state = SCIC_FAILED;
+       sci_change_state(sm, state);
+
+       return result;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+                                              struct sci_user_parameters *sci_parms)
+{
+       u32 state = ihost->sm.current_state_id;
+
+       if (state == SCIC_RESET ||
+           state == SCIC_INITIALIZING ||
+           state == SCIC_INITIALIZED) {
+               u16 index;
+
+               /*
+                * Validate the user parameters.  If they are not legal, then
+                * return a failure.
+                */
+               for (index = 0; index < SCI_MAX_PHYS; index++) {
+                       struct sci_phy_user_params *user_phy;
+
+                       user_phy = &sci_parms->phys[index];
+
+                       if (!((user_phy->max_speed_generation <=
+                                               SCIC_SDS_PARM_MAX_SPEED) &&
+                             (user_phy->max_speed_generation >
+                                               SCIC_SDS_PARM_NO_SPEED)))
+                               return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+                       if ((user_phy->in_connection_align_insertion_frequency < 3) ||
+                           (user_phy->align_insertion_frequency == 0) ||
+                           (user_phy->notify_enable_spin_up_insertion_frequency == 0))
+                               return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+               }
+
+               if ((sci_parms->stp_inactivity_timeout == 0) ||
+                   (sci_parms->ssp_inactivity_timeout == 0) ||
+                   (sci_parms->stp_max_occupancy_timeout == 0) ||
+                   (sci_parms->ssp_max_occupancy_timeout == 0) ||
+                   (sci_parms->no_outbound_task_timeout == 0))
+                       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+               memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+       struct device *dev = &ihost->pdev->dev;
+       dma_addr_t dma;
+       size_t size;
+       int err;
+
+       size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
+       ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+       if (!ihost->completion_queue)
+               return -ENOMEM;
+
+       writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
+       writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
+
+       size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
+       ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
+                                                              GFP_KERNEL);
+       if (!ihost->remote_node_context_table)
+               return -ENOMEM;
+
+       writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
+       writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
+
+       size = ihost->task_context_entries * sizeof(struct scu_task_context);
+       ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+       if (!ihost->task_context_table)
+               return -ENOMEM;
+
+       ihost->task_context_dma = dma;
+       writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
+       writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
+
+       err = sci_unsolicited_frame_control_construct(ihost);
+       if (err)
+               return err;
+
+       /*
+        * Inform the silicon as to the location of the UF headers and
+        * address table.
+        */
+       writel(lower_32_bits(ihost->uf_control.headers.physical_address),
+               &ihost->scu_registers->sdma.uf_header_base_address_lower);
+       writel(upper_32_bits(ihost->uf_control.headers.physical_address),
+               &ihost->scu_registers->sdma.uf_header_base_address_upper);
+
+       writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
+               &ihost->scu_registers->sdma.uf_address_table_lower);
+       writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
+               &ihost->scu_registers->sdma.uf_address_table_upper);
+
+       return 0;
+}
+
+int isci_host_init(struct isci_host *ihost)
+{
+       int err = 0, i;
+       enum sci_status status;
+       struct sci_user_parameters sci_user_params;
+       struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+
+       spin_lock_init(&ihost->state_lock);
+       spin_lock_init(&ihost->scic_lock);
+       init_waitqueue_head(&ihost->eventq);
+
+       isci_host_change_state(ihost, isci_starting);
+
+       status = sci_controller_construct(ihost, scu_base(ihost),
+                                         smu_base(ihost));
+
+       if (status != SCI_SUCCESS) {
+               dev_err(&ihost->pdev->dev,
+                       "%s: sci_controller_construct failed - status = %x\n",
+                       __func__,
+                       status);
+               return -ENODEV;
+       }
+
+       ihost->sas_ha.dev = &ihost->pdev->dev;
+       ihost->sas_ha.lldd_ha = ihost;
+
+       /*
+        * grab initial values stored in the controller object for OEM and USER
+        * parameters
+        */
+       isci_user_parameters_get(&sci_user_params);
+       status = sci_user_parameters_set(ihost, &sci_user_params);
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: sci_user_parameters_set failed\n",
+                        __func__);
+               return -ENODEV;
+       }
+
+       /* grab any OEM parameters specified in orom */
+       if (pci_info->orom) {
+               status = isci_parse_oem_parameters(&ihost->oem_parameters,
+                                                  pci_info->orom,
+                                                  ihost->id);
+               if (status != SCI_SUCCESS) {
+                       dev_warn(&ihost->pdev->dev,
+                                "parsing firmware oem parameters failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       status = sci_oem_parameters_set(ihost);
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                               "%s: sci_oem_parameters_set failed\n",
+                               __func__);
+               return -ENODEV;
+       }
+
+       tasklet_init(&ihost->completion_tasklet,
+                    isci_host_completion_routine, (unsigned long)ihost);
+
+       INIT_LIST_HEAD(&ihost->requests_to_complete);
+       INIT_LIST_HEAD(&ihost->requests_to_errorback);
+
+       spin_lock_irq(&ihost->scic_lock);
+       status = sci_controller_initialize(ihost);
+       spin_unlock_irq(&ihost->scic_lock);
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: sci_controller_initialize failed -"
+                        " status = 0x%x\n",
+                        __func__, status);
+               return -ENODEV;
+       }
+
+       err = sci_controller_mem_init(ihost);
+       if (err)
+               return err;
+
+       for (i = 0; i < SCI_MAX_PORTS; i++)
+               isci_port_init(&ihost->ports[i], ihost, i);
+
+       for (i = 0; i < SCI_MAX_PHYS; i++)
+               isci_phy_init(&ihost->phys[i], ihost, i);
+
+       for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+               struct isci_remote_device *idev = &ihost->devices[i];
+
+               INIT_LIST_HEAD(&idev->reqs_in_process);
+               INIT_LIST_HEAD(&idev->node);
+       }
+
+       for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+               struct isci_request *ireq;
+               dma_addr_t dma;
+
+               ireq = dmam_alloc_coherent(&ihost->pdev->dev,
+                                          sizeof(struct isci_request), &dma,
+                                          GFP_KERNEL);
+               if (!ireq)
+                       return -ENOMEM;
+
+               ireq->tc = &ihost->task_context_table[i];
+               ireq->owning_controller = ihost;
+               spin_lock_init(&ireq->state_lock);
+               ireq->request_daddr = dma;
+               ireq->isci_host = ihost;
+               ihost->reqs[i] = ireq;
+       }
+
+       return 0;
+}
+
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+                           struct isci_phy *iphy)
+{
+       switch (ihost->sm.current_state_id) {
+       case SCIC_STARTING:
+               sci_del_timer(&ihost->phy_timer);
+               ihost->phy_startup_timer_pending = false;
+               ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+                                                 iport, iphy);
+               sci_controller_start_next_phy(ihost);
+               break;
+       case SCIC_READY:
+               ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+                                                 iport, iphy);
+               break;
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: SCIC Controller linkup event from phy %d in "
+                       "unexpected state %d\n", __func__, iphy->phy_index,
+                       ihost->sm.current_state_id);
+       }
+}
+
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+                             struct isci_phy *iphy)
+{
+       switch (ihost->sm.current_state_id) {
+       case SCIC_STARTING:
+       case SCIC_READY:
+               ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
+                                                  iport, iphy);
+               break;
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: SCIC Controller linkdown event from phy %d in "
+                       "unexpected state %d\n",
+                       __func__,
+                       iphy->phy_index,
+                       ihost->sm.current_state_id);
+       }
+}
+
+static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+{
+       u32 index;
+
+       for (index = 0; index < ihost->remote_node_entries; index++) {
+               if ((ihost->device_table[index] != NULL) &&
+                  (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
+                       return true;
+       }
+
+       return false;
+}
+
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+                                         struct isci_remote_device *idev)
+{
+       if (ihost->sm.current_state_id != SCIC_STOPPING) {
+               dev_dbg(&ihost->pdev->dev,
+                       "SCIC Controller 0x%p remote device stopped event "
+                       "from device 0x%p in unexpected state %d\n",
+                       ihost, idev,
+                       ihost->sm.current_state_id);
+               return;
+       }
+
+       if (!sci_controller_has_remote_devices_stopping(ihost))
+               sci_change_state(&ihost->sm, SCIC_STOPPED);
+}
+
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
+{
+       dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+               __func__, ihost->id, request);
+
+       writel(request, &ihost->smu_registers->post_context_port);
+}
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
+{
+       u16 task_index;
+       u16 task_sequence;
+
+       task_index = ISCI_TAG_TCI(io_tag);
+
+       if (task_index < ihost->task_context_entries) {
+               struct isci_request *ireq = ihost->reqs[task_index];
+
+               if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
+                       task_sequence = ISCI_TAG_SEQ(io_tag);
+
+                       if (task_sequence == ihost->io_request_sequence[task_index])
+                               return ireq;
+               }
+       }
+
+       return NULL;
+}
+
+/**
+ * This method allocates a remote node index and reserves the remote node
+ *    context space for use.  This method can fail if there are no more
+ *    remote node indexes available.
+ * @ihost: This is the controller object which contains the set of
+ *    free remote node ids
+ * @idev: This is the device object which is requesting a remote node
+ *    id
+ * @node_id: This is the remote node id that is assigned to the device if one
+ *    is available
+ *
+ * Returns SCI_SUCCESS, or SCI_FAILURE_INSUFFICIENT_RESOURCES if no remote
+ * node index is available.
+ */
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+                                                           struct isci_remote_device *idev,
+                                                           u16 *node_id)
+{
+       u16 node_index;
+       u32 remote_node_count = sci_remote_device_node_count(idev);
+
+       node_index = sci_remote_node_table_allocate_remote_node(
+               &ihost->available_remote_nodes, remote_node_count
+               );
+
+       if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+               ihost->device_table[node_index] = idev;
+
+               *node_id = node_index;
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+                                            struct isci_remote_device *idev,
+                                            u16 node_id)
+{
+       u32 remote_node_count = sci_remote_device_node_count(idev);
+
+       if (ihost->device_table[node_id] == idev) {
+               ihost->device_table[node_id] = NULL;
+
+               sci_remote_node_table_release_remote_node_index(
+                       &ihost->available_remote_nodes, remote_node_count, node_id
+                       );
+       }
+}
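A minimal usage sketch for the allocate/free pair above (hypothetical helper, not part of this patch; it only illustrates the pairing and the failure status, and ignores the locking a real caller would need):

    static enum sci_status example_reserve_rni(struct isci_host *ihost,
                                               struct isci_remote_device *idev)
    {
            enum sci_status status;
            u16 rni;

            /* reserve a remote node index for this device */
            status = sci_controller_allocate_remote_node_context(ihost, idev, &rni);
            if (status != SCI_SUCCESS)
                    return status;  /* SCI_FAILURE_INSUFFICIENT_RESOURCES */

            /* ... a real caller would program the RNC at index rni here ... */

            /* release the index and clear the device_table slot */
            sci_controller_free_remote_node_context(ihost, idev, rni);
            return SCI_SUCCESS;
    }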
+
+void sci_controller_copy_sata_response(void *response_buffer,
+                                      void *frame_header,
+                                      void *frame_buffer)
+{
+       /* XXX type safety? */
+       memcpy(response_buffer, frame_header, sizeof(u32));
+
+       memcpy(response_buffer + sizeof(u32),
+              frame_buffer,
+              sizeof(struct dev_to_host_fis) - sizeof(u32));
+}
+
+void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
+{
+       if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
+               writel(ihost->uf_control.get,
+                       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+       u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
+
+       ihost->tci_pool[tail] = tci;
+       ihost->tci_tail = tail + 1;
+}
+
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+       u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
+       u16 tci = ihost->tci_pool[head];
+
+       ihost->tci_head = head + 1;
+       return tci;
+}
+
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+       return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
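The three helpers above treat tci_pool[] as a power-of-two circular buffer: head and tail are masked to SCI_MAX_IO_REQUESTS (a power of two, enforced by check_sizes() in isci.h) on each access, and CIRC_SPACE() from <linux/circ_buf.h> always keeps one slot free so a full ring can be told apart from an empty one. For reference, the kernel macros expand as follows:

    /* from <linux/circ_buf.h>:
     *   CIRC_CNT(head, tail, size)   == ((head) - (tail)) & ((size) - 1)
     *   CIRC_SPACE(head, tail, size) == CIRC_CNT((tail), ((head) + 1), (size))
     * so isci_tci_space() evaluates to
     *   (ihost->tci_tail - ihost->tci_head - 1) & (SCI_MAX_IO_REQUESTS - 1)
     */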
+
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+       if (isci_tci_space(ihost)) {
+               u16 tci = isci_tci_alloc(ihost);
+               u8 seq = ihost->io_request_sequence[tci];
+
+               return ISCI_TAG(seq, tci);
+       }
+
+       return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+       u16 tci = ISCI_TAG_TCI(io_tag);
+       u16 seq = ISCI_TAG_SEQ(io_tag);
+
+       /* prevent tail from passing head */
+       if (isci_tci_active(ihost) == 0)
+               return SCI_FAILURE_INVALID_IO_TAG;
+
+       if (seq == ihost->io_request_sequence[tci]) {
+               ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+               isci_tci_free(ihost, tci);
+
+               return SCI_SUCCESS;
+       }
+       return SCI_FAILURE_INVALID_IO_TAG;
+}
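A hedged sketch of how the tag interface above might be driven (hypothetical caller, not from this patch; it assumes the caller serializes tag users, here with ihost->scic_lock):

    static int example_with_tag(struct isci_host *ihost)
    {
            u16 tag;

            spin_lock_irq(&ihost->scic_lock);       /* assumed serialization */
            tag = isci_alloc_tag(ihost);
            spin_unlock_irq(&ihost->scic_lock);

            if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
                    return -EAGAIN;                 /* tci pool exhausted */

            /* ... ISCI_TAG_TCI(tag) indexes the task context table ... */

            spin_lock_irq(&ihost->scic_lock);
            isci_free_tag(ihost, tag);
            spin_unlock_irq(&ihost->scic_lock);
            return 0;
    }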
+
+enum sci_status sci_controller_start_io(struct isci_host *ihost,
+                                       struct isci_remote_device *idev,
+                                       struct isci_request *ireq)
+{
+       enum sci_status status;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_remote_device_start_io(ihost, idev, ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       set_bit(IREQ_ACTIVE, &ireq->flags);
+       sci_controller_post_request(ihost, ireq->post_context);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
+                                                struct isci_remote_device *idev,
+                                                struct isci_request *ireq)
+{
+       /* terminate an ongoing (i.e. started) core IO request.  This does not
+        * abort the IO request at the target, but rather removes the IO
+        * request from the host controller.
+        */
+       enum sci_status status;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev,
+                        "invalid state to terminate request\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_io_request_terminate(ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       /*
+        * Use the original post context command and OR in the POST_TC_ABORT
+        * request sub-type.
+        */
+       sci_controller_post_request(ihost,
+                                   ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_complete_io() - This method will perform core specific
+ *    completion operations for an IO request.  After this method is invoked,
+ *    the user should consider the IO request as invalid until it is properly
+ *    reused (i.e. re-constructed).
+ * @ihost: The handle to the controller object for which to complete the
+ *    IO request.
+ * @idev: The handle to the remote device object for which to complete
+ *    the IO request.
+ * @ireq: the handle to the io request object to complete.
+ */
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+                                          struct isci_remote_device *idev,
+                                          struct isci_request *ireq)
+{
+       enum sci_status status;
+       u16 index;
+
+       switch (ihost->sm.current_state_id) {
+       case SCIC_STOPPING:
+               /* XXX: Implement this function */
+               return SCI_FAILURE;
+       case SCIC_READY:
+               status = sci_remote_device_complete_io(ihost, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               index = ISCI_TAG_TCI(ireq->io_tag);
+               clear_bit(IREQ_ACTIVE, &ireq->flags);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+}
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
+{
+       struct isci_host *ihost = ireq->owning_controller;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       set_bit(IREQ_ACTIVE, &ireq->flags);
+       sci_controller_post_request(ihost, ireq->post_context);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_start_task() - This method is called by the SCIC user to
+ *    send/start a framework task management request.
+ * @ihost: the handle to the controller object for which to start the task
+ *    management request.
+ * @idev: the handle to the remote device object for which to start
+ *    the task management request.
+ * @ireq: the handle to the task request object to start.
+ */
+enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+                                              struct isci_remote_device *idev,
+                                              struct isci_request *ireq)
+{
+       enum sci_status status;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC Controller starting task from invalid "
+                        "state\n",
+                        __func__);
+               return SCI_TASK_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_remote_device_start_task(ihost, idev, ireq);
+       switch (status) {
+       case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
+               set_bit(IREQ_ACTIVE, &ireq->flags);
+
+               /*
+                * We will let the framework know this task request started
+                * successfully, although the core is still working on starting the
+                * request (it will post the TC when the RNC is resumed).
+                */
+               return SCI_SUCCESS;
+       case SCI_SUCCESS:
+               set_bit(IREQ_ACTIVE, &ireq->flags);
+               sci_controller_post_request(ihost, ireq->post_context);
+               break;
+       default:
+               break;
+       }
+
+       return status;
+}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644 (file)
index 0000000..062101a
--- /dev/null
@@ -0,0 +1,542 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _SCI_HOST_H_
+#define _SCI_HOST_H_
+
+#include "remote_device.h"
+#include "phy.h"
+#include "isci.h"
+#include "remote_node_table.h"
+#include "registers.h"
+#include "unsolicited_frame_control.h"
+#include "probe_roms.h"
+
+struct isci_request;
+struct scu_task_context;
+
+/**
+ * struct sci_power_control -
+ *
+ * This structure defines the fields for managing power control for direct
+ * attached disk devices.
+ */
+struct sci_power_control {
+       /**
+        * This field is set when the power control timer is running and cleared when
+        * it is not.
+        */
+       bool timer_started;
+
+       /**
+        * Timer to control when the direct attached disks can consume power.
+        */
+       struct sci_timer timer;
+
+       /**
+        * This field is used to keep track of how many phys are put into the
+        * requesters field.
+        */
+       u8 phys_waiting;
+
+       /**
+        * This field is used to keep track of how many phys have been granted
+        * permission to consume power.
+        */
+       u8 phys_granted_power;
+
+       /**
+        * This field is an array of phys that we are waiting on. The phys are
+        * directly mapped into requesters via struct isci_phy.phy_index
+        */
+       struct isci_phy *requesters[SCI_MAX_PHYS];
+
+};
+
+struct sci_port_configuration_agent;
+typedef void (*port_config_fn)(struct isci_host *,
+                              struct sci_port_configuration_agent *,
+                              struct isci_port *, struct isci_phy *);
+
+struct sci_port_configuration_agent {
+       u16 phy_configured_mask;
+       u16 phy_ready_mask;
+       struct {
+               u8 min_index;
+               u8 max_index;
+       } phy_valid_port_range[SCI_MAX_PHYS];
+       bool timer_pending;
+       port_config_fn link_up_handler;
+       port_config_fn link_down_handler;
+       struct sci_timer        timer;
+};
+
+/**
+ * isci_host - primary host/controller object
+ * @timer: timeout start/stop operations
+ * @device_table: rni (hw remote node index) to remote device lookup table
+ * @available_remote_nodes: rni allocator
+ * @power_control: manage device spin up
+ * @io_request_sequence: generation number for tci's (task contexts)
+ * @task_context_table: hw task context table
+ * @remote_node_context_table: hw remote node context table
+ * @completion_queue: hw-producer driver-consumer communication ring
+ * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
+ * @logical_port_entries: min({driver|silicon}-supported-port-count)
+ * @remote_node_entries: min({driver|silicon}-supported-node-count)
+ * @task_context_entries: min({driver|silicon}-supported-task-count)
+ * @phy_timer: phy startup timer
+ * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
+ *                   the phy index is set so further notifications are not
+ *                   made.  Once the phy reports link up and is made part of a
+ *                   port then this bit is cleared.
+ *
+ */
+struct isci_host {
+       struct sci_base_state_machine sm;
+       /* XXX can we time this externally */
+       struct sci_timer timer;
+       /* XXX drop reference module params directly */
+       struct sci_user_parameters user_parameters;
+       /* XXX no need to be a union */
+       struct sci_oem_params oem_parameters;
+       struct sci_port_configuration_agent port_agent;
+       struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
+       struct sci_remote_node_table available_remote_nodes;
+       struct sci_power_control power_control;
+       u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
+       struct scu_task_context *task_context_table;
+       dma_addr_t task_context_dma;
+       union scu_remote_node_context *remote_node_context_table;
+       u32 *completion_queue;
+       u32 completion_queue_get;
+       u32 logical_port_entries;
+       u32 remote_node_entries;
+       u32 task_context_entries;
+       struct sci_unsolicited_frame_control uf_control;
+
+       /* phy startup */
+       struct sci_timer phy_timer;
+       /* XXX kill */
+       bool phy_startup_timer_pending;
+       u32 next_phy_to_start;
+       /* XXX convert to unsigned long and use bitops */
+       u8 invalid_phy_mask;
+
+       /* TODO attempt dynamic interrupt coalescing scheme */
+       u16 interrupt_coalesce_number;
+       u32 interrupt_coalesce_timeout;
+       struct smu_registers __iomem *smu_registers;
+       struct scu_registers __iomem *scu_registers;
+
+       u16 tci_head;
+       u16 tci_tail;
+       u16 tci_pool[SCI_MAX_IO_REQUESTS];
+
+       int id; /* unique within a given pci device */
+       struct isci_phy phys[SCI_MAX_PHYS];
+       struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+       struct sas_ha_struct sas_ha;
+
+       spinlock_t state_lock;
+       struct pci_dev *pdev;
+       enum isci_status status;
+       #define IHOST_START_PENDING 0
+       #define IHOST_STOP_PENDING 1
+       unsigned long flags;
+       wait_queue_head_t eventq;
+       struct Scsi_Host *shost;
+       struct tasklet_struct completion_tasklet;
+       struct list_head requests_to_complete;
+       struct list_head requests_to_errorback;
+       spinlock_t scic_lock;
+       struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
+       struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
+};
+
+/**
+ * enum sci_controller_states - This enumeration depicts all the states
+ *    for the common controller state machine.
+ */
+enum sci_controller_states {
+       /**
+        * Simply the initial state for the base controller state machine.
+        */
+       SCIC_INITIAL = 0,
+
+       /**
+        * This state indicates that the controller is reset.  The memory for
+        * the controller is in its initial state, but the controller requires
+        * initialization.
+        * This state is entered from the INITIAL state.
+        * This state is entered from the RESETTING state.
+        */
+       SCIC_RESET,
+
+       /**
+        * This state is typically an action state that indicates the controller
+        * is in the process of initialization.  In this state no new IO operations
+        * are permitted.
+        * This state is entered from the RESET state.
+        */
+       SCIC_INITIALIZING,
+
+       /**
+        * This state indicates that the controller has been successfully
+        * initialized.  In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZING state.
+        */
+       SCIC_INITIALIZED,
+
+       /**
+        * This state indicates that the controller is in the process of becoming
+        * ready (i.e. starting).  In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZED state.
+        */
+       SCIC_STARTING,
+
+       /**
+        * This state indicates the controller is now ready.  Thus, the user
+        * is able to perform IO operations on the controller.
+        * This state is entered from the STARTING state.
+        */
+       SCIC_READY,
+
+       /**
+        * This state is typically an action state that indicates the controller
+        * is in the process of resetting.  Thus, the user is unable to perform
+        * IO operations on the controller.  A reset is considered destructive in
+        * most cases.
+        * This state is entered from the READY state.
+        * This state is entered from the FAILED state.
+        * This state is entered from the STOPPED state.
+        */
+       SCIC_RESETTING,
+
+       /**
+        * This state indicates that the controller is in the process of stopping.
+        * In this state no new IO operations are permitted, but existing IO
+        * operations are allowed to complete.
+        * This state is entered from the READY state.
+        */
+       SCIC_STOPPING,
+
+       /**
+        * This state indicates that the controller has successfully been stopped.
+        * In this state no new IO operations are permitted.
+        * This state is entered from the STOPPING state.
+        */
+       SCIC_STOPPED,
+
+       /**
+        * This state indicates that the controller could not successfully be
+        * initialized.  In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZING state.
+        * This state is entered from the STARTING state.
+        * This state is entered from the STOPPING state.
+        * This state is entered from the RESETTING state.
+        */
+       SCIC_FAILED,
+};
+
+/**
+ * struct isci_pci_info - This class represents the pci function containing the
+ *    controllers. Depending on PCI SKU, there could be up to 2 controllers in
+ *    the PCI function.
+ */
+#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
+
+struct isci_pci_info {
+       struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
+       struct isci_host *hosts[SCI_MAX_CONTROLLERS];
+       struct isci_orom *orom;
+};
+
+static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
+{
+       return pci_get_drvdata(pdev);
+}
+
+#define for_each_isci_host(id, ihost, pdev) \
+       for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
+            id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
+            ihost = to_pci_info(pdev)->hosts[++id])
+
+static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
+{
+       return isci_host->status;
+}
+
+static inline void isci_host_change_state(struct isci_host *isci_host,
+                                         enum isci_status status)
+{
+       unsigned long flags;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_host = %p, state = 0x%x",
+               __func__,
+               isci_host,
+               status);
+       spin_lock_irqsave(&isci_host->state_lock, flags);
+       isci_host->status = status;
+       spin_unlock_irqrestore(&isci_host->state_lock, flags);
+
+}
+
+static inline void wait_for_start(struct isci_host *ihost)
+{
+       wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_stop(struct isci_host *ihost)
+{
+       wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
+}
+
+static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
+}
+
+static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
+{
+       return dev->port->ha->lldd_ha;
+}
+
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
+
+/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
+#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
+
+/* these are returned by the hardware, so sanitize them */
+#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
+#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
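A quick worked example of the packing above (arbitrary values, with SCI_MAX_SEQ = 16 and SCI_MAX_IO_REQUESTS = 256 from isci.h):

    u16 tag = ISCI_TAG(3, 42);      /* (3 << 12) | 42      == 0x302a */
    u8  seq = ISCI_TAG_SEQ(tag);    /* (0x302a >> 12) & 15 == 3      */
    u16 tci = ISCI_TAG_TCI(tag);    /* 0x302a & 255        == 42     */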
+
+/* expander attached sata devices require 3 rnc slots */
+static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
+{
+       struct domain_device *dev = idev->domain_dev;
+
+       if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+           !idev->is_direct_attached)
+               return SCU_STP_REMOTE_NODE_COUNT;
+       return SCU_SSP_REMOTE_NODE_COUNT;
+}
+
+/**
+ * sci_controller_clear_invalid_phy() -
+ *
+ * This macro will clear the bit in the invalid phy mask for this controller
+ * object.  This is used to control messages reported for invalid link up
+ * notifications.
+ */
+#define sci_controller_clear_invalid_phy(controller, phy) \
+       ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
+
+static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+
+       if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
+               return NULL;
+
+       return &iphy->isci_port->isci_host->pdev->dev;
+}
+
+static inline struct device *sciport_to_dev(struct isci_port *iport)
+{
+
+       if (!iport || !iport->isci_host)
+               return NULL;
+
+       return &iport->isci_host->pdev->dev;
+}
+
+static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
+{
+       if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
+               return NULL;
+
+       return &idev->isci_port->isci_host->pdev->dev;
+}
+
+static inline bool is_a2(struct pci_dev *pdev)
+{
+       if (pdev->revision < 4)
+               return true;
+       return false;
+}
+
+static inline bool is_b0(struct pci_dev *pdev)
+{
+       if (pdev->revision == 4)
+               return true;
+       return false;
+}
+
+static inline bool is_c0(struct pci_dev *pdev)
+{
+       if (pdev->revision >= 5)
+               return true;
+       return false;
+}
+
+void sci_controller_post_request(struct isci_host *ihost,
+                                     u32 request);
+void sci_controller_release_frame(struct isci_host *ihost,
+                                      u32 frame_index);
+void sci_controller_copy_sata_response(void *response_buffer,
+                                           void *frame_header,
+                                           void *frame_buffer);
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+                                                                struct isci_remote_device *idev,
+                                                                u16 *node_id);
+void sci_controller_free_remote_node_context(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       u16 node_id);
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost,
+                                            u16 io_tag);
+
+void sci_controller_power_control_queue_insert(
+       struct isci_host *ihost,
+       struct isci_phy *iphy);
+
+void sci_controller_power_control_queue_remove(
+       struct isci_host *ihost,
+       struct isci_phy *iphy);
+
+void sci_controller_link_up(
+       struct isci_host *ihost,
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+void sci_controller_link_down(
+       struct isci_host *ihost,
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+void sci_controller_remote_device_stopped(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev);
+
+void sci_controller_copy_task_context(
+       struct isci_host *ihost,
+       struct isci_request *ireq);
+
+void sci_controller_register_setup(struct isci_host *ihost);
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq);
+int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
+void isci_host_scan_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);
+
+int isci_host_init(struct isci_host *);
+
+void isci_host_init_controller_names(
+       struct isci_host *isci_host,
+       unsigned int controller_idx);
+
+void isci_host_deinit(
+       struct isci_host *);
+
+void isci_host_port_link_up(
+       struct isci_host *,
+       struct isci_port *,
+       struct isci_phy *);
+int isci_host_dev_found(struct domain_device *);
+
+void isci_host_remote_device_start_complete(
+       struct isci_host *,
+       struct isci_remote_device *,
+       enum sci_status);
+
+void sci_controller_disable_interrupts(
+       struct isci_host *ihost);
+
+enum sci_status sci_controller_start_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_task_status sci_controller_start_task(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_controller_terminate_request(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_controller_complete_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+void sci_port_configuration_agent_construct(
+       struct sci_port_configuration_agent *port_agent);
+
+enum sci_status sci_port_configuration_agent_initialize(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent);
+#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644 (file)
index 0000000..61e0d09
--- /dev/null
@@ -0,0 +1,565 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/efi.h>
+#include <asm/string.h>
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static struct scsi_transport_template *isci_transport_template;
+
+static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
+       { PCI_VDEVICE(INTEL, 0x1D61),},
+       { PCI_VDEVICE(INTEL, 0x1D63),},
+       { PCI_VDEVICE(INTEL, 0x1D65),},
+       { PCI_VDEVICE(INTEL, 0x1D67),},
+       { PCI_VDEVICE(INTEL, 0x1D69),},
+       { PCI_VDEVICE(INTEL, 0x1D6B),},
+       { PCI_VDEVICE(INTEL, 0x1D60),},
+       { PCI_VDEVICE(INTEL, 0x1D62),},
+       { PCI_VDEVICE(INTEL, 0x1D64),},
+       { PCI_VDEVICE(INTEL, 0x1D66),},
+       { PCI_VDEVICE(INTEL, 0x1D68),},
+       { PCI_VDEVICE(INTEL, 0x1D6A),},
+       {}
+};
+
+MODULE_DEVICE_TABLE(pci, isci_id_table);
+
+/* linux isci specific settings */
+
+unsigned char no_outbound_task_to = 20;
+module_param(no_outbound_task_to, byte, 0);
+MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
+
+u16 ssp_max_occ_to = 20;
+module_param(ssp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
+
+u16 stp_max_occ_to = 5;
+module_param(stp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
+
+u16 ssp_inactive_to = 5;
+module_param(ssp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
+
+u16 stp_inactive_to = 5;
+module_param(stp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
+
+unsigned char phy_gen = 3;
+module_param(phy_gen, byte, 0);
+MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
+
+unsigned char max_concurr_spinup = 1;
+module_param(max_concurr_spinup, byte, 0);
+MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+
+static struct scsi_host_template isci_sht = {
+
+       .module                         = THIS_MODULE,
+       .name                           = DRV_NAME,
+       .proc_name                      = DRV_NAME,
+       .queuecommand                   = sas_queuecommand,
+       .target_alloc                   = sas_target_alloc,
+       .slave_configure                = sas_slave_configure,
+       .slave_destroy                  = sas_slave_destroy,
+       .scan_finished                  = isci_host_scan_finished,
+       .scan_start                     = isci_host_scan_start,
+       .change_queue_depth             = sas_change_queue_depth,
+       .change_queue_type              = sas_change_queue_type,
+       .bios_param                     = sas_bios_param,
+       .can_queue                      = ISCI_CAN_QUEUE_VAL,
+       .cmd_per_lun                    = 1,
+       .this_id                        = -1,
+       .sg_tablesize                   = SG_ALL,
+       .max_sectors                    = SCSI_DEFAULT_MAX_SECTORS,
+       .use_clustering                 = ENABLE_CLUSTERING,
+       .eh_device_reset_handler        = sas_eh_device_reset_handler,
+       .eh_bus_reset_handler           = isci_bus_reset_handler,
+       .slave_alloc                    = sas_slave_alloc,
+       .target_destroy                 = sas_target_destroy,
+       .ioctl                          = sas_ioctl,
+};
+
+static struct sas_domain_function_template isci_transport_ops  = {
+
+       /* The class calls these to notify the LLDD of an event. */
+       .lldd_port_formed       = isci_port_formed,
+       .lldd_port_deformed     = isci_port_deformed,
+
+       /* The class calls these when a device is found or gone. */
+       .lldd_dev_found         = isci_remote_device_found,
+       .lldd_dev_gone          = isci_remote_device_gone,
+
+       .lldd_execute_task      = isci_task_execute_task,
+       /* Task Management Functions. Must be called from process context. */
+       .lldd_abort_task        = isci_task_abort_task,
+       .lldd_abort_task_set    = isci_task_abort_task_set,
+       .lldd_clear_aca         = isci_task_clear_aca,
+       .lldd_clear_task_set    = isci_task_clear_task_set,
+       .lldd_I_T_nexus_reset   = isci_task_I_T_nexus_reset,
+       .lldd_lu_reset          = isci_task_lu_reset,
+       .lldd_query_task        = isci_task_query_task,
+
+       /* Port and Adapter management */
+       .lldd_clear_nexus_port  = isci_task_clear_nexus_port,
+       .lldd_clear_nexus_ha    = isci_task_clear_nexus_ha,
+
+       /* Phy management */
+       .lldd_control_phy       = isci_phy_control,
+};
+
+
+/******************************************************************************
+* P R O T E C T E D  M E T H O D S
+******************************************************************************/
+
+
+
+/**
+ * isci_register_sas_ha() - This method initializes various lldd
+ *    specific members of the sas_ha struct and calls the libsas
+ *    sas_register_ha() function.
+ * @isci_host: This parameter specifies the lldd specific wrapper for the
+ *    libsas sas_ha struct.
+ *
+ * This method returns an error code indicating success or failure. The caller
+ * should check for a possible memory allocation error return; otherwise, a
+ * zero return value indicates success.
+ */
+static int isci_register_sas_ha(struct isci_host *isci_host)
+{
+       int i;
+       struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
+       struct asd_sas_phy **sas_phys;
+       struct asd_sas_port **sas_ports;
+
+       sas_phys = devm_kzalloc(&isci_host->pdev->dev,
+                               SCI_MAX_PHYS * sizeof(void *),
+                               GFP_KERNEL);
+       if (!sas_phys)
+               return -ENOMEM;
+
+       sas_ports = devm_kzalloc(&isci_host->pdev->dev,
+                                SCI_MAX_PORTS * sizeof(void *),
+                                GFP_KERNEL);
+       if (!sas_ports)
+               return -ENOMEM;
+
+       /*----------------- Libsas Initialization Stuff ----------------------
+        * Set various fields in the sas_ha struct:
+        */
+
+       sas_ha->sas_ha_name = DRV_NAME;
+       sas_ha->lldd_module = THIS_MODULE;
+       sas_ha->sas_addr    = &isci_host->phys[0].sas_addr[0];
+
+       /* set the array of phy and port structs.  */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               sas_phys[i] = &isci_host->phys[i].sas_phy;
+               sas_ports[i] = &isci_host->ports[i].sas_port;
+       }
+
+       sas_ha->sas_phy  = sas_phys;
+       sas_ha->sas_port = sas_ports;
+       sas_ha->num_phys = SCI_MAX_PHYS;
+
+       sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
+       sas_ha->lldd_max_execute_num = 1;
+       sas_ha->strict_wide_ports = 1;
+
+       sas_register_ha(sas_ha);
+
+       return 0;
+}
+
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+       struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+static void isci_unregister(struct isci_host *isci_host)
+{
+       struct Scsi_Host *shost;
+
+       if (!isci_host)
+               return;
+
+       shost = isci_host->shost;
+       device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
+
+       sas_unregister_ha(&isci_host->sas_ha);
+
+       sas_remove_host(isci_host->shost);
+       scsi_remove_host(isci_host->shost);
+       scsi_host_put(isci_host->shost);
+}
+
+static int __devinit isci_pci_init(struct pci_dev *pdev)
+{
+       int err, bar_num, bar_mask = 0;
+       void __iomem * const *iomap;
+
+       err = pcim_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "failed enable PCI device %s!\n",
+                       pci_name(pdev));
+               return err;
+       }
+
+       for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
+               bar_mask |= 1 << (bar_num * 2);
+
+       err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
+       if (err)
+               return err;
+
+       iomap = pcim_iomap_table(pdev);
+       if (!iomap)
+               return -ENOMEM;
+
+       pci_set_master(pdev);
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err) {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err)
+                       return err;
+       }
+
+       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err) {
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int num_controllers(struct pci_dev *pdev)
+{
+       /* BAR size alone can tell us whether we are running with a dual
+        * controller part; there is no need to trust revision ids that might
+        * be under broken firmware control
+        */
+       resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
+       resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
+
+       if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
+           smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
+               return SCI_MAX_CONTROLLERS;
+       else
+               return 1;
+}
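A worked check of the comparison above, using the constants from isci.h (SCI_SMU_BAR_SIZE = 16 KiB, SCI_SCU_BAR_SIZE = 4 MiB, SCI_MAX_CONTROLLERS = 2); the doubled BAR index mirrors the bar_mask construction in isci_pci_init(), where each BAR apparently occupies two register slots, as 64-bit BARs do:

    /* dual-controller part:  SMU BAR >= 2 * 16 KiB = 32 KiB
     *                        SCU BAR >= 2 *  4 MiB =  8 MiB
     * anything smaller is treated as a single-controller part
     */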
+
+static int isci_setup_interrupts(struct pci_dev *pdev)
+{
+       int err, i, num_msix;
+       struct isci_host *ihost;
+       struct isci_pci_info *pci_info = to_pci_info(pdev);
+
+       /*
+        *  Determine the number of vectors associated with this
+        *  PCI function.
+        */
+       num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
+
+       for (i = 0; i < num_msix; i++)
+               pci_info->msix_entries[i].entry = i;
+
+       err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
+       if (err)
+               goto intx;
+
+       for (i = 0; i < num_msix; i++) {
+               int id = i / SCI_NUM_MSI_X_INT;
+               struct msix_entry *msix = &pci_info->msix_entries[i];
+               irq_handler_t isr;
+
+               ihost = pci_info->hosts[id];
+               /* odd numbered vectors are error interrupts */
+               if (i & 1)
+                       isr = isci_error_isr;
+               else
+                       isr = isci_msix_isr;
+
+               err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
+                                      DRV_NAME"-msix", ihost);
+               if (!err)
+                       continue;
+
+               dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
+               while (i--) {
+                       id = i / SCI_NUM_MSI_X_INT;
+                       ihost = pci_info->hosts[id];
+                       msix = &pci_info->msix_entries[i];
+                       devm_free_irq(&pdev->dev, msix->vector, ihost);
+               }
+               pci_disable_msix(pdev);
+               goto intx;
+       }
+       return 0;
+
+ intx:
+       for_each_isci_host(i, ihost, pdev) {
+               err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
+                                      IRQF_SHARED, DRV_NAME"-intx", ihost);
+               if (err)
+                       break;
+       }
+       return err;
+}
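Reading the loop above with SCI_NUM_MSI_X_INT = 2, the MSI-X vector layout on a dual-controller part works out as follows (an interpretation of the code, not documentation from the patch):

    /* vector -> (host, handler), from id = i / SCI_NUM_MSI_X_INT and i & 1:
     *   vector 0 -> host 0, isci_msix_isr   (normal path)
     *   vector 1 -> host 0, isci_error_isr  (error interrupts)
     *   vector 2 -> host 1, isci_msix_isr
     *   vector 3 -> host 1, isci_error_isr
     */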
+
+static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
+{
+       struct isci_host *isci_host;
+       struct Scsi_Host *shost;
+       int err;
+
+       isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
+       if (!isci_host)
+               return NULL;
+
+       isci_host->pdev = pdev;
+       isci_host->id = id;
+
+       shost = scsi_host_alloc(&isci_sht, sizeof(void *));
+       if (!shost)
+               return NULL;
+       isci_host->shost = shost;
+
+       err = isci_host_init(isci_host);
+       if (err)
+               goto err_shost;
+
+       SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
+       isci_host->sas_ha.core.shost = shost;
+       shost->transportt = isci_transport_template;
+
+       shost->max_id = ~0;
+       shost->max_lun = ~0;
+       shost->max_cmd_len = MAX_COMMAND_SIZE;
+
+       err = scsi_add_host(shost, &pdev->dev);
+       if (err)
+               goto err_shost;
+
+       err = isci_register_sas_ha(isci_host);
+       if (err)
+               goto err_shost_remove;
+
+       err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
+       if (err)
+               goto err_unregister_ha;
+
+       return isci_host;
+
+ err_unregister_ha:
+       sas_unregister_ha(&(isci_host->sas_ha));
+ err_shost_remove:
+       scsi_remove_host(shost);
+ err_shost:
+       scsi_host_put(shost);
+
+       return NULL;
+}
+
+static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct isci_pci_info *pci_info;
+       int err, i;
+       struct isci_host *isci_host;
+       const struct firmware *fw = NULL;
+       struct isci_orom *orom = NULL;
+       char *source = "(platform)";
+
+       dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
+                pdev->revision);
+
+       pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
+               return -ENOMEM;
+       pci_set_drvdata(pdev, pci_info);
+
+       if (efi_enabled)
+               orom = isci_get_efi_var(pdev);
+
+       if (!orom)
+               orom = isci_request_oprom(pdev);
+
+       for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+               if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+                       dev_warn(&pdev->dev,
+                                "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+                       devm_kfree(&pdev->dev, orom);
+                       orom = NULL;
+                       break;
+               }
+       }
+
+       if (!orom) {
+               source = "(firmware)";
+               orom = isci_request_firmware(pdev, fw);
+               if (!orom) {
+                       /* TODO convert this to WARN_TAINT_ONCE once the
+                        * orom/efi parameter support is widely available
+                        */
+                       dev_warn(&pdev->dev,
+                                "Loading user firmware failed, using default "
+                                "values\n");
+                       dev_warn(&pdev->dev,
+                                "Default OEM configuration being used: 4 "
+                                "narrow ports, and default SAS Addresses\n");
+               }
+       }
+
+       if (orom)
+               dev_info(&pdev->dev,
+                        "OEM SAS parameters (version: %u.%u) loaded %s\n",
+                        (orom->hdr.version & 0xf0) >> 4,
+                        (orom->hdr.version & 0xf), source);
+
+       pci_info->orom = orom;
+
+       err = isci_pci_init(pdev);
+       if (err)
+               return err;
+
+       for (i = 0; i < num_controllers(pdev); i++) {
+               struct isci_host *h = isci_host_alloc(pdev, i);
+
+               if (!h) {
+                       err = -ENOMEM;
+                       goto err_host_alloc;
+               }
+               pci_info->hosts[i] = h;
+       }
+
+       err = isci_setup_interrupts(pdev);
+       if (err)
+               goto err_host_alloc;
+
+       for_each_isci_host(i, isci_host, pdev)
+               scsi_scan_host(isci_host->shost);
+
+       return 0;
+
+ err_host_alloc:
+       for_each_isci_host(i, isci_host, pdev)
+               isci_unregister(isci_host);
+       return err;
+}
+
+static void __devexit isci_pci_remove(struct pci_dev *pdev)
+{
+       struct isci_host *ihost;
+       int i;
+
+       for_each_isci_host(i, ihost, pdev) {
+               isci_unregister(ihost);
+               isci_host_deinit(ihost);
+               sci_controller_disable_interrupts(ihost);
+       }
+}
+
+static struct pci_driver isci_pci_driver = {
+       .name           = DRV_NAME,
+       .id_table       = isci_id_table,
+       .probe          = isci_pci_probe,
+       .remove         = __devexit_p(isci_pci_remove),
+};
+
+static __init int isci_init(void)
+{
+       int err;
+
+       pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+
+       isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
+       if (!isci_transport_template)
+               return -ENOMEM;
+
+       err = pci_register_driver(&isci_pci_driver);
+       if (err)
+               sas_release_transport(isci_transport_template);
+
+       return err;
+}
+
+static __exit void isci_exit(void)
+{
+       pci_unregister_driver(&isci_pci_driver);
+       sas_release_transport(isci_transport_template);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(ISCI_FW_NAME);
+module_init(isci_init);
+module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644 (file)
index 0000000..d1de633
--- /dev/null
@@ -0,0 +1,538 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ISCI_H__
+#define __ISCI_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#define DRV_NAME "isci"
+#define SCI_PCI_BAR_COUNT 2
+#define SCI_NUM_MSI_X_INT 2
+#define SCI_SMU_BAR       0
+#define SCI_SMU_BAR_SIZE  (16*1024)
+#define SCI_SCU_BAR       1
+#define SCI_SCU_BAR_SIZE  (4*1024*1024)
+#define SCI_IO_SPACE_BAR0 2
+#define SCI_IO_SPACE_BAR1 3
+#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
+#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
+
+#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
+
+#define SCI_MAX_PHYS  (4UL)
+#define SCI_MAX_PORTS SCI_MAX_PHYS
+#define SCI_MAX_SMP_PHYS  (384) /* not silicon constrained */
+#define SCI_MAX_REMOTE_DEVICES (256UL)
+#define SCI_MAX_IO_REQUESTS (256UL)
+#define SCI_MAX_SEQ (16)
+#define SCI_MAX_MSIX_MESSAGES  (2)
+#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
+#define SCI_MAX_CONTROLLERS 2
+#define SCI_MAX_DOMAINS  SCI_MAX_PORTS
+
+#define SCU_MAX_CRITICAL_NOTIFICATIONS    (384)
+#define SCU_MAX_EVENTS_SHIFT             (7)
+#define SCU_MAX_EVENTS                    (1 << SCU_MAX_EVENTS_SHIFT)
+#define SCU_MAX_UNSOLICITED_FRAMES        (128)
+#define SCU_MAX_COMPLETION_QUEUE_SCRATCH  (128)
+#define SCU_MAX_COMPLETION_QUEUE_ENTRIES  (SCU_MAX_CRITICAL_NOTIFICATIONS \
+                                          + SCU_MAX_EVENTS \
+                                          + SCU_MAX_UNSOLICITED_FRAMES \
+                                          + SCI_MAX_IO_REQUESTS \
+                                          + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
+#define SCU_MAX_COMPLETION_QUEUE_SHIFT   (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
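+
+/*
+ * With the sizes above, the completion queue works out to
+ * 384 + 128 + 128 + 256 + 128 = 1024 entries (a power of two), so
+ * SCU_MAX_COMPLETION_QUEUE_SHIFT evaluates to ilog2(1024) == 10 and the
+ * power-of-two check in check_sizes() below holds.
+ */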
+
+#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE   (1024)
+#define SCU_INVALID_FRAME_INDEX             (0xFFFF)
+
+#define SCU_IO_REQUEST_MAX_SGE_SIZE         (0x00FFFFFF)
+#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH  (0x00FFFFFF)
+
+static inline void check_sizes(void)
+{
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
+       BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
+       BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
+}
+
+/**
+ * enum sci_status - This is the general return status enumeration for non-IO,
+ *    non-task management related SCI interface methods.
+ *
+ *
+ */
+enum sci_status {
+       /**
+        * This member indicates successful completion.
+        */
+       SCI_SUCCESS = 0,
+
+       /**
+        * This value indicates that the calling method completed successfully,
+        * but that the IO may have completed before having its start method
+        * invoked.  This occurs during SAT translation for requests that do
+        * not require an IO to the target or for any other requests that may
+        * be completed without having to submit IO.
+        */
+       SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+
+       /**
+        *  This value indicates that the SCU hardware returned an early response
+        *  because the I/O request specified more data than is returned by the
+        *  target device (mode pages, inquiry data, etc.). The completion routine
+        *  will handle this case to get the actual number of bytes transferred.
+        */
+       SCI_SUCCESS_IO_DONE_EARLY,
+
+       /**
+        * This member indicates that the object for which a state change is
+        * being requested is already in said state.
+        */
+       SCI_WARNING_ALREADY_IN_STATE,
+
+       /**
+        * This member indicates the interrupt coalescence timer may cause SAS
+        * specification compliance issues (i.e. SMP target mode response
+        * frames must be returned within 1.9 milliseconds).
+        */
+       SCI_WARNING_TIMER_CONFLICT,
+
+       /**
+        * This field indicates a sequence of actions is not yet complete. Mostly,
+        * this status is used when multiple ATA commands are needed in a SATI translation.
+        */
+       SCI_WARNING_SEQUENCE_INCOMPLETE,
+
+       /**
+        * This member indicates that there was a general failure.
+        */
+       SCI_FAILURE,
+
+       /**
+        * This member indicates that the SCI implementation is unable to complete
+        * an operation due to a critical flaw that prevents any further operation
+        * (i.e. an invalid pointer).
+        */
+       SCI_FATAL_ERROR,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * controller is in a state that prevents successful completion.
+        */
+       SCI_FAILURE_INVALID_STATE,
+
+       /**
+        * This member indicates the calling function failed, because there are
+        * insufficient resources/memory to complete the request.
+        */
+       SCI_FAILURE_INSUFFICIENT_RESOURCES,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * controller object required for the operation can't be located.
+        */
+       SCI_FAILURE_CONTROLLER_NOT_FOUND,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * discovered controller type is not supported by the library.
+        */
+       SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested initialization data version isn't supported.
+        */
+       SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested configuration of SAS Phys into SAS Ports is not supported.
+        */
+       SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested protocol is not supported by the remote device, port,
+        * or controller.
+        */
+       SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested information type is not supported by the SCI implementation.
+        */
+       SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * device already exists.
+        */
+       SCI_FAILURE_DEVICE_EXISTS,
+
+       /**
+        * This member indicates the calling function failed, because adding
+        * a phy to the object is not possible.
+        */
+       SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested information type is not supported by the SCI implementation.
+        */
+       SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
+
+       /**
+        * This member indicates the calling function failed, because the SCI
+        * implementation does not support the supplied time limit.
+        */
+       SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain the specified Phy.
+        */
+       SCI_FAILURE_INVALID_PHY,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain the specified Port.
+        */
+       SCI_FAILURE_INVALID_PORT,
+
+       /**
+        * This member indicates the calling method was partly successful.
+        * The port was reset, but not all phys in the port are operational.
+        */
+       SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
+
+       /**
+        * This member indicates the calling method failed.
+        * The port reset did not complete because none of the phys are operational.
+        */
+       SCI_FAILURE_RESET_PORT_FAILURE,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain the specified remote device.
+        */
+       SCI_FAILURE_INVALID_REMOTE_DEVICE,
+
+       /**
+        * This member indicates the calling method failed, because the remote
+        * device is in a bad state and requires a reset.
+        */
+       SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain or support the specified IO tag.
+        */
+       SCI_FAILURE_INVALID_IO_TAG,
+
+       /**
+        * This member indicates that the operation failed and the user should
+        * check the response data associated with the IO.
+        */
+       SCI_FAILURE_IO_RESPONSE_VALID,
+
+       /**
+        * This member indicates that the operation failed, the failure is
+        * controller implementation specific, and the response data associated
+        * with the request is not valid.  You can query for the controller
+        * specific error information via sci_controller_get_request_status()
+        */
+       SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+
+       /**
+        * This member indicates that the operation failed because the
+        * user requested this IO to be terminated.
+        */
+       SCI_FAILURE_IO_TERMINATED,
+
+       /**
+        * This member indicates that the operation failed and the associated
+        * request requires a SCSI abort task to be sent to the target.
+        */
+       SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+
+       /**
+        * This member indicates that the operation failed because the supplied
+        * device could not be located.
+        */
+       SCI_FAILURE_DEVICE_NOT_FOUND,
+
+       /**
+        * This member indicates that the operation failed because the
+        * object's association is required and is not correctly set.
+        */
+       SCI_FAILURE_INVALID_ASSOCIATION,
+
+       /**
+        * This member indicates that the operation failed, because a timeout
+        * occurred.
+        */
+       SCI_FAILURE_TIMEOUT,
+
+       /**
+        * This member indicates that the operation failed, because the user
+        * specified a value that is either invalid or not supported.
+        */
+       SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+       /**
+        * This value indicates that the operation failed, because the number
+        * of messages (MSI-X) is not supported.
+        */
+       SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
+
+       /**
+        * This value indicates that the method failed due to a lack of
+        * available NCQ tags.
+        */
+       SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+
+       /**
+        * This value indicates that a protocol violation has occurred on the
+        * link.
+        */
+       SCI_FAILURE_PROTOCOL_VIOLATION,
+
+       /**
+        * This value indicates a failure condition that a retry may help to clear.
+        */
+       SCI_FAILURE_RETRY_REQUIRED,
+
+       /**
+        * This field indicates the retry limit was reached when a retry was attempted.
+        */
+       SCI_FAILURE_RETRY_LIMIT_REACHED,
+
+       /**
+        * This member indicates the calling method was partly successful.
+        * Mostly, this status is used when a LUN_RESET issued to an expander attached
+        * STP device in READY NCQ substate needs to have RNC suspended/resumed
+        * before posting TC.
+        */
+       SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
+
+       /**
+        * This field indicates an illegal phy connection based on the routing attributes
+        * of the two expander phys attached to each other.
+        */
+       SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
+
+       /**
+        * This field indicates a CONFIG ROUTE INFO command has a response with function result
+        * INDEX DOES NOT EXIST, which usually means the maximum route index was exceeded.
+        */
+       SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
+
+       /**
+        * This value indicates that an unsupported PCI device ID has been
+        * specified.  This indicates that attempts to invoke
+        * sci_library_allocate_controller() will fail.
+        */
+       SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
+
+};
+
+/**
+ * enum sci_io_status - This enumeration depicts all of the possible IO
+ *    completion status values.  Each value in this enumeration maps directly
+ *    to a value in the enum sci_status enumeration.  Please refer to that
+ *    enumeration for detailed comments concerning what the status represents.
+ *
+ * Add the API to retrieve the SCU status from the core. Check to see that the
+ * following statuses are properly handled:
+ * - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
+ * - SCI_IO_FAILURE_INVALID_IO_TAG
+ */
+enum sci_io_status {
+       SCI_IO_SUCCESS                         = SCI_SUCCESS,
+       SCI_IO_FAILURE                         = SCI_FAILURE,
+       SCI_IO_SUCCESS_COMPLETE_BEFORE_START   = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+       SCI_IO_SUCCESS_IO_DONE_EARLY           = SCI_SUCCESS_IO_DONE_EARLY,
+       SCI_IO_FAILURE_INVALID_STATE           = SCI_FAILURE_INVALID_STATE,
+       SCI_IO_FAILURE_INSUFFICIENT_RESOURCES  = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+       SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL    = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+       SCI_IO_FAILURE_RESPONSE_VALID          = SCI_FAILURE_IO_RESPONSE_VALID,
+       SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+       SCI_IO_FAILURE_TERMINATED              = SCI_FAILURE_IO_TERMINATED,
+       SCI_IO_FAILURE_REQUIRES_SCSI_ABORT     = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+       SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+       SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE    = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+       SCI_IO_FAILURE_PROTOCOL_VIOLATION      = SCI_FAILURE_PROTOCOL_VIOLATION,
+
+       SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+       SCI_IO_FAILURE_RETRY_REQUIRED      = SCI_FAILURE_RETRY_REQUIRED,
+       SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
+       SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
+};
+
+/**
+ * enum sci_task_status - This enumeration depicts all of the possible task
+ *    completion status values.  Each value in this enumeration maps directly
+ *    to a value in the enum sci_status enumeration.  Please refer to that
+ *    enumeration for detailed comments concerning what the status represents.
+ *
+ * Check to see that the following statuses are properly handled:
+ */
+enum sci_task_status {
+       SCI_TASK_SUCCESS                         = SCI_SUCCESS,
+       SCI_TASK_FAILURE                         = SCI_FAILURE,
+       SCI_TASK_FAILURE_INVALID_STATE           = SCI_FAILURE_INVALID_STATE,
+       SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES  = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+       SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL    = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+       SCI_TASK_FAILURE_INVALID_TAG             = SCI_FAILURE_INVALID_IO_TAG,
+       SCI_TASK_FAILURE_RESPONSE_VALID          = SCI_FAILURE_IO_RESPONSE_VALID,
+       SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+       SCI_TASK_FAILURE_TERMINATED              = SCI_FAILURE_IO_TERMINATED,
+       SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+       SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+       SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
+
+};
+
+/**
+ * sci_swab32_cpy - convert between scsi and scu-hardware byte format
+ * @dest: receive the 4-byte endian swapped version of src
+ * @src: word aligned source buffer
+ *
+ * scu hardware handles SSP/SMP control, response, and unidentified
+ * frames in "big endian dword" order.  Regardless of host endian this
+ * is always a swab32()-per-dword conversion of the standard definition,
+ * i.e. single byte fields swapped and multi-byte fields in little-
+ * endian
+ */
+static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
+{
+       u32 *dest = _dest, *src = _src;
+
+       while (--word_cnt >= 0)
+               dest[word_cnt] = swab32(src[word_cnt]);
+}
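+
+/*
+ * Example usage (illustrative only; "hw_words" is a placeholder for a
+ * word-aligned frame as read from SCU memory, and struct ssp_frame_hdr
+ * comes from <scsi/sas.h>):
+ *
+ *        struct ssp_frame_hdr hdr;
+ *
+ *        sci_swab32_cpy(&hdr, hw_words, sizeof(hdr) / sizeof(u32));
+ *
+ * Each dword is swab32()'d independently, after which the header can be
+ * read using the standard SAS structure layout.
+ */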
+
+extern unsigned char no_outbound_task_to;
+extern u16 ssp_max_occ_to;
+extern u16 stp_max_occ_to;
+extern u16 ssp_inactive_to;
+extern u16 stp_inactive_to;
+extern unsigned char phy_gen;
+extern unsigned char max_concurr_spinup;
+
+irqreturn_t isci_msix_isr(int vec, void *data);
+irqreturn_t isci_intx_isr(int vec, void *data);
+irqreturn_t isci_error_isr(int vec, void *data);
+
+/*
+ * Each timer is associated with a cancellation flag that is set when
+ * del_timer() is called and checked in the timer callback function. This
+ * is needed since del_timer_sync() cannot be called with sci_lock held.
+ * For deinit however, del_timer_sync() is used without holding the lock.
+ */
+struct sci_timer {
+       struct timer_list       timer;
+       bool                    cancel;
+};
+
+static inline
+void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
+{
+       tmr->timer.function = fn;
+       tmr->timer.data = (unsigned long) tmr;
+       tmr->cancel = 0;
+       init_timer(&tmr->timer);
+}
+
+static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
+{
+       tmr->cancel = 0;
+       mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
+}
+
+static inline void sci_del_timer(struct sci_timer *tmr)
+{
+       tmr->cancel = 1;
+       del_timer(&tmr->timer);
+}
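+
+/*
+ * Sketch of the expected callback shape (illustrative only; handle_timeout()
+ * is a placeholder; see phy_sata_timeout() in phy.c for a real instance):
+ * the handler takes the host lock and honours ->cancel before doing any
+ * work, so a callback that raced with sci_del_timer() becomes a no-op.
+ *
+ *        struct sci_timer *tmr = (struct sci_timer *)data;
+ *
+ *        spin_lock_irqsave(&ihost->scic_lock, flags);
+ *        if (!tmr->cancel)
+ *                handle_timeout(tmr);
+ *        spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ */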
+
+struct sci_base_state_machine {
+       const struct sci_base_state *state_table;
+       u32 initial_state_id;
+       u32 current_state_id;
+       u32 previous_state_id;
+};
+
+typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
+
+struct sci_base_state {
+       sci_state_transition_t enter_state;     /* Called on state entry */
+       sci_state_transition_t exit_state;      /* Called on state exit */
+};
+
+extern void sci_init_sm(struct sci_base_state_machine *sm,
+                       const struct sci_base_state *state_table,
+                       u32 initial_state);
+extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
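+
+/*
+ * Illustrative state-table shape only (the state names and handlers below
+ * are hypothetical; each object in the driver is expected to supply its
+ * own table):
+ *
+ *        static const struct sci_base_state example_state_table[] = {
+ *                [EXAMPLE_STATE_INITIAL] = { .enter_state = example_initial_enter },
+ *                [EXAMPLE_STATE_READY] = {
+ *                        .enter_state = example_ready_enter,
+ *                        .exit_state  = example_ready_exit,
+ *                },
+ *        };
+ *
+ *        sci_init_sm(&obj->sm, example_state_table, EXAMPLE_STATE_INITIAL);
+ *
+ * sci_change_state() then runs the registered exit_state/enter_state
+ * handlers as the object moves between states.
+ */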
+#endif  /* __ISCI_H__ */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644 (file)
index 0000000..79313a7
--- /dev/null
@@ -0,0 +1,1312 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "host.h"
+#include "phy.h"
+#include "scu_event_codes.h"
+#include "probe_roms.h"
+
+/* Maximum arbitration wait time in micro-seconds */
+#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME  (700)
+
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
+{
+       return iphy->max_negotiated_speed;
+}
+
+static enum sci_status
+sci_phy_transport_layer_initialization(struct isci_phy *iphy,
+                                      struct scu_transport_layer_registers __iomem *reg)
+{
+       u32 tl_control;
+
+       iphy->transport_layer_registers = reg;
+
+       writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
+               &iphy->transport_layer_registers->stp_rni);
+
+       /*
+        * Hardware team recommends that we enable the STP prefetch for all
+        * transports
+        */
+       tl_control = readl(&iphy->transport_layer_registers->control);
+       tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
+       writel(tl_control, &iphy->transport_layer_registers->control);
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status
+sci_phy_link_layer_initialization(struct isci_phy *iphy,
+                                 struct scu_link_layer_registers __iomem *reg)
+{
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+       int phy_idx = iphy->phy_index;
+       struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
+       struct sci_phy_oem_params *phy_oem =
+               &ihost->oem_parameters.phys[phy_idx];
+       u32 phy_configuration;
+       struct sci_phy_cap phy_cap;
+       u32 parity_check = 0;
+       u32 parity_count = 0;
+       u32 llctl, link_rate;
+       u32 clksm_value = 0;
+
+       iphy->link_layer_registers = reg;
+
+       /* Set our IDENTIFY frame data */
+       #define SCI_END_DEVICE 0x01
+
+       writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
+              SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
+              SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
+              SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
+              SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
+              &iphy->link_layer_registers->transmit_identification);
+
+       /* Write the device SAS Address */
+       writel(0xFEDCBA98,
+              &iphy->link_layer_registers->sas_device_name_high);
+       writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+
+       /* Write the source SAS Address */
+       writel(phy_oem->sas_address.high,
+               &iphy->link_layer_registers->source_sas_address_high);
+       writel(phy_oem->sas_address.low,
+               &iphy->link_layer_registers->source_sas_address_low);
+
+       /* Clear and Set the PHY Identifier */
+       writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
+       writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
+               &iphy->link_layer_registers->identify_frame_phy_id);
+
+       /* Change the initial state of the phy configuration register */
+       phy_configuration =
+               readl(&iphy->link_layer_registers->phy_configuration);
+
+       /* Hold OOB state machine in reset */
+       phy_configuration |=  SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+       writel(phy_configuration,
+               &iphy->link_layer_registers->phy_configuration);
+
+       /* Configure the SNW capabilities */
+       phy_cap.all = 0;
+       phy_cap.start = 1;
+       phy_cap.gen3_no_ssc = 1;
+       phy_cap.gen2_no_ssc = 1;
+       phy_cap.gen1_no_ssc = 1;
+       if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+               phy_cap.gen3_ssc = 1;
+               phy_cap.gen2_ssc = 1;
+               phy_cap.gen1_ssc = 1;
+       }
+
+       /*
+        * The SAS specification indicates that the phy_capabilities that
+        * are transmitted shall have an even parity.  Calculate the parity. */
+       parity_check = phy_cap.all;
+       while (parity_check != 0) {
+               if (parity_check & 0x1)
+                       parity_count++;
+               parity_check >>= 1;
+       }
+
+       /*
+        * If parity indicates there are an odd number of bits set, then
+        * set the parity bit to 1 in the phy capabilities. */
+       if ((parity_count % 2) != 0)
+               phy_cap.parity = 1;
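+       /* The loop above is equivalent to
+        * phy_cap.parity = hweight32(phy_cap.all) & 1;
+        */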
+
+       writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+
+       /* Set the enable spinup period but disable the ability to send
+        * notify enable spinup
+        */
+       writel(SCU_ENSPINUP_GEN_VAL(COUNT,
+                       phy_user->notify_enable_spin_up_insertion_frequency),
+               &iphy->link_layer_registers->notify_enable_spinup_control);
+
+       /* Write the ALIGN insertion frequency for the connected phy and
+        * independent of connected state
+        */
+       clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
+                       phy_user->in_connection_align_insertion_frequency);
+
+       clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
+                       phy_user->align_insertion_frequency);
+
+       writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+
+       /* @todo Provide a way to write this register correctly */
+       writel(0x02108421,
+               &iphy->link_layer_registers->afe_lookup_table_control);
+
+       llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
+               (u8)ihost->user_parameters.no_outbound_task_timeout);
+
+       switch (phy_user->max_speed_generation) {
+       case SCIC_SDS_PARM_GEN3_SPEED:
+               link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
+               break;
+       case SCIC_SDS_PARM_GEN2_SPEED:
+               link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
+               break;
+       default:
+               link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
+               break;
+       }
+       llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
+       writel(llctl, &iphy->link_layer_registers->link_layer_control);
+
+       if (is_a2(ihost->pdev)) {
+               /* Program the max ARB time for the PHY to 700us so we inter-operate with
+                * the PMC expander which shuts down PHYs if the expander PHY generates too
+                * many breaks.  This time value will guarantee that the initiator PHY will
+                * generate the break.
+                */
+               writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
+                       &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+       }
+
+       /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
+       writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+
+       /* We can exit the initial state to the stopped state */
+       sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+       return SCI_SUCCESS;
+}
+
+static void phy_sata_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       dev_dbg(sciphy_to_dev(iphy),
+                "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
+                "timeout.\n",
+                __func__,
+                iphy);
+
+       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * This method returns the port currently containing this phy. If the phy is
+ *    currently contained by the dummy port, then the phy is considered to not
+ *    be part of a port.
+ * @sci_phy: This parameter specifies the phy for which to retrieve the
+ *    containing port.
+ *
+ * This method returns a handle to a port that contains the supplied phy.
+ * NULL This value is returned if the phy is not part of a real
+ * port (i.e. it's contained in the dummy port). !NULL All other
+ * values indicate a handle/pointer to the port containing the phy.
+ */
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
+{
+       struct isci_port *iport = iphy->owning_port;
+
+       if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
+               return NULL;
+
+       return iphy->owning_port;
+}
+
+/**
+ * This method will assign a port to the phy object.
+ * @iphy: This parameter specifies the phy for which to assign a port
+ *    object.
+ * @iport: This parameter specifies the port object to assign to the phy.
+ *
+ *
+ */
+void sci_phy_set_port(
+       struct isci_phy *iphy,
+       struct isci_port *iport)
+{
+       iphy->owning_port = iport;
+
+       if (iphy->bcn_received_while_port_unassigned) {
+               iphy->bcn_received_while_port_unassigned = false;
+               sci_port_broadcast_change_received(iphy->owning_port, iphy);
+       }
+}
+
+enum sci_status sci_phy_initialize(struct isci_phy *iphy,
+                                  struct scu_transport_layer_registers __iomem *tl,
+                                  struct scu_link_layer_registers __iomem *ll)
+{
+       /* Perform the initialization of the TL hardware */
+       sci_phy_transport_layer_initialization(iphy, tl);
+
+       /* Perform the initialization of the PE hardware */
+       sci_phy_link_layer_initialization(iphy, ll);
+
+       /* There is nothing that needs to be done in this state; just
+        * transition to the stopped state
+        */
+       sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * This method assigns the direct attached device ID for this phy.
+ *
+ * @iphy: The phy for which the direct attached device id is to
+ *       be assigned.
+ * @device_id: The direct attached device ID to assign to the phy.
+ *       This will either be the RNi for the device or an invalid RNi if there
+ *       is no current device assigned to the phy.
+ */
+void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
+{
+       u32 tl_control;
+
+       writel(device_id, &iphy->transport_layer_registers->stp_rni);
+
+       /*
+        * The read should guarantee that the first write gets posted
+        * before the next write
+        */
+       tl_control = readl(&iphy->transport_layer_registers->control);
+       tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
+       writel(tl_control, &iphy->transport_layer_registers->control);
+}
+
+static void sci_phy_suspend(struct isci_phy *iphy)
+{
+       u32 scu_sas_pcfg_value;
+
+       scu_sas_pcfg_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+       writel(scu_sas_pcfg_value,
+               &iphy->link_layer_registers->phy_configuration);
+
+       sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+void sci_phy_resume(struct isci_phy *iphy)
+{
+       u32 scu_sas_pcfg_value;
+
+       scu_sas_pcfg_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+       writel(scu_sas_pcfg_value,
+               &iphy->link_layer_registers->phy_configuration);
+}
+
+void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+       sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
+       sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
+}
+
+void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+       struct sas_identify_frame *iaf;
+
+       iaf = &iphy->frame_rcvd.iaf;
+       memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
+}
+
+void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
+{
+       proto->all = readl(&iphy->link_layer_registers->transmit_identification);
+}
+
+enum sci_status sci_phy_start(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       if (state != SCI_PHY_STOPPED) {
+               dev_dbg(sciphy_to_dev(iphy),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_stop(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PHY_SUB_INITIAL:
+       case SCI_PHY_SUB_AWAIT_OSSP_EN:
+       case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_SAS_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+       case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+       case SCI_PHY_SUB_FINAL:
+       case SCI_PHY_READY:
+               break;
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_reset(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       if (state != SCI_PHY_READY) {
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PHY_SUB_AWAIT_SAS_POWER: {
+               u32 enable_spinup;
+
+               enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+               enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
+               writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
+
+               /* Change state to the final state; this substate machine has run to completion */
+               sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+               return SCI_SUCCESS;
+       }
+       case SCI_PHY_SUB_AWAIT_SATA_POWER: {
+               u32 scu_sas_pcfg_value;
+
+               /* Release the spinup hold state and reset the OOB state machine */
+               scu_sas_pcfg_value =
+                       readl(&iphy->link_layer_registers->phy_configuration);
+               scu_sas_pcfg_value &=
+                       ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+               scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+               writel(scu_sas_pcfg_value,
+                       &iphy->link_layer_registers->phy_configuration);
+
+               /* Now restart the OOB operation */
+               scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+               scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+               writel(scu_sas_pcfg_value,
+                       &iphy->link_layer_registers->phy_configuration);
+
+               /* Change state to the final state; this substate machine has run to completion */
+               sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
+
+               return SCI_SUCCESS;
+       }
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
+{
+       /* continue the link training for the phy as if it were a SAS PHY
+        * instead of a SATA PHY. This is done because the completion queue had a SAS
+        * PHY DETECTED event when the state machine was expecting a SATA PHY event.
+        */
+       u32 phy_control;
+
+       phy_control = readl(&iphy->link_layer_registers->phy_configuration);
+       phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
+       writel(phy_control,
+              &iphy->link_layer_registers->phy_configuration);
+
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
+
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+}
+
+static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
+{
+       /* This method continues the link training for the phy as if it were a SATA PHY
+        * instead of a SAS PHY.  This is done because the completion queue had a SATA
+        * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
+        */
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
+
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+}
+
+/**
+ * sci_phy_complete_link_training - perform processing common to
+ *    all protocols upon completion of link training.
+ * @sci_phy: This parameter specifies the phy object for which link training
+ *    has completed.
+ * @max_link_rate: This parameter specifies the maximum link rate to be
+ *    associated with this phy.
+ * @next_state: This parameter specifies the next state for the phy's starting
+ *    sub-state machine.
+ *
+ */
+static void sci_phy_complete_link_training(struct isci_phy *iphy,
+                                          enum sas_linkrate max_link_rate,
+                                          u32 next_state)
+{
+       iphy->max_negotiated_speed = max_link_rate;
+
+       sci_change_state(&iphy->sm, next_state);
+}
+
+enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PHY_SUB_AWAIT_OSSP_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       sci_phy_start_sas_link_training(iphy);
+                       iphy->is_in_link_training = true;
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       sci_phy_start_sata_link_training(iphy);
+                       iphy->is_in_link_training = true;
+                       break;
+               default:
+                       dev_dbg(sciphy_to_dev(iphy),
+                               "%s: PHY starting substate machine received "
+                               "unexpected event_code %x\n",
+                               __func__,
+                               event_code);
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /*
+                        * Why is this being reported again by the controller?
+                        * We would re-enter this state so just stay here */
+                       break;
+               case SCU_EVENT_SAS_15:
+               case SCU_EVENT_SAS_15_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_IAF_UF);
+                       break;
+               case SCU_EVENT_SAS_30:
+               case SCU_EVENT_SAS_30_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_IAF_UF);
+                       break;
+               case SCU_EVENT_SAS_60:
+               case SCU_EVENT_SAS_60_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_IAF_UF);
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       /*
+                        * We were doing SAS PHY link training and received a SATA PHY event;
+                        * continue OOB/SN as if this were a SATA PHY */
+                       sci_phy_start_sata_link_training(iphy);
+                       break;
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__, event_code);
+
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_IAF_UF:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /* Backup the state machine */
+                       sci_phy_start_sas_link_training(iphy);
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       /* We were doing SAS PHY link training and received a
+                        * SATA PHY event; continue OOB/SN as if this were a
+                        * SATA PHY
+                        */
+                       sci_phy_start_sata_link_training(iphy);
+                       break;
+               case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+               case SCU_EVENT_LINK_FAILURE:
+               case SCU_EVENT_HARD_RESET_RECEIVED:
+                       /* Start the oob/sn state machine over again */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__, event_code);
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SAS_POWER:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                               "%s: PHY starting substate machine received unexpected "
+                               "event_code %x\n",
+                               __func__,
+                               event_code);
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SATA_POWER:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       /* These events are received every 10ms and are
+                        * expected while in this state
+                        */
+                       break;
+
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /* There has been a change in the phy type before OOB/SN for the
+                        * SATA finished; start down the SAS link training path.
+                        */
+                       sci_phy_start_sas_link_training(iphy);
+                       break;
+
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__, event_code);
+
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       /* These events might be received since we don't know how many may be in
+                        * the completion queue while waiting for power
+                        */
+                       break;
+               case SCU_EVENT_SATA_PHY_DETECTED:
+                       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+
+                       /* We have received the SATA PHY notification change state */
+                       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+                       break;
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /* There has been a change in the phy type before OOB/SN for the
+                        * SATA finished; start down the SAS link training path.
+                        */
+                       sci_phy_start_sas_link_training(iphy);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__,
+                                event_code);
+
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SATA_PHY_DETECTED:
+                       /*
+                        * The hardware reports multiple SATA PHY detected events;
+                        * ignore the extras */
+                       break;
+               case SCU_EVENT_SATA_15:
+               case SCU_EVENT_SATA_15_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+                       break;
+               case SCU_EVENT_SATA_30:
+               case SCU_EVENT_SATA_30_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+                       break;
+               case SCU_EVENT_SATA_60:
+               case SCU_EVENT_SATA_60_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+                       break;
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /*
+                        * There has been a change in the phy type before OOB/SN for the
+                        * SATA finished; start down the SAS link training path. */
+                       sci_phy_start_sas_link_training(iphy);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__, event_code);
+
+                       return SCI_FAILURE;
+               }
+
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SATA_PHY_DETECTED:
+                       /* Backup the state machine */
+                       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+                       break;
+
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__,
+                                event_code);
+
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_READY:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               case SCU_EVENT_BROADCAST_CHANGE:
+                       /* Broadcast change received. Notify the port. */
+                       if (phy_get_non_dummy_port(iphy) != NULL)
+                               sci_port_broadcast_change_received(iphy->owning_port, iphy);
+                       else
+                               iphy->bcn_received_while_port_unassigned = true;
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: SCIC PHY 0x%p ready state machine received "
+                                "unexpected event_code %x\n",
+                                __func__, iphy, event_code);
+                       return SCI_FAILURE_INVALID_STATE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_RESETTING:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_HARD_RESET_TRANSMITTED:
+                       /* The hard reset was transmitted; change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: SCIC PHY 0x%p resetting state machine received "
+                                "unexpected event_code %x\n",
+                                __func__, iphy, event_code);
+
+                       return SCI_FAILURE_INVALID_STATE;
+               }
+               return SCI_SUCCESS;
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+       enum sci_status result;
+       unsigned long flags;
+
+       switch (state) {
+       case SCI_PHY_SUB_AWAIT_IAF_UF: {
+               u32 *frame_words;
+               struct sas_identify_frame iaf;
+
+               result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                 frame_index,
+                                                                 (void **)&frame_words);
+
+               if (result != SCI_SUCCESS)
+                       return result;
+
+               sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
+               if (iaf.frame_type == 0) {
+                       u32 state;
+
+                       spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+                       memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
+                       spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+                       if (iaf.smp_tport) {
+                               /* We got the IAF for an expander PHY; go to the final
+                                * state since there are no power requirements for
+                                * expander phys.
+                                */
+                               state = SCI_PHY_SUB_FINAL;
+                       } else {
+                               /* We got the IAF; we can now go to the await spinup
+                                * semaphore state
+                                */
+                               state = SCI_PHY_SUB_AWAIT_SAS_POWER;
+                       }
+                       sci_change_state(&iphy->sm, state);
+                       result = SCI_SUCCESS;
+               } else
+                       dev_warn(sciphy_to_dev(iphy),
+                               "%s: PHY starting substate machine received "
+                               "unexpected frame id %x\n",
+                               __func__, frame_index);
+
+               sci_controller_release_frame(ihost, frame_index);
+               return result;
+       }
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
+               struct dev_to_host_fis *frame_header;
+               u32 *fis_frame_data;
+
+               result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                 frame_index,
+                                                                 (void **)&frame_header);
+
+               if (result != SCI_SUCCESS)
+                       return result;
+
+               if ((frame_header->fis_type == FIS_REGD2H) &&
+                   !(frame_header->status & ATA_BUSY)) {
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                frame_index,
+                                                                (void **)&fis_frame_data);
+
+                       spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+                       sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
+                                                         frame_header,
+                                                         fis_frame_data);
+                       spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+                       /* got the signature FIS; transition to the final substate */
+                       sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+                       result = SCI_SUCCESS;
+               } else
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected frame id %x\n",
+                                __func__, frame_index);
+
+               /* Regardless of the result we are done with this frame */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return result;
+       }
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+}
+
+static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       /* This is just a temporary state; go off to the starting state */
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
+}
+
+static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+       sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+       sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+       sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+       sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       if (sci_port_link_detected(iphy->owning_port, iphy)) {
+
+               /*
+                * Clear the PE suspend condition so we can actually
+                * receive SIG FIS
+                * The hardware will not respond to the XRDY until the PE
+                * suspend condition is cleared.
+                */
+               sci_phy_resume(iphy);
+
+               sci_mod_timer(&iphy->sata_timer,
+                             SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
+       } else
+               iphy->is_in_link_training = false;
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       /* State machine has run to completion so exit out and change
+        * the base state machine to the ready state
+        */
+       sci_change_state(&iphy->sm, SCI_PHY_READY);
+}
+
+/**
+ * scu_link_layer_stop_protocol_engine() - stop the protocol engine for a phy
+ * @iphy: This is the struct isci_phy object to stop.
+ *
+ * This method will stop the struct isci_phy object.  It does not reset the
+ * protocol engine; it just suspends it and places it in a state where it
+ * will not cause the end device to power up.
+ */
+static void scu_link_layer_stop_protocol_engine(
+       struct isci_phy *iphy)
+{
+       u32 scu_sas_pcfg_value;
+       u32 enable_spinup_value;
+
+       /* Suspend the protocol engine and place it in a sata spinup hold state */
+       scu_sas_pcfg_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       scu_sas_pcfg_value |=
+               (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+                SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
+                SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
+       writel(scu_sas_pcfg_value,
+              &iphy->link_layer_registers->phy_configuration);
+
+       /* Disable the notify enable spinup primitives */
+       enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+       enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
+       writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
+}
+
+/**
+ * scu_link_layer_start_oob() - start the OOB/SN state machine
+ * @iphy: the struct isci_phy object for which to start OOB/SN.
+ *
+ * This method will start the OOB/SN state machine for this struct isci_phy object.
+ */
+static void scu_link_layer_start_oob(
+       struct isci_phy *iphy)
+{
+       u32 scu_sas_pcfg_value;
+
+       scu_sas_pcfg_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+       scu_sas_pcfg_value &=
+               ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+               SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+       writel(scu_sas_pcfg_value,
+              &iphy->link_layer_registers->phy_configuration);
+}
+
+/**
+ * scu_link_layer_tx_hard_reset() - transmit a hard reset request on a phy
+ * @iphy: the struct isci_phy object on which to transmit the hard reset.
+ *
+ * This method will transmit a hard reset request on the specified phy. The SCU
+ * hardware requires that we reset the OOB state machine and set the hard reset
+ * bit in the phy configuration register. We then must start OOB over with the
+ * hard reset bit set.
+ */
+static void scu_link_layer_tx_hard_reset(
+       struct isci_phy *iphy)
+{
+       u32 phy_configuration_value;
+
+       /*
+        * SAS Phys must wait for the HARD_RESET_TX event notification to transition
+        * to the starting state. */
+       phy_configuration_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       phy_configuration_value |=
+               (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
+                SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
+       writel(phy_configuration_value,
+              &iphy->link_layer_registers->phy_configuration);
+
+       /* Now take the OOB state machine out of reset */
+       phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+       phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+       writel(phy_configuration_value,
+              &iphy->link_layer_registers->phy_configuration);
+}
+
+static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_port *iport = iphy->owning_port;
+       struct isci_host *ihost = iport->owning_controller;
+
+       /*
+        * @todo We need to get to the controller to place this PE in a
+        * reset state
+        */
+       sci_del_timer(&iphy->sata_timer);
+
+       scu_link_layer_stop_protocol_engine(iphy);
+
+       if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
+               sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_port *iport = iphy->owning_port;
+       struct isci_host *ihost = iport->owning_controller;
+
+       scu_link_layer_stop_protocol_engine(iphy);
+       scu_link_layer_start_oob(iphy);
+
+       /* We don't know what kind of phy we are going to be just yet */
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+       iphy->bcn_received_while_port_unassigned = false;
+
+       if (iphy->sm.previous_state_id == SCI_PHY_READY)
+               sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
+}
+
+static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_port *iport = iphy->owning_port;
+       struct isci_host *ihost = iport->owning_controller;
+
+       sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_phy_suspend(iphy);
+}
+
+static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       /* The phy is being reset, therefore deactivate it from the port.  In
+        * the resetting state we don't notify the user regarding link up and
+        * link down notifications
+        */
+       sci_port_deactivate_phy(iphy->owning_port, iphy, false);
+
+       if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+               scu_link_layer_tx_hard_reset(iphy);
+       } else {
+               /* The SCU does not need to have a discrete reset state so
+                * just go back to the starting state.
+                */
+               sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+       }
+}
+
+static const struct sci_base_state sci_phy_state_table[] = {
+       [SCI_PHY_INITIAL] = { },
+       [SCI_PHY_STOPPED] = {
+               .enter_state = sci_phy_stopped_state_enter,
+       },
+       [SCI_PHY_STARTING] = {
+               .enter_state = sci_phy_starting_state_enter,
+       },
+       [SCI_PHY_SUB_INITIAL] = {
+               .enter_state = sci_phy_starting_initial_substate_enter,
+       },
+       [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
+       [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
+       [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
+       [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
+               .enter_state = sci_phy_starting_await_sas_power_substate_enter,
+               .exit_state  = sci_phy_starting_await_sas_power_substate_exit,
+       },
+       [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
+               .enter_state = sci_phy_starting_await_sata_power_substate_enter,
+               .exit_state  = sci_phy_starting_await_sata_power_substate_exit
+       },
+       [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
+               .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
+               .exit_state  = sci_phy_starting_await_sata_phy_substate_exit
+       },
+       [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
+               .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
+               .exit_state  = sci_phy_starting_await_sata_speed_substate_exit
+       },
+       [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
+               .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
+               .exit_state  = sci_phy_starting_await_sig_fis_uf_substate_exit
+       },
+       [SCI_PHY_SUB_FINAL] = {
+               .enter_state = sci_phy_starting_final_substate_enter,
+       },
+       [SCI_PHY_READY] = {
+               .enter_state = sci_phy_ready_state_enter,
+               .exit_state = sci_phy_ready_state_exit,
+       },
+       [SCI_PHY_RESETTING] = {
+               .enter_state = sci_phy_resetting_state_enter,
+       },
+       [SCI_PHY_FINAL] = { },
+};
+
+void sci_phy_construct(struct isci_phy *iphy,
+                           struct isci_port *iport, u8 phy_index)
+{
+       sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
+
+       /* Copy the rest of the input data to our locals */
+       iphy->owning_port = iport;
+       iphy->phy_index = phy_index;
+       iphy->bcn_received_while_port_unassigned = false;
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+       iphy->link_layer_registers = NULL;
+       iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+       /* Create the SIGNATURE FIS Timeout timer for this phy */
+       sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
+}
+
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
+{
+       struct sci_oem_params *oem = &ihost->oem_parameters;
+       u64 sci_sas_addr;
+       __be64 sas_addr;
+
+       sci_sas_addr = oem->phys[index].sas_address.high;
+       sci_sas_addr <<= 32;
+       sci_sas_addr |= oem->phys[index].sas_address.low;
+       sas_addr = cpu_to_be64(sci_sas_addr);
+       memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
+
+       iphy->isci_port = NULL;
+       iphy->sas_phy.enabled = 0;
+       iphy->sas_phy.id = index;
+       iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
+       iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
+       iphy->sas_phy.ha = &ihost->sas_ha;
+       iphy->sas_phy.lldd_phy = iphy;
+       iphy->sas_phy.enabled = 1;
+       iphy->sas_phy.class = SAS;
+       iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
+       iphy->sas_phy.tproto = 0;
+       iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
+       iphy->sas_phy.role = PHY_ROLE_INITIATOR;
+       iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
+       iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
+       memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
+}
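For reference only, not part of the patch: a standalone sketch of the SAS-address packing that isci_phy_init() performs above. The helper name and the bare high/low parameters (standing in for oem->phys[index].sas_address) are hypothetical.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Combine the OEM high/low words into one 64-bit SAS address and store it
 * big-endian, matching the byte order kept in iphy->sas_addr above.
 */
static void example_pack_sas_addr(u32 high, u32 low, u8 dest[8])
{
	u64 addr = ((u64)high << 32) | low;
	__be64 wire = cpu_to_be64(addr);

	memcpy(dest, &wire, sizeof(wire));
}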
+
+
+/**
+ * isci_phy_control() - This function is one of the SAS Domain Template
+ *    functions. This is a phy management function.
+ * @sas_phy: This parameter specifies the sas phy being controlled.
+ * @func: This parameter specifies the phy control function being invoked.
+ * @buf: This parameter is specific to the phy function being invoked.
+ *
+ * Returns the status; zero indicates success.
+ */
+int isci_phy_control(struct asd_sas_phy *sas_phy,
+                    enum phy_func func,
+                    void *buf)
+{
+       int ret = 0;
+       struct isci_phy *iphy = sas_phy->lldd_phy;
+       struct isci_port *iport = iphy->isci_port;
+       struct isci_host *ihost = sas_phy->ha->lldd_ha;
+       unsigned long flags;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
+               __func__, sas_phy, func, buf, iphy, iport);
+
+       switch (func) {
+       case PHY_FUNC_DISABLE:
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               sci_phy_stop(iphy);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               break;
+
+       case PHY_FUNC_LINK_RESET:
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               sci_phy_stop(iphy);
+               sci_phy_start(iphy);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               break;
+
+       case PHY_FUNC_HARD_RESET:
+               if (!iport)
+                       return -ENODEV;
+
+               /* Perform the port reset. */
+               ret = isci_port_perform_hard_reset(ihost, iport, iphy);
+
+               break;
+
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                          "%s: phy %p; func %d NOT IMPLEMENTED!\n",
+                          __func__, sas_phy, func);
+               ret = -ENOSYS;
+               break;
+       }
+       return ret;
+}
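The registration of this callback is outside this hunk; the sketch below only illustrates the kind of libsas domain-template hookup it is written for (the example_dft name is hypothetical, and the real wiring lives elsewhere in the driver).

#include <scsi/libsas.h>

/* Sketch: expose isci_phy_control() to libsas as the phy-control callback. */
static struct sas_domain_function_template example_dft = {
	.lldd_control_phy	= isci_phy_control,
};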
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
new file mode 100644 (file)
index 0000000..67699c8
--- /dev/null
@@ -0,0 +1,504 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PHY_H_
+#define _ISCI_PHY_H_
+
+#include <scsi/sas.h>
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+
+/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
+ * before restarting the starting state machine.  Technically, the old parallel
+ * ATA specification required up to 30 seconds for a device to issue its
+ * signature FIS as a result of a soft reset.  Now we see that devices respond
+ * generally within 15 seconds, but we'll use 25 seconds (25000 ms) for now.
+ */
+#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT    25000
+
+/* This is the timeout for SATA OOB/SN, needed because the hardware does not
+ * recognize a hot plug that occurs after the OOB signal but before the SN
+ * signals.  If we have not received the speed event notification from the
+ * hardware by the time this timeout expires, we must restart the hardware
+ * OOB state machine.
+ */
+#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT  250
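Purely as a reading aid and not part of the patch: the starting-substate handlers earlier in this diff arm and cancel the per-phy SATA timer with these values. A minimal sketch under that assumption; the example_* helpers are hypothetical.

#include "phy.h"

/* Sketch: arm the SATA timer with the link-training timeout on substate
 * entry and cancel it on exit, mirroring the await_sata_* handlers in phy.c.
 */
static inline void example_arm_sata_timer(struct isci_phy *iphy)
{
	sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
}

static inline void example_cancel_sata_timer(struct isci_phy *iphy)
{
	sci_del_timer(&iphy->sata_timer);
}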
+
+enum sci_phy_protocol {
+       SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
+       SCIC_SDS_PHY_PROTOCOL_SAS,
+       SCIC_SDS_PHY_PROTOCOL_SATA,
+       SCIC_SDS_MAX_PHY_PROTOCOLS
+};
+
+/**
+ * isci_phy - hba local phy infrastructure
+ * @sm: base state machine for the phy
+ * @protocol: attached device protocol
+ * @phy_index: physical index relative to the controller (0-3)
+ * @bcn_received_while_port_unassigned: bcn to report after port association
+ * @sata_timer: timeout for SATA signature FIS arrival
+ */
+struct isci_phy {
+       struct sci_base_state_machine sm;
+       struct isci_port *owning_port;
+       enum sas_linkrate max_negotiated_speed;
+       enum sci_phy_protocol protocol;
+       u8 phy_index;
+       bool bcn_received_while_port_unassigned;
+       bool is_in_link_training;
+       struct sci_timer sata_timer;
+       struct scu_transport_layer_registers __iomem *transport_layer_registers;
+       struct scu_link_layer_registers __iomem *link_layer_registers;
+       struct asd_sas_phy sas_phy;
+       struct isci_port *isci_port;
+       u8 sas_addr[SAS_ADDR_SIZE];
+       union {
+               struct sas_identify_frame iaf;
+               struct dev_to_host_fis fis;
+       } frame_rcvd;
+};
+
+static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
+{
+       struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
+
+       return iphy;
+}
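A brief usage sketch, illustrative only: LLDD entry points recover the driver-private phy from the libsas phy embedded in it, either through the lldd_phy back-pointer (as isci_phy_control() does) or through this container_of() helper. The example_* function is hypothetical.

/* Sketch: walk from a libsas asd_sas_phy back to the isci objects that own it. */
static struct isci_host *example_host_from_sas_phy(struct asd_sas_phy *sas_phy)
{
	struct isci_phy *iphy = to_iphy(sas_phy);	/* container_of() on the embedded member */
	struct isci_port *iport = iphy->owning_port;	/* set up in sci_phy_construct() */

	return iport->owning_controller;
}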
+
+struct sci_phy_cap {
+       union {
+               struct {
+                       /*
+                        * The SAS specification indicates the start bit shall
+                        * always be set to 1.  This implementation will have
+                        * the start bit set to 0 if the PHY CAPABILITIES were
+                        * either not received or speed negotiation failed.
+                        */
+                       u8 start:1;
+                       u8 tx_ssc_type:1;
+                       u8 res1:2;
+                       u8 req_logical_linkrate:4;
+
+                       u32 gen1_no_ssc:1;
+                       u32 gen1_ssc:1;
+                       u32 gen2_no_ssc:1;
+                       u32 gen2_ssc:1;
+                       u32 gen3_no_ssc:1;
+                       u32 gen3_ssc:1;
+                       u32 res2:17;
+                       u32 parity:1;
+               };
+               u32 all;
+       };
+}  __packed;
+
+/* this data structure reflects the link layer transmit identification reg */
+struct sci_phy_proto {
+       union {
+               struct {
+                       u16 _r_a:1;
+                       u16 smp_iport:1;
+                       u16 stp_iport:1;
+                       u16 ssp_iport:1;
+                       u16 _r_b:4;
+                       u16 _r_c:1;
+                       u16 smp_tport:1;
+                       u16 stp_tport:1;
+                       u16 ssp_tport:1;
+                       u16 _r_d:4;
+               };
+               u16 all;
+       };
+} __packed;
+
+
+/**
+ * struct sci_phy_properties - This structure defines the properties common to
+ *    all phys that can be retrieved.
+ *
+ *
+ */
+struct sci_phy_properties {
+       /**
+        * This field specifies the port that currently contains the
+        * supplied phy.  This field may be set to NULL
+        * if the phy is not currently contained in a port.
+        */
+       struct isci_port *iport;
+
+       /**
+        * This field specifies the link rate at which the phy is
+        * currently operating.
+        */
+       enum sas_linkrate negotiated_link_rate;
+
+       /**
+        * This field specifies the index of the phy in relation to other
+        * phys within the controller.  This index is zero relative.
+        */
+       u8 index;
+};
+
+/**
+ * struct sci_sas_phy_properties - This structure defines the properties,
+ *    specific to a SAS phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sas_phy_properties {
+       /**
+        * This field delineates the Identify Address Frame received
+        * from the remote end point.
+        */
+       struct sas_identify_frame rcvd_iaf;
+
+       /**
+        * This field delineates the Phy capabilities structure received
+        * from the remote end point.
+        */
+       struct sci_phy_cap rcvd_cap;
+
+};
+
+/**
+ * struct sci_sata_phy_properties - This structure defines the properties,
+ *    specific to a SATA phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sata_phy_properties {
+       /**
+        * This field delineates the signature FIS received from the
+        * attached target.
+        */
+       struct dev_to_host_fis signature_fis;
+
+       /**
+        * This field specifies to the user if a port selector is connected
+        * on the specified phy.
+        */
+       bool is_port_selector_present;
+
+};
+
+/**
+ * enum sci_phy_counter_id - This enumeration depicts the various pieces of
+ *    optional information that can be retrieved for a specific phy.
+ *
+ *
+ */
+enum sci_phy_counter_id {
+       /**
+        * This PHY information field tracks the number of frames received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME,
+
+       /**
+        * This PHY information field tracks the number of frames transmitted.
+        */
+       SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
+
+       /**
+        * This PHY information field tracks the number of DWORDs received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
+
+       /**
+        * This PHY information field tracks the number of DWORDs transmitted.
+        */
+       SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
+
+       /**
+        * This PHY information field tracks the number of times DWORD
+        * synchronization was lost.
+        */
+       SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
+
+       /**
+        * This PHY information field tracks the number of received DWORDs with
+        * running disparity errors.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
+
+       /**
+        * This PHY information field tracks the number of received frames with a
+        * CRC error (not including short or truncated frames).
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
+
+       /**
+        * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+        * primitives received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
+
+       /**
+        * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+        * primitives transmitted.
+        */
+       SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
+
+       /**
+        * This PHY information field tracks the number of times the inactivity
+        * timer for connections on the phy has been utilized.
+        */
+       SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
+
+       /**
+        * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+        * primitives received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
+
+       /**
+        * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+        * primitives transmitted.
+        */
+       SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
+
+       /**
+        * This PHY information field tracks the number of CREDIT BLOCKED
+        * primitives received.
+        * @note Depending on remote device implementation, credit blocks
+        *       may occur regularly.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
+
+       /**
+        * This PHY information field contains the number of short frames
+        * received.  A short frame is simply a frame smaller than what is
+        * allowed by either the SAS or SATA specification.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
+
+       /**
+        * This PHY information field contains the number of frames received after
+        * credit has been exhausted.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
+
+       /**
+        * This PHY information field contains the number of frames received after
+        * a DONE has been received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
+
+       /**
+        * This PHY information field contains the number of times the phy
+        * failed to achieve DWORD synchronization during speed negotiation.
+        */
+       SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
+};
+
+enum sci_phy_states {
+       /**
+        * Simply the initial state for the base domain state machine.
+        */
+       SCI_PHY_INITIAL,
+
+       /**
+        * This state indicates that the phy has successfully been stopped.
+        * In this state no new IO operations are permitted on this phy.
+        * This state is entered from the INITIAL state.
+        * This state is entered from the STARTING state.
+        * This state is entered from the READY state.
+        * This state is entered from the RESETTING state.
+        */
+       SCI_PHY_STOPPED,
+
+       /**
+        * This state indicates that the phy is in the process of becoming
+        * ready.  In this state no new IO operations are permitted on this phy.
+        * This state is entered from the STOPPED state.
+        * This state is entered from the READY state.
+        * This state is entered from the RESETTING state.
+        */
+       SCI_PHY_STARTING,
+
+       /**
+        * Initial state
+        */
+       SCI_PHY_SUB_INITIAL,
+
+       /**
+        * Wait state for the hardware OSSP event type notification
+        */
+       SCI_PHY_SUB_AWAIT_OSSP_EN,
+
+       /**
+        * Wait state for the PHY speed notification
+        */
+       SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
+
+       /**
+        * Wait state for the IAF Unsolicited frame notification
+        */
+       SCI_PHY_SUB_AWAIT_IAF_UF,
+
+       /**
+        * Wait state for the request to consume power
+        */
+       SCI_PHY_SUB_AWAIT_SAS_POWER,
+
+       /**
+        * Wait state for request to consume power
+        */
+       SCI_PHY_SUB_AWAIT_SATA_POWER,
+
+       /**
+        * Wait state for the SATA PHY notification
+        */
+       SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
+
+       /**
+        * Wait for the SATA PHY speed notification
+        */
+       SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
+
+       /**
+        * Wait state for the SIGNATURE FIS unsolicited frame notification
+        */
+       SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
+
+       /**
+        * Exit state for this state machine
+        */
+       SCI_PHY_SUB_FINAL,
+
+       /**
+        * This state indicates that the phy is now ready.  Thus, the user
+        * is able to perform IO operations utilizing this phy as long as it
+        * is currently part of a valid port.
+        * This state is entered from the STARTING state.
+        */
+       SCI_PHY_READY,
+
+       /**
+        * This state indicates that the phy is in the process of being reset.
+        * In this state no new IO operations are permitted on this phy.
+        * This state is entered from the READY state.
+        */
+       SCI_PHY_RESETTING,
+
+       /**
+        * Simply the final state for the base phy state machine.
+        */
+       SCI_PHY_FINAL,
+};
+
+void sci_phy_construct(
+       struct isci_phy *iphy,
+       struct isci_port *iport,
+       u8 phy_index);
+
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
+
+void sci_phy_set_port(
+       struct isci_phy *iphy,
+       struct isci_port *iport);
+
+enum sci_status sci_phy_initialize(
+       struct isci_phy *iphy,
+       struct scu_transport_layer_registers __iomem *transport_layer_registers,
+       struct scu_link_layer_registers __iomem *link_layer_registers);
+
+enum sci_status sci_phy_start(
+       struct isci_phy *iphy);
+
+enum sci_status sci_phy_stop(
+       struct isci_phy *iphy);
+
+enum sci_status sci_phy_reset(
+       struct isci_phy *iphy);
+
+void sci_phy_resume(
+       struct isci_phy *iphy);
+
+void sci_phy_setup_transport(
+       struct isci_phy *iphy,
+       u32 device_id);
+
+enum sci_status sci_phy_event_handler(
+       struct isci_phy *iphy,
+       u32 event_code);
+
+enum sci_status sci_phy_frame_handler(
+       struct isci_phy *iphy,
+       u32 frame_index);
+
+enum sci_status sci_phy_consume_power_handler(
+       struct isci_phy *iphy);
+
+void sci_phy_get_sas_address(
+       struct isci_phy *iphy,
+       struct sci_sas_address *sas_address);
+
+void sci_phy_get_attached_sas_address(
+       struct isci_phy *iphy,
+       struct sci_sas_address *sas_address);
+
+struct sci_phy_proto;
+void sci_phy_get_protocols(
+       struct isci_phy *iphy,
+       struct sci_phy_proto *protocols);
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
+
+struct isci_host;
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
+int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
+
+#endif /* !defined(_ISCI_PHY_H_) */
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
new file mode 100644 (file)
index 0000000..8f6f9b7
--- /dev/null
@@ -0,0 +1,1757 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "port.h"
+#include "request.h"
+
+#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT  (1000)
+#define SCU_DUMMY_INDEX    (0xFFFF)
+
+static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
+{
+       unsigned long flags;
+
+       dev_dbg(&iport->isci_host->pdev->dev,
+               "%s: iport = %p, state = 0x%x\n",
+               __func__, iport, status);
+
+       /* XXX pointless lock */
+       spin_lock_irqsave(&iport->state_lock, flags);
+       iport->status = status;
+       spin_unlock_irqrestore(&iport->state_lock, flags);
+}
+
+static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
+{
+       u8 index;
+
+       proto->all = 0;
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               struct isci_phy *iphy = iport->phy_table[index];
+
+               if (!iphy)
+                       continue;
+               sci_phy_get_protocols(iphy, proto);
+       }
+}
+
+static u32 sci_port_get_phys(struct isci_port *iport)
+{
+       u32 index;
+       u32 mask;
+
+       mask = 0;
+       for (index = 0; index < SCI_MAX_PHYS; index++)
+               if (iport->phy_table[index])
+                       mask |= (1 << index);
+
+       return mask;
+}
+
+/**
+ * sci_port_get_properties() - This method simply returns the properties
+ *    regarding the port, such as: physical index, protocols, sas address, etc.
+ * @iport: this parameter specifies the port for which to retrieve the
+ *    properties.
+ * @prop: This parameter specifies the properties structure into which to
+ *    copy the requested information.
+ *
+ * Returns SCI_SUCCESS if the specified port was valid, or
+ * SCI_FAILURE_INVALID_PORT if it was not.  When the latter is returned, no
+ * data is copied to the properties output parameter.
+ */
+static enum sci_status sci_port_get_properties(struct isci_port *iport,
+                                               struct sci_port_properties *prop)
+{
+       if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
+               return SCI_FAILURE_INVALID_PORT;
+
+       prop->index = iport->logical_port_index;
+       prop->phy_mask = sci_port_get_phys(iport);
+       sci_port_get_sas_address(iport, &prop->local.sas_address);
+       sci_port_get_protocols(iport, &prop->local.protocols);
+       sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
+
+       return SCI_SUCCESS;
+}
+
+static void sci_port_bcn_enable(struct isci_port *iport)
+{
+       struct isci_phy *iphy;
+       u32 val;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+               iphy = iport->phy_table[i];
+               if (!iphy)
+                       continue;
+               val = readl(&iphy->link_layer_registers->link_layer_control);
+               /* clear the bit by writing 1. */
+               writel(val, &iphy->link_layer_registers->link_layer_control);
+       }
+}
+
+/* called under sci_lock to stabilize phy:port associations */
+void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
+{
+       int i;
+
+       clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
+       wake_up(&ihost->eventq);
+
+       if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+               struct isci_phy *iphy = iport->phy_table[i];
+
+               if (!iphy)
+                       continue;
+
+               ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+                                               PORTE_BROADCAST_RCVD);
+               break;
+       }
+}
+
+static void isci_port_bc_change_received(struct isci_host *ihost,
+                                        struct isci_port *iport,
+                                        struct isci_phy *iphy)
+{
+       if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
+                       __func__, iphy, &iphy->sas_phy);
+               set_bit(IPORT_BCN_PENDING, &iport->flags);
+               atomic_inc(&iport->event);
+               wake_up(&ihost->eventq);
+       } else {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: isci_phy = %p, sas_phy = %p\n",
+                       __func__, iphy, &iphy->sas_phy);
+
+               ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+                                               PORTE_BROADCAST_RCVD);
+       }
+       sci_port_bcn_enable(iport);
+}
+
+static void isci_port_link_up(struct isci_host *isci_host,
+                             struct isci_port *iport,
+                             struct isci_phy *iphy)
+{
+       unsigned long flags;
+       struct sci_port_properties properties;
+       unsigned long success = true;
+
+       BUG_ON(iphy->isci_port != NULL);
+
+       iphy->isci_port = iport;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p\n",
+               __func__, iport);
+
+       spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+       isci_port_change_state(iphy->isci_port, isci_starting);
+
+       sci_port_get_properties(iport, &properties);
+
+       if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
+               u64 attached_sas_address;
+
+               iphy->sas_phy.oob_mode = SATA_OOB_MODE;
+               iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
+
+               /*
+                * For direct-attached SATA devices, the SCI core will
+                * automagically assign a SAS address to the end device
+                * for the purpose of creating a port. This SAS address
+                * will not be the same as assigned to the PHY and needs
+                * to be obtained from struct sci_port_properties properties.
+                */
+               attached_sas_address = properties.remote.sas_address.high;
+               attached_sas_address <<= 32;
+               attached_sas_address |= properties.remote.sas_address.low;
+               swab64s(&attached_sas_address);
+
+               memcpy(&iphy->sas_phy.attached_sas_addr,
+                      &attached_sas_address, sizeof(attached_sas_address));
+       } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+               iphy->sas_phy.oob_mode = SAS_OOB_MODE;
+               iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
+
+               /* Copy the attached SAS address from the IAF */
+               memcpy(iphy->sas_phy.attached_sas_addr,
+                      iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
+       } else {
+               dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
+               success = false;
+       }
+
+       iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
+
+       spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+       /* Notify libsas that we have an address frame, if indeed
+        * we've found an SSP, SMP, or STP target */
+       if (success)
+               isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
+                                                   PORTE_BYTES_DMAED);
+}
+
+
+/**
+ * isci_port_link_down() - This function is called by the sci core when a link
+ *    becomes inactive.
+ * @isci_host: This parameter specifies the isci host object.
+ * @phy: This parameter specifies the isci phy with the active link.
+ * @port: This parameter specifies the isci port with the active link.
+ *
+ */
+static void isci_port_link_down(struct isci_host *isci_host,
+                               struct isci_phy *isci_phy,
+                               struct isci_port *isci_port)
+{
+       struct isci_remote_device *isci_device;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p\n", __func__, isci_port);
+
+       if (isci_port) {
+
+               /* check to see if this is the last phy on this port. */
+               if (isci_phy->sas_phy.port &&
+                   isci_phy->sas_phy.port->num_phys == 1) {
+                       atomic_inc(&isci_port->event);
+                       isci_port_bcn_enable(isci_host, isci_port);
+
+                       /* change the state for all devices on this port.  The
+                        * next task sent to this device will be returned as
+                        * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+                        * remove the target
+                        */
+                       list_for_each_entry(isci_device,
+                                           &isci_port->remote_dev_list,
+                                           node) {
+                               dev_dbg(&isci_host->pdev->dev,
+                                       "%s: isci_device = %p\n",
+                                       __func__, isci_device);
+                               set_bit(IDEV_GONE, &isci_device->flags);
+                       }
+               }
+               isci_port_change_state(isci_port, isci_stopping);
+       }
+
+       /* Notify libsas of the broken link; this will trigger calls to our
+        * isci_port_deformed and isci_dev_gone functions.
+        */
+       sas_phy_disconnected(&isci_phy->sas_phy);
+       isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
+                                          PHYE_LOSS_OF_SIGNAL);
+
+       isci_phy->isci_port = NULL;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p - Done\n", __func__, isci_port);
+}
+
+
+/**
+ * isci_port_ready() - This function is called by the sci core when a link
+ *    becomes ready.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p\n", __func__, isci_port);
+
+       complete_all(&isci_port->start_complete);
+       isci_port_change_state(isci_port, isci_ready);
+       return;
+}
+
+/**
+ * isci_port_not_ready() - This function is called by the sci core when a link
+ *    is not ready. All remote devices on this link will be removed if they are
+ *    in the stopping state.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p\n", __func__, isci_port);
+}
+
+static void isci_port_stop_complete(struct isci_host *ihost,
+                                   struct isci_port *iport,
+                                   enum sci_status completion_status)
+{
+       dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
+}
+
+/**
+ * isci_port_hard_reset_complete() - This function is called by the sci core
+ *    when the hard reset complete notification has been received.
+ * @port: This parameter specifies the sci port with the active link.
+ * @completion_status: This parameter specifies the core status for the reset
+ *    process.
+ *
+ */
+static void isci_port_hard_reset_complete(struct isci_port *isci_port,
+                                         enum sci_status completion_status)
+{
+       dev_dbg(&isci_port->isci_host->pdev->dev,
+               "%s: isci_port = %p, completion_status=%x\n",
+                    __func__, isci_port, completion_status);
+
+       /* Save the status of the hard reset from the port. */
+       isci_port->hard_reset_status = completion_status;
+
+       complete_all(&isci_port->hard_reset_complete);
+}
+
+/* This method will return true if the specified phy can be assigned to this
+ * port.  The following are the phys allowed for each port:
+ *   Port 0 - phys 0, 1, 2, 3
+ *   Port 1 - phy  1
+ *   Port 2 - phys 2, 3
+ *   Port 3 - phy  3
+ * This method doesn't preclude all configurations.  It merely ensures that a
+ * phy is part of the allowable set of phy identifiers for that port.  For
+ * example, one could assign phy 3 to port 0 and no other phys.  Please refer
+ * to sci_port_is_phy_mask_valid() for information regarding whether the
+ * phy_mask for a port can be supported.
+ *
+ * Returns true if this is a valid phy assignment for the port, false
+ * otherwise.
+bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       struct sci_user_parameters *user = &ihost->user_parameters;
+
+       /* Initialize to invalid value. */
+       u32 existing_phy_index = SCI_MAX_PHYS;
+       u32 index;
+
+       if ((iport->physical_port_index == 1) && (phy_index != 1))
+               return false;
+
+       if (iport->physical_port_index == 3 && phy_index != 3)
+               return false;
+
+       if (iport->physical_port_index == 2 &&
+           (phy_index == 0 || phy_index == 1))
+               return false;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++)
+               if (iport->phy_table[index] && index != phy_index)
+                       existing_phy_index = index;
+
+       /* Ensure that all of the phys in the port are capable of
+        * operating at the same maximum link rate.
+        */
+       if (existing_phy_index < SCI_MAX_PHYS &&
+           user->phys[phy_index].max_speed_generation !=
+           user->phys[existing_phy_index].max_speed_generation)
+               return false;
+
+       return true;
+}
+
+/**
+ * sci_port_is_phy_mask_valid() - determine if a phy mask can be supported
+ * @iport: This is the port object for which to determine if the phy mask
+ *    can be supported.
+ * @phy_mask: the candidate phy mask for the port.
+ *
+ * This method will return true if the port's phy mask can be supported by
+ * the SCU.  The valid phy mask configurations for each port (brackets
+ * denote optional phys) are:
+ *   Port 0 - [[3 2] 1] 0
+ *   Port 1 - [1]
+ *   Port 2 - [[3] 2]
+ *   Port 3 - [3]
+ *
+ * Returns true if the phy mask is valid for the port, false otherwise.
+ */
+static bool sci_port_is_phy_mask_valid(
+       struct isci_port *iport,
+       u32 phy_mask)
+{
+       if (iport->physical_port_index == 0) {
+               if (((phy_mask & 0x0F) == 0x0F)
+                   || ((phy_mask & 0x03) == 0x03)
+                   || ((phy_mask & 0x01) == 0x01)
+                   || (phy_mask == 0))
+                       return true;
+       } else if (iport->physical_port_index == 1) {
+               if (((phy_mask & 0x02) == 0x02)
+                   || (phy_mask == 0))
+                       return true;
+       } else if (iport->physical_port_index == 2) {
+               if (((phy_mask & 0x0C) == 0x0C)
+                   || ((phy_mask & 0x04) == 0x04)
+                   || (phy_mask == 0))
+                       return true;
+       } else if (iport->physical_port_index == 3) {
+               if (((phy_mask & 0x08) == 0x08)
+                   || (phy_mask == 0))
+                       return true;
+       }
+
+       return false;
+}
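As the checks above are written, they reduce for ports 0-3 to a simpler condition: the mask is either empty or contains the port's own phy index. A sketch only, not part of the patch, with a hypothetical name.

#include <linux/types.h>

/* Sketch: condensed equivalent of sci_port_is_phy_mask_valid() above. */
static bool example_phy_mask_valid(u32 physical_port_index, u32 phy_mask)
{
	if (physical_port_index > 3)
		return false;	/* the original falls through to false here too */
	return phy_mask == 0 || (phy_mask & (1u << physical_port_index));
}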
+
+/*
+ * This method retrieves a currently active (i.e. connected) phy contained in
+ * the port.  Currently, the lowest order phy that is connected is returned.
+ * It returns a pointer to a struct isci_phy object that is active in the
+ * port, or NULL if there are no currently active (i.e. connected to a
+ * remote end point) phys contained in the port.
+ */
+static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
+{
+       u32 index;
+       struct isci_phy *iphy;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               /* Ensure that the phy is both part of the port and currently
+                * connected to the remote end-point.
+                */
+               iphy = iport->phy_table[index];
+               if (iphy && sci_port_active_phy(iport, iphy))
+                       return iphy;
+       }
+
+       return NULL;
+}
+
+static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+       /* Check to see if we can add this phy to the port; this requires
+        * that the phy is not already part of a port and that the port does
+        * not already have a phy assigned to this phy index.
+        */
+       if (!iport->phy_table[iphy->phy_index] &&
+           !phy_get_non_dummy_port(iphy) &&
+           sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+               /* Phy is being added in the stopped state so we are in MPC mode
+                * make logical port index = physical port index
+                */
+               iport->logical_port_index = iport->physical_port_index;
+               iport->phy_table[iphy->phy_index] = iphy;
+               sci_phy_set_port(iphy, iport);
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE;
+}
+
+static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+       /* Make sure that this phy is part of this port */
+       if (iport->phy_table[iphy->phy_index] == iphy &&
+           phy_get_non_dummy_port(iphy) == iport) {
+               struct isci_host *ihost = iport->owning_controller;
+
+               /* Yep it is assigned to this port so remove it */
+               sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
+               iport->phy_table[iphy->phy_index] = NULL;
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE;
+}
+
+void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+       u32 index;
+
+       sas->high = 0;
+       sas->low  = 0;
+       for (index = 0; index < SCI_MAX_PHYS; index++)
+               if (iport->phy_table[index])
+                       sci_phy_get_sas_address(iport->phy_table[index], sas);
+}
+
+void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+       struct isci_phy *iphy;
+
+       /*
+        * Ensure that the phy is both part of the port and currently
+        * connected to the remote end-point.
+        */
+       iphy = sci_port_get_a_connected_phy(iport);
+       if (iphy) {
+               if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
+                       sci_phy_get_attached_sas_address(iphy, sas);
+               } else {
+                       sci_phy_get_sas_address(iphy, sas);
+                       sas->low += iphy->phy_index;
+               }
+       } else {
+               sas->high = 0;
+               sas->low  = 0;
+       }
+}
+
+/**
+ * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
+ *
+ * @sci_port: logical port on which we need to create the remote node context
+ * @rni: remote node index for this remote node context.
+ *
+ * This routine will construct a dummy remote node context data structure.
+ * This structure will be posted to the hardware to work around a scheduler
+ * error in the hardware.
+ */
+static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
+{
+       union scu_remote_node_context *rnc;
+
+       rnc = &iport->owning_controller->remote_node_context_table[rni];
+
+       memset(rnc, 0, sizeof(union scu_remote_node_context));
+
+       rnc->ssp.remote_sas_address_hi = 0;
+       rnc->ssp.remote_sas_address_lo = 0;
+
+       rnc->ssp.remote_node_index = rni;
+       rnc->ssp.remote_node_port_width = 1;
+       rnc->ssp.logical_port_index = iport->physical_port_index;
+
+       rnc->ssp.nexus_loss_timer_enable = false;
+       rnc->ssp.check_bit = false;
+       rnc->ssp.is_valid = true;
+       rnc->ssp.is_remote_node_context = true;
+       rnc->ssp.function_number = 0;
+       rnc->ssp.arbitration_wait_time = 0;
+}
+
+/*
+ * construct a dummy task context data structure.  This structure will be
+ * posted to the hardware to work around a scheduler error in the hardware.
+ */
+static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       struct scu_task_context *task_context;
+
+       task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+       memset(task_context, 0, sizeof(struct scu_task_context));
+
+       task_context->initiator_request = 1;
+       task_context->connection_rate = 1;
+       task_context->logical_port_index = iport->physical_port_index;
+       task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+       task_context->task_index = ISCI_TAG_TCI(tag);
+       task_context->valid = SCU_TASK_CONTEXT_VALID;
+       task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+       task_context->remote_node_index = iport->reserved_rni;
+       task_context->do_not_dma_ssp_good_response = 1;
+       task_context->task_phase = 0x01;
+}
+
+static void sci_port_destroy_dummy_resources(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
+               isci_free_tag(ihost, iport->reserved_tag);
+
+       if (iport->reserved_rni != SCU_DUMMY_INDEX)
+               sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
+                                                                    1, iport->reserved_rni);
+
+       iport->reserved_rni = SCU_DUMMY_INDEX;
+       iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
+{
+       u8 index;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               if (iport->active_phy_mask & (1 << index))
+                       sci_phy_setup_transport(iport->phy_table[index], device_id);
+       }
+}
+
+static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
+                                 bool do_notify_user)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+               sci_phy_resume(iphy);
+
+       iport->active_phy_mask |= 1 << iphy->phy_index;
+
+       sci_controller_clear_invalid_phy(ihost, iphy);
+
+       if (do_notify_user)
+               isci_port_link_up(ihost, iport, iphy);
+}
+
+void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
+                            bool do_notify_user)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       iport->active_phy_mask &= ~(1 << iphy->phy_index);
+
+       iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+       /* Re-assign the phy back to the LP as if it were a narrow port */
+       writel(iphy->phy_index,
+               &iport->port_pe_configuration_register[iphy->phy_index]);
+
+       if (do_notify_user)
+               isci_port_link_down(ihost, iphy, iport);
+}
+
+static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       /*
+        * Check to see if we have already reported this link as bad and, if
+        * not, go ahead and tell the SCI_USER that we have discovered an
+        * invalid link.
+        */
+       if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
+               ihost->invalid_phy_mask |= 1 << iphy->phy_index;
+               dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
+       }
+}
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+       switch (state) {
+       case SCI_PORT_READY:
+       case SCI_PORT_SUB_WAITING:
+       case SCI_PORT_SUB_OPERATIONAL:
+       case SCI_PORT_SUB_CONFIGURING:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/* flag dummy rnc handling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+                                     enum sci_port_states state)
+{
+       struct sci_base_state_machine *sm = &iport->sm;
+       enum sci_port_states old_state = sm->current_state_id;
+
+       if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+               iport->ready_exit = true;
+
+       sci_change_state(sm, state);
+       iport->ready_exit = false;
+}
+
+/**
+ * sci_port_general_link_up_handler - phy can be assigned to port?
+ * @iport: the port that has a phy that has gone link up.
+ * @iphy: This is the struct isci_phy object that has gone link up.
+ * @do_notify_user: This parameter specifies whether to inform the user (via
+ *    sci_port_link_up()) as to the fact that a new phy has become ready.
+ *
+ * Determine if this phy can be assigned to this port.  If the phy is not a
+ * valid PHY for this port then the function will notify the user.  A PHY can
+ * only be part of a port if its attached SAS ADDRESS is the same as all
+ * other PHYs in the same port.
+ */
+static void sci_port_general_link_up_handler(struct isci_port *iport,
+                                                 struct isci_phy *iphy,
+                                                 bool do_notify_user)
+{
+       struct sci_sas_address port_sas_address;
+       struct sci_sas_address phy_sas_address;
+
+       sci_port_get_attached_sas_address(iport, &port_sas_address);
+       sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
+
+       /* If the SAS address of the new phy matches the SAS address of
+        * other phys in the port OR this is the first phy in the port,
+        * then activate the phy and allow it to be used for operations
+        * in this port.
+        */
+       if ((phy_sas_address.high == port_sas_address.high &&
+            phy_sas_address.low  == port_sas_address.low) ||
+           iport->active_phy_mask == 0) {
+               struct sci_base_state_machine *sm = &iport->sm;
+
+               sci_port_activate_phy(iport, iphy, do_notify_user);
+               if (sm->current_state_id == SCI_PORT_RESETTING)
+                       port_state_machine_change(iport, SCI_PORT_READY);
+       } else
+               sci_port_invalid_link_up(iport, iphy);
+}
+
+
+
+/**
+ * sci_port_is_wide() - is this port a wide port?
+ * @iport: The port for which the wide port condition is to be checked.
+ *
+ * Return: true if the port has no phys or more than one phy assigned (wide
+ * port); false if the port has exactly one phy assigned (narrow port).
+ */
+static bool sci_port_is_wide(struct isci_port *iport)
+{
+       u32 index;
+       u32 phy_count = 0;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               if (iport->phy_table[index] != NULL) {
+                       phy_count++;
+               }
+       }
+
+       return phy_count != 1;
+}
+
+/**
+ * sci_port_link_detected() - may this phy continue on to link up?
+ * @iport: The port associated with the phy object.
+ * @iphy: The phy object that is trying to go link up.
+ *
+ * This method is called by the PHY object when the link is detected.  If the
+ * port wants the PHY to continue on to the link up state then the port layer
+ * must return true; if the port object returns false the phy object must
+ * halt its attempt to go link up.  This notification is in place for wide
+ * ports and direct attached phys.  Since there are no wide ported SATA
+ * devices, a SATA phy joining a wide port would be an invalid port
+ * configuration.
+ */
+bool sci_port_link_detected(
+       struct isci_port *iport,
+       struct isci_phy *iphy)
+{
+       if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
+           (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
+           sci_port_is_wide(iport)) {
+               sci_port_invalid_link_up(iport, iphy);
+
+               return false;
+       }
+
+       return true;
+}
+
+static void port_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
+       struct isci_host *ihost = iport->owning_controller;
+       unsigned long flags;
+       u32 current_state;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       current_state = iport->sm.current_state_id;
+
+       if (current_state == SCI_PORT_RESETTING) {
+               /* if the port is still in the resetting state then the timeout
+                * fired before the reset completed.
+                */
+               port_state_machine_change(iport, SCI_PORT_FAILED);
+       } else if (current_state == SCI_PORT_STOPPED) {
+               /* if the port is stopped then the start request failed.  In
+                * this case stay in the stopped state.
+                */
+               dev_err(sciport_to_dev(iport),
+                       "%s: SCIC Port 0x%p failed to stop before timeout.\n",
+                       __func__,
+                       iport);
+       } else if (current_state == SCI_PORT_STOPPING) {
+               /* if the port is still stopping then the stop has not completed */
+               isci_port_stop_complete(iport->owning_controller,
+                                       iport,
+                                       SCI_FAILURE_TIMEOUT);
+       } else {
+               /* The port is in the ready state and we have a timer
+                * reporting a timeout; this should not happen.
+                */
+               dev_err(sciport_to_dev(iport),
+                       "%s: SCIC Port 0x%p is processing a timeout operation "
+                       "in state %d.\n", __func__, iport, current_state);
+       }
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* --------------------------------------------------------------------------- */
+
+/**
+ * sci_port_update_viit_entry() - update the hardware's VIIT entry for this port
+ * @iport: the port whose VIIT entry is to be updated
+ */
+static void sci_port_update_viit_entry(struct isci_port *iport)
+{
+       struct sci_sas_address sas_address;
+
+       sci_port_get_sas_address(iport, &sas_address);
+
+       writel(sas_address.high,
+               &iport->viit_registers->initiator_sas_address_hi);
+       writel(sas_address.low,
+               &iport->viit_registers->initiator_sas_address_lo);
+
+       /* This value gets cleared just in case it's not already cleared */
+       writel(0, &iport->viit_registers->reserved);
+
+       /* We are required to update the status register last */
+       writel(SCU_VIIT_ENTRY_ID_VIIT |
+              SCU_VIIT_IPPT_INITIATOR |
+              ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
+              SCU_VIIT_STATUS_ALL_VALID,
+              &iport->viit_registers->status);
+}
+
+enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
+{
+       u16 index;
+       struct isci_phy *iphy;
+       enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
+
+       /*
+        * Loop through all of the phys in this port and find the phy with the
+        * lowest maximum link rate. */
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               iphy = iport->phy_table[index];
+               if (iphy && sci_port_active_phy(iport, iphy) &&
+                   iphy->max_negotiated_speed < max_allowed_speed)
+                       max_allowed_speed = iphy->max_negotiated_speed;
+       }
+
+       return max_allowed_speed;
+}
+
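+/* Set the SUSPEND bit in the port task scheduler control register. */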
+static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
+{
+       u32 pts_control_value;
+
+       pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+       pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
+       writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+/**
+ * sci_port_post_dummy_request() - post dummy/workaround request
+ * @iport: port to post task
+ *
+ * Prevent the hardware scheduler from posting new requests to the front
+ * of the scheduler queue, which would cause a starvation problem for
+ * currently ongoing requests.
+ */
+static void sci_port_post_dummy_request(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       u16 tag = iport->reserved_tag;
+       struct scu_task_context *tc;
+       u32 command;
+
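+       /* Fetch the reserved dummy task context, clear its abort flag, and
+        * post it to the hardware for this port.
+        */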
+       tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+       tc->abort = 0;
+
+       command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+                 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+                 ISCI_TAG_TCI(tag);
+
+       sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_abort_dummy_request() - abort the dummy request for this port
+ * @iport: The port on which the task must be aborted.
+ *
+ * This routine will abort the dummy request.  This allows the hardware to
+ * power down parts of the silicon to save power.
+ */
+static void sci_port_abort_dummy_request(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       u16 tag = iport->reserved_tag;
+       struct scu_task_context *tc;
+       u32 command;
+
+       tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+       tc->abort = 1;
+
+       command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
+                 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+                 ISCI_TAG_TCI(tag);
+
+       sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_resume_port_task_scheduler() - resume the port task scheduler
+ * @iport: This is the struct isci_port object to resume.
+ *
+ * This method will resume the port task scheduler for this port object by
+ * clearing the SUSPEND bit in its control register.
+ */
+static void
+sci_port_resume_port_task_scheduler(struct isci_port *iport)
+{
+       u32 pts_control_value;
+
+       pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+       pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
+       writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       sci_port_suspend_port_task_scheduler(iport);
+
+       iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
+
+       if (iport->active_phy_mask != 0) {
+               /* At least one of the phys on the port is ready */
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_OPERATIONAL);
+       }
+}
+
+static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
+{
+       u32 index;
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       struct isci_host *ihost = iport->owning_controller;
+
+       isci_port_ready(ihost, iport);
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               if (iport->phy_table[index]) {
+                       writel(iport->physical_port_index,
+                               &iport->port_pe_configuration_register[
+                                       iport->phy_table[index]->phy_index]);
+               }
+       }
+
+       sci_port_update_viit_entry(iport);
+
+       sci_port_resume_port_task_scheduler(iport);
+
+       /*
+        * Post the dummy task for the port so the hardware can schedule
+        * io correctly
+        */
+       sci_port_post_dummy_request(iport);
+}
+
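+/* Mark the reserved dummy remote node context invalid and post the RNC
+ * invalidate request to the hardware for this port.
+ */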
+static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       u8 phys_index = iport->physical_port_index;
+       union scu_remote_node_context *rnc;
+       u16 rni = iport->reserved_rni;
+       u32 command;
+
+       rnc = &ihost->remote_node_context_table[rni];
+
+       rnc->ssp.is_valid = false;
+
+       /* ensure the preceding tc abort request has reached the
+        * controller and give it ample time to act before posting the rnc
+        * invalidate
+        */
+       readl(&ihost->smu_registers->interrupt_status); /* flush */
+       udelay(10);
+
+       command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
+                 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+       sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_ready_substate_operational_exit() - exit the OPERATIONAL substate
+ * @sm: This is the state machine which is cast to a struct isci_port object.
+ *
+ * This method will perform the actions required by the struct isci_port on
+ * exiting SCI_PORT_SUB_OPERATIONAL: abort the dummy request, report the port
+ * not ready, and, if the port is leaving the ready states, invalidate the
+ * dummy remote node context.
+ */
+static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       struct isci_host *ihost = iport->owning_controller;
+
+       /*
+        * Kill the dummy task for this port; if it has not yet posted,
+        * the hardware will treat this as a NOP and just return abort
+        * complete.
+        */
+       sci_port_abort_dummy_request(iport);
+
+       isci_port_not_ready(ihost, iport);
+
+       if (iport->ready_exit)
+               sci_port_invalidate_dummy_remote_node(iport);
+}
+
+static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       struct isci_host *ihost = iport->owning_controller;
+
+       if (iport->active_phy_mask == 0) {
+               isci_port_not_ready(ihost, iport);
+
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_WAITING);
+       } else if (iport->started_request_count == 0)
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_OPERATIONAL);
+}
+
+static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       sci_port_suspend_port_task_scheduler(iport);
+       if (iport->ready_exit)
+               sci_port_invalidate_dummy_remote_node(iport);
+}
+
+enum sci_status sci_port_start(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       enum sci_status status = SCI_SUCCESS;
+       enum sci_port_states state;
+       u32 phy_mask;
+
+       state = iport->sm.current_state_id;
+       if (state != SCI_PORT_STOPPED) {
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       if (iport->assigned_device_count > 0) {
+               /* TODO This is a start failure operation because
+                * there are still devices assigned to this port.
+                * There must be no devices assigned to a port on a
+                * start operation.
+                */
+               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+       }
+
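+       /* Reserve a remote node index and an IO tag for the dummy
+        * request/remote node scheduler workaround.
+        */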
+       if (iport->reserved_rni == SCU_DUMMY_INDEX) {
+               u16 rni = sci_remote_node_table_allocate_remote_node(
+                               &ihost->available_remote_nodes, 1);
+
+               if (rni != SCU_DUMMY_INDEX)
+                       sci_port_construct_dummy_rnc(iport, rni);
+               else
+                       status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+               iport->reserved_rni = rni;
+       }
+
+       if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+               u16 tag;
+
+               tag = isci_alloc_tag(ihost);
+               if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+                       status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+               else
+                       sci_port_construct_dummy_task(iport, tag);
+               iport->reserved_tag = tag;
+       }
+
+       if (status == SCI_SUCCESS) {
+               phy_mask = sci_port_get_phys(iport);
+
+               /*
+                * There are one or more phys assigned to this port.  Make sure
+                * the port's phy mask is in fact legal and supported by the
+                * silicon.
+                */
+               if (sci_port_is_phy_mask_valid(iport, phy_mask)) {
+                       port_state_machine_change(iport,
+                                                 SCI_PORT_READY);
+
+                       return SCI_SUCCESS;
+               }
+               status = SCI_FAILURE;
+       }
+
+       if (status != SCI_SUCCESS)
+               sci_port_destroy_dummy_resources(iport);
+
+       return status;
+}
+
+enum sci_status sci_port_stop(struct isci_port *iport)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_STOPPED:
+               return SCI_SUCCESS;
+       case SCI_PORT_SUB_WAITING:
+       case SCI_PORT_SUB_OPERATIONAL:
+       case SCI_PORT_SUB_CONFIGURING:
+       case SCI_PORT_RESETTING:
+               port_state_machine_change(iport,
+                                         SCI_PORT_STOPPING);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
+{
+       enum sci_status status = SCI_FAILURE_INVALID_PHY;
+       struct isci_phy *iphy = NULL;
+       enum sci_port_states state;
+       u32 phy_index;
+
+       state = iport->sm.current_state_id;
+       if (state != SCI_PORT_SUB_OPERATIONAL) {
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       /* Select a phy on which we can send the hard reset request. */
+       for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
+               iphy = iport->phy_table[phy_index];
+               if (iphy && !sci_port_active_phy(iport, iphy)) {
+                       /*
+                        * We found a phy but it is not ready; select a
+                        * different phy
+                        */
+                       iphy = NULL;
+               }
+       }
+
+       /* If we have a phy then go ahead and start the reset procedure */
+       if (!iphy)
+               return status;
+       status = sci_phy_reset(iphy);
+
+       if (status != SCI_SUCCESS)
+               return status;
+
+       sci_mod_timer(&iport->timer, timeout);
+       iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
+
+       port_state_machine_change(iport, SCI_PORT_RESETTING);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_port_add_phy() - add a phy to a port
+ * @iport: This parameter specifies the port to which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
+ *
+ * This method will add a PHY to the selected port.  This method returns an
+ * enum sci_status.  SCI_SUCCESS indicates the phy has been added to the
+ * port; any other status is a failure to add the phy to the port.
+ */
+enum sci_status sci_port_add_phy(struct isci_port *iport,
+                                     struct isci_phy *iphy)
+{
+       enum sci_status status;
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_STOPPED: {
+               struct sci_sas_address port_sas_address;
+
+               /* Read the port assigned SAS Address if there is one */
+               sci_port_get_sas_address(iport, &port_sas_address);
+
+               if (port_sas_address.high != 0 && port_sas_address.low != 0) {
+                       struct sci_sas_address phy_sas_address;
+
+                       /* Make sure that the PHY SAS Address matches the SAS Address
+                        * for this port
+                        */
+                       sci_phy_get_sas_address(iphy, &phy_sas_address);
+
+                       if (port_sas_address.high != phy_sas_address.high ||
+                           port_sas_address.low  != phy_sas_address.low)
+                               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+               }
+               return sci_port_set_phy(iport, iphy);
+       }
+       case SCI_PORT_SUB_WAITING:
+       case SCI_PORT_SUB_OPERATIONAL:
+               status = sci_port_set_phy(iport, iphy);
+
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               sci_port_general_link_up_handler(iport, iphy, true);
+               iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+               port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
+
+               return status;
+       case SCI_PORT_SUB_CONFIGURING:
+               status = sci_port_set_phy(iport, iphy);
+
+               if (status != SCI_SUCCESS)
+                       return status;
+               sci_port_general_link_up_handler(iport, iphy, true);
+
+               /* Re-enter the configuring state since this may be the last phy in
+                * the port.
+                */
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_CONFIGURING);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+/**
+ * sci_port_remove_phy() - remove a phy from a port
+ * @iport: This parameter specifies the port from which the phy will be removed.
+ * @iphy: This parameter is the phy which is to be removed from the port.
+ *
+ * This method will remove the PHY from the selected PORT.  This method
+ * returns an enum sci_status.  SCI_SUCCESS indicates the phy has been removed
+ * from the port; any other status is a failure to remove the phy from the
+ * port.
+ */
+enum sci_status sci_port_remove_phy(struct isci_port *iport,
+                                        struct isci_phy *iphy)
+{
+       enum sci_status status;
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PORT_STOPPED:
+               return sci_port_clear_phy(iport, iphy);
+       case SCI_PORT_SUB_OPERATIONAL:
+               status = sci_port_clear_phy(iport, iphy);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               sci_port_deactivate_phy(iport, iphy, true);
+               iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_CONFIGURING);
+               return SCI_SUCCESS;
+       case SCI_PORT_SUB_CONFIGURING:
+               status = sci_port_clear_phy(iport, iphy);
+
+               if (status != SCI_SUCCESS)
+                       return status;
+               sci_port_deactivate_phy(iport, iphy, true);
+
+               /* Re-enter the configuring state since this may be the last phy in
+                * the port
+                */
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_CONFIGURING);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+                                     struct isci_phy *iphy)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_SUB_WAITING:
+               /* Since this is the first phy going link up for the port we
+                * can just enable it and continue
+                */
+               sci_port_activate_phy(iport, iphy, true);
+
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_OPERATIONAL);
+               return SCI_SUCCESS;
+       case SCI_PORT_SUB_OPERATIONAL:
+               sci_port_general_link_up_handler(iport, iphy, true);
+               return SCI_SUCCESS;
+       case SCI_PORT_RESETTING:
+               /* TODO We should make sure that the phy that has gone link up
+                * is the same one on which we sent the reset.  It is possible
+                * that the phy on which we sent the reset is not the one that
+                * has gone link up and we want to make sure that the phy
+                * being reset comes back.  Consider the case where a reset is
+                * sent but, before the hardware processes the reset, it gets
+                * a link up on the port because of a hot plug event.  Because
+                * of the reset request this phy will go link down almost
+                * immediately.
+                */
+
+               /* In the resetting state we don't notify the user regarding
+                * link up and link down notifications.
+                */
+               sci_port_general_link_up_handler(iport, iphy, false);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_port_link_down(struct isci_port *iport,
+                                       struct isci_phy *iphy)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_SUB_OPERATIONAL:
+               sci_port_deactivate_phy(iport, iphy, true);
+
+               /* If there are no active phys left in the port, then
+                * transition the port to the WAITING state until such time
+                * as a phy goes link up
+                */
+               if (iport->active_phy_mask == 0)
+                       port_state_machine_change(iport,
+                                                 SCI_PORT_SUB_WAITING);
+               return SCI_SUCCESS;
+       case SCI_PORT_RESETTING:
+               /* In the resetting state we don't notify the user regarding
+                * link up and link down notifications. */
+               sci_port_deactivate_phy(iport, iphy, false);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_port_start_io(struct isci_port *iport,
+                                 struct isci_remote_device *idev,
+                                 struct isci_request *ireq)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_SUB_WAITING:
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_PORT_SUB_OPERATIONAL:
+               iport->started_request_count++;
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_port_complete_io(struct isci_port *iport,
+                                    struct isci_remote_device *idev,
+                                    struct isci_request *ireq)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_STOPPED:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_PORT_STOPPING:
+               sci_port_decrement_request_count(iport);
+
+               if (iport->started_request_count == 0)
+                       port_state_machine_change(iport,
+                                                 SCI_PORT_STOPPED);
+               break;
+       case SCI_PORT_READY:
+       case SCI_PORT_RESETTING:
+       case SCI_PORT_FAILED:
+       case SCI_PORT_SUB_WAITING:
+       case SCI_PORT_SUB_OPERATIONAL:
+               sci_port_decrement_request_count(iport);
+               break;
+       case SCI_PORT_SUB_CONFIGURING:
+               sci_port_decrement_request_count(iport);
+               if (iport->started_request_count == 0) {
+                       port_state_machine_change(iport,
+                                                 SCI_PORT_SUB_OPERATIONAL);
+               }
+               break;
+       }
+       return SCI_SUCCESS;
+}
+
+static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
+{
+       u32 pts_control_value;
+
+        /* enable the port task scheduler in a suspended state */
+       pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+       pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
+       writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
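+/* Clear both the ENABLE and SUSPEND bits of the port task scheduler. */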
+static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
+{
+       u32 pts_control_value;
+
+       pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+       pts_control_value &=
+               ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
+       writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
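+/* Post the reserved dummy remote node context for this port and then
+ * immediately suspend it.
+ */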
+static void sci_port_post_dummy_remote_node(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       u8 phys_index = iport->physical_port_index;
+       union scu_remote_node_context *rnc;
+       u16 rni = iport->reserved_rni;
+       u32 command;
+
+       rnc = &ihost->remote_node_context_table[rni];
+       rnc->ssp.is_valid = true;
+
+       command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
+                 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+       sci_controller_post_request(ihost, command);
+
+       /* ensure hardware has seen the post rnc command and give it
+        * ample time to act before sending the suspend
+        */
+       readl(&ihost->smu_registers->interrupt_status); /* flush */
+       udelay(10);
+
+       command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
+                 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+       sci_controller_post_request(ihost, command);
+}
+
+static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
+               /*
+                * If we enter this state because of a request to stop
+                * the port then we want to disable the hardware's port
+                * task scheduler. */
+               sci_port_disable_port_task_scheduler(iport);
+       }
+}
+
+static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       /* Enable and suspend the port task scheduler */
+       sci_port_enable_port_task_scheduler(iport);
+}
+
+static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       struct isci_host *ihost = iport->owning_controller;
+       u32 prev_state;
+
+       prev_state = iport->sm.previous_state_id;
+       if (prev_state  == SCI_PORT_RESETTING)
+               isci_port_hard_reset_complete(iport, SCI_SUCCESS);
+       else
+               isci_port_not_ready(ihost, iport);
+
+       /* Post and suspend the dummy remote node context for this port. */
+       sci_port_post_dummy_remote_node(iport);
+
+       /* Start the ready substate machine */
+       port_state_machine_change(iport,
+                                 SCI_PORT_SUB_WAITING);
+}
+
+static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       sci_del_timer(&iport->timer);
+}
+
+static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       sci_del_timer(&iport->timer);
+
+       sci_port_destroy_dummy_resources(iport);
+}
+
+static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
+}
+
+/* --------------------------------------------------------------------------- */
+
+static const struct sci_base_state sci_port_state_table[] = {
+       [SCI_PORT_STOPPED] = {
+               .enter_state = sci_port_stopped_state_enter,
+               .exit_state  = sci_port_stopped_state_exit
+       },
+       [SCI_PORT_STOPPING] = {
+               .exit_state  = sci_port_stopping_state_exit
+       },
+       [SCI_PORT_READY] = {
+               .enter_state = sci_port_ready_state_enter,
+       },
+       [SCI_PORT_SUB_WAITING] = {
+               .enter_state = sci_port_ready_substate_waiting_enter,
+       },
+       [SCI_PORT_SUB_OPERATIONAL] = {
+               .enter_state = sci_port_ready_substate_operational_enter,
+               .exit_state  = sci_port_ready_substate_operational_exit
+       },
+       [SCI_PORT_SUB_CONFIGURING] = {
+               .enter_state = sci_port_ready_substate_configuring_enter,
+               .exit_state  = sci_port_ready_substate_configuring_exit
+       },
+       [SCI_PORT_RESETTING] = {
+               .exit_state  = sci_port_resetting_state_exit
+       },
+       [SCI_PORT_FAILED] = {
+               .enter_state = sci_port_failed_state_enter,
+       }
+};
+
+void sci_port_construct(struct isci_port *iport, u8 index,
+                            struct isci_host *ihost)
+{
+       sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
+
+       iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
+       iport->physical_port_index = index;
+       iport->active_phy_mask     = 0;
+       iport->ready_exit             = false;
+
+       iport->owning_controller = ihost;
+
+       iport->started_request_count = 0;
+       iport->assigned_device_count = 0;
+
+       iport->reserved_rni = SCU_DUMMY_INDEX;
+       iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+
+       sci_init_timer(&iport->timer, port_timeout);
+
+       iport->port_task_scheduler_registers = NULL;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++)
+               iport->phy_table[index] = NULL;
+}
+
+void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
+{
+       INIT_LIST_HEAD(&iport->remote_dev_list);
+       INIT_LIST_HEAD(&iport->domain_dev_list);
+       spin_lock_init(&iport->state_lock);
+       init_completion(&iport->start_complete);
+       iport->isci_host = ihost;
+       isci_port_change_state(iport, isci_freed);
+       atomic_set(&iport->event, 0);
+}
+
+/**
+ * isci_port_get_state() - This function gets the status of the port object.
+ * @isci_port: This parameter points to the isci_port object
+ *
+ * Return: status of the object as an isci_status enum.
+ */
+enum isci_status isci_port_get_state(
+       struct isci_port *isci_port)
+{
+       return isci_port->status;
+}
+
+void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       /* notify the user. */
+       isci_port_bc_change_received(ihost, iport, iphy);
+}
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+                                struct isci_phy *iphy)
+{
+       unsigned long flags;
+       enum sci_status status;
+       int idx, ret = TMF_RESP_FUNC_COMPLETE;
+
+       dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
+               __func__, iport);
+
+       init_completion(&iport->hard_reset_complete);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+       status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
+
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (status == SCI_SUCCESS) {
+               wait_for_completion(&iport->hard_reset_complete);
+
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: iport = %p; hard reset completion\n",
+                       __func__, iport);
+
+               if (iport->hard_reset_status != SCI_SUCCESS)
+                       ret = TMF_RESP_FUNC_FAILED;
+       } else {
+               ret = TMF_RESP_FUNC_FAILED;
+
+               dev_err(&ihost->pdev->dev,
+                       "%s: iport = %p; sci_port_hard_reset call"
+                       " failed 0x%x\n",
+                       __func__, iport, status);
+
+       }
+
+       /* If the hard reset for the port has failed, consider this
+        * the same as link failures on all phys in the port.
+        */
+       if (ret != TMF_RESP_FUNC_COMPLETE) {
+
+               dev_err(&ihost->pdev->dev,
+                       "%s: iport = %p; hard reset failed "
+                       "(0x%x) - driving explicit link fail for all phys\n",
+                       __func__, iport, iport->hard_reset_status);
+
+               /* Down all phys in the port. */
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
+                       struct isci_phy *iphy = iport->phy_table[idx];
+
+                       if (!iphy)
+                               continue;
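+                       /* Stop and restart the phy to drive an explicit
+                        * link failure.
+                        */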
+                       sci_phy_stop(iphy);
+                       sci_phy_start(iphy);
+               }
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+       }
+       return ret;
+}
+
+/**
+ * isci_port_deformed() - This function is called by libsas when a port becomes
+ *    inactive.
+ * @phy: This parameter specifies the libsas phy with the inactive port.
+ *
+ */
+void isci_port_deformed(struct asd_sas_phy *phy)
+{
+       pr_debug("%s: sas_phy = %p\n", __func__, phy);
+}
+
+/**
+ * isci_port_formed() - This function is called by libsas when a port becomes
+ *    active.
+ * @phy: This parameter specifies the libsas phy with the active port.
+ *
+ */
+void isci_port_formed(struct asd_sas_phy *phy)
+{
+       pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
+}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
new file mode 100644 (file)
index 0000000..b50ecd4
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_PORT_H_
+#define _ISCI_PORT_H_
+
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+#include "phy.h"
+
+#define SCIC_SDS_DUMMY_PORT   0xFF
+
+struct isci_phy;
+struct isci_host;
+
+enum isci_status {
+       isci_freed        = 0x00,
+       isci_starting     = 0x01,
+       isci_ready        = 0x02,
+       isci_ready_for_io = 0x03,
+       isci_stopping     = 0x04,
+       isci_stopped      = 0x05,
+};
+
+/**
+ * struct isci_port - isci direct attached sas port object
+ * @event: counts bcns and port stop events (for bcn filtering)
+ * @ready_exit: several states constitute 'ready'. When exiting ready we
+ *              need to take extra port-teardown actions that are
+ *              skipped when exiting to another 'ready' state.
+ * @logical_port_index: software port index
+ * @physical_port_index: hardware port index
+ * @active_phy_mask: identifies phy members
+ * @reserved_tag: io tag reserved for the port task scheduler workaround
+ * @reserved_rni: remote node index reserved for the port task scheduler workaround
+ * @started_request_count: reference count for outstanding commands
+ * @not_ready_reason: set during state transitions and notified
+ * @timer: timeout start/stop operations
+ */
+struct isci_port {
+       enum isci_status status;
+       #define IPORT_BCN_BLOCKED 0
+       #define IPORT_BCN_PENDING 1
+       unsigned long flags;
+       atomic_t event;
+       struct isci_host *isci_host;
+       struct asd_sas_port sas_port;
+       struct list_head remote_dev_list;
+       spinlock_t state_lock;
+       struct list_head domain_dev_list;
+       struct completion start_complete;
+       struct completion hard_reset_complete;
+       enum sci_status hard_reset_status;
+       struct sci_base_state_machine sm;
+       bool ready_exit;
+       u8 logical_port_index;
+       u8 physical_port_index;
+       u8 active_phy_mask;
+       u16 reserved_rni;
+       u16 reserved_tag;
+       u32 started_request_count;
+       u32 assigned_device_count;
+       u32 not_ready_reason;
+       struct isci_phy *phy_table[SCI_MAX_PHYS];
+       struct isci_host *owning_controller;
+       struct sci_timer timer;
+       struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
+       /* XXX rework: only one register, no need to replicate per-port */
+       u32 __iomem *port_pe_configuration_register;
+       struct scu_viit_entry __iomem *viit_registers;
+};
+
+enum sci_port_not_ready_reason_code {
+       SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
+       SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
+       SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
+       SCIC_PORT_NOT_READY_RECONFIGURING,
+
+       SCIC_PORT_NOT_READY_REASON_CODE_MAX
+};
+
+struct sci_port_end_point_properties {
+       struct sci_sas_address sas_address;
+       struct sci_phy_proto protocols;
+};
+
+struct sci_port_properties {
+       u32 index;
+       struct sci_port_end_point_properties local;
+       struct sci_port_end_point_properties remote;
+       u32 phy_mask;
+};
+
+/**
+ * enum sci_port_states - This enumeration depicts all the states for the
+ *    common port state machine.
+ *
+ *
+ */
+enum sci_port_states {
+       /**
+        * This state indicates that the port has successfully been stopped.
+        * In this state no new IO operations are permitted.
+        * This state is entered from the STOPPING state.
+        */
+       SCI_PORT_STOPPED,
+
+       /**
+        * This state indicates that the port is in the process of stopping.
+        * In this state no new IO operations are permitted, but existing IO
+        * operations are allowed to complete.
+        * This state is entered from the READY state.
+        */
+       SCI_PORT_STOPPING,
+
+       /**
+        * This state indicates the port is now ready.  Thus, the user is
+        * able to perform IO operations on this port.
+        * This state is entered from the STARTING state.
+        */
+       SCI_PORT_READY,
+
+       /**
+        * The substate where the port is started and ready but has no
+        * active phys.
+        */
+       SCI_PORT_SUB_WAITING,
+
+       /**
+        * The substate where the port is started and ready and there is
+        * at least one phy operational.
+        */
+       SCI_PORT_SUB_OPERATIONAL,
+
+       /**
+        * The substate where the port is started and there was an
+        * add/remove phy event.  This state is only used in Automatic
+        * Port Configuration Mode (APC)
+        */
+       SCI_PORT_SUB_CONFIGURING,
+
+       /**
+        * This state indicates the port is in the process of performing a hard
+        * reset.  Thus, the user is unable to perform IO operations on this
+        * port.
+        * This state is entered from the READY state.
+        */
+       SCI_PORT_RESETTING,
+
+       /**
+        * This state indicates the port has failed a reset request.  This state
+        * is entered when a port reset request times out.
+        * This state is entered from the RESETTING state.
+        */
+       SCI_PORT_FAILED,
+
+
+};
+
+static inline void sci_port_decrement_request_count(struct isci_port *iport)
+{
+       if (WARN_ONCE(iport->started_request_count == 0,
+                      "%s: tried to decrement started_request_count past 0!?",
+                       __func__))
+               /* pass */;
+       else
+               iport->started_request_count--;
+}
+
+#define sci_port_active_phy(port, phy) \
+       (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
+
+void sci_port_construct(
+       struct isci_port *iport,
+       u8 port_index,
+       struct isci_host *ihost);
+
+enum sci_status sci_port_start(struct isci_port *iport);
+enum sci_status sci_port_stop(struct isci_port *iport);
+
+enum sci_status sci_port_add_phy(
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+enum sci_status sci_port_remove_phy(
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+void sci_port_setup_transports(
+       struct isci_port *iport,
+       u32 device_id);
+
+void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
+
+void sci_port_deactivate_phy(
+       struct isci_port *iport,
+       struct isci_phy *iphy,
+       bool do_notify_user);
+
+bool sci_port_link_detected(
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+                                     struct isci_phy *iphy);
+enum sci_status sci_port_link_down(struct isci_port *iport,
+                                       struct isci_phy *iphy);
+
+struct isci_request;
+struct isci_remote_device;
+enum sci_status sci_port_start_io(
+       struct isci_port *iport,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_port_complete_io(
+       struct isci_port *iport,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sas_linkrate sci_port_get_max_allowed_speed(
+       struct isci_port *iport);
+
+void sci_port_broadcast_change_received(
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+bool sci_port_is_valid_phy_assignment(
+       struct isci_port *iport,
+       u32 phy_index);
+
+void sci_port_get_sas_address(
+       struct isci_port *iport,
+       struct sci_sas_address *sas_address);
+
+void sci_port_get_attached_sas_address(
+       struct isci_port *iport,
+       struct sci_sas_address *sas_address);
+
+enum isci_status isci_port_get_state(
+       struct isci_port *isci_port);
+
+void isci_port_formed(struct asd_sas_phy *);
+void isci_port_deformed(struct asd_sas_phy *);
+
+void isci_port_init(
+       struct isci_port *port,
+       struct isci_host *host,
+       int index);
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+                                struct isci_phy *iphy);
+#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
new file mode 100644 (file)
index 0000000..486b113
--- /dev/null
@@ -0,0 +1,754 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+
+#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT    (10)
+#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT    (10)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (100)
+
+enum SCIC_SDS_APC_ACTIVITY {
+       SCIC_SDS_APC_SKIP_PHY,
+       SCIC_SDS_APC_ADD_PHY,
+       SCIC_SDS_APC_START_TIMER,
+
+       SCIC_SDS_APC_ACTIVITY_MAX
+};
+
+/*
+ * ******************************************************************************
+ * General port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * sci_sas_address_compare() - compare two SAS addresses
+ * @address_one: A SAS Address to be compared.
+ * @address_two: A SAS Address to be compared.
+ *
+ * Compare the two SAS Addresses as unsigned values, high part first.
+ *
+ * Return: a signed value; > 0 if Address One is greater than Address Two,
+ * < 0 if Address One is less than Address Two, and 0 if the two addresses
+ * are identical.
+ */
+static s32 sci_sas_address_compare(
+       struct sci_sas_address address_one,
+       struct sci_sas_address address_two)
+{
+       if (address_one.high > address_two.high) {
+               return 1;
+       } else if (address_one.high < address_two.high) {
+               return -1;
+       } else if (address_one.low > address_two.low) {
+               return 1;
+       } else if (address_one.low < address_two.low) {
+               return -1;
+       }
+
+       /* The two SAS Address must be identical */
+       return 0;
+}
+
+/**
+ * sci_port_configuration_agent_find_port() - find a matching port for a phy
+ * @ihost: The controller object used for the port search.
+ * @iphy: The phy object to match.
+ *
+ * This routine will find a matching port for the phy.  This means that the
+ * port and phy both have the same broadcast sas address and the same
+ * received sas address.
+ *
+ * Return: the matching port, or NULL if there is no matching port for the phy.
+ */
+static struct isci_port *sci_port_configuration_agent_find_port(
+       struct isci_host *ihost,
+       struct isci_phy *iphy)
+{
+       u8 i;
+       struct sci_sas_address port_sas_address;
+       struct sci_sas_address port_attached_device_address;
+       struct sci_sas_address phy_sas_address;
+       struct sci_sas_address phy_attached_device_address;
+
+       /*
+        * Since this phy can be a member of a wide port, check to see if one
+        * or more phys match the sent and received SAS address of this phy,
+        * in which case it should participate in the same port.
+        */
+       sci_phy_get_sas_address(iphy, &phy_sas_address);
+       sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
+
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+
+               sci_port_get_sas_address(iport, &port_sas_address);
+               sci_port_get_attached_sas_address(iport, &port_attached_device_address);
+
+               if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
+                   sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
+                       return iport;
+       }
+
+       return NULL;
+}
+
+/**
+ *
+ * @controller: This is the controller object that contains the port agent
+ * @port_agent: This is the port configuration agent for the controller.
+ *
+ * This routine validates that the port configuration is correct for the SCU
+ * hardware.  The SCU hardware allows the following port configurations:
+ *   LP0 -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3)
+ *   LP1 -> (PE1)
+ *   LP2 -> (PE2), (PE2, PE3)
+ *   LP3 -> (PE3)
+ * Return SCI_SUCCESS if the port configuration is valid for this port
+ * configuration agent, or SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if
+ * it is not.
+ */
+static enum sci_status sci_port_configuration_agent_validate_ports(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent)
+{
+       struct sci_sas_address first_address;
+       struct sci_sas_address second_address;
+
+       /*
+        * Sanity check the max ranges for all the phys: the max index
+        * is always equal to the phy index */
+       if (port_agent->phy_valid_port_range[0].max_index != 0 ||
+           port_agent->phy_valid_port_range[1].max_index != 1 ||
+           port_agent->phy_valid_port_range[2].max_index != 2 ||
+           port_agent->phy_valid_port_range[3].max_index != 3)
+               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+
+       /*
+        * This is a request to configure a single x4 port or at least attempt
+        * to make all the phys into a single port */
+       if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+           port_agent->phy_valid_port_range[1].min_index == 0 &&
+           port_agent->phy_valid_port_range[2].min_index == 0 &&
+           port_agent->phy_valid_port_range[3].min_index == 0)
+               return SCI_SUCCESS;
+
+       /*
+        * This is a degenerate case where phy 1 and phy 2 are assigned to the
+        * same port; this is explicitly disallowed by the hardware unless they
+        * are part of the same x4 port, and that condition was already checked
+        * above. */
+       if (port_agent->phy_valid_port_range[2].min_index == 1) {
+               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+       }
+
+       /*
+        * PE0 and PE3 can never have the same SAS Address unless they
+        * are part of the same x4 wide port and we have already checked
+        * for this condition. */
+       sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+       sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+       if (sci_sas_address_compare(first_address, second_address) == 0) {
+               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+       }
+
+       /*
+        * PE0 and PE1 are configured as two x1 ports; make sure that the
+        * SAS Addresses for PE0 and PE2 are different since they can not
+        * be part of the same port. */
+       if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+           port_agent->phy_valid_port_range[1].min_index == 1) {
+               sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+               sci_phy_get_sas_address(&ihost->phys[2], &second_address);
+
+               if (sci_sas_address_compare(first_address, second_address) == 0) {
+                       return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+               }
+       }
+
+       /*
+        * PE2 and PE3 are configured as two x1 ports; make sure that the
+        * SAS Addresses for PE1 and PE3 are different since they can not
+        * be part of the same port. */
+       if (port_agent->phy_valid_port_range[2].min_index == 2 &&
+           port_agent->phy_valid_port_range[3].min_index == 3) {
+               sci_phy_get_sas_address(&ihost->phys[1], &first_address);
+               sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+               if (sci_sas_address_compare(first_address, second_address) == 0) {
+                       return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+               }
+       }
+
+       return SCI_SUCCESS;
+}
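+
+/*
+ * Illustrative examples of the rules enforced above (not additional
+ * checks): a configuration of PE0+PE1 as one port and PE2+PE3 as another
+ * is valid, whereas a port built from PE1+PE2 is rejected with
+ * SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION by the degenerate-case
+ * check on phy 1 and phy 2.
+ */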
+
+/*
+ * ******************************************************************************
+ * Manual port configuration agent routines
+ * ****************************************************************************** */
+
+/* verify all of the phys in the same port are using the same SAS address */
+static enum sci_status
+sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+                                             struct sci_port_configuration_agent *port_agent)
+{
+       u32 phy_mask;
+       u32 assigned_phy_mask;
+       struct sci_sas_address sas_address;
+       struct sci_sas_address phy_assigned_address;
+       u8 port_index;
+       u8 phy_index;
+
+       assigned_phy_mask = 0;
+       sas_address.high = 0;
+       sas_address.low = 0;
+
+       for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
+               phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
+
+               if (!phy_mask)
+                       continue;
+               /*
+                * Make sure that one or more of the phys were not already assigned to
+                * a different port. */
+               if ((phy_mask & ~assigned_phy_mask) == 0) {
+                       return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+               }
+
+               /* Find the starting phy index for this round through the loop */
+               for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+                       if ((phy_mask & (1 << phy_index)) == 0)
+                               continue;
+                       sci_phy_get_sas_address(&ihost->phys[phy_index],
+                                                    &sas_address);
+
+                       /*
+                        * The phy_index can be used as the starting point for the
+                        * port range since the hardware starts all logical ports
+                        * the same as the PE index. */
+                       port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+                       port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+                       if (phy_index != port_index) {
+                               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+                       }
+
+                       break;
+               }
+
+               /*
+                * See how many additional phys are being added to this logical port.
+                * Note: We have not moved the current phy_index so we will actually
+                *       compare the starting phy with itself.
+                *       This is expected and required to add the phy to the port. */
+               for (; phy_index < SCI_MAX_PHYS; phy_index++) {
+                       if ((phy_mask & (1 << phy_index)) == 0)
+                               continue;
+                       sci_phy_get_sas_address(&ihost->phys[phy_index],
+                                                    &phy_assigned_address);
+
+                       if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
+                               /*
+                                * The phy mask specified that this phy is part of the same port
+                                * as the starting phy and it is not so fail this configuration */
+                               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+                       }
+
+                       port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+                       port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+                       sci_port_add_phy(&ihost->ports[port_index],
+                                             &ihost->phys[phy_index]);
+
+                       assigned_phy_mask |= (1 << phy_index);
+               }
+       }
+
+       return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+static void mpc_agent_timeout(unsigned long data)
+{
+       u8 index;
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct sci_port_configuration_agent *port_agent;
+       struct isci_host *ihost;
+       unsigned long flags;
+       u16 configure_phy_mask;
+
+       port_agent = container_of(tmr, typeof(*port_agent), timer);
+       ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       port_agent->timer_pending = false;
+
+       /* Find the mask of phys that are reported ready but not yet configured into a port */
+       configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               struct isci_phy *iphy = &ihost->phys[index];
+
+               if (configure_phy_mask & (1 << index)) {
+                       port_agent->link_up_handler(ihost, port_agent,
+                                                   phy_get_non_dummy_port(iphy),
+                                                   iphy);
+               }
+       }
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static void sci_mpc_agent_link_up(struct isci_host *ihost,
+                                      struct sci_port_configuration_agent *port_agent,
+                                      struct isci_port *iport,
+                                      struct isci_phy *iphy)
+{
+       /* If the port is NULL then the phy was not assigned to a port.
+        * This is because the phy was not given the same SAS Address as
+        * the other PHYs in the port.
+        */
+       if (!iport)
+               return;
+
+       port_agent->phy_ready_mask |= (1 << iphy->phy_index);
+       sci_port_link_up(iport, iphy);
+       if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+               port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+}
+
+/**
+ *
+ * @controller: This is the controller object that receives the link down
+ *    notification.
+ * @port: This is the port object associated with the phy.  If there is no
+ *    associated port this is NULL.  The port is an invalid handle only if
+ *    the phy was never part of this port.  This happens when the phy is not
+ *    broadcasting the same SAS address as the other phys in the assigned
+ *    port.
+ * @phy: This is the phy object which has gone link down.
+ *
+ * This function handles the manual port configuration link down notifications.
+ * Since all ports and phys are associated at initialization time we just turn
+ * around and notify the port object of the link down event.  If this PHY is
+ * not associated with a port no action is taken.  Is it possible to get a
+ * link down notification from a phy that has no associated port?
+ */
+static void sci_mpc_agent_link_down(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent,
+       struct isci_port *iport,
+       struct isci_phy *iphy)
+{
+       if (iport != NULL) {
+               /*
+                * If we can form a new port from the remainder of the phys
+                * then we want to start the timer to allow the SCI User to
+                * cleanup old devices and rediscover the port before
+                * rebuilding the port with the phys that remain in the ready
+                * state.
+                */
+               port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+               port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+
+               /*
+                * Check to see if there are more phys waiting to be
+                * configured into a port. If there are allow the SCI User
+                * to tear down this port, if necessary, and then reconstruct
+                * the port after the timeout.
+                */
+               if ((port_agent->phy_configured_mask == 0x0000) &&
+                   (port_agent->phy_ready_mask != 0x0000) &&
+                   !port_agent->timer_pending) {
+                       port_agent->timer_pending = true;
+
+                       sci_mod_timer(&port_agent->timer,
+                                     SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
+               }
+
+               sci_port_link_down(iport, iphy);
+       }
+}
+
+/* verify phys are assigned a valid SAS address for automatic port
+ * configuration mode.
+ */
+static enum sci_status
+sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
+                                             struct sci_port_configuration_agent *port_agent)
+{
+       u8 phy_index;
+       u8 port_index;
+       struct sci_sas_address sas_address;
+       struct sci_sas_address phy_assigned_address;
+
+       phy_index = 0;
+
+       while (phy_index < SCI_MAX_PHYS) {
+               port_index = phy_index;
+
+               /* Get the assigned SAS Address for the starting PHY of this potential port. */
+               sci_phy_get_sas_address(&ihost->phys[phy_index],
+                                           &sas_address);
+
+               while (++phy_index < SCI_MAX_PHYS) {
+                       sci_phy_get_sas_address(&ihost->phys[phy_index],
+                                                    &phy_assigned_address);
+
+                       /* Verify that the SAS addresses are the same for every PHY */
+                       if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
+                               port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+                               port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+                       } else {
+                               port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
+                               port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+                               break;
+                       }
+               }
+       }
+
+       return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+static void sci_apc_agent_configure_ports(struct isci_host *ihost,
+                                              struct sci_port_configuration_agent *port_agent,
+                                              struct isci_phy *iphy,
+                                              bool start_timer)
+{
+       u8 port_index;
+       enum sci_status status;
+       struct isci_port *iport;
+       enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
+
+       iport = sci_port_configuration_agent_find_port(ihost, iphy);
+
+       if (iport) {
+               if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
+                       apc_activity = SCIC_SDS_APC_ADD_PHY;
+               else
+                       apc_activity = SCIC_SDS_APC_SKIP_PHY;
+       } else {
+               /*
+                * There is no matching Port for this PHY so let's search through the
+                * Ports and see if we can add the PHY to its own port or maybe start
+                * the timer and wait to see if a wider port can be made.
+                *
+                * Note the break when we reach the condition of the port id == phy id */
+               for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
+                    port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
+                    port_index++) {
+
+                       iport = &ihost->ports[port_index];
+
+                       /* First we must make sure that this PHY can be added to this Port. */
+                       if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+                               /*
+                                * Port contains a PHY with a greater PHY ID than the current
+                                * PHY that has gone link up.  This phy can not be part of any
+                                * port so skip it and move on. */
+                               if (iport->active_phy_mask > (1 << iphy->phy_index)) {
+                                       apc_activity = SCIC_SDS_APC_SKIP_PHY;
+                                       break;
+                               }
+
+                               /*
+                                * We have reached the end of our Port list and have not found
+                                * any reason why we should not either add the PHY to the port
+                                * or wait for more phys to become active. */
+                               if (iport->physical_port_index == iphy->phy_index) {
+                                       /*
+                                        * The Port either has no active PHYs or only
+                                        * active PHYs with a lower PHY Id than this
+                                        * PHY. */
+                                       if (apc_activity != SCIC_SDS_APC_START_TIMER) {
+                                               apc_activity = SCIC_SDS_APC_ADD_PHY;
+                                       }
+
+                                       break;
+                               }
+
+                               /*
+                                * The current Port has no active PHYs and this PHY could be part
+                                * of this Port.  Since we don't know yet, set up to start the
+                                * timer and see if there is a better configuration. */
+                               if (iport->active_phy_mask == 0) {
+                                       apc_activity = SCIC_SDS_APC_START_TIMER;
+                               }
+                       } else if (iport->active_phy_mask != 0) {
+                               /*
+                                * The Port has an active phy and the current Phy can not
+                                * participate in this port so skip the PHY and see if
+                                * there is a better configuration. */
+                               apc_activity = SCIC_SDS_APC_SKIP_PHY;
+                       }
+               }
+       }
+
+       /*
+        * Check to see if the start timer operation should instead map to an
+        * add phy operation.  This happens when we have been waiting to add a
+        * phy to a port but could not because the automatic port configuration
+        * engine had a choice of possible ports for the phy.  Since we have
+        * gone through a timeout we are going to restrict the choice to the
+        * smallest possible port. */
+       if (!start_timer && apc_activity == SCIC_SDS_APC_START_TIMER)
+               apc_activity = SCIC_SDS_APC_ADD_PHY;
+
+       switch (apc_activity) {
+       case SCIC_SDS_APC_ADD_PHY:
+               status = sci_port_add_phy(iport, iphy);
+
+               if (status == SCI_SUCCESS) {
+                       port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+               }
+               break;
+
+       case SCIC_SDS_APC_START_TIMER:
+               /*
+                * This can occur for either a link down event, or a link
+                * up event where we cannot yet tell the port to which a
+                * phy belongs.
+                */
+               if (port_agent->timer_pending)
+                       sci_del_timer(&port_agent->timer);
+
+               port_agent->timer_pending = true;
+               sci_mod_timer(&port_agent->timer,
+                             SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+               break;
+
+       case SCIC_SDS_APC_SKIP_PHY:
+       default:
+               /* do nothing; the PHY can not be made part of a port at this time. */
+               break;
+       }
+}
+
+/**
+ * sci_apc_agent_link_up - handle apc link up events
+ * @ihost: This is the controller object that receives the link up
+ *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy.  If there is no
+ *    associated port this is NULL.
+ * @iphy: This is the phy object which has gone link up.
+ *
+ * This method handles the automatic port configuration for link up
+ * notifications.  Is it possible to get a link down notification from a phy
+ * that has no associated port?
+ */
+static void sci_apc_agent_link_up(struct isci_host *ihost,
+                                      struct sci_port_configuration_agent *port_agent,
+                                      struct isci_port *iport,
+                                      struct isci_phy *iphy)
+{
+       u8 phy_index  = iphy->phy_index;
+
+       if (!iport) {
+               /* the phy is not part of a port */
+               port_agent->phy_ready_mask |= 1 << phy_index;
+               sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+       } else {
+               /* the phy is already part of the port */
+               u32 port_state = iport->sm.current_state_id;
+
+               /* If the port's state is resetting, then the link up is from
+                * a port hard reset; in this case, we need to tell the port
+                * that link up is received
+                */
+               BUG_ON(port_state != SCI_PORT_RESETTING);
+               port_agent->phy_ready_mask |= 1 << phy_index;
+               sci_port_link_up(iport, iphy);
+       }
+}
+
+/**
+ *
+ * @controller: This is the controller object that receives the link down
+ *    notification.
+ * @iport: This is the port object associated with the phy.  If there is no
+ *    associated port this is NULL.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This method handles the automatic port configuration link down
+ * notifications.  If the phy is not associated with a port no action is
+ * taken.  Is it possible to get a link down notification from a phy that
+ * has no associated port?
+ */
+static void sci_apc_agent_link_down(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent,
+       struct isci_port *iport,
+       struct isci_phy *iphy)
+{
+       port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+
+       if (!iport)
+               return;
+       if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
+               enum sci_status status;
+
+               status = sci_port_remove_phy(iport, iphy);
+
+               if (status == SCI_SUCCESS)
+                       port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+       }
+}
+
+/* configure the phys into ports when the timer fires */
+static void apc_agent_timeout(unsigned long data)
+{
+       u32 index;
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct sci_port_configuration_agent *port_agent;
+       struct isci_host *ihost;
+       unsigned long flags;
+       u16 configure_phy_mask;
+
+       port_agent = container_of(tmr, typeof(*port_agent), timer);
+       ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       port_agent->timer_pending = false;
+
+       configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+       if (!configure_phy_mask)
+               goto done;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               if ((configure_phy_mask & (1 << index)) == 0)
+                       continue;
+
+               sci_apc_agent_configure_ports(ihost, port_agent,
+                                                  &ihost->phys[index], false);
+       }
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/*
+ * ******************************************************************************
+ * Public port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * @port_agent: This is the port configuration agent to construct.
+ *
+ * This method will construct the port configuration agent for operation. This
+ * call is universal for both manual port configuration and automatic port
+ * configuration modes.
+ */
+void sci_port_configuration_agent_construct(
+       struct sci_port_configuration_agent *port_agent)
+{
+       u32 index;
+
+       port_agent->phy_configured_mask = 0x00;
+       port_agent->phy_ready_mask = 0x00;
+
+       port_agent->link_up_handler = NULL;
+       port_agent->link_down_handler = NULL;
+
+       port_agent->timer_pending = false;
+
+       for (index = 0; index < SCI_MAX_PORTS; index++) {
+               port_agent->phy_valid_port_range[index].min_index = 0;
+               port_agent->phy_valid_port_range[index].max_index = 0;
+       }
+}
+
+enum sci_status sci_port_configuration_agent_initialize(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent)
+{
+       enum sci_status status;
+       enum sci_port_configuration_mode mode;
+
+       mode = ihost->oem_parameters.controller.mode_type;
+
+       if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+               status = sci_mpc_agent_validate_phy_configuration(
+                               ihost, port_agent);
+
+               port_agent->link_up_handler = sci_mpc_agent_link_up;
+               port_agent->link_down_handler = sci_mpc_agent_link_down;
+
+               sci_init_timer(&port_agent->timer, mpc_agent_timeout);
+       } else {
+               status = sci_apc_agent_validate_phy_configuration(
+                               ihost, port_agent);
+
+               port_agent->link_up_handler = sci_apc_agent_link_up;
+               port_agent->link_down_handler = sci_apc_agent_link_down;
+
+               sci_init_timer(&port_agent->timer, apc_agent_timeout);
+       }
+
+       return status;
+}
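+
+/*
+ * Usage sketch (assumed call site, for illustration only):
+ *
+ *     sci_port_configuration_agent_construct(&ihost->port_agent);
+ *     status = sci_port_configuration_agent_initialize(ihost,
+ *                                                      &ihost->port_agent);
+ *
+ * construct() is mode independent; initialize() selects the MPC or APC
+ * link up/down handlers and timer callback based on the OEM mode_type.
+ */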
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
new file mode 100644 (file)
index 0000000..b5f4341
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ */
+
+/* probe_roms - scan for oem parameters */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
+#include <linux/efi.h>
+#include <asm/probe_roms.h>
+
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static efi_char16_t isci_efivar_name[] = {
+       'R', 's', 't', 'S', 'c', 'u', 'O'
+};
+
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+{
+       void __iomem *oprom = pci_map_biosrom(pdev);
+       struct isci_orom *rom = NULL;
+       size_t len, i;
+       int j;
+       char oem_sig[4];
+       struct isci_oem_hdr oem_hdr;
+       u8 *tmp, sum;
+
+       if (!oprom)
+               return NULL;
+
+       len = pci_biosrom_size(pdev);
+       rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
+       if (!rom) {
+               dev_warn(&pdev->dev,
+                        "Unable to allocate memory for orom\n");
+               return NULL;
+       }
+
+       for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
+               memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
+
+               /* we think we found the OEM table */
+               if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
+                       size_t copy_len;
+
+                       memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
+
+                       copy_len = min(oem_hdr.len - sizeof(oem_hdr),
+                                      sizeof(*rom));
+
+                       memcpy_fromio(rom,
+                                     oprom + i + sizeof(oem_hdr),
+                                     copy_len);
+
+                       /* calculate checksum */
+                       tmp = (u8 *)&oem_hdr;
+                       for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
+                               sum += *tmp;
+
+                       tmp = (u8 *)rom;
+                       for (j = 0; j < sizeof(*rom); j++, tmp++)
+                               sum += *tmp;
+
+                       if (sum != 0) {
+                               dev_warn(&pdev->dev,
+                                        "OEM table checksum failed\n");
+                               continue;
+                       }
+
+                       /* keep going if that's not the oem param table */
+                       if (memcmp(rom->hdr.signature,
+                                  ISCI_ROM_SIG,
+                                  ISCI_ROM_SIG_SIZE) != 0)
+                               continue;
+
+                       dev_info(&pdev->dev,
+                                "OEM parameter table found in OROM\n");
+                       break;
+               }
+       }
+
+       if (i >= len) {
+               dev_err(&pdev->dev, "oprom parse error\n");
+               devm_kfree(&pdev->dev, rom);
+               rom = NULL;
+       }
+       pci_unmap_biosrom(oprom);
+
+       return rom;
+}
+
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+                                         struct isci_orom *orom, int scu_index)
+{
+       /* check for valid inputs */
+       if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
+           scu_index > orom->hdr.num_elements || !oem)
+               return -EINVAL;
+
+       *oem = orom->ctrl[scu_index];
+       return 0;
+}
+
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
+{
+       struct isci_orom *orom = NULL, *data;
+       int i, j;
+
+       if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
+               return NULL;
+
+       if (fw->size < sizeof(*orom))
+               goto out;
+
+       data = (struct isci_orom *)fw->data;
+
+       if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
+                   strlen(ISCI_ROM_SIG)) != 0)
+               goto out;
+
+       orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
+       if (!orom)
+               goto out;
+
+       memcpy(orom, fw->data, fw->size);
+
+       if (is_c0(pdev))
+               goto out;
+
+       /*
+        * deprecated: override default amp_control for pre-production
+        * silicon revisions
+        */
+       for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
+               for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
+                       orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
+                       orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
+                       orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
+                       orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
+               }
+ out:
+       release_firmware(fw);
+
+       return orom;
+}
+
+static struct efi *get_efi(void)
+{
+#ifdef CONFIG_EFI
+       return &efi;
+#else
+       return NULL;
+#endif
+}
+
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
+{
+       efi_status_t status;
+       struct isci_orom *rom;
+       struct isci_oem_hdr *oem_hdr;
+       u8 *tmp, sum;
+       int j;
+       unsigned long data_len;
+       u8 *efi_data;
+       u32 efi_attrib = 0;
+
+       data_len = 1024;
+       efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
+       if (!efi_data) {
+               dev_warn(&pdev->dev,
+                        "Unable to allocate memory for EFI data\n");
+               return NULL;
+       }
+
+       rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
+
+       if (get_efi())
+               status = get_efi()->get_variable(isci_efivar_name,
+                                                &ISCI_EFI_VENDOR_GUID,
+                                                &efi_attrib,
+                                                &data_len,
+                                                efi_data);
+       else
+               status = EFI_NOT_FOUND;
+
+       if (status != EFI_SUCCESS) {
+               dev_warn(&pdev->dev,
+                        "Unable to obtain EFI var data for OEM parms\n");
+               return NULL;
+       }
+
+       oem_hdr = (struct isci_oem_hdr *)efi_data;
+
+       if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
+               dev_warn(&pdev->dev,
+                        "Invalid OEM header signature\n");
+               return NULL;
+       }
+
+       /* calculate checksum */
+       tmp = (u8 *)efi_data;
+       for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
+               sum += *tmp;
+
+       if (sum != 0) {
+               dev_warn(&pdev->dev,
+                        "OEM table checksum failed\n");
+               return NULL;
+       }
+
+       if (memcmp(rom->hdr.signature,
+                  ISCI_ROM_SIG,
+                  ISCI_ROM_SIG_SIZE) != 0) {
+               dev_warn(&pdev->dev,
+                        "Invalid OEM table signature\n");
+               return NULL;
+       }
+
+       return rom;
+}
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
new file mode 100644 (file)
index 0000000..dc007e6
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PROBE_ROMS_H_
+#define _ISCI_PROBE_ROMS_H_
+
+#ifdef __KERNEL__
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/efi.h>
+#include "isci.h"
+
+#define SCIC_SDS_PARM_NO_SPEED   0
+
+/* generation 1 (i.e. 1.5 Gb/s) */
+#define SCIC_SDS_PARM_GEN1_SPEED 1
+
+/* generation 2 (i.e. 3.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN2_SPEED 2
+
+/* generation 3 (i.e. 6.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN3_SPEED 3
+#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
+
+/* parameters that can be set by module parameters */
+struct sci_user_parameters {
+       struct sci_phy_user_params {
+               /**
+                * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
+                * insertion frequency for this phy index.
+                */
+               u32 notify_enable_spin_up_insertion_frequency;
+
+               /**
+                * This field specifies the number of transmitted DWORDs within which
+                * to transmit a single ALIGN primitive.  This value applies regardless
+                * of what type of device is attached or connection state.  A value of
+                * 0 indicates that no ALIGN primitives will be inserted.
+                */
+               u16 align_insertion_frequency;
+
+               /**
+                * This field specifies the number of transmitted DWORDs within which
+                * to transmit 2 ALIGN primitives.  This applies for SAS connections
+                * only.  A minimum value of 3 is required for this field.
+                */
+               u16 in_connection_align_insertion_frequency;
+
+               /**
+                * This field indicates the maximum speed generation to be utilized
+                * by phys in the supplied port.
+                * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
+                * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
+                * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
+                */
+               u8 max_speed_generation;
+
+       } phys[SCI_MAX_PHYS];
+
+       /**
+        * This field specifies the maximum number of direct attached devices
+        * that can have power supplied to them simultaneously.
+        */
+       u8 max_number_concurrent_device_spin_up;
+
+       /**
+        * This field specifies the number of seconds to allow a phy to consume
+        * power before yielding to another phy.
+        *
+        */
+       u8 phy_spin_up_delay_interval;
+
+       /**
+        * These timer values specify how long a link will remain open with no
+        * activity, in increments of a microsecond; the increment is 100
+        * microseconds if the uppermost bit is set.
+        *
+        */
+       u16 stp_inactivity_timeout;
+       u16 ssp_inactivity_timeout;
+
+       /**
+        * These timer values specify how long a link will remain open in increments
+        * of 100 microseconds.
+        *
+        */
+       u16 stp_max_occupancy_timeout;
+       u16 ssp_max_occupancy_timeout;
+
+       /**
+        * This timer value specifies how long a link will remain open with no
+        * outbound traffic in increments of a microsecond.
+        *
+        */
+       u8 no_outbound_task_timeout;
+
+};
+
+#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
+#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
+#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
+
+struct sci_oem_params;
+int sci_oem_parameters_validate(struct sci_oem_params *oem);
+
+struct isci_orom;
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+                                         struct isci_orom *orom, int scu_index);
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
+
+struct isci_oem_hdr {
+       u8 sig[4];
+       u8 rev_major;
+       u8 rev_minor;
+       u16 len;
+       u8 checksum;
+       u8 reserved1;
+       u16 reserved2;
+} __attribute__ ((packed));
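+
+/*
+ * Illustrative layout, as consumed by isci_request_oprom() and
+ * isci_get_efi_var(): an isci_oem_hdr immediately followed by the
+ * isci_orom parameter payload, with the byte-wise sum of header plus
+ * payload expected to be zero (mod 256).
+ */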
+
+#else
+#define SCI_MAX_PORTS 4
+#define SCI_MAX_PHYS 4
+#define SCI_MAX_CONTROLLERS 2
+#endif
+
+#define ISCI_FW_NAME           "isci/isci_firmware.bin"
+
+#define ROMSIGNATURE           0xaa55
+
+#define ISCI_OEM_SIG           "$OEM"
+#define ISCI_OEM_SIG_SIZE      4
+#define ISCI_ROM_SIG           "ISCUOEMB"
+#define ISCI_ROM_SIG_SIZE      8
+
+#define ISCI_EFI_VENDOR_GUID   \
+       EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
+                       0x1a, 0x04, 0xc6)
+#define ISCI_EFI_VAR_NAME      "RstScuO"
+
+/* Allowed PORT configuration modes.
+ *
+ * APC (automatic PORT configuration) mode is defined by the OEM configuration
+ * parameters providing no PHY_MASK parameters for any PORT, i.e. there are no
+ * phys assigned to any of the ports at start.
+ *
+ * MPC (manual PORT configuration) mode is defined by the OEM configuration
+ * parameters providing a PHY_MASK value for any PORT.  It is assumed that any
+ * PORT with no PHY_MASK is an invalid port and that not all PHYs must be
+ * assigned.  A PHY_MASK that assigns just a single PHY to a port, with no
+ * other PHYs being assigned, is sufficient to declare manual PORT
+ * configuration.
+ */
+enum sci_port_configuration_mode {
+       SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
+       SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
+};
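+
+/*
+ * Illustrative example (hypothetical OEM values): a parameter block with
+ * ports[0].phy_mask = 0x3 and ports[1].phy_mask = 0xC describes
+ * SCIC_PORT_MANUAL_CONFIGURATION_MODE with two x2 ports, while a block in
+ * which every ports[i].phy_mask is 0 corresponds to
+ * SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE, where the port configuration
+ * agent builds ports from link up events instead.
+ */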
+
+struct sci_bios_oem_param_block_hdr {
+       uint8_t signature[ISCI_ROM_SIG_SIZE];
+       uint16_t total_block_length;
+       uint8_t hdr_length;
+       uint8_t version;
+       uint8_t preboot_source;
+       uint8_t num_elements;
+       uint16_t element_length;
+       uint8_t reserved[8];
+} __attribute__ ((packed));
+
+struct sci_oem_params {
+       struct {
+               uint8_t mode_type;
+               uint8_t max_concurrent_dev_spin_up;
+               uint8_t do_enable_ssc;
+               uint8_t reserved;
+       } controller;
+
+       struct {
+               uint8_t phy_mask;
+       } ports[SCI_MAX_PORTS];
+
+       struct sci_phy_oem_params {
+               struct {
+                       uint32_t high;
+                       uint32_t low;
+               } sas_address;
+
+               uint32_t afe_tx_amp_control0;
+               uint32_t afe_tx_amp_control1;
+               uint32_t afe_tx_amp_control2;
+               uint32_t afe_tx_amp_control3;
+       } phys[SCI_MAX_PHYS];
+} __attribute__ ((packed));
+
+struct isci_orom {
+       struct sci_bios_oem_param_block_hdr hdr;
+       struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
+} __attribute__ ((packed));
+
+#endif
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
new file mode 100644 (file)
index 0000000..9b266c7
--- /dev/null
@@ -0,0 +1,1934 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_REGISTERS_H_
+#define _SCU_REGISTERS_H_
+
+/**
+ * This file contains the constants and structures for the SCU memory mapped
+ *    registers.
+ *
+ *
+ */
+
+#define SCU_VIIT_ENTRY_ID_MASK         (0xC0000000)
+#define SCU_VIIT_ENTRY_ID_SHIFT        (30)
+
+#define SCU_VIIT_ENTRY_FUNCTION_MASK   (0x0FF00000)
+#define SCU_VIIT_ENTRY_FUNCTION_SHIFT  (20)
+
+#define SCU_VIIT_ENTRY_IPPTMODE_MASK   (0x0001F800)
+#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT  (12)
+
+#define SCU_VIIT_ENTRY_LPVIE_MASK      (0x00000F00)
+#define SCU_VIIT_ENTRY_LPVIE_SHIFT     (8)
+
+#define SCU_VIIT_ENTRY_STATUS_MASK     (0x000000FF)
+#define SCU_VIIT_ENTRY_STATUS_SHIFT    (0)
+
+#define SCU_VIIT_ENTRY_ID_INVALID   (0 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIIT      (1 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_IIT       (2 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIRT_EXP  (3 << SCU_VIIT_ENTRY_ID_SHIFT)
+
+#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_INITIATOR            \
+       (\
+               SCU_VIIT_IPPT_SSP_INITIATOR  \
+               | SCU_VIIT_IPPT_SMP_INITIATOR  \
+               | SCU_VIIT_IPPT_STP_INITIATOR  \
+       )
+
+#define SCU_VIIT_STATUS_RNC_VALID      (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ADDRESS_VALID  (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_RNI_VALID      (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ALL_VALID      \
+       (\
+               SCU_VIIT_STATUS_RNC_VALID       \
+               | SCU_VIIT_STATUS_ADDRESS_VALID   \
+               | SCU_VIIT_STATUS_RNI_VALID       \
+       )
+
+#define SCU_VIIT_IPPT_SMP_TARGET    (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+
+/**
+ * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
+ *
+ *
+ */
+struct scu_viit_entry {
+       /**
+        * This must be encoded according to the type of initiator being
+        * constructed for this port.
+        */
+       u32 status;
+
+       /**
+        * Virtual initiator high SAS Address
+        */
+       u32 initiator_sas_address_hi;
+
+       /**
+        * Virtual initiator low SAS Address
+        */
+       u32 initiator_sas_address_lo;
+
+       /**
+        * This must be 0
+        */
+       u32 reserved;
+
+};
+
+
+/* IIT Status Defines */
+#define SCU_IIT_ENTRY_ID_MASK                (0xC0000000)
+#define SCU_IIT_ENTRY_ID_SHIFT               (30)
+
+#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK     (0x20000000)
+#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT    (29)
+
+#define SCU_IIT_ENTRY_LPI_MASK               (0x00000F00)
+#define SCU_IIT_ENTRY_LPI_SHIFT              (8)
+
+#define SCU_IIT_ENTRY_STATUS_MASK            (0x000000FF)
+#define SCU_IIT_ENTRY_STATUS_SHIFT           (0)
+
+/* IIT Remote Initiator Defines */
+#define SCU_IIT_ENTRY_REMOTE_TAG_MASK  (0x0000FFFF)
+#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
+
+#define SCU_IIT_ENTRY_REMOTE_RNC_MASK  (0x0FFF0000)
+#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
+
+#define SCU_IIT_ENTRY_ID_INVALID   (0 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIIT      (1 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_IIT       (2 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIRT_EXP  (3 << SCU_IIT_ENTRY_ID_SHIFT)
+
+/**
+ * struct scu_iit_entry - This will be implemented later when we support
+ *    virtual functions
+ *
+ *
+ */
+struct scu_iit_entry {
+       u32 status;
+       u32 remote_initiator_sas_address_hi;
+       u32 remote_initiator_sas_address_lo;
+       u32 remote_initiator;
+
+};
+
+/* Generate a value for an SCU register */
+#define SCU_GEN_VALUE(name, value) \
+       (((value) << name ## _SHIFT) & (name ## _MASK))
+
+/*
+ * Generate a bit value for an SCU register
+ * Make sure that the register MASK is just a single bit */
+#define SCU_GEN_BIT(name) \
+       SCU_GEN_VALUE(name, ((u32)1))
+
+#define SCU_SET_BIT(name, reg_value) \
+       ((reg_value) | SCU_GEN_BIT(name))
+
+#define SCU_CLEAR_BIT(name, reg_value) \
+       ((reg_value) & ~(SCU_GEN_BIT(name)))
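+
+/*
+ * Usage sketch for the helpers above (illustrative expansion):
+ * SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_NUMBER, 8) expands to
+ * ((8 << 8) & 0x0000FF00) == 0x00000800, and SCU_SET_BIT()/SCU_CLEAR_BIT()
+ * respectively set or clear a single-bit field in a previously read
+ * register value.
+ */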
+
+/*
+ * *****************************************************************************
+ * Unions for bitfield definitions of SCU Registers
+ * SMU Post Context Port
+ * ***************************************************************************** */
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT         (0)
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK          (0x00000FFF)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT    (12)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK     (0x0000F000)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT       (16)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK        (0x00030000)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT       (18)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK        (0x00FC0000)
+#define SMU_POST_CONTEXT_PORT_RESERVED_MASK               (0xFF000000)
+
+#define SMU_PCP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT       (31)
+#define SMU_INTERRUPT_STATUS_COMPLETION_MASK        (0x80000000)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT    (1)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK     (0x00000002)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT      (0)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK       (0x00000001)
+#define SMU_INTERRUPT_STATUS_RESERVED_MASK          (0x7FFFFFFC)
+
+#define SMU_ISR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
+
+#define SMU_ISR_QUEUE_ERROR   SMU_ISR_GEN_BIT(QUEUE_ERROR)
+#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_ISR_COMPLETION    SMU_ISR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT         (31)
+#define SMU_INTERRUPT_MASK_COMPLETION_MASK          (0x80000000)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT      (1)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK       (0x00000002)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT        (0)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK         (0x00000001)
+#define SMU_INTERRUPT_MASK_RESERVED_MASK            (0x7FFFFFFC)
+
+#define SMU_IMR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
+
+#define SMU_IMR_QUEUE_ERROR   SMU_IMR_GEN_BIT(QUEUE_ERROR)
+#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_IMR_COMPLETION    SMU_IMR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT    (0)
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK     (0x0000001F)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT   (8)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK    (0x0000FF00)
+#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK  (0xFFFF00E0)
+
+#define SMU_ICC_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_RANGE_START_SHIFT      (0)
+#define SMU_TASK_CONTEXT_RANGE_START_MASK       (0x00000FFF)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT     (16)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK      (0x0FFF0000)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT     (31)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK      (0x80000000)
+#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK    (0x7000F000)
+
+#define SMU_TCR_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
+
+#define SMU_TCR_GEN_BIT(name, value) \
+       SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT          (0)
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK           (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT        (15)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK         (0x00008000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT    (16)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK     (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT  (26)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK   (0x04000000)
+#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK          (0xF8004000)
+
+#define SMU_CQPR_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
+
+#define SMU_CQPR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT          (0)
+#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK           (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT        (15)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK         (0x00008000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT    (16)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK     (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT  (26)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK   (0x04000000)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT           (30)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK            (0x40000000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT     (31)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK      (0x80000000)
+#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK          (0x38004000)
+
+#define SMU_CQGR_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
+
+#define SMU_CQGR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
+
+#define SMU_CQGR_CYCLE_BIT \
+       SMU_CQGR_GEN_BIT(CYCLE_BIT)
+
+#define SMU_CQGR_EVENT_CYCLE_BIT \
+       SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
+
+#define SMU_CQGR_GET_POINTER_SET(value)        \
+       SMU_CQGR_GEN_VAL(POINTER, value)
+
+
+/* ***************************************************************************** */
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT  (0)
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK   (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT  (16)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK   (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK      (0xFC00C000)
+
+#define SMU_CQC_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
+
+#define SMU_CQC_QUEUE_LIMIT_SET(value) \
+       SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
+
+#define SMU_CQC_EVENT_LIMIT_SET(value) \
+       SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
+
+
+/* ***************************************************************************** */
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT    (0)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK     (0x00000FFF)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT    (12)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK     (0x00007000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT   (15)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK    (0x07FF8000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT   (27)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK    (0x08000000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK   (0xF0000000)
+
+#define SMU_DCC_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
+
+#define SMU_DCC_GET_MAX_PEG(value) \
+       (\
+               ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
+               >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT \
+       )
+
+#define SMU_DCC_GET_MAX_LP(value) \
+       (\
+               ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+               >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
+       )
+
+#define SMU_DCC_GET_MAX_TC(value) \
+       (\
+               ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+               >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
+       )
+
+#define SMU_DCC_GET_MAX_RNC(value) \
+       (\
+               ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+               >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
+       )
+
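+/*
+ * Illustrative sketch (editor's note, not driver code): the GET macros above
+ * simply mask and shift the capacity register; a caller would use them as
+ * below.  The helper name is hypothetical and the block is not compiled.
+ */
+#if 0
+static inline u32 smu_dcc_remote_node_count(u32 dcc_value)
+{
+        /* MAX_RNC occupies bits 26:15 of the device context capacity */
+        return SMU_DCC_GET_MAX_RNC(dcc_value);
+}
+#endif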
+/* -------------------------------------------------------------------------- */
+
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT      (0)
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK       (0x00000001)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT    (1)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK     (0x00000002)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT     (16)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK      (0x00010000)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT   (17)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK    (0x00020000)
+#define SMU_CONTROL_STATUS_RESERVED_MASK                        (0xFFFCFFFC)
+
+#define SMU_SMUCSR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
+
+#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED        \
+       (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
+
+#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED  \
+       (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
+
+#define SCU_RAM_INIT_COMPLETED \
+       (\
+               SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+               | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+       )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT  (0)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK   (0x00000001)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT  (1)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK   (0x00000002)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT  (2)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK   (0x00000004)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT  (3)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK   (0x00000008)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT  (8)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK   (0x00000100)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT  (9)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK   (0x00000200)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT  (10)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK   (0x00000400)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT  (11)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK   (0x00000800)
+
+#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
+       ((1 << (pe)) << ((peg) * 8))
+
+#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+       (\
+               SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
+               | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
+               | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
+               | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
+       )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
+       (\
+               SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
+               | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
+       )
+
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT  (16)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK   (0x00010000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT  (17)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK   (0x00020000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT  (18)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK   (0x00040000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT  (19)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK   (0x00080000)
+
+#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
+       ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT      (20)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK       (0x00100000)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT      (21)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK       (0x00200000)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT       (22)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK        (0x00400000)
+
+/*
+ * If the protocol engine group is being reset, it makes sense to also
+ * reset all of its protocol engines. */
+#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
+       (\
+               (1 << ((peg) + 20)) \
+               | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
+               | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \
+               | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+       )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
+       (\
+               SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
+               | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
+       )
+
+#define SMU_RESET_SCU()  (0xFFFFFFFF)
+
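+/*
+ * Illustrative sketch (editor's note, not driver code): resetting a single
+ * protocol engine group combines the group bit, its wide port queue bits and
+ * its per-engine bits, exactly as SMU_RESET_PROTOCOL_ENGINE_GROUP() does
+ * above.  The helper name is hypothetical and the block is not compiled.
+ */
+#if 0
+static inline u32 smu_srcr_reset_peg(u32 peg)
+{
+        /* e.g. peg == 1 sets group bit 21, wide port bit 18 and engine bits 8..11 */
+        return SMU_RESET_PROTOCOL_ENGINE_GROUP(peg);
+}
+#endif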
+
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT              (0)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK               (0x00000FFF)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT                (16)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK                 (0x0FFF0000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT    (31)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK     (0x80000000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK               (0x7000F000)
+
+#define SMU_TCA_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
+
+#define SMU_TCA_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT   (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK    (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK      (0xFFFFF000)
+
+#define SCU_UFQC_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
+
+#define SCU_UFQC_QUEUE_SIZE_SET(value) \
+       SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT      (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK       (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT    (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK     (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK      (0xFFFFE000)
+
+#define SCU_UFQPP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
+
+#define SCU_UFQPP_GEN_BIT(name)        \
+       SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT      (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK       (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT    (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK     (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT   (31)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK    (0x80000000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK      (0x7FFFE000)
+
+#define SCU_UFQGP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
+
+#define SCU_UFQGP_GEN_BIT(name)        \
+       SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
+
+#define SCU_UFQGP_CYCLE_BIT(value) \
+       ((value) ? SCU_UFQGP_GEN_BIT(CYCLE_BIT) : 0)
+
+#define SCU_UFQGP_GET_POINTER(value) \
+       SCU_UFQGP_GEN_VAL(POINTER, value)
+
+#define SCU_UFQGP_ENABLE(value)        \
+       (SCU_UFQGP_GEN_BIT(ENABLE_BIT) | (value))
+
+#define SCU_UFQGP_DISABLE(value) \
+       (~SCU_UFQGP_GEN_BIT(ENABLE_BIT) & (value))
+
+#define SCU_UFQGP_VALUE(bit, value) \
+       (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
+
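+/*
+ * Illustrative sketch (editor's note, not driver code): a value written to
+ * the unsolicited frame queue get register combines the get pointer, cycle
+ * bit and enable bit fields defined above.  The helper name is hypothetical
+ * and the block is not compiled.
+ */
+#if 0
+static inline u32 scu_ufqgp_compose(u32 get_pointer, bool cycle, bool enable)
+{
+        u32 value = (get_pointer <<
+                     SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT) &
+                    SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK;
+
+        if (cycle)
+                value |= SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK;
+        if (enable)
+                value |= SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK;
+        return value;
+}
+#endif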
+/* ***************************************************************************** */
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT                               (0)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK                                (0x0000FFFF)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT                    (16)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK                     (0x00010000)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT                            (17)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK                             (0x00020000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT                   (18)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK                    (0x00040000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT               (19)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK                (0x00080000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT     (20)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK      (0x00100000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT        (21)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK         (0x00200000)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT                        (22)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK                         (0x00400000)
+#define SCU_PDMA_CONFIGURATION_RESERVED_MASK                                        (0xFF800000)
+
+#define SCU_PDMACR_GEN_VALUE(name, value) \
+       SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
+
+#define SCU_PDMACR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
+
+#define SCU_PDMACR_BE_GEN_BIT(name) \
+       SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT                    (8)
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK                     (0x00000100)
+
+#define SCU_CDMACR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SCU Link Layer Registers
+ * ***************************************************************************** */
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT             (0)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK              (0x000000FF)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT           (8)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK            (0x0000FF00)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT   (16)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK    (0x00FF0000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT  (24)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK   (0xFF000000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_REQUIRED_MASK             (0x00000000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DEFAULT_MASK              (0x7D00676F)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RESERVED_MASK             (0x00FF0000)
+
+#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
+
+
+#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_SHIFT           (2)
+#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_MASK            (0x00000004)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT  (4)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK   (0x00000010)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT     (5)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK      (0x00000020)
+#define SCU_LINK_STATUS_RESERVED_MASK                       (0xFFFFFFCD)
+
+#define SCU_SAS_LLSTA_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
+
+
+/* TODO: Where is the SATA_PSELTOV register? */
+
+/*
+ * *****************************************************************************
+ * * SCU SAS Maximum Arbitration Wait Time Timeout Register
+ * ***************************************************************************** */
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT       (0)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK        (0x00007FFF)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT       (15)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK        (0x00008000)
+
+#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
+       SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
+
+#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
+
+
+/*
+ * TODO: Where is the SAS_LNKTOV register?
+ * TODO: Where is the SAS_PHYTOV register? */
+
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT            (1)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK             (0x00000002)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT            (2)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK             (0x00000004)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT            (3)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK             (0x00000008)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT          (8)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK           (0x00000100)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT         (9)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK          (0x00000200)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT         (10)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK          (0x00000400)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT         (11)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK          (0x00000800)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT           (16)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK            (0x000F0000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT    (24)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK     (0x0F000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT           (28)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK            (0x70000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK               (0x80F0F1F1)
+
+#define SCU_SAS_TIID_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
+
+#define SCU_SAS_TIID_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
+
+/* SAS Identify Frame PHY Identifier Register */
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT      (16)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK       (0x00010000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT   (17)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK    (0x00020000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT  (18)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK   (0x00040000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT                       (24)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK                        (0xFF000000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK                  (0x00F800FF)
+
+#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
+
+#define SCU_SAS_TIPID_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
+
+
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT                     (4)
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK                      (0x00000010)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT                          (6)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK                           (0x00000040)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT                   (7)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK                    (0x00000080)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT                 (8)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK                  (0x00000100)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT            (9)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK             (0x00000200)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT             (11)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK              (0x00000800)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT                    (12)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK                     (0x00001000)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT      (13)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK       (0x00002000)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT                          (14)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK                           (0x00004000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT                          (15)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK                           (0x00008000)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT        (23)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK         (0x00800000)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT              (27)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK               (0x08000000)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT    (28)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK     (0x10000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT                           (29)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK                            (0x20000000)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT                    (30)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK                     (0x40000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT                   (31)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK                    (0x80000000)
+#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK                             (0x0100000F)
+#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK                              (0x4180100F)
+#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK                             (0x00000000)
+
+#define SCU_SAS_PCFG_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
+
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT      (0)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK       (0x000007FF)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT    (16)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK     (0x00ff0000)
+
+#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
+
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT    (0)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK     (0x0003FFFF)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT   (31)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK    (0x80000000)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK  (0x7FFC0000)
+
+#define SCU_ENSPINUP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
+
+#define SCU_ENSPINUP_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT     (1)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK      (0x00000002)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT       (4)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK        (0x000000F0)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT     (8)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK      (0x00000100)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT      (9)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK       (0x00000201)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT     (10)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK      (0x00000401)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT      (11)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK       (0x00000801)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT     (12)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK      (0x00001001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT      (13)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK       (0x00002001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT   (31)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK    (0x80000000)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK        (0x00003F01)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK       (0x00000001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK       (0x7FFFC00D)
+
+#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
+
+#define SCU_SAS_PHYCAP_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT  (0)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK   (0x000000FF)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT         (31)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK          (0x80000000)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK                          (0x7FFFFF00)
+
+#define SCU_PSZGCR_GEN_VAL(name, value)        \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
+
+#define SCU_PSZGCR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
+
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT        (1)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK         (0x00000002)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT      (2)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK       (0x00000004)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT        (4)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK         (0x00000010)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT      (5)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK       (0x00000020)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK  (0x00030000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT      (19)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK       (0x00080000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK  (0x00300000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT      (23)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK       (0x00800000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK  (0x03000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT      (27)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK       (0x08000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK  (0x30000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT      (31)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK       (0x80000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK             (0x4444FFC9)
+
+#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
+       SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
+
+#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * Port Task Scheduler registers shift and mask values
+ * ***************************************************************************** */
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT     (0)
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK      (0x0000FFFF)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT         (16)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK          (0x00FF0000)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT          (24)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK           (0x01000000)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT           (25)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK            (0x02000000)
+#define SCU_PTSG_CONTROL_DEFAULT_MASK               (0x00020002)
+#define SCU_PTSG_CONTROL_REQUIRED_MASK              (0x00000000)
+#define SCU_PTSG_CONTROL_RESERVED_MASK              (0xFC000000)
+
+#define SCU_PTSGCR_GEN_VAL(name, val) \
+       SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
+
+#define SCU_PTSGCR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
+
+
+/* ***************************************************************************** */
+#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT          (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_MASK           (0x0000FFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK  (0xFFFF0000)
+
+#define SCU_RTCR_GEN_VAL(name, val) \
+       SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
+
+
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT  (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK   (0x00FFFFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK          (0xFF000000)
+
+#define SCU_RTCCR_GEN_VAL(name, val) \
+       SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT  (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK   (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT   (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK    (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK  (0xFFFFFFFC)
+
+#define SCU_PTSxCR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT             (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK              (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT    (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK     (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT             (2)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK              (0x00000004)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK                   (0xFFFFFFF8)
+
+#define SCU_PTSxSR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * SGPIO Register shift and mask values
+ * ***************************************************************************** */
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT                    (0)
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK                     (0x00000001)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT       (1)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK        (0x00000002)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK  (0x00000004)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT                  (15)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK                   (0x00008000)
+#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK                   (0xFFFF7FF8)
+
+#define SCU_SGICRx_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
+
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT      (0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK       (0x0000000F)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT      (4)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK       (0x000000F0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT      (8)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK       (0x00000F00)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT      (12)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK       (0x0000F000)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
+
+#define SCU_SGPBRx_GEN_VAL(name, value)        \
+       SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT        (0)
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK         (0x00000003)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT        (4)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK         (0x00000030)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT        (8)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK         (0x00000300)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT        (12)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK         (0x00003000)
+#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK   (0xFFFF8888)
+
+#define SCU_SGSDLRx_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT        (0)
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK         (0x00000003)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT        (4)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK         (0x00000030)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT        (8)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK         (0x00000300)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT        (12)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK         (0x00003000)
+#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK   (0xFFFF8888)
+
+#define SCU_SGSDURx_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_UPPER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT      (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK       (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT      (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK       (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT      (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK       (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT      (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK       (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDLRx_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT      (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK       (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT      (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK       (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT      (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK       (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT      (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK       (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDURx_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_ ## name, value)
+
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT            (0)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK             (0x0000000F)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK    (0xFFFFFFF0)
+
+#define SCU_SGVSCR_GEN_VAL(value) \
+       SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE, value)
+
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT           (0)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK            (0x00000003)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT    (2)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK     (0x00000004)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT      (3)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK       (0x00000008)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT           (4)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK            (0x00000030)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT    (6)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK     (0x00000040)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT      (7)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK       (0x00000080)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT           (8)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK            (0x00000300)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT    (10)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK     (0x00000400)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT      (11)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK       (0x00000800)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK               (0xFFFFF000)
+
+#define SCU_SGODSR_GEN_VAL(name, value)        \
+       SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value)
+
+#define SCU_SGODSR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SMU Registers
+ * ***************************************************************************** */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SMU Registers
+ * These registers are based off of BAR0
+ *
+ * To calculate the offset for other functions use
+ *       BAR0 + FN# * SystemPageSize * 2
+ *
+ * The TCA is only accessible from FN#0 (the physical function); the entry
+ * for a given FN# is programmed at (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)):
+ *    TCA0 for FN#0 is at BAR0 + 0x0400
+ *    TCA1 for FN#1 is at BAR0 + 0x0404
+ *    etc.
+ * ----------------------------------------------------------------------------
+ * Accessible to all FN#s */
+#define SCU_SMU_PCP_OFFSET          0x0000
+#define SCU_SMU_AMR_OFFSET          0x0004
+#define SCU_SMU_ISR_OFFSET          0x0010
+#define SCU_SMU_IMR_OFFSET          0x0014
+#define SCU_SMU_ICC_OFFSET          0x0018
+#define SCU_SMU_HTTLBAR_OFFSET      0x0020
+#define SCU_SMU_HTTUBAR_OFFSET      0x0024
+#define SCU_SMU_TCR_OFFSET          0x0028
+#define SCU_SMU_CQLBAR_OFFSET       0x0030
+#define SCU_SMU_CQUBAR_OFFSET       0x0034
+#define SCU_SMU_CQPR_OFFSET         0x0040
+#define SCU_SMU_CQGR_OFFSET         0x0044
+#define SCU_SMU_CQC_OFFSET          0x0048
+/* Accessible to FN#0 only */
+#define SCU_SMU_RNCLBAR_OFFSET      0x0080
+#define SCU_SMU_RNCUBAR_OFFSET      0x0084
+#define SCU_SMU_DCC_OFFSET          0x0090
+#define SCU_SMU_DFC_OFFSET          0x0094
+#define SCU_SMU_SMUCSR_OFFSET       0x0098
+#define SCU_SMU_SCUSRCR_OFFSET      0x009C
+#define SCU_SMU_SMAW_OFFSET         0x00A0
+#define SCU_SMU_SMDW_OFFSET         0x00A4
+/* Accessible to FN#0 only */
+#define SCU_SMU_TCA_OFFSET          0x0400
+/* Accessible to all FN#s */
+#define SCU_SMU_MT_MLAR0_OFFSET     0x2000
+#define SCU_SMU_MT_MUAR0_OFFSET     0x2004
+#define SCU_SMU_MT_MDR0_OFFSET      0x2008
+#define SCU_SMU_MT_VCR0_OFFSET      0x200C
+#define SCU_SMU_MT_MLAR1_OFFSET     0x2010
+#define SCU_SMU_MT_MUAR1_OFFSET     0x2014
+#define SCU_SMU_MT_MDR1_OFFSET      0x2018
+#define SCU_SMU_MT_VCR1_OFFSET      0x201C
+#define SCU_SMU_MPBA_OFFSET         0x3000
+
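+/*
+ * Illustrative sketch (editor's note, not driver code): per the comment
+ * above, the SMU block for function fn starts at BAR0 + fn * page size * 2,
+ * and the TCA entry for that function is at BAR0 + SCU_SMU_TCA_OFFSET +
+ * fn * 0x04.  The helper names are hypothetical, PAGE_SIZE stands in for the
+ * "SystemPageSize" mentioned above, and the block is not compiled.
+ */
+#if 0
+static inline unsigned long smu_function_base(unsigned long bar0, u32 fn)
+{
+        return bar0 + fn * PAGE_SIZE * 2;
+}
+
+static inline unsigned long smu_tca_address(unsigned long bar0, u32 fn)
+{
+        return bar0 + SCU_SMU_TCA_OFFSET + fn * 0x04;
+}
+#endif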
+/**
+ * struct smu_registers - These are the SMU registers
+ *
+ *
+ */
+struct smu_registers {
+/* 0x0000 PCP */
+       u32 post_context_port;
+/* 0x0004 AMR */
+       u32 address_modifier;
+       u32 reserved_08;
+       u32 reserved_0C;
+/* 0x0010 ISR */
+       u32 interrupt_status;
+/* 0x0014 IMR */
+       u32 interrupt_mask;
+/* 0x0018 ICC */
+       u32 interrupt_coalesce_control;
+       u32 reserved_1C;
+/* 0x0020 HTTLBAR */
+       u32 host_task_table_lower;
+/* 0x0024 HTTUBAR */
+       u32 host_task_table_upper;
+/* 0x0028 TCR */
+       u32 task_context_range;
+       u32 reserved_2C;
+/* 0x0030 CQLBAR */
+       u32 completion_queue_lower;
+/* 0x0034 CQUBAR */
+       u32 completion_queue_upper;
+       u32 reserved_38;
+       u32 reserved_3C;
+/* 0x0040 CQPR */
+       u32 completion_queue_put;
+/* 0x0044 CQGR */
+       u32 completion_queue_get;
+/* 0x0048 CQC */
+       u32 completion_queue_control;
+       u32 reserved_4C;
+       u32 reserved_5x[4];
+       u32 reserved_6x[4];
+       u32 reserved_7x[4];
+/*
+ * Accessible to FN#0 only
+ * 0x0080 RNCLBAR */
+       u32 remote_node_context_lower;
+/* 0x0084 RNCUBAR */
+       u32 remote_node_context_upper;
+       u32 reserved_88;
+       u32 reserved_8C;
+/* 0x0090 DCC */
+       u32 device_context_capacity;
+/* 0x0094 DFC */
+       u32 device_function_capacity;
+/* 0x0098 SMUCSR */
+       u32 control_status;
+/* 0x009C SCUSRCR */
+       u32 soft_reset_control;
+/* 0x00A0 SMAW */
+       u32 mmr_address_window;
+/* 0x00A4 SMDW */
+       u32 mmr_data_window;
+       u32 reserved_A8;
+       u32 reserved_AC;
+/* A whole bunch of reserved space */
+       u32 reserved_Bx[4];
+       u32 reserved_Cx[4];
+       u32 reserved_Dx[4];
+       u32 reserved_Ex[4];
+       u32 reserved_Fx[4];
+       u32 reserved_1xx[64];
+       u32 reserved_2xx[64];
+       u32 reserved_3xx[64];
+/*
+ * Accessible to FN#0 only
+ * 0x0400 TCA */
+       u32 task_context_assignment[256];
+/* MSI-X registers not included */
+};
+
+/*
+ * *****************************************************************************
+ * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_BASE               0x6000
+#define SCU_SDMA_PUFATLHAR_OFFSET   0x0000
+#define SCU_SDMA_PUFATUHAR_OFFSET   0x0004
+#define SCU_SDMA_UFLHBAR_OFFSET     0x0008
+#define SCU_SDMA_UFUHBAR_OFFSET     0x000C
+#define SCU_SDMA_UFQC_OFFSET        0x0010
+#define SCU_SDMA_UFQPP_OFFSET       0x0014
+#define SCU_SDMA_UFQGP_OFFSET       0x0018
+#define SCU_SDMA_PDMACR_OFFSET      0x001C
+#define SCU_SDMA_CDMACR_OFFSET      0x0080
+
+/**
+ * struct scu_sdma_registers - These are the SCU SDMA Registers
+ *
+ *
+ */
+struct scu_sdma_registers {
+/* 0x0000 PUFATLHAR */
+       u32 uf_address_table_lower;
+/* 0x0004 PUFATUHAR */
+       u32 uf_address_table_upper;
+/* 0x0008 UFLHBAR */
+       u32 uf_header_base_address_lower;
+/* 0x000C UFUHBAR */
+       u32 uf_header_base_address_upper;
+/* 0x0010 UFQC */
+       u32 unsolicited_frame_queue_control;
+/* 0x0014 UFQPP */
+       u32 unsolicited_frame_put_pointer;
+/* 0x0018 UFQGP */
+       u32 unsolicited_frame_get_pointer;
+/* 0x001C PDMACR */
+       u32 pdma_configuration;
+/* Reserved until offset 0x80 */
+       u32 reserved_0020_007C[0x18];
+/* 0x0080 CDMACR */
+       u32 cdma_configuration;
+/* Remainder SDMA register space */
+       u32 reserved_0084_0400[0xDF];
+
+};
+
+/*
+ * *****************************************************************************
+ * * SCU Link Registers
+ * ***************************************************************************** */
+#define SCU_PEG0_OFFSET    0x0000
+#define SCU_PEG1_OFFSET    0x8000
+
+#define SCU_TL0_OFFSET     0x0000
+#define SCU_TL1_OFFSET     0x0400
+#define SCU_TL2_OFFSET     0x0800
+#define SCU_TL3_OFFSET     0x0C00
+
+#define SCU_LL_OFFSET      0x0080
+#define SCU_LL0_OFFSET     (SCU_TL0_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL1_OFFSET     (SCU_TL1_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL2_OFFSET     (SCU_TL2_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL3_OFFSET     (SCU_TL3_OFFSET + SCU_LL_OFFSET)
+
+/* Transport Layer Offsets (PEG + TL) */
+#define SCU_TLCR_OFFSET         0x0000
+#define SCU_TLADTR_OFFSET       0x0004
+#define SCU_TLTTMR_OFFSET       0x0008
+#define SCU_TLEECR0_OFFSET      0x000C
+#define SCU_STPTLDARNI_OFFSET   0x0010
+
+
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT    (0)
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK     (0x00000001)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK  (0x00000002)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT     (3)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK      (0x00000008)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT         (4)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK          (0x00000010)
+#define SCU_TLCR_RESERVED_MASK                     (0xFFFFFFEB)
+
+#define SCU_TLCR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_TLCR_ ## name)
+
+/**
+ * struct scu_transport_layer_registers - These are the SCU Transport Layer
+ *    registers
+ *
+ *
+ */
+struct scu_transport_layer_registers {
+       /* 0x0000 TLCR */
+       u32 control;
+       /* 0x0004 TLADTR */
+       u32 arbitration_delay_timer;
+       /* 0x0008 TLTTMR */
+       u32 timer_test_mode;
+       /* 0x000C reserved */
+       u32 reserved_0C;
+       /* 0x0010 STPTLDARNI */
+       u32 stp_rni;
+       /* 0x0014 TLFEWPORCTRL */
+       u32 tlfe_wpo_read_control;
+       /* 0x0018 TLFEWPORDATA */
+       u32 tlfe_wpo_read_data;
+       /* 0x001C RXTLSSCSR1 */
+       u32 rxtl_single_step_control_status_1;
+       /* 0x0020 RXTLSSCSR2 */
+       u32 rxtl_single_step_control_status_2;
+       /* 0x0024 AWTRDDCR */
+       u32 tlfe_awt_retry_delay_debug_control;
+       /* Remainder of TL memory space */
+       u32 reserved_0028_007F[0x16];
+
+};
+
+/* Protocol Engine Group Registers */
+#define SCU_SCUVZECRx_OFFSET        0x1080
+
+/* Link Layer Offsets (PEG + TL + LL) */
+#define SCU_SAS_SPDTOV_OFFSET       0x0000
+#define SCU_SAS_LLSTA_OFFSET        0x0004
+#define SCU_SATA_PSELTOV_OFFSET     0x0008
+#define SCU_SAS_TIMETOV_OFFSET      0x0010
+#define SCU_SAS_LOSTOT_OFFSET       0x0014
+#define SCU_SAS_LNKTOV_OFFSET       0x0018
+#define SCU_SAS_PHYTOV_OFFSET       0x001C
+#define SCU_SAS_AFERCNT_OFFSET      0x0020
+#define SCU_SAS_WERCNT_OFFSET       0x0024
+#define SCU_SAS_TIID_OFFSET         0x0028
+#define SCU_SAS_TIDNH_OFFSET        0x002C
+#define SCU_SAS_TIDNL_OFFSET        0x0030
+#define SCU_SAS_TISSAH_OFFSET       0x0034
+#define SCU_SAS_TISSAL_OFFSET       0x0038
+#define SCU_SAS_TIPID_OFFSET        0x003C
+#define SCU_SAS_TIRES2_OFFSET       0x0040
+#define SCU_SAS_ADRSTA_OFFSET       0x0044
+#define SCU_SAS_MAWTTOV_OFFSET      0x0048
+#define SCU_SAS_FRPLDFIL_OFFSET     0x0054
+#define SCU_SAS_RFCNT_OFFSET        0x0060
+#define SCU_SAS_TFCNT_OFFSET        0x0064
+#define SCU_SAS_RFDCNT_OFFSET       0x0068
+#define SCU_SAS_TFDCNT_OFFSET       0x006C
+#define SCU_SAS_LERCNT_OFFSET       0x0070
+#define SCU_SAS_RDISERRCNT_OFFSET   0x0074
+#define SCU_SAS_CRERCNT_OFFSET      0x0078
+#define SCU_STPCTL_OFFSET           0x007C
+#define SCU_SAS_PCFG_OFFSET         0x0080
+#define SCU_SAS_CLKSM_OFFSET        0x0084
+#define SCU_SAS_TXCOMWAKE_OFFSET    0x0088
+#define SCU_SAS_TXCOMINIT_OFFSET    0x008C
+#define SCU_SAS_TXCOMSAS_OFFSET     0x0090
+#define SCU_SAS_COMINIT_OFFSET      0x0094
+#define SCU_SAS_COMWAKE_OFFSET      0x0098
+#define SCU_SAS_COMSAS_OFFSET       0x009C
+#define SCU_SAS_SFERCNT_OFFSET      0x00A0
+#define SCU_SAS_CDFERCNT_OFFSET     0x00A4
+#define SCU_SAS_DNFERCNT_OFFSET     0x00A8
+#define SCU_SAS_PRSTERCNT_OFFSET    0x00AC
+#define SCU_SAS_CNTCTL_OFFSET       0x00B0
+#define SCU_SAS_SSPTOV_OFFSET       0x00B4
+#define SCU_FTCTL_OFFSET            0x00B8
+#define SCU_FRCTL_OFFSET            0x00BC
+#define SCU_FTWMRK_OFFSET           0x00C0
+#define SCU_ENSPINUP_OFFSET         0x00C4
+#define SCU_SAS_TRNTOV_OFFSET       0x00C8
+#define SCU_SAS_PHYCAP_OFFSET       0x00CC
+#define SCU_SAS_PHYCTL_OFFSET       0x00D0
+#define SCU_SAS_LLCTL_OFFSET        0x00D8
+#define SCU_AFE_XCVRCR_OFFSET       0x00DC
+#define SCU_AFE_LUTCR_OFFSET        0x00E0
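+
+/*
+ * Illustrative sketch (editor's note, not driver code): following the
+ * "PEG + TL + LL" convention above, the absolute offset of a link layer
+ * register is the protocol engine group base plus the transport layer offset
+ * plus the link layer offset plus the register offset.  Assuming TL0-TL3
+ * correspond to the four engines of a group (not stated here), SAS_PCFG for
+ * engine 2 of PEG1 would be:
+ */
+#if 0
+#define EXAMPLE_PEG1_TL2_PCFG \
+        (SCU_PEG1_OFFSET + SCU_TL2_OFFSET + SCU_LL_OFFSET + SCU_SAS_PCFG_OFFSET)
+/* = 0x8000 + 0x0800 + 0x0080 + 0x0080 = 0x8900 */
+#endif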
+
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT                  (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK                   (0x00000003)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1                   (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2                   (1)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3                   (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT            (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK             (0x000003FC)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT   (16)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK    (0x00010000)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK  (0x00020000)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT       (24)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK        (0xFF000000)
+#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED                             (0x00FCFC00)
+
+#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
+
+#define SCU_SAS_LLCTL_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
+
+
+/* #define SCU_FRXHECR_DCNT_OFFSET      0x00B0 */
+#define SCU_PSZGCR_OFFSET           0x00E4
+#define SCU_SAS_RECPHYCAP_OFFSET    0x00E8
+/* #define SCU_TX_LUTSEL_OFFSET         0x00B8 */
+
+#define SCU_SAS_PTxC_OFFSET         0x00D4 /* Same offset as SAS_TCTSTM */
+
+/**
+ * struct scu_link_layer_registers - SCU Link Layer Registers
+ *
+ *
+ */
+struct scu_link_layer_registers {
+/* 0x0000 SAS_SPDTOV */
+       u32 speed_negotiation_timers;
+/* 0x0004 SAS_LLSTA */
+       u32 link_layer_status;
+/* 0x0008 SATA_PSELTOV */
+       u32 port_selector_timeout;
+       u32 reserved0C;
+/* 0x0010 SAS_TIMETOV */
+       u32 timeout_unit_value;
+/* 0x0014 SAS_RCDTOV */
+       u32 rcd_timeout;
+/* 0x0018 SAS_LNKTOV */
+       u32 link_timer_timeouts;
+/* 0x001C SAS_PHYTOV */
+       u32 sas_phy_timeouts;
+/* 0x0020 SAS_AFERCNT */
+       u32 received_address_frame_error_counter;
+/* 0x0024 SAS_WERCNT */
+       u32 invalid_dword_counter;
+/* 0x0028 SAS_TIID */
+       u32 transmit_identification;
+/* 0x002C SAS_TIDNH */
+       u32 sas_device_name_high;
+/* 0x0030 SAS_TIDNL */
+       u32 sas_device_name_low;
+/* 0x0034 SAS_TISSAH */
+       u32 source_sas_address_high;
+/* 0x0038 SAS_TISSAL */
+       u32 source_sas_address_low;
+/* 0x003C SAS_TIPID */
+       u32 identify_frame_phy_id;
+/* 0x0040 SAS_TIRES2 */
+       u32 identify_frame_reserved;
+/* 0x0044 SAS_ADRSTA */
+       u32 received_address_frame;
+/* 0x0048 SAS_MAWTTOV */
+       u32 maximum_arbitration_wait_timer_timeout;
+/* 0x004C SAS_PTxC */
+       u32 transmit_primitive;
+/* 0x0050 SAS_RORES */
+       u32 error_counter_event_notification_control;
+/* 0x0054 SAS_FRPLDFIL */
+       u32 frxq_payload_fill_threshold;
+/* 0x0058 SAS_LLHANG_TOT */
+       u32 link_layer_hang_detection_timeout;
+       u32 reserved_5C;
+/* 0x0060 SAS_RFCNT */
+       u32 received_frame_count;
+/* 0x0064 SAS_TFCNT */
+       u32 transmit_frame_count;
+/* 0x0068 SAS_RFDCNT */
+       u32 received_dword_count;
+/* 0x006C SAS_TFDCNT */
+       u32 transmit_dword_count;
+/* 0x0070 SAS_LERCNT */
+       u32 loss_of_sync_error_count;
+/* 0x0074 SAS_RDISERRCNT */
+       u32 running_disparity_error_count;
+/* 0x0078 SAS_CRERCNT */
+       u32 received_frame_crc_error_count;
+/* 0x007C STPCTL */
+       u32 stp_control;
+/* 0x0080 SAS_PCFG */
+       u32 phy_configuration;
+/* 0x0084 SAS_CLKSM */
+       u32 clock_skew_management;
+/* 0x0088 SAS_TXCOMWAKE */
+       u32 transmit_comwake_signal;
+/* 0x008C SAS_TXCOMINIT */
+       u32 transmit_cominit_signal;
+/* 0x0090 SAS_TXCOMSAS */
+       u32 transmit_comsas_signal;
+/* 0x0094 SAS_COMINIT */
+       u32 cominit_control;
+/* 0x0098 SAS_COMWAKE */
+       u32 comwake_control;
+/* 0x009C SAS_COMSAS */
+       u32 comsas_control;
+/* 0x00A0 SAS_SFERCNT */
+       u32 received_short_frame_count;
+/* 0x00A4 SAS_CDFERCNT */
+       u32 received_frame_without_credit_count;
+/* 0x00A8 SAS_DNFERCNT */
+       u32 received_frame_after_done_count;
+/* 0x00AC SAS_PRSTERCNT */
+       u32 phy_reset_problem_count;
+/* 0x00B0 SAS_CNTCTL */
+       u32 counter_control;
+/* 0x00B4 SAS_SSPTOV */
+       u32 ssp_timer_timeout_values;
+/* 0x00B8 FTCTL */
+       u32 ftx_control;
+/* 0x00BC FRCTL */
+       u32 frx_control;
+/* 0x00C0 FTWMRK */
+       u32 ftx_watermark;
+/* 0x00C4 ENSPINUP */
+       u32 notify_enable_spinup_control;
+/* 0x00C8 SAS_TRNTOV */
+       u32 sas_training_sequence_timer_values;
+/* 0x00CC SAS_PHYCAP */
+       u32 phy_capabilities;
+/* 0x00D0 SAS_PHYCTL */
+       u32 phy_control;
+       u32 reserved_d4;
+/* 0x00D8 LLCTL */
+       u32 link_layer_control;
+/* 0x00DC AFE_XCVRCR */
+       u32 afe_xcvr_control;
+/* 0x00E0 AFE_LUTCR */
+       u32 afe_lookup_table_control;
+/* 0x00E4 PSZGCR */
+       u32 phy_source_zone_group_control;
+/* 0x00E8 SAS_RECPHYCAP */
+       u32 receive_phycap;
+       u32 reserved_ec;
+/* 0x00F0 SNAFERXRSTCTL */
+       u32 speed_negotiation_afe_rx_reset_control;
+/* 0x00F4 SAS_SSIPMCTL */
+       u32 power_management_control;
+/* 0x00F8 SAS_PSPREQ_PRIM */
+       u32 sas_pm_partial_request_primitive;
+/* 0x00FC SAS_PSSREQ_PRIM */
+       u32 sas_pm_slumber_request_primitive;
+/* 0x0100 SAS_PPSACK_PRIM */
+       u32 sas_pm_ack_primitive_register;
+/* 0x0104 SAS_PSNAK_PRIM */
+       u32 sas_pm_nak_primitive_register;
+/* 0x0108 SAS_SSIPMTOV */
+       u32 sas_primitive_timeout;
+       u32 reserved_10c;
+/* 0x0110 - 0x011C PLAPRDCTRLxREG */
+       u32 pla_product_control[4];
+/* 0x0120 PLAPRDSUMREG */
+       u32 pla_product_sum;
+/* 0x0124 PLACONTROLREG */
+       u32 pla_control;
+/* Remainder of memory space 896 bytes */
+       u32 reserved_0128_037f[0x96];
+
+};
+
+/*
+ * 0x00D4 SAS_PTxC (same offset as SAS_TCTSTM):
+ *   u32 primitive_transmit_control; */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SGPIO
+ * ---------------------------------------------------------------------------- */
+#define SCU_SGPIO_OFFSET         0x1400
+
+/* #define SCU_SGPIO_OFFSET         0x6000   // later moves to 0x1400 see HSD 652625 */
+#define SCU_SGPIO_SGICR_OFFSET   0x0000
+#define SCU_SGPIO_SGPBR_OFFSET   0x0004
+#define SCU_SGPIO_SGSDLR_OFFSET  0x0008
+#define SCU_SGPIO_SGSDUR_OFFSET  0x000C
+#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
+#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
+#define SCU_SGPIO_SGVSCR_OFFSET  0x0018
+/* 0x0020 to 0x003C - one SGODSR register per drive */
+#define SCU_SGPIO_SGODSR_OFFSET  0x0020
+
+/**
+ * struct scu_sgpio_registers - SCU SGPIO Registers
+ *
+ *
+ */
+struct scu_sgpio_registers {
+/* 0x0000 SGPIO_SGICR */
+       u32 interface_control;
+/* 0x0004 SGPIO_SGPBR */
+       u32 blink_rate;
+/* 0x0008 SGPIO_SGSDLR */
+       u32 start_drive_lower;
+/* 0x000C SGPIO_SGSDUR */
+       u32 start_drive_upper;
+/* 0x0010 SGPIO_SGSIDLR */
+       u32 serial_input_lower;
+/* 0x0014 SGPIO_SGSIDUR */
+       u32 serial_input_upper;
+/* 0x0018 SGPIO_SGVSCR */
+       u32 vendor_specific_code;
+/* 0x0020 SGPIO_SGODSR */
+       u32 ouput_data_select[8];
+/* Remainder of memory space 256 bytes */
+       u32 reserved_1444_14ff[0x31];
+
+};
+
+/*
+ * *****************************************************************************
+ * * Defines for VIIT entry offsets
+ * * Access additional entries by SCU_VIIT_BASE + index * 0x10
+ * ***************************************************************************** */
+#define     SCU_VIIT_BASE     0x1c00
+
+struct scu_viit_registers {
+       u32 registers[256];
+};
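+
+/*
+ * Illustrative sketch (editor's note, not driver code): per the banner above,
+ * VIIT entry 'index' starts at SCU_VIIT_BASE + index * 0x10.  The helper name
+ * is hypothetical and the block is not compiled.
+ */
+#if 0
+static inline u32 scu_viit_entry_offset(u32 index)
+{
+        return SCU_VIIT_BASE + index * 0x10;
+}
+#endif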
+
+/*
+ * *****************************************************************************
+ * * SCU PORT TASK SCHEDULER REGISTERS
+ * ***************************************************************************** */
+
+#define SCU_PTSG_BASE               0x1000
+
+#define SCU_PTSG_PTSGCR_OFFSET      0x0000
+#define SCU_PTSG_RTCR_OFFSET        0x0004
+#define SCU_PTSG_RTCCR_OFFSET       0x0008
+#define SCU_PTSG_PTS0CR_OFFSET      0x0010
+#define SCU_PTSG_PTS0SR_OFFSET      0x0014
+#define SCU_PTSG_PTS1CR_OFFSET      0x0018
+#define SCU_PTSG_PTS1SR_OFFSET      0x001C
+#define SCU_PTSG_PTS2CR_OFFSET      0x0020
+#define SCU_PTSG_PTS2SR_OFFSET      0x0024
+#define SCU_PTSG_PTS3CR_OFFSET      0x0028
+#define SCU_PTSG_PTS3SR_OFFSET      0x002C
+#define SCU_PTSG_PCSPE0CR_OFFSET    0x0030
+#define SCU_PTSG_PCSPE1CR_OFFSET    0x0034
+#define SCU_PTSG_PCSPE2CR_OFFSET    0x0038
+#define SCU_PTSG_PCSPE3CR_OFFSET    0x003C
+#define SCU_PTSG_ETMTSCCR_OFFSET    0x0040
+#define SCU_PTSG_ETMRNSCCR_OFFSET   0x0044
+
+/**
+ * struct scu_port_task_scheduler_registers - These are the control/status
+ *    pairs for each Port Task Scheduler.
+ *
+ *
+ */
+struct scu_port_task_scheduler_registers {
+       u32 control;
+       u32 status;
+};
+
+/**
+ * struct scu_port_task_scheduler_group_registers - These are the PORT Task
+ *    Scheduler registers
+ *
+ *
+ */
+struct scu_port_task_scheduler_group_registers {
+/* 0x0000 PTSGCR */
+       u32 control;
+/* 0x0004 RTCR */
+       u32 real_time_clock;
+/* 0x0008 RTCCR */
+       u32 real_time_clock_control;
+/* 0x000C */
+       u32 reserved_0C;
+/*
+ * 0x0010 PTS0CR
+ * 0x0014 PTS0SR
+ * 0x0018 PTS1CR
+ * 0x001C PTS1SR
+ * 0x0020 PTS2CR
+ * 0x0024 PTS2SR
+ * 0x0028 PTS3CR
+ * 0x002C PTS3SR */
+       struct scu_port_task_scheduler_registers port[4];
+/*
+ * 0x0030 PCSPE0CR
+ * 0x0034 PCSPE1CR
+ * 0x0038 PCSPE2CR
+ * 0x003C PCSPE3CR */
+       u32 protocol_engine[4];
+/* 0x0040 ETMTSCCR */
+       u32 tc_scanning_interval_control;
+/* 0x0044 ETMRNSCCR */
+       u32 rnc_scanning_interval_control;
+/* Remainder of memory space 128 bytes */
+       u32 reserved_1048_107f[0x0E];
+
+};
+
+#define SCU_PTSG_SCUVZECR_OFFSET        0x003C
+
+/*
+ * *****************************************************************************
+ * * AFE REGISTERS
+ * ***************************************************************************** */
+#define SCU_AFE_MMR_BASE                  0xE000
+
+/*
+ * AFE 0 is at offset 0x0800
+ * AFE 1 is at offset 0x0900
+ * AFE 2 is at offset 0x0a00
+ * AFE 3 is at offset 0x0b00 */
+struct scu_afe_transceiver {
+       /* 0x0000 AFE_XCVR_CTRL0 */
+       u32 afe_xcvr_control0;
+       /* 0x0004 AFE_XCVR_CTRL1 */
+       u32 afe_xcvr_control1;
+       /* 0x0008 */
+       u32 reserved_0008;
+       /* 0x000c afe_dfx_rx_control0 */
+       u32 afe_dfx_rx_control0;
+       /* 0x0010 AFE_DFX_RX_CTRL1 */
+       u32 afe_dfx_rx_control1;
+       /* 0x0014 */
+       u32 reserved_0014;
+       /* 0x0018 AFE_DFX_RX_STS0 */
+       u32 afe_dfx_rx_status0;
+       /* 0x001c AFE_DFX_RX_STS1 */
+       u32 afe_dfx_rx_status1;
+       /* 0x0020 */
+       u32 reserved_0020;
+       /* 0x0024 AFE_TX_CTRL */
+       u32 afe_tx_control;
+       /* 0x0028 AFE_TX_AMP_CTRL0 */
+       u32 afe_tx_amp_control0;
+       /* 0x002c AFE_TX_AMP_CTRL1 */
+       u32 afe_tx_amp_control1;
+       /* 0x0030 AFE_TX_AMP_CTRL2 */
+       u32 afe_tx_amp_control2;
+       /* 0x0034 AFE_TX_AMP_CTRL3 */
+       u32 afe_tx_amp_control3;
+       /* 0x0038 afe_tx_ssc_control */
+       u32 afe_tx_ssc_control;
+       /* 0x003c */
+       u32 reserved_003c;
+       /* 0x0040 AFE_RX_SSC_CTRL0 */
+       u32 afe_rx_ssc_control0;
+       /* 0x0044 AFE_RX_SSC_CTRL1 */
+       u32 afe_rx_ssc_control1;
+       /* 0x0048 AFE_RX_SSC_CTRL2 */
+       u32 afe_rx_ssc_control2;
+       /* 0x004c AFE_RX_EQ_STS0 */
+       u32 afe_rx_eq_status0;
+       /* 0x0050 AFE_RX_EQ_STS1 */
+       u32 afe_rx_eq_status1;
+       /* 0x0054 AFE_RX_CDR_STS */
+       u32 afe_rx_cdr_status;
+       /* 0x0058 */
+       u32 reserved_0058;
+       /* 0x005c AFE_CHAN_CTRL */
+       u32 afe_channel_control;
+       /* 0x0060-0x006c */
+       u32 reserved_0060_006c[0x04];
+       /* 0x0070 AFE_XCVR_EC_STS0 */
+       u32 afe_xcvr_error_capture_status0;
+       /* 0x0074 AFE_XCVR_EC_STS1 */
+       u32 afe_xcvr_error_capture_status1;
+       /* 0x0078 AFE_XCVR_EC_STS2 */
+       u32 afe_xcvr_error_capture_status2;
+       /* 0x007c afe_xcvr_ec_status3 */
+       u32 afe_xcvr_error_capture_status3;
+       /* 0x0080 AFE_XCVR_EC_STS4 */
+       u32 afe_xcvr_error_capture_status4;
+       /* 0x0084 AFE_XCVR_EC_STS5 */
+       u32 afe_xcvr_error_capture_status5;
+       /* 0x0088-0x00fc */
+       u32 reserved_008c_00fc[0x1e];
+};
+
+/**
+ * struct scu_afe_registers - AFE Registers
+ *
+ *
+ */
+struct scu_afe_registers {
+       /* 0Xe000 AFE_BIAS_CTRL */
+       u32 afe_bias_control;
+       u32 reserved_0004;
+       /* 0x0008 AFE_PLL_CTRL0 */
+       u32 afe_pll_control0;
+       /* 0x000c AFE_PLL_CTRL1 */
+       u32 afe_pll_control1;
+       /* 0x0010 AFE_PLL_CTRL2 */
+       u32 afe_pll_control2;
+       /* 0x0014 AFE_CB_STS */
+       u32 afe_common_block_status;
+       /* 0x0018-0x007c */
+       u32 reserved_18_7c[0x1a];
+       /* 0x0080 AFE_PMSN_MCTRL0 */
+       u32 afe_pmsn_master_control0;
+       /* 0x0084 AFE_PMSN_MCTRL1 */
+       u32 afe_pmsn_master_control1;
+       /* 0x0088 AFE_PMSN_MCTRL2 */
+       u32 afe_pmsn_master_control2;
+       /* 0x008C-0x00fc */
+       u32 reserved_008c_00fc[0x1D];
+       /* 0x0100 AFE_DFX_MST_CTRL0 */
+       u32 afe_dfx_master_control0;
+       /* 0x0104 AFE_DFX_MST_CTRL1 */
+       u32 afe_dfx_master_control1;
+       /* 0x0108 AFE_DFX_DCL_CTRL */
+       u32 afe_dfx_dcl_control;
+       /* 0x010c AFE_DFX_DMON_CTRL */
+       u32 afe_dfx_digital_monitor_control;
+       /* 0x0110 AFE_DFX_AMONP_CTRL */
+       u32 afe_dfx_analog_p_monitor_control;
+       /* 0x0114 AFE_DFX_AMONN_CTRL */
+       u32 afe_dfx_analog_n_monitor_control;
+       /* 0x0118 AFE_DFX_NTL_STS */
+       u32 afe_dfx_ntl_status;
+       /* 0x011c AFE_DFX_FIFO_STS0 */
+       u32 afe_dfx_fifo_status0;
+       /* 0x0120 AFE_DFX_FIFO_STS1 */
+       u32 afe_dfx_fifo_status1;
+       /* 0x0124 AFE_DFX_MPAT_CTRL */
+       u32 afe_dfx_master_pattern_control;
+       /* 0x0128 AFE_DFX_P0_CTRL */
+       u32 afe_dfx_p0_control;
+       /* 0x012c-0x01a8 AFE_DFX_P0_DRx */
+       u32 afe_dfx_p0_data[32];
+       /* 0x01ac */
+       u32 reserved_01ac;
+       /* 0x01b0-0x020c AFE_DFX_P0_IRx */
+       u32 afe_dfx_p0_instruction[24];
+       /* 0x0210 */
+       u32 reserved_0210;
+       /* 0x0214 AFE_DFX_P1_CTRL */
+       u32 afe_dfx_p1_control;
+       /* 0x0218-0x0254 AFE_DFX_P1_DRx */
+       u32 afe_dfx_p1_data[16];
+       /* 0x0258-0x029c */
+       u32 reserved_0258_029c[0x12];
+       /* 0x02a0-0x02bc AFE_DFX_P1_IRx */
+       u32 afe_dfx_p1_instruction[8];
+       /* 0x02c0-0x2fc */
+       u32 reserved_02c0_02fc[0x10];
+       /* 0x0300 AFE_DFX_TX_PMSN_CTRL */
+       u32 afe_dfx_tx_pmsn_control;
+       /* 0x0304 AFE_DFX_RX_PMSN_CTRL */
+       u32 afe_dfx_rx_pmsn_control;
+       u32 reserved_0308;
+       /* 0x030c AFE_DFX_NOA_CTRL0 */
+       u32 afe_dfx_noa_control0;
+       /* 0x0310 AFE_DFX_NOA_CTRL1 */
+       u32 afe_dfx_noa_control1;
+       /* 0x0314 AFE_DFX_NOA_CTRL2 */
+       u32 afe_dfx_noa_control2;
+       /* 0x0318 AFE_DFX_NOA_CTRL3 */
+       u32 afe_dfx_noa_control3;
+       /* 0x031c AFE_DFX_NOA_CTRL4 */
+       u32 afe_dfx_noa_control4;
+       /* 0x0320 AFE_DFX_NOA_CTRL5 */
+       u32 afe_dfx_noa_control5;
+       /* 0x0324 AFE_DFX_NOA_CTRL6 */
+       u32 afe_dfx_noa_control6;
+       /* 0x0328 AFE_DFX_NOA_CTRL7 */
+       u32 afe_dfx_noa_control7;
+       /* 0x032c-0x07fc */
+       u32 reserved_032c_07fc[0x135];
+
+       /* 0x0800-0x0bfc */
+       struct scu_afe_transceiver scu_afe_xcvr[4];
+
+       /* 0x0c00-0x0ffc */
+       u32 reserved_0c00_0ffc[0x0100];
+};
+
+struct scu_protocol_engine_group_registers {
+       u32 table[0xE0];
+};
+
+
+struct scu_viit_iit {
+       u32 table[256];
+};
+
+/**
+ * Placeholder for the ZONE Partition Table information. ZONING will not be
+ *    included in the 1.1 release.
+ *
+ *
+ */
+struct scu_zone_partition_table {
+       u32 table[2048];
+};
+
+/**
+ * Placeholder for the CRAM register since I am not sure if we need to
+ *    read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_completion_ram {
+       u32 ram[128];
+};
+
+/**
+ * Placeholder for the FBRAM registers since I am not sure if we need to
+ *    read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_frame_buffer_ram {
+       u32 ram[128];
+};
+
+#define scu_scratch_ram_SIZE_IN_DWORDS  256
+
+/**
+ * Placeholder for the scratch RAM registers.
+ *
+ *
+ */
+struct scu_scratch_ram {
+       u32 ram[scu_scratch_ram_SIZE_IN_DWORDS];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_protocol_engine_partition {
+       u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_hub_partition {
+       u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_host_interface_partition {
+       u32 reserved[64];
+};
+
+/**
+ * struct transport_link_layer_pair - The SCU Hardware pairs up the TL
+ *    registers with the LL registers, so we must place them adjacent to form
+ *    the array of registers in the PEG.
+ *
+ *
+ */
+struct transport_link_layer_pair {
+       struct scu_transport_layer_registers tl;
+       struct scu_link_layer_registers ll;
+};
+
+/**
+ * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space.
+ *     These registers are unique to each protocol engine group.  There can be
+ *    at most two PEG for a single SCU part.
+ *
+ *
+ */
+struct scu_peg_registers {
+       struct transport_link_layer_pair pe[4];
+       struct scu_port_task_scheduler_group_registers ptsg;
+       struct scu_protocol_engine_group_registers peg;
+       struct scu_sgpio_registers sgpio;
+       u32 reserved_01500_1BFF[0x1C0];
+       struct scu_viit_entry viit[64];
+       struct scu_zone_partition_table zpt0;
+       struct scu_zone_partition_table zpt1;
+};
+
+/**
+ * struct scu_registers - SCU registers including both PEG registers if we turn
+ *    on that compile option. All of these registers are in the memory mapped
+ *    space returned from BAR1.
+ *
+ *
+ */
+struct scu_registers {
+       /* 0x0000 - PEG 0 */
+       struct scu_peg_registers peg0;
+
+       /* 0x6000 - SDMA and Miscellaneous */
+       struct scu_sdma_registers sdma;
+       struct scu_completion_ram cram;
+       struct scu_frame_buffer_ram fbram;
+       u32 reserved_6800_69FF[0x80];
+       struct noa_protocol_engine_partition noa_pe;
+       struct noa_hub_partition noa_hub;
+       struct noa_host_interface_partition noa_if;
+       u32 reserved_6d00_7fff[0x4c0];
+
+       /* 0x8000 - PEG 1 */
+       struct scu_peg_registers peg1;
+
+       /* 0xE000 - AFE Registers */
+       struct scu_afe_registers afe;
+
+       /* 0xF000 - reserved */
+       u32 reserved_f000_211fff[0x80c00];
+
+       /* 0x212000 - scratch RAM */
+       struct scu_scratch_ram scratch_ram;
+};
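+
+/*
+ * Illustrative sketch (not part of this file): since the kernel-doc above
+ * notes that this layout lives in the memory mapped space behind BAR1, a
+ * driver would typically access it through an __iomem mapping, e.g.:
+ *
+ *   struct scu_registers __iomem *scu = pci_ioremap_bar(pdev, 1);
+ *   u32 val = readl(&scu->peg0.ptsg.control);
+ *
+ * The field path is only an example of how the nested structures are used;
+ * 'pdev' is assumed to be the controller's struct pci_dev.
+ */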
+
+#endif   /* _SCU_REGISTERS_HEADER_ */
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
new file mode 100644 (file)
index 0000000..b6e6368
--- /dev/null
@@ -0,0 +1,1501 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas.h>
+#include "isci.h"
+#include "port.h"
+#include "remote_device.h"
+#include "request.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "task.h"
+
+/**
+ * isci_remote_device_not_ready() - This function is called by the ihost when
+ *    the remote device is not ready. We mark the isci device as not ready
+ *    (clear "ready_for_io") and signal the waiting process.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_device: This parameter specifies the remote device
+ *
+ * sci_lock is held on entrance to this function.
+ */
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+                                 struct isci_remote_device *idev, u32 reason)
+{
+       struct isci_request *ireq;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: isci_device = %p\n", __func__, idev);
+
+       switch (reason) {
+       case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
+               set_bit(IDEV_GONE, &idev->flags);
+               break;
+       case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+               set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+               /* Kill all outstanding requests for the device. */
+               list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
+
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: isci_device = %p request = %p\n",
+                               __func__, idev, ireq);
+
+                       sci_controller_terminate_request(ihost,
+                                                         idev,
+                                                         ireq);
+               }
+               /* Fall through into the default case... */
+       default:
+               clear_bit(IDEV_IO_READY, &idev->flags);
+               break;
+       }
+}
+
+/**
+ * isci_remote_device_ready() - This function is called by the ihost when the
+ *    remote device is ready. We mark the isci device as ready and signal the
+ *    waiting process.
+ * @ihost: our valid isci_host
+ * @idev: remote device
+ *
+ */
+static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       dev_dbg(&ihost->pdev->dev,
+               "%s: idev = %p\n", __func__, idev);
+
+       clear_bit(IDEV_IO_NCQERROR, &idev->flags);
+       set_bit(IDEV_IO_READY, &idev->flags);
+       if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
+               wake_up(&ihost->eventq);
+}
+
+/* called once the remote node context is ready to be freed.
+ * The remote device can now report that its stop operation is complete.
+ */
+static void rnc_destruct_done(void *_dev)
+{
+       struct isci_remote_device *idev = _dev;
+
+       BUG_ON(idev->started_request_count != 0);
+       sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
+{
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+       enum sci_status status  = SCI_SUCCESS;
+       u32 i;
+
+       for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+               struct isci_request *ireq = ihost->reqs[i];
+               enum sci_status s;
+
+               if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+                   ireq->target_device != idev)
+                       continue;
+
+               s = sci_controller_terminate_request(ihost, idev, ireq);
+               if (s != SCI_SUCCESS)
+                       status = s;
+       }
+
+       return status;
+}
+
+enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
+                                       u32 timeout)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_STOPPED:
+               return SCI_SUCCESS;
+       case SCI_DEV_STARTING:
+               /* device not started so there had better be no requests */
+               BUG_ON(idev->started_request_count != 0);
+               sci_remote_node_context_destruct(&idev->rnc,
+                                                     rnc_destruct_done, idev);
+               /* Transition to the stopping state and wait for the
+                * remote node to complete being posted and invalidated.
+                */
+               sci_change_state(sm, SCI_DEV_STOPPING);
+               return SCI_SUCCESS;
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_IDLE:
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_SMP_DEV_CMD:
+               sci_change_state(sm, SCI_DEV_STOPPING);
+               if (idev->started_request_count == 0) {
+                       sci_remote_node_context_destruct(&idev->rnc,
+                                                             rnc_destruct_done, idev);
+                       return SCI_SUCCESS;
+               } else
+                       return sci_remote_device_terminate_requests(idev);
+               break;
+       case SCI_DEV_STOPPING:
+               /* All requests should have been terminated, but if there is an
+                * attempt to stop a device already in the stopping state, then
+                * try again to terminate.
+                */
+               return sci_remote_device_terminate_requests(idev);
+       case SCI_DEV_RESETTING:
+               sci_change_state(sm, SCI_DEV_STOPPING);
+               return SCI_SUCCESS;
+       }
+}
+
+enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_SMP_DEV_CMD:
+       case SCI_DEV_STOPPING:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_RESETTING:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_IDLE:
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+               sci_change_state(sm, SCI_DEV_RESETTING);
+               return SCI_SUCCESS;
+       }
+}
+
+enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       if (state != SCI_DEV_RESETTING) {
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(sm, SCI_DEV_READY);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+                                              u32 suspend_type)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       if (state != SCI_STP_DEV_CMD) {
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       return sci_remote_node_context_suspend(&idev->rnc,
+                                                   suspend_type, NULL, NULL);
+}
+
+enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
+                                                    u32 frame_index)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+       enum sci_status status;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_STP_DEV_IDLE:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               /* Return the frame back to the controller */
+               sci_controller_release_frame(ihost, frame_index);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+       case SCI_DEV_STOPPING:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_RESETTING: {
+               struct isci_request *ireq;
+               struct ssp_frame_hdr hdr;
+               void *frame_header;
+               ssize_t word_cnt;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      &frame_header);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               word_cnt = sizeof(hdr) / sizeof(u32);
+               sci_swab32_cpy(&hdr, frame_header, word_cnt);
+
+               ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
+               if (ireq && ireq->target_device == idev) {
+                       /* The IO request is now in charge of releasing the frame */
+                       status = sci_io_request_frame_handler(ireq, frame_index);
+               } else {
+                       /* We could not map this tag to a valid IO
+                        * request. Just toss the frame and continue.
+                        */
+                       sci_controller_release_frame(ihost, frame_index);
+               }
+               break;
+       }
+       case SCI_STP_DEV_NCQ: {
+               struct dev_to_host_fis *hdr;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&hdr);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               if (hdr->fis_type == FIS_SETDEVBITS &&
+                   (hdr->status & ATA_ERR)) {
+                       idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+
+                       /* TODO Check sactive and complete associated IO if any. */
+                       sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
+               } else if (hdr->fis_type == FIS_REGD2H &&
+                          (hdr->status & ATA_ERR)) {
+                       /*
+                        * Some devices return D2H FIS when an NCQ error is detected.
+                        * Treat this like an SDB error FIS not-ready reason.
+                        */
+                       idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+                       sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
+               } else
+                       status = SCI_FAILURE;
+
+               sci_controller_release_frame(ihost, frame_index);
+               break;
+       }
+       case SCI_STP_DEV_CMD:
+       case SCI_SMP_DEV_CMD:
+               /* The device does not process any UF received from the hardware while
+                * in this state.  All unsolicited frames are forwarded to the io request
+                * object.
+                */
+               status = sci_io_request_frame_handler(idev->working_request, frame_index);
+               break;
+       }
+
+       return status;
+}
+
+static bool is_remote_device_ready(struct isci_remote_device *idev)
+{
+
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       switch (state) {
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_IDLE:
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_SMP_DEV_CMD:
+               return true;
+       default:
+               return false;
+       }
+}
+
+enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
+                                                    u32 event_code)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       enum sci_status status;
+
+       switch (scu_get_event_type(event_code)) {
+       case SCU_EVENT_TYPE_RNC_OPS_MISC:
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+               status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
+               break;
+       case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+               if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
+                       status = SCI_SUCCESS;
+
+                       /* Suspend the associated RNC */
+                       sci_remote_node_context_suspend(&idev->rnc,
+                                                             SCI_SOFTWARE_SUSPENSION,
+                                                             NULL, NULL);
+
+                       dev_dbg(scirdev_to_dev(idev),
+                               "%s: device: %p event code: %x: %s\n",
+                               __func__, idev, event_code,
+                               is_remote_device_ready(idev)
+                               ? "I_T_Nexus_Timeout event"
+                               : "I_T_Nexus_Timeout event in wrong state");
+
+                       break;
+               }
+       /* Else, fall through and treat as unhandled... */
+       default:
+               dev_dbg(scirdev_to_dev(idev),
+                       "%s: device: %p event code: %x: %s\n",
+                       __func__, idev, event_code,
+                       is_remote_device_ready(idev)
+                       ? "unexpected event"
+                       : "unexpected event in wrong state");
+               status = SCI_FAILURE_INVALID_STATE;
+               break;
+       }
+
+       if (status != SCI_SUCCESS)
+               return status;
+
+       if (state == SCI_STP_DEV_IDLE) {
+
+               /* Suspension events are handled specifically in this
+                * state: we resume the RNC right away.
+                */
+               if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+                   scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
+                       status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+       }
+
+       return status;
+}
+
+static void sci_remote_device_start_request(struct isci_remote_device *idev,
+                                                struct isci_request *ireq,
+                                                enum sci_status status)
+{
+       struct isci_port *iport = idev->owning_port;
+
+       /* cleanup requests that failed after starting on the port */
+       if (status != SCI_SUCCESS)
+               sci_port_complete_io(iport, idev, ireq);
+       else {
+               kref_get(&idev->kref);
+               idev->started_request_count++;
+       }
+}
+
+enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
+                                               struct isci_remote_device *idev,
+                                               struct isci_request *ireq)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_port *iport = idev->owning_port;
+       enum sci_status status;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_DEV_STOPPING:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_RESETTING:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_READY:
+               /* Attempt to start an io request for this device object. The remote
+                * device object will issue the start request for the io and, if
+                * successful, it will start the request for the port object and then
+                * increment its own request count.
+                */
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               status = sci_request_start(ireq);
+               break;
+       case SCI_STP_DEV_IDLE: {
+               /* Handle the start io operation for a sata device that is in
+                * the command idle state:
+                *  - Evaluate the type of IO request to be started.
+                *  - If it is an NCQ request, change to the NCQ substate.
+                *  - If it is any other command, change to the CMD substate.
+                *
+                * If this is a softreset we may want to have a different
+                * substate.
+                */
+               enum sci_remote_device_states new_state;
+               struct sas_task *task = isci_request_access_task(ireq);
+
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               status = sci_request_start(ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               if (task->ata_task.use_ncq)
+                       new_state = SCI_STP_DEV_NCQ;
+               else {
+                       idev->working_request = ireq;
+                       new_state = SCI_STP_DEV_CMD;
+               }
+               sci_change_state(sm, new_state);
+               break;
+       }
+       case SCI_STP_DEV_NCQ: {
+               struct sas_task *task = isci_request_access_task(ireq);
+
+               if (task->ata_task.use_ncq) {
+                       status = sci_port_start_io(iport, idev, ireq);
+                       if (status != SCI_SUCCESS)
+                               return status;
+
+                       status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+                       if (status != SCI_SUCCESS)
+                               break;
+
+                       status = sci_request_start(ireq);
+               } else
+                       return SCI_FAILURE_INVALID_STATE;
+               break;
+       }
+       case SCI_STP_DEV_AWAIT_RESET:
+               return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+       case SCI_SMP_DEV_IDLE:
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               status = sci_request_start(ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               idev->working_request = ireq;
+               sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
+               break;
+       case SCI_STP_DEV_CMD:
+       case SCI_SMP_DEV_CMD:
+               /* The device is already handling a command; it cannot accept new
+                * commands until this one is complete.
+                */
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_remote_device_start_request(idev, ireq, status);
+       return status;
+}
+
+static enum sci_status common_complete_io(struct isci_port *iport,
+                                         struct isci_remote_device *idev,
+                                         struct isci_request *ireq)
+{
+       enum sci_status status;
+
+       status = sci_request_complete(ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       status = sci_port_complete_io(iport, idev, ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       sci_remote_device_decrement_request_count(idev);
+       return status;
+}
+
+enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
+                                                  struct isci_remote_device *idev,
+                                                  struct isci_request *ireq)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_port *iport = idev->owning_port;
+       enum sci_status status;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_STP_DEV_IDLE:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_AWAIT_RESET:
+       case SCI_DEV_RESETTING:
+               status = common_complete_io(iport, idev, ireq);
+               break;
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+               status = common_complete_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+                       /* This request caused a hardware error, so the device needs a LUN
+                        * reset. Force the state machine into the AWAIT_RESET state so the
+                        * remaining IOs can reach the RNC state handler and be completed by
+                        * the RNC with a status of "DEVICE_RESET_REQUIRED" instead of
+                        * "INVALID STATE".
+                        */
+                       sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
+               } else if (idev->started_request_count == 0)
+                       sci_change_state(sm, SCI_STP_DEV_IDLE);
+               break;
+       case SCI_SMP_DEV_CMD:
+               status = common_complete_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+               sci_change_state(sm, SCI_SMP_DEV_IDLE);
+               break;
+       case SCI_DEV_STOPPING:
+               status = common_complete_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               if (idev->started_request_count == 0)
+                       sci_remote_node_context_destruct(&idev->rnc,
+                                                        rnc_destruct_done,
+                                                        idev);
+               break;
+       }
+
+       if (status != SCI_SUCCESS)
+               dev_err(scirdev_to_dev(idev),
+                       "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
+                       "could not complete\n", __func__, iport,
+                       idev, ireq, status);
+       else
+               isci_put_device(idev);
+
+       return status;
+}
+
+static void sci_remote_device_continue_request(void *dev)
+{
+       struct isci_remote_device *idev = dev;
+
+       /* we need to check if this request is still valid to continue. */
+       if (idev->working_request)
+               sci_controller_continue_io(idev->working_request);
+}
+
+enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
+                                                 struct isci_remote_device *idev,
+                                                 struct isci_request *ireq)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_port *iport = idev->owning_port;
+       enum sci_status status;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_SMP_DEV_CMD:
+       case SCI_DEV_STOPPING:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_RESETTING:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_STP_DEV_IDLE:
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       goto out;
+
+               status = sci_request_start(ireq);
+               if (status != SCI_SUCCESS)
+                       goto out;
+
+               /* Note: If the remote device state is not IDLE this will
+                * replace the request that probably resulted in the task
+                * management request.
+                */
+               idev->working_request = ireq;
+               sci_change_state(sm, SCI_STP_DEV_CMD);
+
+               /* The remote node context must cleanup the TCi to NCQ mapping
+                * table.  The only way to do this correctly is to either write
+                * to the TLCR register or to invalidate and repost the RNC. In
+                * either case the remote node context state machine will take
+                * the correct action when the remote node context is suspended
+                * and later resumed.
+                */
+               sci_remote_node_context_suspend(&idev->rnc,
+                               SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+               sci_remote_node_context_resume(&idev->rnc,
+                               sci_remote_device_continue_request,
+                                                   idev);
+
+       out:
+               sci_remote_device_start_request(idev, ireq, status);
+               /* We need to let the controller start request handler know that
+                * it can't post TC yet. We will provide a callback function to
+                * post TC when RNC gets resumed.
+                */
+               return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
+       case SCI_DEV_READY:
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               status = sci_request_start(ireq);
+               break;
+       }
+       sci_remote_device_start_request(idev, ireq, status);
+
+       return status;
+}
+
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
+{
+       struct isci_port *iport = idev->owning_port;
+       u32 context;
+
+       context = request |
+                 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+                 (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+                 idev->rnc.remote_node_index;
+
+       sci_controller_post_request(iport->owning_controller, context);
+}
+
+/* called once the remote node context has transitioned to a
+ * ready state.  This is the indication that the remote device object can also
+ * transition to ready.
+ */
+static void remote_device_resume_done(void *_dev)
+{
+       struct isci_remote_device *idev = _dev;
+
+       if (is_remote_device_ready(idev))
+               return;
+
+       /* go 'ready' if we are not already in a ready state */
+       sci_change_state(&idev->sm, SCI_DEV_READY);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
+{
+       struct isci_remote_device *idev = _dev;
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       /* For NCQ operation we do not issue an isci_remote_device_not_ready().
+        * As a result, avoid sending the ready notification.
+        */
+       if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
+               isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       /* Initial state is a transitional state to the stopped state */
+       sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+/**
+ * sci_remote_device_destruct() - free remote node context and destruct
+ * @remote_device: This parameter specifies the remote device to be destructed.
+ *
+ * Remote device objects are a limited resource.  As such, they must be
+ * protected.  Thus calls to construct and destruct are mutually exclusive and
+ * non-reentrant. The return value indicates whether the device was
+ * successfully destructed or some failure occurred: SCI_SUCCESS is
+ * returned if the device is successfully destructed, and
+ * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied
+ * device isn't valid (e.g. it has already been destroyed, the handle isn't
+ * valid, etc.).
+ */
+static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_host *ihost;
+
+       if (state != SCI_DEV_STOPPED) {
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       ihost = idev->owning_port->owning_controller;
+       sci_controller_free_remote_node_context(ihost, idev,
+                                                    idev->rnc.remote_node_index);
+       idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+       sci_change_state(sm, SCI_DEV_FINAL);
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device to be freed.
+ *
+ */
+static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       dev_dbg(&ihost->pdev->dev,
+               "%s: isci_device = %p\n", __func__, idev);
+
+       /* There should not be any outstanding io's. All paths to
+        * here should go through isci_remote_device_nuke_requests.
+        * If we hit this condition, we will need a way to complete
+        * io requests in process */
+       BUG_ON(!list_empty(&idev->reqs_in_process));
+
+       sci_remote_device_destruct(idev);
+       list_del_init(&idev->node);
+       isci_put_device(idev);
+}
+
+static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+       u32 prev_state;
+
+       /* If we are entering from the stopping state let the SCI User know that
+        * the stop operation has completed.
+        */
+       prev_state = idev->sm.previous_state_id;
+       if (prev_state == SCI_DEV_STOPPING)
+               isci_remote_device_deconstruct(ihost, idev);
+
+       sci_controller_remote_device_stopped(ihost, idev);
+}
+
+static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       isci_remote_device_not_ready(ihost, idev,
+                                    SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
+}
+
+static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+       struct domain_device *dev = idev->domain_dev;
+
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+               sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
+       } else if (dev_is_expander(dev)) {
+               sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
+       } else
+               isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct domain_device *dev = idev->domain_dev;
+
+       if (dev->dev_type == SAS_END_DEV) {
+               struct isci_host *ihost = idev->owning_port->owning_controller;
+
+               isci_remote_device_not_ready(ihost, idev,
+                                            SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
+       }
+}
+
+static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       sci_remote_node_context_suspend(
+               &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+}
+
+static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       idev->working_request = NULL;
+       if (sci_remote_node_context_is_ready(&idev->rnc)) {
+               /*
+                * Since the RNC is ready, it's alright to finish completion
+                * processing (e.g. signal the remote device is ready). */
+               sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
+       } else {
+               sci_remote_node_context_resume(&idev->rnc,
+                       sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
+                       idev);
+       }
+}
+
+static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       BUG_ON(idev->working_request == NULL);
+
+       isci_remote_device_not_ready(ihost, idev,
+                                    SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
+}
+
+static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
+               isci_remote_device_not_ready(ihost, idev,
+                                            idev->not_ready_reason);
+}
+
+static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       BUG_ON(idev->working_request == NULL);
+
+       isci_remote_device_not_ready(ihost, idev,
+                                    SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       idev->working_request = NULL;
+}
+
+static const struct sci_base_state sci_remote_device_state_table[] = {
+       [SCI_DEV_INITIAL] = {
+               .enter_state = sci_remote_device_initial_state_enter,
+       },
+       [SCI_DEV_STOPPED] = {
+               .enter_state = sci_remote_device_stopped_state_enter,
+       },
+       [SCI_DEV_STARTING] = {
+               .enter_state = sci_remote_device_starting_state_enter,
+       },
+       [SCI_DEV_READY] = {
+               .enter_state = sci_remote_device_ready_state_enter,
+               .exit_state  = sci_remote_device_ready_state_exit
+       },
+       [SCI_STP_DEV_IDLE] = {
+               .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
+       },
+       [SCI_STP_DEV_CMD] = {
+               .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
+       },
+       [SCI_STP_DEV_NCQ] = { },
+       [SCI_STP_DEV_NCQ_ERROR] = {
+               .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
+       },
+       [SCI_STP_DEV_AWAIT_RESET] = { },
+       [SCI_SMP_DEV_IDLE] = {
+               .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
+       },
+       [SCI_SMP_DEV_CMD] = {
+               .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
+               .exit_state  = sci_smp_remote_device_ready_cmd_substate_exit,
+       },
+       [SCI_DEV_STOPPING] = { },
+       [SCI_DEV_FAILED] = { },
+       [SCI_DEV_RESETTING] = {
+               .enter_state = sci_remote_device_resetting_state_enter,
+               .exit_state  = sci_remote_device_resetting_state_exit
+       },
+       [SCI_DEV_FINAL] = { },
+};
+
+/**
+ * sci_remote_device_construct() - common construction
+ * @sci_port: SAS/SATA port through which this device is accessed.
+ * @sci_dev: remote device to construct
+ *
+ * This routine just performs benign initialization and does not
+ * allocate the remote_node_context which is left to
+ * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
+ * frees the remote_node_context(s) for the device.
+ */
+static void sci_remote_device_construct(struct isci_port *iport,
+                                 struct isci_remote_device *idev)
+{
+       idev->owning_port = iport;
+       idev->started_request_count = 0;
+
+       sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
+
+       sci_remote_node_context_construct(&idev->rnc,
+                                              SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+/**
+ * sci_remote_device_da_construct() - construct direct attached device.
+ *
+ * The information (e.g. IAF, Signature FIS, etc.) necessary to build
+ * the device is known to the SCI Core since it is contained in the
+ * sci_phy object.  Remote node context(s) is/are a global resource
+ * allocated by this routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
+                                                      struct isci_remote_device *idev)
+{
+       enum sci_status status;
+       struct domain_device *dev = idev->domain_dev;
+
+       sci_remote_device_construct(iport, idev);
+
+       /*
+        * This information is required to determine how many remote node context
+        * entries will be needed to store the remote node.
+        */
+       idev->is_direct_attached = true;
+       status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+                                                                 idev,
+                                                                 &idev->rnc.remote_node_index);
+
+       if (status != SCI_SUCCESS)
+               return status;
+
+       if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+           (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+               /* pass */;
+       else
+               return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+       idev->connection_rate = sci_port_get_max_allowed_speed(iport);
+
+       /* @todo Should I assign the port width by reading all of the phys on the port? */
+       idev->device_port_width = 1;
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_ea_construct() - construct expander attached device
+ *
+ * Remote node context(s) is/are a global resource allocated by this
+ * routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
+                                                      struct isci_remote_device *idev)
+{
+       struct domain_device *dev = idev->domain_dev;
+       enum sci_status status;
+
+       sci_remote_device_construct(iport, idev);
+
+       status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+                                                                 idev,
+                                                                 &idev->rnc.remote_node_index);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+           (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+               /* pass */;
+       else
+               return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+       /*
+        * For SAS-2 the physical link rate is actually a logical link
+        * rate that incorporates multiplexing.  The SCU doesn't
+        * incorporate multiplexing and for the purposes of the
+        * connection the logical link rate is the same as the
+        * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
+        * one another, so this code works for both situations. */
+       idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
+                                        dev->linkrate);
+
+       /* @todo Should I assign the port width by reading all of the phys on the port? */
+       idev->device_port_width = 1;
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_start() - This method will start the supplied remote
+ *    device.  This method enables normal IO requests to flow through to the
+ *    remote device.
+ * @remote_device: This parameter specifies the device to be started.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    start operation should complete.
+ *
+ * Returns an indication of whether the device was successfully started:
+ * SCI_SUCCESS is returned if the device was successfully started;
+ * SCI_FAILURE_INVALID_PHY is returned if the user attempts to start
+ * the device when no phys have been added to it.
+ */
+static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
+                                               u32 timeout)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       enum sci_status status;
+
+       if (state != SCI_DEV_STOPPED) {
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_remote_node_context_resume(&idev->rnc,
+                                                    remote_device_resume_done,
+                                                    idev);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       sci_change_state(sm, SCI_DEV_STARTING);
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status isci_remote_device_construct(struct isci_port *iport,
+                                                   struct isci_remote_device *idev)
+{
+       struct isci_host *ihost = iport->isci_host;
+       struct domain_device *dev = idev->domain_dev;
+       enum sci_status status;
+
+       if (dev->parent && dev_is_expander(dev->parent))
+               status = sci_remote_device_ea_construct(iport, idev);
+       else
+               status = sci_remote_device_da_construct(iport, idev);
+
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
+                       __func__, status);
+
+               return status;
+       }
+
+       /* start the device. */
+       status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
+
+       if (status != SCI_SUCCESS)
+               dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
+                        status);
+
+       return status;
+}
+
+void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: idev = %p\n", __func__, idev);
+
+       /* Cleanup all requests pending for this device. */
+       isci_terminate_pending_requests(ihost, idev);
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: idev = %p, done\n", __func__, idev);
+}
+
+/**
+ * This function allocates an isci_remote_device when a libsas dev_found
+ *    notification is received.
+ * @ihost: This parameter specifies the isci host object.
+ * @iport: This parameter specifies the isci_port connected to this device.
+ *
+ * Returns a pointer to the new isci_remote_device, or NULL if none is available.
+ */
+static struct isci_remote_device *
+isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
+{
+       struct isci_remote_device *idev;
+       int i;
+
+       for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+               idev = &ihost->devices[i];
+               if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
+                       break;
+       }
+
+       if (i >= SCI_MAX_REMOTE_DEVICES) {
+               dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
+               return NULL;
+       }
+
+       if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
+               return NULL;
+
+       if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
+               return NULL;
+
+       return idev;
+}
+
+void isci_remote_device_release(struct kref *kref)
+{
+       struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+       struct isci_host *ihost = idev->isci_port->isci_host;
+
+       idev->domain_dev = NULL;
+       idev->isci_port = NULL;
+       clear_bit(IDEV_START_PENDING, &idev->flags);
+       clear_bit(IDEV_STOP_PENDING, &idev->flags);
+       clear_bit(IDEV_IO_READY, &idev->flags);
+       clear_bit(IDEV_GONE, &idev->flags);
+       clear_bit(IDEV_EH, &idev->flags);
+       smp_mb__before_clear_bit();
+       clear_bit(IDEV_ALLOCATED, &idev->flags);
+       wake_up(&ihost->eventq);
+}
+
+/**
+ * isci_remote_device_stop() - This function is called internally to stop the
+ *    remote device.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_device: This parameter specifies the remote device.
+ *
+ * Returns the status of the request to stop the device.
+ */
+enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       enum sci_status status;
+       unsigned long flags;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: isci_device = %p\n", __func__, idev);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+       set_bit(IDEV_GONE, &idev->flags);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* Kill all outstanding requests. */
+       isci_remote_device_nuke_requests(ihost, idev);
+
+       set_bit(IDEV_STOP_PENDING, &idev->flags);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       status = sci_remote_device_stop(idev, 50);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* Wait for the stop complete callback. */
+       if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+               /* nothing to wait for */;
+       else
+               wait_for_device_stop(ihost, idev);
+
+       return status;
+}
+
+/**
+ * isci_remote_device_gone() - This function is called by libsas when a domain
+ *    device is removed.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ */
+void isci_remote_device_gone(struct domain_device *dev)
+{
+       struct isci_host *ihost = dev_to_ihost(dev);
+       struct isci_remote_device *idev = dev->lldd_dev;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
+               __func__, dev, idev, idev->isci_port);
+
+       isci_remote_device_stop(ihost, idev);
+}
+
+
+/**
+ * isci_remote_device_found() - This function is called by libsas when a remote
+ *    device is discovered. A remote device object is created and started. The
+ *    function then sleeps until the sci core device started message is
+ *    received.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ * Returns a status; zero indicates success.
+ */
+int isci_remote_device_found(struct domain_device *domain_dev)
+{
+       struct isci_host *isci_host = dev_to_ihost(domain_dev);
+       struct isci_port *isci_port;
+       struct isci_phy *isci_phy;
+       struct asd_sas_port *sas_port;
+       struct asd_sas_phy *sas_phy;
+       struct isci_remote_device *isci_device;
+       enum sci_status status;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: domain_device = %p\n", __func__, domain_dev);
+
+       wait_for_start(isci_host);
+
+       sas_port = domain_dev->port;
+       sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
+                                  port_phy_el);
+       isci_phy = to_iphy(sas_phy);
+       isci_port = isci_phy->isci_port;
+
+       /* we are being called for a device on this port,
+        * so it has to come up eventually
+        */
+       wait_for_completion(&isci_port->start_complete);
+
+       if ((isci_stopping == isci_port_get_state(isci_port)) ||
+           (isci_stopped == isci_port_get_state(isci_port)))
+               return -ENODEV;
+
+       isci_device = isci_remote_device_alloc(isci_host, isci_port);
+       if (!isci_device)
+               return -ENODEV;
+
+       kref_init(&isci_device->kref);
+       INIT_LIST_HEAD(&isci_device->node);
+
+       spin_lock_irq(&isci_host->scic_lock);
+       isci_device->domain_dev = domain_dev;
+       isci_device->isci_port = isci_port;
+       list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
+
+       set_bit(IDEV_START_PENDING, &isci_device->flags);
+       status = isci_remote_device_construct(isci_port, isci_device);
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_device = %p\n",
+               __func__, isci_device);
+
+       if (status == SCI_SUCCESS) {
+               /* device came up, advertise it to the world */
+               domain_dev->lldd_dev = isci_device;
+       } else
+               isci_put_device(isci_device);
+       spin_unlock_irq(&isci_host->scic_lock);
+
+       /* wait for the device ready callback. */
+       wait_for_device_start(isci_host, isci_device);
+
+       return status == SCI_SUCCESS ? 0 : -ENODEV;
+}
+/**
+ * isci_device_is_reset_pending() - This function will check if there is any
+ *    pending reset condition on the device.
+ * @isci_host: This parameter is the isci host object.
+ * @isci_device: This parameter is the isci_device object.
+ *
+ * Returns true if there is a reset pending for the device.
+ */
+bool isci_device_is_reset_pending(
+       struct isci_host *isci_host,
+       struct isci_remote_device *isci_device)
+{
+       struct isci_request *isci_request;
+       struct isci_request *tmp_req;
+       bool reset_is_pending = false;
+       unsigned long flags;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_device = %p\n", __func__, isci_device);
+
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+       /* Check for reset on all pending requests. */
+       list_for_each_entry_safe(isci_request, tmp_req,
+                                &isci_device->reqs_in_process, dev_node) {
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s: isci_device = %p request = %p\n",
+                       __func__, isci_device, isci_request);
+
+               if (isci_request->ttype == io_task) {
+                       struct sas_task *task = isci_request_access_task(
+                               isci_request);
+
+                       spin_lock(&task->task_state_lock);
+                       if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+                               reset_is_pending = true;
+                       spin_unlock(&task->task_state_lock);
+               }
+       }
+
+       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_device = %p reset_is_pending = %d\n",
+               __func__, isci_device, reset_is_pending);
+
+       return reset_is_pending;
+}
+
+/**
+ * isci_device_clear_reset_pending() - This function will clear any pending
+ *    reset condition flags on the device.
+ * @ihost: This parameter is the isci host object.
+ * @idev: This parameter is the isci_device object.
+ */
+void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       struct isci_request *isci_request;
+       struct isci_request *tmp_req;
+       unsigned long flags = 0;
+
+       dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
+                __func__, idev, ihost);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       /* Clear reset pending on all pending requests. */
+       list_for_each_entry_safe(isci_request, tmp_req,
+                                &idev->reqs_in_process, dev_node) {
+               dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
+                        __func__, idev, isci_request);
+
+               if (isci_request->ttype == io_task) {
+
+                       unsigned long flags2;
+                       struct sas_task *task = isci_request_access_task(
+                               isci_request);
+
+                       spin_lock_irqsave(&task->task_state_lock, flags2);
+                       task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+                       spin_unlock_irqrestore(&task->task_state_lock, flags2);
+               }
+       }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
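
For reference, here is a minimal sketch (illustrative only, not from the patch; the helper name is hypothetical) of how a caller could mark a sas_task so that isci_device_is_reset_pending() above reports a pending reset, taking task_state_lock just as the two helpers above do:

  /* illustrative helper, not part of the isci patch */
  static void example_mark_task_for_reset(struct sas_task *task)
  {
          unsigned long flags;

          /* set the flag that isci_device_is_reset_pending() checks and
           * isci_device_clear_reset_pending() clears
           */
          spin_lock_irqsave(&task->task_state_lock, flags);
          task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
          spin_unlock_irqrestore(&task->task_state_lock, flags);
  }
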
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
new file mode 100644 (file)
index 0000000..57ccfc3
--- /dev/null
@@ -0,0 +1,352 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REMOTE_DEVICE_H_
+#define _ISCI_REMOTE_DEVICE_H_
+#include <scsi/libsas.h>
+#include <linux/kref.h>
+#include "scu_remote_node_context.h"
+#include "remote_node_context.h"
+#include "port.h"
+
+enum sci_remote_device_not_ready_reason_code {
+       SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
+       SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
+       SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
+       SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
+       SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
+       SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
+};
+
+/**
+ * isci_remote_device - isci representation of a sas expander / end point
+ * @device_port_width: hw setting for number of simultaneous connections
+ * @connection_rate: per-task-context connection rate for this device
+ * @working_request: SATA requests have no tag, so for unaccelerated
+ *                   protocols we need a method to associate unsolicited
+ *                   frames with a pending request
+ */
+struct isci_remote_device {
+       #define IDEV_START_PENDING 0
+       #define IDEV_STOP_PENDING 1
+       #define IDEV_ALLOCATED 2
+       #define IDEV_EH 3
+       #define IDEV_GONE 4
+       #define IDEV_IO_READY 5
+       #define IDEV_IO_NCQERROR 6
+       unsigned long flags;
+       struct kref kref;
+       struct isci_port *isci_port;
+       struct domain_device *domain_dev;
+       struct list_head node;
+       struct list_head reqs_in_process;
+       struct sci_base_state_machine sm;
+       u32 device_port_width;
+       enum sas_linkrate connection_rate;
+       bool is_direct_attached;
+       struct isci_port *owning_port;
+       struct sci_remote_node_context rnc;
+       /* XXX unify with device reference counting and delete */
+       u32 started_request_count;
+       struct isci_request *working_request;
+       u32 not_ready_reason;
+};
+
+#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
+
+/* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
+{
+       struct isci_remote_device *idev = dev->lldd_dev;
+
+       if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
+               kref_get(&idev->kref);
+               return idev;
+       }
+
+       return NULL;
+}
+
+void isci_remote_device_release(struct kref *kref);
+static inline void isci_put_device(struct isci_remote_device *idev)
+{
+       if (idev)
+               kref_put(&idev->kref, isci_remote_device_release);
+}
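
As the comment above notes, the device reference routines must be called under scic_lock; isci_lookup_device() takes a kref that the caller later drops with isci_put_device(). A minimal sketch of that pattern (illustrative only, not from the patch; the helper name and the IDEV_IO_READY check are just an example):

  /* illustrative helper, not part of the isci patch */
  static int example_device_io_ready(struct isci_host *ihost,
                                     struct domain_device *dev)
  {
          struct isci_remote_device *idev;
          unsigned long flags;
          int ready;

          /* look up the device and take a reference under scic_lock */
          spin_lock_irqsave(&ihost->scic_lock, flags);
          idev = isci_lookup_device(dev);
          spin_unlock_irqrestore(&ihost->scic_lock, flags);

          if (!idev)
                  return -ENODEV;

          ready = test_bit(IDEV_IO_READY, &idev->flags);

          /* drop the reference taken by isci_lookup_device() */
          isci_put_device(idev);
          return ready;
  }
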
+
+enum sci_status isci_remote_device_stop(struct isci_host *ihost,
+                                       struct isci_remote_device *idev);
+void isci_remote_device_nuke_requests(struct isci_host *ihost,
+                                     struct isci_remote_device *idev);
+void isci_remote_device_gone(struct domain_device *domain_dev);
+int isci_remote_device_found(struct domain_device *domain_dev);
+bool isci_device_is_reset_pending(struct isci_host *ihost,
+                                 struct isci_remote_device *idev);
+void isci_device_clear_reset_pending(struct isci_host *ihost,
+                                    struct isci_remote_device *idev);
+/**
+ * sci_remote_device_stop() - This method will stop both transmission and
+ *    reception of link activity for the supplied remote device.  This method
+ *    disables normal IO requests from flowing through to the remote device.
+ * @remote_device: This parameter specifies the device to be stopped.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    stop operation should complete.
+ *
+ * Returns an indication of whether the device was successfully stopped:
+ * SCI_SUCCESS is returned if transmission and reception for the device were
+ * successfully stopped.
+ */
+enum sci_status sci_remote_device_stop(
+       struct isci_remote_device *idev,
+       u32 timeout);
+
+/**
+ * sci_remote_device_reset() - This method will reset the device making it
+ *    ready for operation. This method must be called anytime the device is
+ *    reset either through a SMP phy control or a port hard reset request.
+ * @remote_device: This parameter specifies the device to be reset.
+ *
+ * This method does not actually cause the device hardware to be reset.  It
+ * resets the software object so that it will be operational after a device
+ * hardware reset completes.  Returns an indication of whether the device
+ * reset was accepted: SCI_SUCCESS is returned if the device reset is
+ * started.
+ */
+enum sci_status sci_remote_device_reset(
+       struct isci_remote_device *idev);
+
+/**
+ * sci_remote_device_reset_complete() - This method informs the device object
+ *    that the reset operation is complete and the device can resume operation
+ *    again.
+ * @remote_device: This parameter specifies the device which is to be informed
+ *    of the reset complete operation.
+ *
+ * Returns an indication that the device is resuming operation: SCI_SUCCESS if
+ * the device is resuming operation.
+ */
+enum sci_status sci_remote_device_reset_complete(
+       struct isci_remote_device *idev);
+
+/**
+ * enum sci_remote_device_states - This enumeration depicts all the states
+ *    for the common remote device state machine.
+ *
+ *
+ */
+enum sci_remote_device_states {
+       /**
+        * Simply the initial state for the base remote device state machine.
+        */
+       SCI_DEV_INITIAL,
+
+       /**
+        * This state indicates that the remote device has successfully been
+        * stopped.  In this state no new IO operations are permitted.
+        * This state is entered from the INITIAL state.
+        * This state is entered from the STOPPING state.
+        */
+       SCI_DEV_STOPPED,
+
+       /**
+        * This state indicates that the remote device is in the process of
+        * becoming ready (i.e. starting).  In this state no new IO operations
+        * are permitted.
+        * This state is entered from the STOPPED state.
+        */
+       SCI_DEV_STARTING,
+
+       /**
+        * This state indicates the remote device is now ready.  Thus, the user
+        * is able to perform IO operations on the remote device.
+        * This state is entered from the STARTING state.
+        */
+       SCI_DEV_READY,
+
+       /**
+        * This is the idle substate for the STP remote device.  When there is no
+        * active IO for the device it is in this state.
+        */
+       SCI_STP_DEV_IDLE,
+
+       /**
+        * This is the command state for the STP remote device.  This state is
+        * entered when the device is processing a non-NCQ command.  The device object
+        * will fail any new start IO requests until this command is complete.
+        */
+       SCI_STP_DEV_CMD,
+
+       /**
+        * This is the NCQ state for the STP remote device.  This state is entered
+        * when the device is processing an NCQ request.  It will remain in this state
+        * as long as there are one or more NCQ requests being processed.
+        */
+       SCI_STP_DEV_NCQ,
+
+       /**
+        * This is the NCQ error state for the STP remote device.  This state is
+        * entered when an SDB error FIS is received by the device object while in the
+        * NCQ state.  The device object will only accept a READ LOG command while in
+        * this state.
+        */
+       SCI_STP_DEV_NCQ_ERROR,
+
+       /**
+        * This READY substate indicates the device is waiting for the RESET task
+        * in order to recover from a certain hardware-specific error.
+        */
+       SCI_STP_DEV_AWAIT_RESET,
+
+       /**
+        * This is the idle substate for the SMP remote device.  When there is no
+        * active IO for the device it is in this state.
+        */
+       SCI_SMP_DEV_IDLE,
+
+       /**
+        * This is the command state for the SMP remote device.  This state is
+        * entered when the device is processing an SMP request.
+        */
+       SCI_SMP_DEV_CMD,
+
+       /**
+        * This state indicates that the remote device is in the process of
+        * stopping.  In this state no new IO operations are permitted, but
+        * existing IO operations are allowed to complete.
+        * This state is entered from the READY state.
+        * This state is entered from the FAILED state.
+        */
+       SCI_DEV_STOPPING,
+
+       /**
+        * This state indicates that the remote device has failed.
+        * In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZING state.
+        * This state is entered from the READY state.
+        */
+       SCI_DEV_FAILED,
+
+       /**
+        * This state indicates the device is being reset.
+        * In this state no new IO operations are permitted.
+        * This state is entered from the READY state.
+        */
+       SCI_DEV_RESETTING,
+
+       /**
+        * Simply the final state for the base remote device state machine.
+        */
+       SCI_DEV_FINAL,
+};
+
+static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
+{
+       struct isci_remote_device *idev;
+
+       idev = container_of(rnc, typeof(*idev), rnc);
+
+       return idev;
+}
+
+static inline bool dev_is_expander(struct domain_device *dev)
+{
+       return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+}
+
+static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
+{
+       /* XXX delete this voodoo when converting to the top-level device
+        * reference count
+        */
+       if (WARN_ONCE(idev->started_request_count == 0,
+                     "%s: tried to decrement started_request_count past 0!?",
+                       __func__))
+               /* pass */;
+       else
+               idev->started_request_count--;
+}
+
+enum sci_status sci_remote_device_frame_handler(
+       struct isci_remote_device *idev,
+       u32 frame_index);
+
+enum sci_status sci_remote_device_event_handler(
+       struct isci_remote_device *idev,
+       u32 event_code);
+
+enum sci_status sci_remote_device_start_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_remote_device_start_task(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_remote_device_complete_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_remote_device_suspend(
+       struct isci_remote_device *idev,
+       u32 suspend_type);
+
+void sci_remote_device_post_request(
+       struct isci_remote_device *idev,
+       u32 request);
+
+#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
new file mode 100644 (file)
index 0000000..748e833
--- /dev/null
@@ -0,0 +1,627 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "isci.h"
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "scu_task_context.h"
+
+
+/**
+ *
+ * @sci_rnc: The RNC for which the posted request is being made.
+ *
+ * This method will return true if the RNC is not in the initial state; in all
+ * other states the RNC is considered active.  The destroy request of the
+ * state machine drives the RNC back to the initial state.  If the state
+ * machine changes then this routine will also have to be changed.  Returns
+ * true if the state machine is not in the initial state and false if it is.
+ */
+
+/**
+ *
+ * @sci_rnc: The remote node context object to check.
+ *
+ * This method will return true if the remote node context is in the READY
+ * state and false otherwise.
+ */
+bool sci_remote_node_context_is_ready(
+       struct sci_remote_node_context *sci_rnc)
+{
+       u32 current_state = sci_rnc->sm.current_state_id;
+
+       if (current_state == SCI_RNC_READY) {
+               return true;
+       }
+
+       return false;
+}
+
+static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
+{
+       if (id < ihost->remote_node_entries &&
+           ihost->device_table[id])
+               return &ihost->remote_node_context_table[id];
+
+       return NULL;
+}
+
+static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
+{
+       struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+       struct domain_device *dev = idev->domain_dev;
+       int rni = sci_rnc->remote_node_index;
+       union scu_remote_node_context *rnc;
+       struct isci_host *ihost;
+       __le64 sas_addr;
+
+       ihost = idev->owning_port->owning_controller;
+       rnc = sci_rnc_by_id(ihost, rni);
+
+       memset(rnc, 0, sizeof(union scu_remote_node_context)
+               * sci_remote_device_node_count(idev));
+
+       rnc->ssp.remote_node_index = rni;
+       rnc->ssp.remote_node_port_width = idev->device_port_width;
+       rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
+
+       /* sas address is __be64, context ram format is __le64 */
+       sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
+       rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
+       rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
+
+       rnc->ssp.nexus_loss_timer_enable = true;
+       rnc->ssp.check_bit               = false;
+       rnc->ssp.is_valid                = false;
+       rnc->ssp.is_remote_node_context  = true;
+       rnc->ssp.function_number         = 0;
+
+       rnc->ssp.arbitration_wait_time = 0;
+
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+               rnc->ssp.connection_occupancy_timeout =
+                       ihost->user_parameters.stp_max_occupancy_timeout;
+               rnc->ssp.connection_inactivity_timeout =
+                       ihost->user_parameters.stp_inactivity_timeout;
+       } else {
+               rnc->ssp.connection_occupancy_timeout  =
+                       ihost->user_parameters.ssp_max_occupancy_timeout;
+               rnc->ssp.connection_inactivity_timeout =
+                       ihost->user_parameters.ssp_inactivity_timeout;
+       }
+
+       rnc->ssp.initial_arbitration_wait_time = 0;
+
+       /* Open Address Frame Parameters */
+       rnc->ssp.oaf_connection_rate = idev->connection_rate;
+       rnc->ssp.oaf_features = 0;
+       rnc->ssp.oaf_source_zone_group = 0;
+       rnc->ssp.oaf_more_compatibility_features = 0;
+}
+
+/**
+ *
+ * @sci_rnc: The remote node context to set up.
+ * @callback: The user callback to invoke once the RNC reaches the ready state.
+ * @callback_parameter: The cookie passed to the user callback.
+ *
+ * This method will set up the remote node context object so it will transition
+ * to its ready state.  If the remote node context is already set up to
+ * transition to its final state then this function does nothing.
+ */
+static void sci_remote_node_context_setup_to_resume(
+       struct sci_remote_node_context *sci_rnc,
+       scics_sds_remote_node_context_callback callback,
+       void *callback_parameter)
+{
+       if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
+               sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
+               sci_rnc->user_callback     = callback;
+               sci_rnc->user_cookie       = callback_parameter;
+       }
+}
+
+static void sci_remote_node_context_setup_to_destroy(
+       struct sci_remote_node_context *sci_rnc,
+       scics_sds_remote_node_context_callback callback,
+       void *callback_parameter)
+{
+       sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
+       sci_rnc->user_callback     = callback;
+       sci_rnc->user_cookie       = callback_parameter;
+}
+
+/**
+ *
+ *
+ * This method just calls the user callback function and then resets the
+ * callback.
+ */
+static void sci_remote_node_context_notify_user(
+       struct sci_remote_node_context *rnc)
+{
+       if (rnc->user_callback != NULL) {
+               (*rnc->user_callback)(rnc->user_cookie);
+
+               rnc->user_callback = NULL;
+               rnc->user_cookie = NULL;
+       }
+}
+
+static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
+{
+       if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+               sci_remote_node_context_resume(rnc, rnc->user_callback,
+                                                   rnc->user_cookie);
+}
+
+static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+       union scu_remote_node_context *rnc_buffer;
+       struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+       struct domain_device *dev = idev->domain_dev;
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+       rnc_buffer->ssp.is_valid = true;
+
+       if (!idev->is_direct_attached &&
+           (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
+               sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
+       } else {
+               sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
+
+               if (idev->is_direct_attached)
+                       sci_port_setup_transports(idev->owning_port,
+                                                 sci_rnc->remote_node_index);
+       }
+}
+
+static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+       union scu_remote_node_context *rnc_buffer;
+       struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+       rnc_buffer->ssp.is_valid = false;
+
+       sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+                                      SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
+}
+
+static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       /* Check to see if we have gotten back to the initial state because
+        * someone requested to destroy the remote node context object.
+        */
+       if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
+               rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+               sci_remote_node_context_notify_user(rnc);
+       }
+}
+
+static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
+
+       sci_remote_node_context_validate_context_buffer(sci_rnc);
+}
+
+static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       sci_remote_node_context_invalidate_context_buffer(rnc);
+}
+
+static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+       struct isci_remote_device *idev;
+       struct domain_device *dev;
+
+       idev = rnc_to_dev(rnc);
+       dev = idev->domain_dev;
+
+       /*
+        * For direct attached SATA devices we need to clear the TLCR
+        * NCQ to TCi tag mapping on the phy, and in cases where we
+        * resume because of a target reset we also need to update
+        * the STPTLDARNI register with the RNi of the device.
+        */
+       if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+           idev->is_direct_attached)
+               sci_port_setup_transports(idev->owning_port,
+                                              rnc->remote_node_index);
+
+       sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
+}
+
+static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+       if (rnc->user_callback)
+               sci_remote_node_context_notify_user(rnc);
+}
+
+static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static const struct sci_base_state sci_remote_node_context_state_table[] = {
+       [SCI_RNC_INITIAL] = {
+               .enter_state = sci_remote_node_context_initial_state_enter,
+       },
+       [SCI_RNC_POSTING] = {
+               .enter_state = sci_remote_node_context_posting_state_enter,
+       },
+       [SCI_RNC_INVALIDATING] = {
+               .enter_state = sci_remote_node_context_invalidating_state_enter,
+       },
+       [SCI_RNC_RESUMING] = {
+               .enter_state = sci_remote_node_context_resuming_state_enter,
+       },
+       [SCI_RNC_READY] = {
+               .enter_state = sci_remote_node_context_ready_state_enter,
+       },
+       [SCI_RNC_TX_SUSPENDED] = {
+               .enter_state = sci_remote_node_context_tx_suspended_state_enter,
+       },
+       [SCI_RNC_TX_RX_SUSPENDED] = {
+               .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
+       },
+       [SCI_RNC_AWAIT_SUSPENSION] = { },
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+                                           u16 remote_node_index)
+{
+       memset(rnc, 0, sizeof(struct sci_remote_node_context));
+
+       rnc->remote_node_index = remote_node_index;
+       rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+       sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
+}
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+                                                          u32 event_code)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       switch (state) {
+       case SCI_RNC_POSTING:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_POST_RNC_COMPLETE:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+                       break;
+               default:
+                       goto out;
+               }
+               break;
+       case SCI_RNC_INVALIDATING:
+               if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
+                       if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
+                               state = SCI_RNC_INITIAL;
+                       else
+                               state = SCI_RNC_POSTING;
+                       sci_change_state(&sci_rnc->sm, state);
+               } else {
+                       switch (scu_get_event_type(event_code)) {
+                       case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+                       case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+                               /* We really don't care if the hardware is going to suspend
+                                * the device since it's being invalidated anyway */
+                               dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                                       "%s: SCIC Remote Node Context 0x%p was "
+                                       "suspended by hardware while being "
+                                       "invalidated.\n", __func__, sci_rnc);
+                               break;
+                       default:
+                               goto out;
+                       }
+               }
+               break;
+       case SCI_RNC_RESUMING:
+               if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+               } else {
+                       switch (scu_get_event_type(event_code)) {
+                       case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+                       case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+                               /* We really don't care if the hardware is going to suspend
+                                * the device since it's being resumed anyway */
+                               dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                                       "%s: SCIC Remote Node Context 0x%p was "
+                                       "suspended by hardware while being resumed.\n",
+                                       __func__, sci_rnc);
+                               break;
+                       default:
+                               goto out;
+                       }
+               }
+               break;
+       case SCI_RNC_READY:
+               switch (scu_get_event_type(event_code)) {
+               case SCU_EVENT_TL_RNC_SUSPEND_TX:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+                       sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+                       break;
+               case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+                       sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+                       break;
+               default:
+                       goto out;
+               }
+               break;
+       case SCI_RNC_AWAIT_SUSPENSION:
+               switch (scu_get_event_type(event_code)) {
+               case SCU_EVENT_TL_RNC_SUSPEND_TX:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+                       sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+                       break;
+               case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+                       sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+                       break;
+               default:
+                       goto out;
+               }
+               break;
+       default:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+       return SCI_SUCCESS;
+
+ out:
+       dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                "%s: code: %#x state: %d\n", __func__, event_code, state);
+       return SCI_FAILURE;
+
+}
+
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+                                                     scics_sds_remote_node_context_callback cb_fn,
+                                                     void *cb_p)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       switch (state) {
+       case SCI_RNC_INVALIDATING:
+               sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+               return SCI_SUCCESS;
+       case SCI_RNC_POSTING:
+       case SCI_RNC_RESUMING:
+       case SCI_RNC_READY:
+       case SCI_RNC_TX_SUSPENDED:
+       case SCI_RNC_TX_RX_SUSPENDED:
+       case SCI_RNC_AWAIT_SUSPENSION:
+               sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+               sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+               return SCI_SUCCESS;
+       case SCI_RNC_INITIAL:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               /* We have decided that the destruct request on the remote node context
+                * cannot fail since it is either in the initial/destroyed state or it
+                * can be destroyed.
+                */
+               return SCI_SUCCESS;
+       default:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+                                                    u32 suspend_type,
+                                                    scics_sds_remote_node_context_callback cb_fn,
+                                                    void *cb_p)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       if (state != SCI_RNC_READY) {
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_rnc->user_callback   = cb_fn;
+       sci_rnc->user_cookie     = cb_p;
+       sci_rnc->suspension_code = suspend_type;
+
+       if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
+               sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+                                                   SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
+       }
+
+       sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
+       return SCI_SUCCESS;
+}
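
For reference, a minimal sketch (illustrative only, not from the patch; the wrapper name is hypothetical) of requesting a software suspension of a device's RNC with this routine; SCI_SOFTWARE_SUSPENSION makes it post SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX before the state machine enters SCI_RNC_AWAIT_SUSPENSION:

  /* illustrative helper, not part of the isci patch */
  static enum sci_status example_suspend_rnc(struct isci_remote_device *idev)
  {
          /* no completion callback or cookie is registered in this sketch */
          return sci_remote_node_context_suspend(&idev->rnc,
                                                 SCI_SOFTWARE_SUSPENSION,
                                                 NULL, NULL);
  }
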
+
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+                                                   scics_sds_remote_node_context_callback cb_fn,
+                                                   void *cb_p)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       switch (state) {
+       case SCI_RNC_INITIAL:
+               if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+                       return SCI_FAILURE_INVALID_STATE;
+
+               sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+               sci_remote_node_context_construct_buffer(sci_rnc);
+               sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+               return SCI_SUCCESS;
+       case SCI_RNC_POSTING:
+       case SCI_RNC_INVALIDATING:
+       case SCI_RNC_RESUMING:
+               if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+                       return SCI_FAILURE_INVALID_STATE;
+
+               sci_rnc->user_callback = cb_fn;
+               sci_rnc->user_cookie   = cb_p;
+               return SCI_SUCCESS;
+       case SCI_RNC_TX_SUSPENDED: {
+               struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+               struct domain_device *dev = idev->domain_dev;
+
+               sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+
+               /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
+               if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+               else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+                       if (idev->is_direct_attached) {
+                               /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
+                               sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+                       } else {
+                               sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+                       }
+               } else
+                       return SCI_FAILURE;
+               return SCI_SUCCESS;
+       }
+       case SCI_RNC_TX_RX_SUSPENDED:
+               sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+               sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_RNC_AWAIT_SUSPENSION:
+               sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+                                                            struct isci_request *ireq)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+
+       switch (state) {
+       case SCI_RNC_READY:
+               return SCI_SUCCESS;
+       case SCI_RNC_TX_SUSPENDED:
+       case SCI_RNC_TX_RX_SUSPENDED:
+       case SCI_RNC_AWAIT_SUSPENSION:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+       default:
+               break;
+       }
+       dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+               "%s: requested to start IO while still resuming, %d\n",
+               __func__, state);
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+                                                       struct isci_request *ireq)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       switch (state) {
+       case SCI_RNC_RESUMING:
+       case SCI_RNC_READY:
+       case SCI_RNC_AWAIT_SUSPENSION:
+               return SCI_SUCCESS;
+       case SCI_RNC_TX_SUSPENDED:
+       case SCI_RNC_TX_RX_SUSPENDED:
+               sci_remote_node_context_resume(sci_rnc, NULL, NULL);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
new file mode 100644 (file)
index 0000000..41580ad
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+
+/**
+ * This file contains the structures, constants, and prototypes associated with
+ *    the remote node context (RNC) in the silicon.  It exists to model and
+ *    manage that silicon context on the driver's behalf.
+ *
+ *
+ */
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * This constant represents an invalid remote device id; it is used to program
+ * the STPDARNI register so the driver knows when it has received a SIGNATURE
+ * FIS from the SCU.
+ */
+#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX    0x0FFF
+
+#define SCU_HARDWARE_SUSPENSION  (0)
+#define SCI_SOFTWARE_SUSPENSION  (1)
+
+struct isci_request;
+struct isci_remote_device;
+struct sci_remote_node_context;
+
+typedef void (*scics_sds_remote_node_context_callback)(void *);
+
+/**
+ * This is the enumeration of the remote node context states.
+ */
+enum scis_sds_remote_node_context_states {
+       /**
+        * This state is the initial state for a remote node context.  On a resume
+        * request the remote node context will transition to the posting state.
+        */
+       SCI_RNC_INITIAL,
+
+       /**
+        * This is a transition state that posts the RNi to the hardware. Once the RNC
+        * is posted the remote node context will be made ready.
+        */
+       SCI_RNC_POSTING,
+
+       /**
+        * This is a transition state that will post an RNC invalidate to the
+        * hardware.  Once the invalidate is complete the remote node context will
+        * transition to the posting state.
+        */
+       SCI_RNC_INVALIDATING,
+
+       /**
+        * This is a transition state that will post an RNC resume to the hardware.
+        * Once the event notification of resume complete is received the remote node
+        * context will transition to the ready state.
+        */
+       SCI_RNC_RESUMING,
+
+       /**
+        * This is the state that the remote node context must be in to accept io
+        * request operations.
+        */
+       SCI_RNC_READY,
+
+       /**
+        * This is the state that the remote node context transitions to when it gets
+        * a TX suspend notification from the hardware.
+        */
+       SCI_RNC_TX_SUSPENDED,
+
+       /**
+        * This is the state that the remote node context transitions to when it gets
+        * a TX RX suspend notification from the hardware.
+        */
+       SCI_RNC_TX_RX_SUSPENDED,
+
+       /**
+        * In this state the remote node context is waiting for a suspend
+        * notification from the hardware.  This state is entered when either
+        * there is a request to suspend the remote node context or when there is a TC
+        * completion where the remote node will be suspended by the hardware.
+        */
+       SCI_RNC_AWAIT_SUSPENSION
+};
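+
+/*
+ * Rough sketch of the transitions described above (for orientation only;
+ * the authoritative behavior lives in remote_node_context.c):
+ *
+ *   INITIAL --resume--> POSTING --> READY
+ *   INVALIDATING --invalidate complete--> POSTING
+ *   READY --hw suspend notification--> TX_SUSPENDED / TX_RX_SUSPENDED
+ *   TX_SUSPENDED / TX_RX_SUSPENDED --resume--> RESUMING --> READY
+ *   suspend request or suspending TC completion --> AWAIT_SUSPENSION
+ */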
+
+/**
+ *
+ *
+ * This enumeration is used to define the end destination state for the remote
+ * node context.
+ */
+enum sci_remote_node_context_destination_state {
+       SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
+       SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
+       SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
+};
+
+/**
+ * struct sci_remote_node_context - This structure contains the data
+ *    associated with the remote node context object.  The remote node context
+ *    (RNC) object models the remote device information necessary to manage
+ *    the silicon RNC.
+ */
+struct sci_remote_node_context {
+       /**
+        * This field indicates the remote node index (RNI) associated with
+        * this RNC.
+        */
+       u16 remote_node_index;
+
+       /**
+        * This field is the recorded suspension code or the reason for the remote node
+        * context suspension.
+        */
+       u32 suspension_code;
+
+       /**
+        * This field indicates the end destination state requested for the
+        * remote node context.  A READY destination can cause an automatic
+        * resume on receiving a suspension notification.
+        */
+       enum sci_remote_node_context_destination_state destination_state;
+
+       /**
+        * This field contains the callback function that the user requested to be
+        * called when the requested state transition is complete.
+        */
+       scics_sds_remote_node_context_callback user_callback;
+
+       /**
+        * This field contains the cookie passed to the above callback when the
+        * user-requested state transition completes.
+        */
+       void *user_cookie;
+
+       /**
+        * This field contains the data for the object's state machine.
+        */
+       struct sci_base_state_machine sm;
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+                                           u16 remote_node_index);
+
+
+bool sci_remote_node_context_is_ready(
+       struct sci_remote_node_context *sci_rnc);
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+                                                          u32 event_code);
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+                                                     scics_sds_remote_node_context_callback callback,
+                                                     void *callback_parameter);
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+                                                    u32 suspend_type,
+                                                    scics_sds_remote_node_context_callback cb_fn,
+                                                    void *cb_p);
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+                                                   scics_sds_remote_node_context_callback cb_fn,
+                                                   void *cb_p);
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+                                                       struct isci_request *ireq);
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+                                                     struct isci_request *ireq);
+
+#endif  /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
new file mode 100644 (file)
index 0000000..301b314
--- /dev/null
@@ -0,0 +1,598 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
+ *    public, protected, and private methods.
+ *
+ *
+ */
+#include "remote_node_table.h"
+#include "remote_node_context.h"
+
+/**
+ *
+ * @remote_node_table: This is the remote node index table from which the
+ *    selection will be made.
+ * @group_table_index: This is the index to the group table from which to
+ *    search for an available selection.
+ *
+ * This routine will find, in absolute bit terms, the position of the next
+ * available group in the specified group table.  If there are available bits
+ * in the first u32 the result is simply that bit position.  It returns the
+ * absolute bit position of an available group, or
+ * SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if no group is available.
+ */
+static u32 sci_remote_node_table_get_group_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index)
+{
+       u32 dword_index;
+       u32 *group_table;
+       u32 bit_index;
+
+       group_table = remote_node_table->remote_node_groups[group_table_index];
+
+       for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
+               if (group_table[dword_index] != 0) {
+                       for (bit_index = 0; bit_index < 32; bit_index++) {
+                               if ((group_table[dword_index] & (1 << bit_index)) != 0) {
+                                       return (dword_index * 32) + bit_index;
+                               }
+                       }
+               }
+       }
+
+       return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
+}
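+
+/*
+ * Worked example with hypothetical contents (illustration only): if
+ * group_table[0] == 0x00000000 and group_table[1] == 0x00000004, the scan
+ * above skips dword 0, finds bit 2 set in dword 1 and returns
+ * (1 * 32) + 2 == 34 as the absolute group index.
+ */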
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table in which to clear
+ *    the selector.
+ * @group_table_index: This is the remote node selector (group table) in which
+ *    the change will be made.
+ * @group_index: This is the bit index in the table to be modified.
+ *
+ * This method will clear the group index entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_clear_group_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index,
+       u32 group_index)
+{
+       u32 dword_index;
+       u32 bit_index;
+       u32 *group_table;
+
+       BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+       BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+       dword_index = group_index / 32;
+       bit_index   = group_index % 32;
+       group_table = remote_node_table->remote_node_groups[group_table_index];
+
+       group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table in which to set
+ *    the selector.
+ * @group_table_index: This is the remote node selector in which the change
+ *    will be made.
+ * @group_index: This is the bit position in the table to be modified.
+ *
+ * This method will set the group index bit entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_set_group_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index,
+       u32 group_index)
+{
+       u32 dword_index;
+       u32 bit_index;
+       u32 *group_table;
+
+       BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+       BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+       dword_index = group_index / 32;
+       bit_index   = group_index % 32;
+       group_table = remote_node_table->remote_node_groups[group_table_index];
+
+       group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table in which to modify
+ *    the remote node availability.
+ * @remote_node_index: This is the remote node index that is being returned to
+ *    the table.
+ *
+ * This method will mark the remote node as available in the remote node
+ * allocation table.
+ */
+static void sci_remote_node_table_set_node_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 slot_normalized;
+       u32 slot_position;
+
+       BUG_ON(
+               (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+               <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+               );
+
+       dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+       dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+       slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+       slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+       remote_node_table->available_remote_nodes[dword_location] |=
+               1 << (slot_normalized + slot_position);
+}
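+
+/*
+ * Worked example of the index math above, assuming sizeof(u32) == 4 and
+ * using a hypothetical remote_node_index of 7:
+ *   dword_location  = 7 / 24 = 0
+ *   dword_remainder = 7 % 24 = 7
+ *   slot_normalized = (7 / 3) * 4 = 8   (bit offset of the third nibble)
+ *   slot_position   = 7 % 3 = 1
+ * so bit 9 of available_remote_nodes[0] is set.
+ */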
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table from which to clear
+ *    the available remote node bit.
+ * @remote_node_index: This is the remote node index which is to be cleared
+ *    from the table.
+ *
+ * This method clears the remote node index from the table of available remote
+ * nodes.
+ */
+static void sci_remote_node_table_clear_node_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 slot_position;
+       u32 slot_normalized;
+
+       BUG_ON(
+               (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+               <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+               );
+
+       dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+       dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+       slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+       slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+       remote_node_table->available_remote_nodes[dword_location] &=
+               ~(1 << (slot_normalized + slot_position));
+}
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table from which the slot will be
+ *    cleared.
+ * @group_index: The index for the slot that is to be cleared.
+ *
+ * This method clears the entire table slot at the specified slot index.
+ */
+static void sci_remote_node_table_clear_group(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 dword_value;
+
+       BUG_ON(
+               (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+               <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+               );
+
+       dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+       dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+       dword_value = remote_node_table->available_remote_nodes[dword_location];
+       dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+       remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table in which the group will be set.
+ * @group_index: The index for the slot that is to be set.
+ *
+ * This method sets an entire remote node group in the remote node table.
+ */
+static void sci_remote_node_table_set_group(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 dword_value;
+
+       BUG_ON(
+               (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+               <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+               );
+
+       dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+       dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+       dword_value = remote_node_table->available_remote_nodes[dword_location];
+       dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+       remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table that for which the group
+ *    value is to be returned.
+ * @group_index: This is the group index to use to find the group value.
+ *
+ * This method will return the group value, i.e. the bit values, at the
+ * specified remote node group index.
+ */
+static u8 sci_remote_node_table_get_group_value(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 dword_value;
+
+       dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+       dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+       dword_value = remote_node_table->available_remote_nodes[dword_location];
+       dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+       dword_value = dword_value >> (dword_remainder * 4);
+
+       return (u8)dword_value;
+}
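+
+/*
+ * Worked example with hypothetical contents (illustration only): for
+ * group_index == 5, dword_location == 5 / 8 == 0 and dword_remainder == 5,
+ * so the nibble at bit offset 20 is extracted.  With
+ * available_remote_nodes[0] == 0x00700000 this returns 0x07, i.e. a full
+ * slot of three available remote nodes.
+ */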
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table which is to be initialized.
+ * @remote_node_entries: The number of entries to put in the table.
+ *
+ * This method will initialize the remote node table for use.
+ */
+void sci_remote_node_table_initialize(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_entries)
+{
+       u32 index;
+
+       /*
+        * Initialize the raw data.  We could improve the speed by only
+        * initializing those entries that are actually going to be used */
+       memset(
+               remote_node_table->available_remote_nodes,
+               0x00,
+               sizeof(remote_node_table->available_remote_nodes)
+               );
+
+       memset(
+               remote_node_table->remote_node_groups,
+               0x00,
+               sizeof(remote_node_table->remote_node_groups)
+               );
+
+       /* Initialize the available remote node sets */
+       remote_node_table->available_nodes_array_size = (u16)
+                                                       (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+                                                       + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
+
+
+       /* Initialize each full DWORD to a FULL SET of remote nodes */
+       for (index = 0; index < remote_node_entries; index++) {
+               sci_remote_node_table_set_node_index(remote_node_table, index);
+       }
+
+       remote_node_table->group_array_size = (u16)
+                                             (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
+                                             + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
+
+       for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
+               /*
+                * These are all guaranteed to be full slot values so fill them in the
+                * available sets of 3 remote nodes */
+               sci_remote_node_table_set_group_index(remote_node_table, 2, index);
+       }
+
+       /* Now fill in any remainders that we may find */
+       if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
+               sci_remote_node_table_set_group_index(remote_node_table, 1, index);
+       } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
+               sci_remote_node_table_set_group_index(remote_node_table, 0, index);
+       }
+}
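+
+/*
+ * Sizing sketch for a hypothetical remote_node_entries of 32 (assuming
+ * sizeof(u32) == 4, so 24 remote nodes per availability dword and 96 nodes
+ * covered per group-table dword):
+ *   available_nodes_array_size = 32 / 24 + 1 = 2
+ *   group_array_size           = 32 / 96 + 1 = 1
+ * Groups 0..9 (covering 30 nodes) are marked in the triple-node selector
+ * (table 2) and group 10, which holds the remaining two nodes, is marked in
+ * the dual-node selector (table 1).
+ */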
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table from which to allocate a
+ *    remote node.
+ * @group_table_index: The group table index that is to be used for the search.
+ *
+ * This method will allocate a single RNi from the remote node table.  The
+ * group table index determines which remote node group table to search.
+ * This search may fail and another group table can then be specified.  The
+ * function is designed to allow a search of the available single remote node
+ * group up to the triple remote node group.  If an entry is found in the
+ * specified table the remote node is removed and the remote node groups are
+ * updated.  It returns the RNi value, or an invalid remote node context index
+ * if no RNi can be found.
+ */
+static u16 sci_remote_node_table_allocate_single_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index)
+{
+       u8 index;
+       u8 group_value;
+       u32 group_index;
+       u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+       group_index = sci_remote_node_table_get_group_index(
+               remote_node_table, group_table_index);
+
+       /* If we found an available group in this selector, pick a node from it */
+       if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+               group_value = sci_remote_node_table_get_group_value(
+                       remote_node_table, group_index);
+
+               for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
+                       if (((1 << index) & group_value) != 0) {
+                               /* We have selected a bit now clear it */
+                               remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
+                                                         + index);
+
+                               sci_remote_node_table_clear_group_index(
+                                       remote_node_table, group_table_index, group_index
+                                       );
+
+                               sci_remote_node_table_clear_node_index(
+                                       remote_node_table, remote_node_index
+                                       );
+
+                               if (group_table_index > 0) {
+                                       sci_remote_node_table_set_group_index(
+                                               remote_node_table, group_table_index - 1, group_index
+                                               );
+                               }
+
+                               break;
+                       }
+               }
+       }
+
+       return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which to allocate the
+ *    remote node entries.
+ * @group_table_index: This is the group table index which must equal two (2)
+ *    for this operation.
+ *
+ * This method will allocate three consecutive remote node context entries.  If
+ * there are no remaining triple entries the function will return a failure.
+ * It returns the remote node index that represents three consecutive remote
+ * node entries, or an invalid remote node context index if none can be found.
+ */
+static u16 sci_remote_node_table_allocate_triple_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index)
+{
+       u32 group_index;
+       u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+       group_index = sci_remote_node_table_get_group_index(
+               remote_node_table, group_table_index);
+
+       if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+               remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
+
+               sci_remote_node_table_clear_group_index(
+                       remote_node_table, group_table_index, group_index
+                       );
+
+               sci_remote_node_table_clear_group(
+                       remote_node_table, group_index
+                       );
+       }
+
+       return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which the remote node
+ *    allocation is to take place.
+ * @remote_node_count: This is the remote node count, which is one of
+ *    SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
+ *
+ * This method will allocate a remote node that matches the remote node count
+ * specified by the caller.  Valid values for the remote node count are
+ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).  It returns
+ * the allocated remote node index, or an invalid remote node context index.
+ */
+u16 sci_remote_node_table_allocate_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_count)
+{
+       u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+       if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+               remote_node_index =
+                       sci_remote_node_table_allocate_single_remote_node(
+                               remote_node_table, 0);
+
+               if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+                       remote_node_index =
+                               sci_remote_node_table_allocate_single_remote_node(
+                                       remote_node_table, 1);
+               }
+
+               if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+                       remote_node_index =
+                               sci_remote_node_table_allocate_single_remote_node(
+                                       remote_node_table, 2);
+               }
+       } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+               remote_node_index =
+                       sci_remote_node_table_allocate_triple_remote_node(
+                               remote_node_table, 2);
+       }
+
+       return remote_node_index;
+}
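+
+/*
+ * Usage sketch (hypothetical caller, for illustration only; the helper
+ * handle_allocation_failure() is made up for this example):
+ *
+ *	struct sci_remote_node_table rnt;
+ *	u16 rni;
+ *
+ *	sci_remote_node_table_initialize(&rnt, SCI_MAX_REMOTE_DEVICES);
+ *	rni = sci_remote_node_table_allocate_remote_node(&rnt,
+ *						SCU_STP_REMOTE_NODE_COUNT);
+ *	if (rni == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ *		handle_allocation_failure();
+ */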
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index is
+ *    to be freed.
+ * @remote_node_index: The remote node index that is being released.
+ *
+ * This method will free a single remote node index back to the remote node
+ * table.  This routine will update the remote node groups.
+ */
+static void sci_remote_node_table_release_single_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u16 remote_node_index)
+{
+       u32 group_index;
+       u8 group_value;
+
+       group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+       group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
+
+       /*
+        * Assert that we are not trying to add an entry to a slot that is already
+        * full. */
+       BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
+
+       if (group_value == 0x00) {
+               /*
+                * There are no entries in this slot so it must be added to the single
+                * slot table. */
+               sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
+       } else if ((group_value & (group_value - 1)) == 0) {
+               /*
+                * There is only one entry in this slot so it must be moved from the
+                * single slot table to the dual slot table */
+               sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
+               sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
+       } else {
+               /*
+                * There are two entries in the slot so it must be moved from the dual
+                * slot table to the triple slot table. */
+               sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
+               sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
+       }
+
+       sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table to which the remote node
+ *    index is to be freed.
+ *
+ * This method will release a group of three consecutive remote nodes back to
+ * the pool of free remote nodes.
+ */
+static void sci_remote_node_table_release_triple_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u16 remote_node_index)
+{
+       u32 group_index;
+
+       group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+       sci_remote_node_table_set_group_index(
+               remote_node_table, 2, group_index
+               );
+
+       sci_remote_node_table_set_group(remote_node_table, group_index);
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index is
+ *    to be freed.
+ * @remote_node_count: This is the count of consecutive remote nodes that are
+ *    to be freed.
+ *
+ * This method will release the remote node index back into the remote node
+ * table free pool.
+ */
+void sci_remote_node_table_release_remote_node_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_count,
+       u16 remote_node_index)
+{
+       if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+               sci_remote_node_table_release_single_remote_node(
+                       remote_node_table, remote_node_index);
+       } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+               sci_remote_node_table_release_triple_remote_node(
+                       remote_node_table, remote_node_index);
+       }
+}
+
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
new file mode 100644 (file)
index 0000000..721ab98
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
+#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * Remote node sets are sets of remote node indices in the remote node table.
+ * The SCU hardware requires that STP remote node entries take three
+ * consecutive remote node indices, so the table is arranged in sets of three.
+ * The bits are laid out as 0111 0111 to make a byte, and each group of three
+ * bits defines a set of three remote nodes used as a sequence.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
+
+/**
+ *
+ *
+ * Since the remote node table is organized as DWORDs, take the remote node
+ * sets in bytes and represent them in DWORDs.  The lowest-ordered bits are the
+ * ones used when a full DWORD is not in use, e.g.
+ * 0000 0000 0000 0000 0111 0111 0111 0111 if only a single WORD of the DWORD
+ * is in use.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
+       (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * byte
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_BYTE \
+       (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * DWORD
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_DWORD        \
+       (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
+
+/**
+ *
+ *
+ * This is the number of bits in a remote node group
+ */
+#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP   4
+
+#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX      (0xFFFFFFFF)
+#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE    (0x07)
+#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE   (0x00)
+
+/**
+ *
+ *
+ * Expander attached SATA remote node count
+ */
+#define SCU_STP_REMOTE_NODE_COUNT        3
+
+/**
+ *
+ *
+ * Expander or direct attached SSP remote node count
+ */
+#define SCU_SSP_REMOTE_NODE_COUNT        1
+
+/**
+ *
+ *
+ * Direct attached STP remote node count
+ */
+#define SCU_SATA_REMOTE_NODE_COUNT       1
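+
+/*
+ * For reference, assuming sizeof(u32) == 4 the macros above work out to:
+ *   SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE  = 2
+ *   SCIC_SDS_REMOTE_NODES_PER_BYTE      = 6
+ *   SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD = 8
+ *   SCIC_SDS_REMOTE_NODES_PER_DWORD     = 24
+ */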
+
+/**
+ * struct sci_remote_node_table - This structure tracks which remote node
+ *    indices (RNi) are currently available for allocation.
+ *
+ */
+struct sci_remote_node_table {
+       /**
+        * This field contains the array size in dwords
+        */
+       u16 available_nodes_array_size;
+
+       /**
+        * This field contains the array size, in dwords, of each remote node
+        * group table below.
+        */
+       u16 group_array_size;
+
+       /**
+        * This field is the array of available remote node entries in bits.
+        * Because of the way STP remote node data is allocated on the SCU hardware
+        * the remote nodes must occupy three consecutive remote node context
+        * entries.  For ease of allocation and de-allocation we have broken the
+        * sets of three into a single nibble.  When the STP RNi is allocated all
+        * of the bits in the nibble are cleared.  This math results in a table size
+        * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte.
+        */
+       u32 available_remote_nodes[
+               (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+               + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
+
+       /**
+        * This field is the nibble selector for the above table.  There are three
+        * possible selectors each for fast lookup when trying to find one, two or
+        * three remote node entries.
+        */
+       u32 remote_node_groups[
+               SCU_STP_REMOTE_NODE_COUNT][
+               (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
+               + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
+
+};
+
+/* --------------------------------------------------------------------------- */
+
+void sci_remote_node_table_initialize(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_entries);
+
+u16 sci_remote_node_table_allocate_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_count);
+
+void sci_remote_node_table_release_remote_node_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_count,
+       u16 remote_node_index);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
new file mode 100644 (file)
index 0000000..a46e07a
--- /dev/null
@@ -0,0 +1,3391 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "task.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "sas.h"
+
+static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
+                                                       int idx)
+{
+       if (idx == 0)
+               return &ireq->tc->sgl_pair_ab;
+       else if (idx == 1)
+               return &ireq->tc->sgl_pair_cd;
+       else if (idx < 0)
+               return NULL;
+       else
+               return &ireq->sg_table[idx - 2];
+}
+
+static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
+                                         struct isci_request *ireq, u32 idx)
+{
+       u32 offset;
+
+       if (idx == 0) {
+               offset = (void *) &ireq->tc->sgl_pair_ab -
+                        (void *) &ihost->task_context_table[0];
+               return ihost->task_context_dma + offset;
+       } else if (idx == 1) {
+               offset = (void *) &ireq->tc->sgl_pair_cd -
+                        (void *) &ihost->task_context_table[0];
+               return ihost->task_context_dma + offset;
+       }
+
+       return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
+}
+
+static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
+{
+       e->length = sg_dma_len(sg);
+       e->address_upper = upper_32_bits(sg_dma_address(sg));
+       e->address_lower = lower_32_bits(sg_dma_address(sg));
+       e->address_modifier = 0;
+}
+
+static void sci_request_build_sgl(struct isci_request *ireq)
+{
+       struct isci_host *ihost = ireq->isci_host;
+       struct sas_task *task = isci_request_access_task(ireq);
+       struct scatterlist *sg = NULL;
+       dma_addr_t dma_addr;
+       u32 sg_idx = 0;
+       struct scu_sgl_element_pair *scu_sg   = NULL;
+       struct scu_sgl_element_pair *prev_sg  = NULL;
+
+       if (task->num_scatter > 0) {
+               sg = task->scatter;
+
+               while (sg) {
+                       scu_sg = to_sgl_element_pair(ireq, sg_idx);
+                       init_sgl_element(&scu_sg->A, sg);
+                       sg = sg_next(sg);
+                       if (sg) {
+                               init_sgl_element(&scu_sg->B, sg);
+                               sg = sg_next(sg);
+                       } else
+                               memset(&scu_sg->B, 0, sizeof(scu_sg->B));
+
+                       if (prev_sg) {
+                               dma_addr = to_sgl_element_pair_dma(ihost,
+                                                                  ireq,
+                                                                  sg_idx);
+
+                               prev_sg->next_pair_upper =
+                                       upper_32_bits(dma_addr);
+                               prev_sg->next_pair_lower =
+                                       lower_32_bits(dma_addr);
+                       }
+
+                       prev_sg = scu_sg;
+                       sg_idx++;
+               }
+       } else {        /* handle when no sg */
+               scu_sg = to_sgl_element_pair(ireq, sg_idx);
+
+               dma_addr = dma_map_single(&ihost->pdev->dev,
+                                         task->scatter,
+                                         task->total_xfer_len,
+                                         task->data_dir);
+
+               ireq->zero_scatter_daddr = dma_addr;
+
+               scu_sg->A.length = task->total_xfer_len;
+               scu_sg->A.address_upper = upper_32_bits(dma_addr);
+               scu_sg->A.address_lower = lower_32_bits(dma_addr);
+       }
+
+       if (scu_sg) {
+               scu_sg->next_pair_upper = 0;
+               scu_sg->next_pair_lower = 0;
+       }
+}
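+
+/*
+ * Layout note for the routine above: the first two SGL element pairs live
+ * directly in the task context (sgl_pair_ab and sgl_pair_cd); any further
+ * pairs come from ireq->sg_table[].  Each pair's next_pair_upper/lower
+ * fields hold the DMA address of the following pair, so, for example, a
+ * five-element scatterlist fills pairs 0 (A,B), 1 (A,B) and 2 (A only, B
+ * zeroed), with pair 2 terminating the chain.
+ */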
+
+static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
+{
+       struct ssp_cmd_iu *cmd_iu;
+       struct sas_task *task = isci_request_access_task(ireq);
+
+       cmd_iu = &ireq->ssp.cmd;
+
+       memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
+       cmd_iu->add_cdb_len = 0;
+       cmd_iu->_r_a = 0;
+       cmd_iu->_r_b = 0;
+       cmd_iu->en_fburst = 0; /* unsupported */
+       cmd_iu->task_prio = task->ssp_task.task_prio;
+       cmd_iu->task_attr = task->ssp_task.task_attr;
+       cmd_iu->_r_c = 0;
+
+       sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
+                      sizeof(task->ssp_task.cdb) / sizeof(u32));
+}
+
+static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
+{
+       struct ssp_task_iu *task_iu;
+       struct sas_task *task = isci_request_access_task(ireq);
+       struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+       task_iu = &ireq->ssp.tmf;
+
+       memset(task_iu, 0, sizeof(struct ssp_task_iu));
+
+       memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
+
+       task_iu->task_func = isci_tmf->tmf_code;
+       task_iu->task_tag =
+               (ireq->ttype == tmf_task) ?
+               isci_tmf->io_tag :
+               SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SSP request.
+ * @ireq: the isci request being constructed.
+ * @task_context: the SCU task context buffer to be filled in.
+ *
+ */
+static void scu_ssp_reqeust_construct_task_context(
+       struct isci_request *ireq,
+       struct scu_task_context *task_context)
+{
+       dma_addr_t dma_addr;
+       struct isci_remote_device *idev;
+       struct isci_port *iport;
+
+       idev = ireq->target_device;
+       iport = idev->owning_port;
+
+       /* Fill in the TC with its required data */
+       task_context->abort = 0;
+       task_context->priority = 0;
+       task_context->initiator_request = 1;
+       task_context->connection_rate = idev->connection_rate;
+       task_context->protocol_engine_index = ISCI_PEG;
+       task_context->logical_port_index = iport->physical_port_index;
+       task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+       task_context->valid = SCU_TASK_CONTEXT_VALID;
+       task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+       task_context->remote_node_index = idev->rnc.remote_node_index;
+       task_context->command_code = 0;
+
+       task_context->link_layer_control = 0;
+       task_context->do_not_dma_ssp_good_response = 1;
+       task_context->strict_ordering = 0;
+       task_context->control_frame = 0;
+       task_context->timeout_enable = 0;
+       task_context->block_guard_enable = 0;
+
+       task_context->address_modifier = 0;
+
+       /* task_context->type.ssp.tag = ireq->io_tag; */
+       task_context->task_phase = 0x01;
+
+       ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+                             (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+                             (iport->physical_port_index <<
+                              SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+                             ISCI_TAG_TCI(ireq->io_tag));
+
+       /*
+        * Copy the physical address for the command buffer to the
+        * SCU Task Context
+        */
+       dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
+
+       task_context->command_iu_upper = upper_32_bits(dma_addr);
+       task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+       /*
+        * Copy the physical address for the response buffer to the
+        * SCU Task Context
+        */
+       dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+
+       task_context->response_iu_upper = upper_32_bits(dma_addr);
+       task_context->response_iu_lower = lower_32_bits(dma_addr);
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP IO request.
+ * @ireq: the isci request being constructed.
+ *
+ */
+static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
+                                                     enum dma_data_direction dir,
+                                                     u32 len)
+{
+       struct scu_task_context *task_context = ireq->tc;
+
+       scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+       task_context->ssp_command_iu_length =
+               sizeof(struct ssp_cmd_iu) / sizeof(u32);
+       task_context->type.ssp.frame_type = SSP_COMMAND;
+
+       switch (dir) {
+       case DMA_FROM_DEVICE:
+       case DMA_NONE:
+       default:
+               task_context->task_type = SCU_TASK_TYPE_IOREAD;
+               break;
+       case DMA_TO_DEVICE:
+               task_context->task_type = SCU_TASK_TYPE_IOWRITE;
+               break;
+       }
+
+       task_context->transfer_length_bytes = len;
+
+       if (task_context->transfer_length_bytes > 0)
+               sci_request_build_sgl(ireq);
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP Task request.  The
+ *    following important settings are utilized: -# priority ==
+ *    SCU_TASK_PRIORITY_HIGH.  This ensures that the task request is issued
+ *    ahead of other tasks destined for the same Remote Node. -# task_type ==
+ *    SCU_TASK_TYPE_IOREAD.  This simply indicates that a normal request type
+ *    (i.e. non-raw frame) is being utilized to perform task management. -#
+ *    control_frame == 1.  This ensures that the proper endianness is set so
+ *    that the bytes are transmitted in the right order for a task frame.
+ * @ireq: This parameter specifies the task request object being
+ *    constructed.
+ *
+ */
+static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
+{
+       struct scu_task_context *task_context = ireq->tc;
+
+       scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+       task_context->control_frame                = 1;
+       task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
+       task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
+       task_context->transfer_length_bytes        = 0;
+       task_context->type.ssp.frame_type          = SSP_TASK;
+       task_context->ssp_command_iu_length =
+               sizeof(struct ssp_task_iu) / sizeof(u32);
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SATA
+ *    request.  This is called from the various SATA constructors.
+ * @ireq: The general IO request object which is to be used in
+ *    constructing the SCU task context.
+ * @task_context: The buffer pointer for the SCU task context which is being
+ *    constructed.
+ *
+ * On return the general IO request construction is complete and the buffer
+ * assignment for the command buffer is complete.  TODO: revisit task context
+ * construction to determine what is common for SSP/SMP/STP task context
+ * structures.
+ */
+static void scu_sata_reqeust_construct_task_context(
+       struct isci_request *ireq,
+       struct scu_task_context *task_context)
+{
+       dma_addr_t dma_addr;
+       struct isci_remote_device *idev;
+       struct isci_port *iport;
+
+       idev = ireq->target_device;
+       iport = idev->owning_port;
+
+       /* Fill in the TC with its required data */
+       task_context->abort = 0;
+       task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+       task_context->initiator_request = 1;
+       task_context->connection_rate = idev->connection_rate;
+       task_context->protocol_engine_index = ISCI_PEG;
+       task_context->logical_port_index = iport->physical_port_index;
+       task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
+       task_context->valid = SCU_TASK_CONTEXT_VALID;
+       task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+       task_context->remote_node_index = idev->rnc.remote_node_index;
+       task_context->command_code = 0;
+
+       task_context->link_layer_control = 0;
+       task_context->do_not_dma_ssp_good_response = 1;
+       task_context->strict_ordering = 0;
+       task_context->control_frame = 0;
+       task_context->timeout_enable = 0;
+       task_context->block_guard_enable = 0;
+
+       task_context->address_modifier = 0;
+       task_context->task_phase = 0x01;
+
+       task_context->ssp_command_iu_length =
+               (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
+
+       /* Set the first word of the H2D REG FIS */
+       task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
+
+       ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+                             (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+                             (iport->physical_port_index <<
+                              SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+                             ISCI_TAG_TCI(ireq->io_tag));
+       /*
+        * Copy the physical address for the command buffer to the SCU Task
+        * Context. We must offset the command buffer by 4 bytes because the
+        * first 4 bytes are transferred in the body of the TC.
+        */
+       dma_addr = sci_io_request_get_dma_addr(ireq,
+                                               ((char *) &ireq->stp.cmd) +
+                                               sizeof(u32));
+
+       task_context->command_iu_upper = upper_32_bits(dma_addr);
+       task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+       /* SATA Requests do not have a response buffer */
+       task_context->response_iu_upper = 0;
+       task_context->response_iu_lower = 0;
+}
+
+static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
+{
+       struct scu_task_context *task_context = ireq->tc;
+
+       scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+       task_context->control_frame         = 0;
+       task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
+       task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
+       task_context->type.stp.fis_type     = FIS_REGH2D;
+       task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
+}
+
+static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
+                                                         bool copy_rx_frame)
+{
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+
+       scu_stp_raw_request_construct_task_context(ireq);
+
+       stp_req->status = 0;
+       stp_req->sgl.offset = 0;
+       stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
+
+       if (copy_rx_frame) {
+               sci_request_build_sgl(ireq);
+               stp_req->sgl.index = 0;
+       } else {
+               /* The user does not want the data copied to the SGL buffer location */
+               stp_req->sgl.index = -1;
+       }
+
+       return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @ireq: This parameter specifies the request to be constructed as an
+ *    optimized request.
+ * @optimized_task_type: This parameter specifies whether the request is to be
+ *    a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
+ *    value of 1 indicates NCQ.
+ *
+ * This method performs the request construction common to all types of STP
+ * requests that are optimized by the silicon (i.e. UDMA, NCQ).
+ */
+static void sci_stp_optimized_request_construct(struct isci_request *ireq,
+                                                    u8 optimized_task_type,
+                                                    u32 len,
+                                                    enum dma_data_direction dir)
+{
+       struct scu_task_context *task_context = ireq->tc;
+
+       /* Build the STP task context structure */
+       scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+       /* Copy over the SGL elements */
+       sci_request_build_sgl(ireq);
+
+       /* Copy over the number of bytes to be transferred */
+       task_context->transfer_length_bytes = len;
+
+       if (dir == DMA_TO_DEVICE) {
+               /*
+                * The difference between the DMA IN and DMA OUT request task type
+                * values is consistent with the difference between FPDMA READ
+                * and FPDMA WRITE values.  Add the supplied task type parameter
+                * to this difference to set the task type properly for this
+                * DATA OUT (WRITE) case. */
+               task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
+                                                                - SCU_TASK_TYPE_DMA_IN);
+       } else {
+               /*
+                * For the DATA IN (READ) case, simply save the supplied
+                * optimized task type. */
+               task_context->task_type = optimized_task_type;
+       }
+}
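+
+/*
+ * Note on the arithmetic above: because the hardware task-type encodings
+ * keep the same spacing between the DMA IN/OUT pair and the FPDMA
+ * READ/WRITE pair (per the comment in the write branch), passing
+ * optimized_task_type == SCU_TASK_TYPE_DMA_IN for a DMA_TO_DEVICE request
+ * yields SCU_TASK_TYPE_DMA_OUT, and an NCQ type is shifted to its write
+ * counterpart in the same way.
+ */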
+
+
+
+static enum sci_status
+sci_io_request_construct_sata(struct isci_request *ireq,
+                              u32 len,
+                              enum dma_data_direction dir,
+                              bool copy)
+{
+       enum sci_status status = SCI_SUCCESS;
+       struct sas_task *task = isci_request_access_task(ireq);
+
+       /* check for management protocols */
+       if (ireq->ttype == tmf_task) {
+               struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+               if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+                   tmf->tmf_code == isci_tmf_sata_srst_low) {
+                       scu_stp_raw_request_construct_task_context(ireq);
+                       return SCI_SUCCESS;
+               } else {
+                       dev_err(&ireq->owning_controller->pdev->dev,
+                               "%s: Request 0x%p received un-handled SAT "
+                               "management protocol 0x%x.\n",
+                               __func__, ireq, tmf->tmf_code);
+
+                       return SCI_FAILURE;
+               }
+       }
+
+       if (!sas_protocol_ata(task->task_proto)) {
+               dev_err(&ireq->owning_controller->pdev->dev,
+                       "%s: Non-ATA protocol in SATA path: 0x%x\n",
+                       __func__,
+                       task->task_proto);
+               return SCI_FAILURE;
+
+       }
+
+       /* non data */
+       if (task->data_dir == DMA_NONE) {
+               scu_stp_raw_request_construct_task_context(ireq);
+               return SCI_SUCCESS;
+       }
+
+       /* NCQ */
+       if (task->ata_task.use_ncq) {
+               sci_stp_optimized_request_construct(ireq,
+                                                        SCU_TASK_TYPE_FPDMAQ_READ,
+                                                        len, dir);
+               return SCI_SUCCESS;
+       }
+
+       /* DMA */
+       if (task->ata_task.dma_xfer) {
+               sci_stp_optimized_request_construct(ireq,
+                                                        SCU_TASK_TYPE_DMA_IN,
+                                                        len, dir);
+               return SCI_SUCCESS;
+       } else /* PIO */
+               return sci_stp_pio_request_construct(ireq, copy);
+
+       return status;
+}
+
+static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
+{
+       struct sas_task *task = isci_request_access_task(ireq);
+
+       ireq->protocol = SCIC_SSP_PROTOCOL;
+
+       scu_ssp_io_request_construct_task_context(ireq,
+                                                 task->data_dir,
+                                                 task->total_xfer_len);
+
+       sci_io_request_build_ssp_command_iu(ireq);
+
+       sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_task_request_construct_ssp(
+       struct isci_request *ireq)
+{
+       /* Construct the SSP Task SCU Task Context */
+       scu_ssp_task_request_construct_task_context(ireq);
+
+       /* Fill in the SSP Task IU */
+       sci_task_request_build_ssp_task_iu(ireq);
+
+       sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
+{
+       enum sci_status status;
+       bool copy = false;
+       struct sas_task *task = isci_request_access_task(ireq);
+
+       ireq->protocol = SCIC_STP_PROTOCOL;
+
+       copy = (task->data_dir != DMA_NONE);
+
+       status = sci_io_request_construct_sata(ireq,
+                                               task->total_xfer_len,
+                                               task->data_dir,
+                                               copy);
+
+       if (status == SCI_SUCCESS)
+               sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return status;
+}
+
+enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
+{
+       enum sci_status status = SCI_SUCCESS;
+
+       /* check for management protocols */
+       if (ireq->ttype == tmf_task) {
+               struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+               if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+                   tmf->tmf_code == isci_tmf_sata_srst_low) {
+                       scu_stp_raw_request_construct_task_context(ireq);
+               } else {
+                       dev_err(&ireq->owning_controller->pdev->dev,
+                               "%s: Request 0x%p received un-handled SAT "
+                               "Protocol 0x%x.\n",
+                               __func__, ireq, tmf->tmf_code);
+
+                       return SCI_FAILURE;
+               }
+       }
+
+       if (status != SCI_SUCCESS)
+               return status;
+       sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return status;
+}
+
+/**
+ * sci_req_tx_bytes - bytes transferred when reply underruns request
+ * @ireq: request that was terminated early
+ */
+#define SCU_TASK_CONTEXT_SRAM 0x200000
+static u32 sci_req_tx_bytes(struct isci_request *ireq)
+{
+       struct isci_host *ihost = ireq->owning_controller;
+       u32 ret_val = 0;
+
+       if (readl(&ihost->smu_registers->address_modifier) == 0) {
+               void __iomem *scu_reg_base = ihost->scu_registers;
+
+               /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
+                *   BAR1 is the scu_registers
+                *   0x20002C = 0x200000 + 0x2c
+                *            = start of task context SRAM + offset of (type.ssp.data_offset)
+                *   TCi is the io_tag of struct sci_request
+                */
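+               /* Worked example, assuming sizeof(struct scu_task_context) is
+                * 0x100 and data_offset sits at 0x2c as described above:
+                * for TCi == 2 this reads BAR1 + 0x200000 + 0x2c + 2 * 0x100,
+                * i.e. BAR1 + 0x20022c.
+                */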
+               ret_val = readl(scu_reg_base +
+                               (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
+                               ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
+       }
+
+       return ret_val;
+}
+
+enum sci_status sci_request_start(struct isci_request *ireq)
+{
+       enum sci_base_request_states state;
+       struct scu_task_context *tc = ireq->tc;
+       struct isci_host *ihost = ireq->owning_controller;
+
+       state = ireq->sm.current_state_id;
+       if (state != SCI_REQ_CONSTRUCTED) {
+               dev_warn(&ihost->pdev->dev,
+                       "%s: SCIC IO Request requested to start while in wrong "
+                        "state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
+
+       switch (tc->protocol_type) {
+       case SCU_TASK_CONTEXT_PROTOCOL_SMP:
+       case SCU_TASK_CONTEXT_PROTOCOL_SSP:
+               /* SSP/SMP Frame */
+               tc->type.ssp.tag = ireq->io_tag;
+               tc->type.ssp.target_port_transfer_tag = 0xFFFF;
+               break;
+
+       case SCU_TASK_CONTEXT_PROTOCOL_STP:
+               /* STP/SATA Frame
+                * tc->type.stp.ncq_tag = ireq->ncq_tag;
+                */
+               break;
+
+       case SCU_TASK_CONTEXT_PROTOCOL_NONE:
+               /* TODO: When do we set no protocol type? */
+               break;
+
+       default:
+               /* This should never happen since we build the IO
+                * requests */
+               break;
+       }
+
+       /* Add to the post_context the io tag value */
+       ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
+
+       /* Everything is good go ahead and change state */
+       sci_change_state(&ireq->sm, SCI_REQ_STARTED);
+
+       return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_terminate(struct isci_request *ireq)
+{
+       enum sci_base_request_states state;
+
+       state = ireq->sm.current_state_id;
+
+       switch (state) {
+       case SCI_REQ_CONSTRUCTED:
+               ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+               ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               return SCI_SUCCESS;
+       case SCI_REQ_STARTED:
+       case SCI_REQ_TASK_WAIT_TC_COMP:
+       case SCI_REQ_SMP_WAIT_RESP:
+       case SCI_REQ_SMP_WAIT_TC_COMP:
+       case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+       case SCI_REQ_STP_UDMA_WAIT_D2H:
+       case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+       case SCI_REQ_STP_NON_DATA_WAIT_D2H:
+       case SCI_REQ_STP_PIO_WAIT_H2D:
+       case SCI_REQ_STP_PIO_WAIT_FRAME:
+       case SCI_REQ_STP_PIO_DATA_IN:
+       case SCI_REQ_STP_PIO_DATA_OUT:
+       case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+       case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+       case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
+               sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+               return SCI_SUCCESS;
+       case SCI_REQ_TASK_WAIT_TC_RESP:
+               sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               return SCI_SUCCESS;
+       case SCI_REQ_ABORTING:
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               return SCI_SUCCESS;
+       case SCI_REQ_COMPLETED:
+       default:
+               dev_warn(&ireq->owning_controller->pdev->dev,
+                        "%s: SCIC IO Request requested to abort while in wrong "
+                        "state %d\n",
+                        __func__,
+                        ireq->sm.current_state_id);
+               break;
+       }
+
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_request_complete(struct isci_request *ireq)
+{
+       enum sci_base_request_states state;
+       struct isci_host *ihost = ireq->owning_controller;
+
+       state = ireq->sm.current_state_id;
+       if (WARN_ONCE(state != SCI_REQ_COMPLETED,
+                     "isci: request completion from wrong state (%d)\n", state))
+               return SCI_FAILURE_INVALID_STATE;
+
+       if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
+               sci_controller_release_frame(ihost,
+                                                 ireq->saved_rx_frame_index);
+
+       /* XXX can we just stop the machine and remove the 'final' state? */
+       sci_change_state(&ireq->sm, SCI_REQ_FINAL);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
+                                                 u32 event_code)
+{
+       enum sci_base_request_states state;
+       struct isci_host *ihost = ireq->owning_controller;
+
+       state = ireq->sm.current_state_id;
+
+       if (state != SCI_REQ_STP_PIO_DATA_IN) {
+               dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
+                        __func__, event_code, state);
+
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       switch (scu_get_event_specifier(event_code)) {
+       case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
+               /* We are waiting for data and the SCU has R_ERR the data frame.
+                * Go back to waiting for the D2H Register FIS
+                */
+               sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+               return SCI_SUCCESS;
+       default:
+               dev_err(&ihost->pdev->dev,
+                       "%s: pio request unexpected event %#x\n",
+                       __func__, event_code);
+
+               /* TODO Should we fail the PIO request when we get an
+                * unexpected event?
+                */
+               return SCI_FAILURE;
+       }
+}
+
+/*
+ * This function copies response data for requests returning response data
+ *    instead of sense data.
+ * @ireq: This parameter specifies the request object for which to copy
+ *    the response data.
+ */
+static void sci_io_request_copy_response(struct isci_request *ireq)
+{
+       void *resp_buf;
+       u32 len;
+       struct ssp_response_iu *ssp_response;
+       struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+       ssp_response = &ireq->ssp.rsp;
+
+       resp_buf = &isci_tmf->resp.resp_iu;
+
+       len = min_t(u32,
+                   SSP_RESP_IU_MAX_SIZE,
+                   be32_to_cpu(ssp_response->response_data_len));
+
+       memcpy(resp_buf, ssp_response->resp_data, len);
+}
+
+static enum sci_status
+request_started_state_tc_event(struct isci_request *ireq,
+                              u32 completion_code)
+{
+       struct ssp_response_iu *resp_iu;
+       u8 datapres;
+
+       /* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
+        * to determine the SDMA status.
+        */
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               break;
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
+               /* There are times when the SCU hardware will return an early
+                * response because the io request specified more data than is
+                * returned by the target device (mode pages, inquiry data,
+                * etc.).  We must check the response stats to see if this is
+                * truly a failed request or a good request that just got
+                * completed early.
+                */
+               struct ssp_response_iu *resp = &ireq->ssp.rsp;
+               ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+               sci_swab32_cpy(&ireq->ssp.rsp,
+                              &ireq->ssp.rsp,
+                              word_cnt);
+
+               if (resp->status == 0) {
+                       ireq->scu_status = SCU_TASK_DONE_GOOD;
+                       ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+               } else {
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+               }
+               break;
+       }
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
+               ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+               sci_swab32_cpy(&ireq->ssp.rsp,
+                              &ireq->ssp.rsp,
+                              word_cnt);
+
+               ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+               ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+               break;
+       }
+
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
+               /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
+                * guaranteed to be received before this completion status is
+                * posted?
+                */
+               resp_iu = &ireq->ssp.rsp;
+               datapres = resp_iu->datapres;
+
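+               /* DATAPRES in the SSP response IU: 0 == no data, 1 == response
+                * data present, 2 == sense data present; either form of
+                * returned data is treated as a check-response condition here.
+                */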
+               if (datapres == 1 || datapres == 2) {
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+               } else {
+                       ireq->scu_status = SCU_TASK_DONE_GOOD;
+                       ireq->sci_status = SCI_SUCCESS;
+               }
+               break;
+       /* only the stp device gets suspended. */
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
+               if (ireq->protocol == SCIC_STP_PROTOCOL) {
+                       ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+                                          SCU_COMPLETION_TL_STATUS_SHIFT;
+                       ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+               } else {
+                       ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+                                          SCU_COMPLETION_TL_STATUS_SHIFT;
+                       ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               }
+               break;
+
+       /* both the stp and ssp devices get suspended */
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
+               ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+                                  SCU_COMPLETION_TL_STATUS_SHIFT;
+               ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+               break;
+
+       /* neither ssp nor stp gets suspended. */
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
+       default:
+               ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+                                  SCU_COMPLETION_TL_STATUS_SHIFT;
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               break;
+       }
+
+       /*
+        * TODO: This is probably wrong for ACK/NAK timeout conditions
+        */
+
+       /* In all cases we will treat this as the completion of the IO req. */
+       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+       return SCI_SUCCESS;
+}
+
+static enum sci_status
+request_aborting_state_tc_event(struct isci_request *ireq,
+                               u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+       case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
+               ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+               ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+
+       default:
+               /* Unless we get some strange error wait for the task abort to complete
+                * TODO: Should there be a state change for this completion?
+                */
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
+                                                      u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+               break;
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+               /* Currently, the decision is to simply allow the task request
+                * to time out if the task IU wasn't received successfully.
+                * There is a potential for receiving multiple task responses if
+                * we decide to send the task IU again.
+                */
+               dev_warn(&ireq->owning_controller->pdev->dev,
+                        "%s: TaskRequest:0x%p CompletionCode:%x - "
+                        "ACK/NAK timeout\n", __func__, ireq,
+                        completion_code);
+
+               sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+               break;
+       default:
+               /*
+                * All other completion statuses cause the IO to be complete.
+                * If a NAK was received, then it is up to the user to retry
+                * the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_response_tc_event(struct isci_request *ireq,
+                                   u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               /* In the AWAIT RESPONSE state, any TC completion is
+                * unexpected, but if the TC has a success status, we
+                * complete the IO anyway.
+                */
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+               /* These statuses have been seen with a specific LSI
+                * expander, which sometimes is not able to send an SMP
+                * response within 2 ms.  This causes our hardware to break
+                * the connection and set the TC completion with one of
+                * these SMP_XXX_XX_ERR statuses.  For this type of error,
+                * we ask the ihost user to retry the request.
+                */
+               ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
+               ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       default:
+               /* All other completion statuses cause the IO to be complete.  If a NAK
+                * was received, then it is up to the user to retry the request
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_tc_event(struct isci_request *ireq,
+                          u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       default:
+               /* All other completion statuses cause the IO to be
+                * complete.  If a NAK was received, then it is up to
+                * the user to retry the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
+{
+       struct scu_sgl_element *sgl;
+       struct scu_sgl_element_pair *sgl_pair;
+       struct isci_request *ireq = to_ireq(stp_req);
+       struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
+
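+       /* Walk the SGL pairs in order pair[i].A, pair[i].B, pair[i+1].A, ...;
+        * a zeroed B element or a zeroed next-pair pointer ends the walk and
+        * NULL is returned.
+        */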
+       sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+       if (!sgl_pair)
+               sgl = NULL;
+       else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
+               if (sgl_pair->B.address_lower == 0 &&
+                   sgl_pair->B.address_upper == 0) {
+                       sgl = NULL;
+               } else {
+                       pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
+                       sgl = &sgl_pair->B;
+               }
+       } else {
+               if (sgl_pair->next_pair_lower == 0 &&
+                   sgl_pair->next_pair_upper == 0) {
+                       sgl = NULL;
+               } else {
+                       pio_sgl->index++;
+                       pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
+                       sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+                       sgl = &sgl_pair->A;
+               }
+       }
+
+       return sgl;
+}
+
+static enum sci_status
+stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
+                                       u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
+               break;
+
+       default:
+               /* All other completion statuses cause the IO to be
+                * complete.  If a NAK was received, then it is up to
+                * the user to retry the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */
+
+/* transmit a DATA_FIS from (current sgl + offset) for the input
+ * parameter length.  The current sgl and offset are already stored in the IO
+ * request.
+ */
+static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
+       struct isci_request *ireq,
+       u32 length)
+{
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+       struct scu_task_context *task_context = ireq->tc;
+       struct scu_sgl_element_pair *sgl_pair;
+       struct scu_sgl_element *current_sgl;
+
+       /* Recycle the TC and reconstruct it for sending out DATA FIS containing
+        * for the data from current_sgl+offset for the input length
+        */
+       sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+       if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
+               current_sgl = &sgl_pair->A;
+       else
+               current_sgl = &sgl_pair->B;
+
+       /* update the TC */
+       task_context->command_iu_upper = current_sgl->address_upper;
+       task_context->command_iu_lower = current_sgl->address_lower;
+       task_context->transfer_length_bytes = length;
+       task_context->type.stp.fis_type = FIS_DATA;
+
+       /* send the new TC out. */
+       return sci_controller_continue_io(ireq);
+}
+
+static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
+{
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+       struct scu_sgl_element_pair *sgl_pair;
+       struct scu_sgl_element *sgl;
+       enum sci_status status;
+       u32 offset;
+       u32 len = 0;
+
+       offset = stp_req->sgl.offset;
+       sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+       if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
+               return SCI_FAILURE;
+
+       if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
+               sgl = &sgl_pair->A;
+               len = sgl_pair->A.length - offset;
+       } else {
+               sgl = &sgl_pair->B;
+               len = sgl_pair->B.length - offset;
+       }
+
+       if (stp_req->pio_len == 0)
+               return SCI_SUCCESS;
+
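+       /* One DATA frame goes out per call.  Illustrative numbers: with
+        * pio_len == 0x1800 and a 0x1000-byte element at offset 0, a
+        * 0x1000-byte frame is sent, pio_len drops to 0x800 and the walk
+        * advances to the next element; if instead pio_len (say 0x800) is
+        * smaller than what remains in the element, only pio_len bytes are
+        * sent and the saved offset and element address are bumped by that
+        * amount.
+        */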
+       if (stp_req->pio_len >= len) {
+               status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
+               if (status != SCI_SUCCESS)
+                       return status;
+               stp_req->pio_len -= len;
+
+               /* update the current sgl, offset and save for future */
+               sgl = pio_sgl_next(stp_req);
+               offset = 0;
+       } else if (stp_req->pio_len < len) {
+               status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
+
+               /* Sgl offset will be adjusted and saved for future */
+               offset += stp_req->pio_len;
+               sgl->address_lower += stp_req->pio_len;
+               stp_req->pio_len = 0;
+       }
+
+       stp_req->sgl.offset = offset;
+
+       return status;
+}
+
+/**
+ *
+ * @stp_req: The request that is used for the SGL processing.
+ * @data_buf: The buffer of data to be copied.
+ * @len: The length of the data transfer.
+ *
+ * Copy the data from the buffer for the length specified to the IO request
+ * SGL specified data region.  Returns an enum sci_status.
+ */
+static enum sci_status
+sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
+                                                 u8 *data_buf, u32 len)
+{
+       struct isci_request *ireq;
+       u8 *src_addr;
+       int copy_len;
+       struct sas_task *task;
+       struct scatterlist *sg;
+       void *kaddr;
+       int total_len = len;
+
+       ireq = to_ireq(stp_req);
+       task = isci_request_access_task(ireq);
+       src_addr = data_buf;
+
+       if (task->num_scatter > 0) {
+               sg = task->scatter;
+
+               while (total_len > 0) {
+                       struct page *page = sg_page(sg);
+
+                       copy_len = min_t(int, total_len, sg_dma_len(sg));
+                       kaddr = kmap_atomic(page, KM_IRQ0);
+                       memcpy(kaddr + sg->offset, src_addr, copy_len);
+                       kunmap_atomic(kaddr, KM_IRQ0);
+                       total_len -= copy_len;
+                       src_addr += copy_len;
+                       sg = sg_next(sg);
+               }
+       } else {
+               BUG_ON(task->total_xfer_len < total_len);
+               memcpy(task->scatter, src_addr, total_len);
+       }
+
+       return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @stp_req: The PIO DATA IN request that is to receive the data.
+ * @data_buffer: The buffer to copy from.
+ *
+ * Copy the data buffer to the io request data region.  Returns an enum
+ * sci_status.
+ */
+static enum sci_status sci_stp_request_pio_data_in_copy_data(
+       struct isci_stp_request *stp_req,
+       u8 *data_buffer)
+{
+       enum sci_status status;
+
+       /*
+        * If there is less than 1K remaining in the transfer request,
+        * copy just the data for the transfer */
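+       /* Illustrative numbers: with pio_len == 0x600 the first received data
+        * frame copies a full 0x400 bytes leaving 0x200; the next frame copies
+        * the remaining 0x200 and pio_len reaches zero.
+        */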
+       if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
+               status = sci_stp_request_pio_data_in_copy_data_buffer(
+                       stp_req, data_buffer, stp_req->pio_len);
+
+               if (status == SCI_SUCCESS)
+                       stp_req->pio_len = 0;
+       } else {
+               /* We are transferring the whole frame so copy */
+               status = sci_stp_request_pio_data_in_copy_data_buffer(
+                       stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
+
+               if (status == SCI_SUCCESS)
+                       stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
+       }
+
+       return status;
+}
+
+static enum sci_status
+stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
+                                             u32 completion_code)
+{
+       enum sci_status status = SCI_SUCCESS;
+
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+               break;
+
+       default:
+               /* All other completion statuses cause the IO to be
+                * complete.  If a NAK was received, then it is up to
+                * the user to retry the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return status;
+}
+
+static enum sci_status
+pio_data_out_tx_done_tc_event(struct isci_request *ireq,
+                             u32 completion_code)
+{
+       enum sci_status status = SCI_SUCCESS;
+       bool all_frames_transferred = false;
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               /* Transmit data */
+               if (stp_req->pio_len != 0) {
+                       status = sci_stp_request_pio_data_out_transmit_data(ireq);
+                       if (status == SCI_SUCCESS) {
+                               if (stp_req->pio_len == 0)
+                                       all_frames_transferred = true;
+                       }
+               } else if (stp_req->pio_len == 0) {
+                       /*
+                        * this will happen if all of the data is written on the
+                        * first pass after the PIO Setup FIS is received
+                        */
+                       all_frames_transferred  = true;
+               }
+
+               /* all data transferred. */
+               if (all_frames_transferred) {
+                       /*
+                        * Change the state back to SCI_REQ_STP_PIO_WAIT_FRAME
+                        * and wait for the next PIO Setup FIS or the D2H
+                        * Register FIS. */
+                       sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+               }
+               break;
+
+       default:
+               /*
+                * All other completion statuses cause the IO to be complete.
+                * If a NAK was received, then it is up to the user to retry
+                * the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return status;
+}
+
+static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
+                                                                      u32 frame_index)
+{
+       struct isci_host *ihost = ireq->owning_controller;
+       struct dev_to_host_fis *frame_header;
+       enum sci_status status;
+       u32 *frame_buffer;
+
+       status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                              frame_index,
+                                                              (void **)&frame_header);
+
+       if ((status == SCI_SUCCESS) &&
+           (frame_header->fis_type == FIS_REGD2H)) {
+               sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                             frame_index,
+                                                             (void **)&frame_buffer);
+
+               sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                      frame_header,
+                                                      frame_buffer);
+       }
+
+       sci_controller_release_frame(ihost, frame_index);
+
+       return status;
+}
+
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+                                 u32 frame_index)
+{
+       struct isci_host *ihost = ireq->owning_controller;
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+       enum sci_base_request_states state;
+       enum sci_status status;
+       ssize_t word_cnt;
+
+       state = ireq->sm.current_state_id;
+       switch (state)  {
+       case SCI_REQ_STARTED: {
+               struct ssp_frame_hdr ssp_hdr;
+               void *frame_header;
+
+               sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                             frame_index,
+                                                             &frame_header);
+
+               word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
+               sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
+
+               if (ssp_hdr.frame_type == SSP_RESPONSE) {
+                       struct ssp_response_iu *resp_iu;
+                       ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&resp_iu);
+
+                       sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
+
+                       resp_iu = &ireq->ssp.rsp;
+
+                       if (resp_iu->datapres == 0x01 ||
+                           resp_iu->datapres == 0x02) {
+                               ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+                       } else {
+                               ireq->scu_status = SCU_TASK_DONE_GOOD;
+                               ireq->sci_status = SCI_SUCCESS;
+                       }
+               } else {
+                       /* not a response frame, why did it get forwarded? */
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p received unexpected "
+                               "frame %d type 0x%02x\n", __func__, ireq,
+                               frame_index, ssp_hdr.frame_type);
+               }
+
+               /*
+                * In any case we are done with this frame buffer return it to
+                * the controller
+                */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return SCI_SUCCESS;
+       }
+
+       case SCI_REQ_TASK_WAIT_TC_RESP:
+               sci_io_request_copy_response(ireq);
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               sci_controller_release_frame(ihost, frame_index);
+               return SCI_SUCCESS;
+
+       case SCI_REQ_SMP_WAIT_RESP: {
+               struct smp_resp *rsp_hdr = &ireq->smp.rsp;
+               void *frame_header;
+
+               sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                             frame_index,
+                                                             &frame_header);
+
+               /* byte swap the header. */
+               word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
+               sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
+
+               if (rsp_hdr->frame_type == SMP_RESPONSE) {
+                       void *smp_resp;
+
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     &smp_resp);
+
+                       word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
+                               sizeof(u32);
+
+                       sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
+                                      smp_resp, word_cnt);
+
+                       ireq->scu_status = SCU_TASK_DONE_GOOD;
+                       ireq->sci_status = SCI_SUCCESS;
+                       sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
+               } else {
+                       /*
+                        * This was not a response frame; why did it get
+                        * forwarded?
+                        */
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC SMP Request 0x%p received unexpected "
+                               "frame %d type 0x%02x\n",
+                               __func__,
+                               ireq,
+                               frame_index,
+                               rsp_hdr->frame_type);
+
+                       ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
+                       ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               }
+
+               sci_controller_release_frame(ihost, frame_index);
+
+               return SCI_SUCCESS;
+       }
+
+       case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+               return sci_stp_request_udma_general_frame_handler(ireq,
+                                                                      frame_index);
+
+       case SCI_REQ_STP_UDMA_WAIT_D2H:
+               /* Use the general frame handler to copy the response data */
+               status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
+
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+               ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               return SCI_SUCCESS;
+
+       case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
+               struct dev_to_host_fis *frame_header;
+               u32 *frame_buffer;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&frame_header);
+
+               if (status != SCI_SUCCESS) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p could not get frame "
+                               "header for frame index %d, status %x\n",
+                               __func__,
+                               stp_req,
+                               frame_index,
+                               status);
+
+                       return status;
+               }
+
+               switch (frame_header->fis_type) {
+               case FIS_REGD2H:
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                              frame_header,
+                                                              frame_buffer);
+
+                       /* The command has completed with error */
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       break;
+
+               default:
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: IO Request:0x%p Frame Id:%d protocol "
+                                 "violation occurred\n", __func__, stp_req,
+                                 frame_index);
+
+                       ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+                       ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+                       break;
+               }
+
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+               /* Frame has been decoded return it to the controller */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return status;
+       }
+
+       case SCI_REQ_STP_PIO_WAIT_FRAME: {
+               struct sas_task *task = isci_request_access_task(ireq);
+               struct dev_to_host_fis *frame_header;
+               u32 *frame_buffer;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&frame_header);
+
+               if (status != SCI_SUCCESS) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p could not get frame "
+                               "header for frame index %d, status %x\n",
+                               __func__, stp_req, frame_index, status);
+                       return status;
+               }
+
+               switch (frame_header->fis_type) {
+               case FIS_PIO_SETUP:
+                       /* Get from the frame buffer the PIO Setup Data */
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       /* Get the data from the PIO Setup.  The SCU hardware
+                        * returns the first word in the frame_header and the
+                        * rest of the data is in the frame buffer, so we need
+                        * to back up one dword.
+                        */
+
+                       /* transfer_count: first 16bits in the 4th dword */
+                       stp_req->pio_len = frame_buffer[3] & 0xffff;
+
+                       /* status: 4th byte in the 3rd dword */
+                       stp_req->status = (frame_buffer[2] >> 24) & 0xff;
+
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                              frame_header,
+                                                              frame_buffer);
+
+                       ireq->stp.rsp.status = stp_req->status;
+
+                       /* The next state is dependent on whether the
+                        * request was PIO Data-in or Data out
+                        */
+                       if (task->data_dir == DMA_FROM_DEVICE) {
+                               sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
+                       } else if (task->data_dir == DMA_TO_DEVICE) {
+                               /* Transmit data */
+                               status = sci_stp_request_pio_data_out_transmit_data(ireq);
+                               if (status != SCI_SUCCESS)
+                                       break;
+                               sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
+                       }
+                       break;
+
+               case FIS_SETDEVBITS:
+                       sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+                       break;
+
+               case FIS_REGD2H:
+                       if (frame_header->status & ATA_BUSY) {
+                               /*
+                                * Now why is the drive sending a D2H Register
+                                * FIS when it is still busy?  Do nothing since
+                                * we are still in the right state.
+                                */
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SCIC PIO Request 0x%p received "
+                                       "D2H Register FIS with BSY status "
+                                       "0x%x\n",
+                                       __func__,
+                                       stp_req,
+                                       frame_header->status);
+                               break;
+                       }
+
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                              frame_header,
+                                                              frame_buffer);
+
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+                       break;
+
+               default:
+                       /* FIXME: what do we do here? */
+                       break;
+               }
+
+               /* Frame is decoded return it to the controller */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return status;
+       }
+
+       case SCI_REQ_STP_PIO_DATA_IN: {
+               struct dev_to_host_fis *frame_header;
+               struct sata_fis_data *frame_buffer;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&frame_header);
+
+               if (status != SCI_SUCCESS) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p could not get frame "
+                               "header for frame index %d, status %x\n",
+                               __func__,
+                               stp_req,
+                               frame_index,
+                               status);
+                       return status;
+               }
+
+               if (frame_header->fis_type != FIS_DATA) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC PIO Request 0x%p received frame %d "
+                               "with fis type 0x%02x when expecting a data "
+                               "fis.\n",
+                               __func__,
+                               stp_req,
+                               frame_index,
+                               frame_header->fis_type);
+
+                       ireq->scu_status = SCU_TASK_DONE_GOOD;
+                       ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+                       /* Frame is decoded return it to the controller */
+                       sci_controller_release_frame(ihost, frame_index);
+                       return status;
+               }
+
+               if (stp_req->sgl.index < 0) {
+                       ireq->saved_rx_frame_index = frame_index;
+                       stp_req->pio_len = 0;
+               } else {
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       status = sci_stp_request_pio_data_in_copy_data(stp_req,
+                                                                           (u8 *)frame_buffer);
+
+                       /* Frame is decoded return it to the controller */
+                       sci_controller_release_frame(ihost, frame_index);
+               }
+
+               /* Check for the end of the transfer: are there more
+                * bytes remaining for this data transfer?
+                */
+               if (status != SCI_SUCCESS || stp_req->pio_len != 0)
+                       return status;
+
+               if ((stp_req->status & ATA_BUSY) == 0) {
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               } else {
+                       sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+               }
+               return status;
+       }
+
+       case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
+               struct dev_to_host_fis *frame_header;
+               u32 *frame_buffer;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&frame_header);
+               if (status != SCI_SUCCESS) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p could not get frame "
+                               "header for frame index %d, status %x\n",
+                               __func__,
+                               stp_req,
+                               frame_index,
+                               status);
+                       return status;
+               }
+
+               switch (frame_header->fis_type) {
+               case FIS_REGD2H:
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                              frame_header,
+                                                              frame_buffer);
+
+                       /* The command has completed with error */
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       break;
+
+               default:
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: IO Request:0x%p Frame Id:%d protocol "
+                                "violation occurred\n",
+                                __func__,
+                                stp_req,
+                                frame_index);
+
+                       ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+                       ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+                       break;
+               }
+
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+               /* Frame has been decoded return it to the controller */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return status;
+       }
+       case SCI_REQ_ABORTING:
+               /*
+                * TODO: Is it even possible to get an unsolicited frame in the
+                * aborting state?
+                */
+               sci_controller_release_frame(ihost, frame_index);
+               return SCI_SUCCESS;
+
+       default:
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC IO Request given unexpected frame %x while "
+                        "in state %d\n",
+                        __func__,
+                        frame_index,
+                        state);
+
+               sci_controller_release_frame(ihost, frame_index);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
+                                                      u32 completion_code)
+{
+       enum sci_status status = SCI_SUCCESS;
+
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+               /* We must check the response buffer to see if the D2H
+                * Register FIS was received before we got the TC
+                * completion.
+                */
+               if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
+                       sci_remote_device_suspend(ireq->target_device,
+                               SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               } else {
+                       /* If we have an error completion status for the
+                        * TC then we can expect a D2H register FIS from
+                        * the device so we must change state to wait
+                        * for it
+                        */
+                       sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
+               }
+               break;
+
+       /* TODO Check to see if any of these completion status need to
+        * wait for the device to host register fis.
+        */
+       /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
+        * - this comes only for B0
+        */
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
+               sci_remote_device_suspend(ireq->target_device,
+                       SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+       /* Fall through to the default case */
+       default:
+               /* All other completion status cause the IO to be complete. */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return status;
+}
+
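+/* SATA soft reset is driven as a two-FIS sequence: an H2D register FIS with
+ * the SRST bit asserted, followed by one with SRST cleared (the "diagnostic"
+ * FIS); the device then answers with a D2H register FIS.
+ */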
+static enum sci_status
+stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
+                                                  u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
+               break;
+
+       default:
+               /*
+                * All other completion status cause the IO to be complete.
+                * If a NAK was received, then it is up to the user to retry
+                * the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
+                                                    u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
+               break;
+
+       default:
+               /* All other completion status cause the IO to be complete.  If
+                * a NAK was received, then it is up to the user to retry the
+                * request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
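+/* Dispatch a task-context completion to the handler that matches the
+ * request's current state; completions that arrive in an unexpected state
+ * are logged and rejected.
+ */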
+enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq,
+                                 u32 completion_code)
+{
+       enum sci_base_request_states state;
+       struct isci_host *ihost = ireq->owning_controller;
+
+       state = ireq->sm.current_state_id;
+
+       switch (state) {
+       case SCI_REQ_STARTED:
+               return request_started_state_tc_event(ireq, completion_code);
+
+       case SCI_REQ_TASK_WAIT_TC_COMP:
+               return ssp_task_request_await_tc_event(ireq,
+                                                      completion_code);
+
+       case SCI_REQ_SMP_WAIT_RESP:
+               return smp_request_await_response_tc_event(ireq,
+                                                          completion_code);
+
+       case SCI_REQ_SMP_WAIT_TC_COMP:
+               return smp_request_await_tc_event(ireq, completion_code);
+
+       case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+               return stp_request_udma_await_tc_event(ireq,
+                                                      completion_code);
+
+       case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+               return stp_request_non_data_await_h2d_tc_event(ireq,
+                                                              completion_code);
+
+       case SCI_REQ_STP_PIO_WAIT_H2D:
+               return stp_request_pio_await_h2d_completion_tc_event(ireq,
+                                                                    completion_code);
+
+       case SCI_REQ_STP_PIO_DATA_OUT:
+               return pio_data_out_tx_done_tc_event(ireq, completion_code);
+
+       case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+               return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
+                                                                         completion_code);
+
+       case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+               return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
+                                                                           completion_code);
+
+       case SCI_REQ_ABORTING:
+               return request_aborting_state_tc_event(ireq,
+                                                      completion_code);
+
+       default:
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC IO Request given task completion "
+                        "notification %x while in wrong state %d\n",
+                        __func__,
+                        completion_code,
+                        state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+/**
+ * isci_request_process_response_iu() - This function sets the status and
+ *    response iu, in the task struct, from the request object for the upper
+ *    layer driver.
+ * @task: This parameter is the task struct from the upper layer driver.
+ * @resp_iu: This parameter points to the response iu of the completed request.
+ * @dev: This parameter specifies the linux device struct.
+ *
+ * none.
+ */
+static void isci_request_process_response_iu(
+       struct sas_task *task,
+       struct ssp_response_iu *resp_iu,
+       struct device *dev)
+{
+       dev_dbg(dev,
+               "%s: resp_iu = %p "
+               "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
+               "resp_iu->response_data_len = %x, "
+               "resp_iu->sense_data_len = %x\nresponse data: ",
+               __func__,
+               resp_iu,
+               resp_iu->status,
+               resp_iu->datapres,
+               resp_iu->response_data_len,
+               resp_iu->sense_data_len);
+
+       task->task_status.stat = resp_iu->status;
+
+       /* libsas updates the task status fields based on the response iu. */
+       sas_ssp_task_response(dev, task, resp_iu);
+}
+
+/**
+ * isci_request_set_open_reject_status() - This function prepares the I/O
+ *    completion for OPEN_REJECT conditions.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the sas_task associated with the completed request.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ *    the LLDD with respect to completing this request or forcing an abort
+ *    condition on the I/O.
+ * @open_rej_reason: This parameter specifies the encoded reason for the
+ *    abandon-class reject.
+ *
+ * none.
+ */
+static void isci_request_set_open_reject_status(
+       struct isci_request *request,
+       struct sas_task *task,
+       enum service_response *response_ptr,
+       enum exec_status *status_ptr,
+       enum isci_completion_selection *complete_to_host_ptr,
+       enum sas_open_rej_reason open_rej_reason)
+{
+       /* Task in the target is done. */
+       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+       *response_ptr                     = SAS_TASK_UNDELIVERED;
+       *status_ptr                       = SAS_OPEN_REJECT;
+       *complete_to_host_ptr             = isci_perform_normal_io_completion;
+       task->task_status.open_rej_reason = open_rej_reason;
+}
+
+/**
+ * isci_request_handle_controller_specific_errors() - This function decodes
+ *    controller-specific I/O completion error conditions.
+ * @idev: This parameter is the isci_remote_device for the completed request.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the sas_task associated with the completed request.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ *    the LLDD with respect to completing this request or forcing an abort
+ *    condition on the I/O.
+ *
+ * none.
+ */
+static void isci_request_handle_controller_specific_errors(
+       struct isci_remote_device *idev,
+       struct isci_request *request,
+       struct sas_task *task,
+       enum service_response *response_ptr,
+       enum exec_status *status_ptr,
+       enum isci_completion_selection *complete_to_host_ptr)
+{
+       unsigned int cstatus;
+
+       cstatus = request->scu_status;
+
+       dev_dbg(&request->isci_host->pdev->dev,
+               "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
+               "- controller status = 0x%x\n",
+               __func__, request, cstatus);
+
+       /* Decode the controller-specific errors; most
+        * important is to recognize those conditions in which
+        * the target may still have a task outstanding that
+        * must be aborted.
+        *
+        * Note that there are SCU completion codes being
+        * named in the decode below for which SCIC has already
+        * done work to handle them in a way other than as
+        * a controller-specific completion code; these are left
+        * in the decode below for completeness sake.
+        */
+       switch (cstatus) {
+       case SCU_TASK_DONE_DMASETUP_DIRERR:
+       /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
+       case SCU_TASK_DONE_XFERCNT_ERR:
+               /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
+               if (task->task_proto == SAS_PROTOCOL_SMP) {
+                       /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
+                       *response_ptr = SAS_TASK_COMPLETE;
+
+                       /* See if the device has been/is being stopped. Note
+                        * that we ignore the quiesce state, since we are
+                        * concerned about the actual device state.
+                        */
+                       if (!idev)
+                               *status_ptr = SAS_DEVICE_UNKNOWN;
+                       else
+                               *status_ptr = SAS_ABORTED_TASK;
+
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       *complete_to_host_ptr =
+                               isci_perform_normal_io_completion;
+               } else {
+                       /* Task in the target is not done. */
+                       *response_ptr = SAS_TASK_UNDELIVERED;
+
+                       if (!idev)
+                               *status_ptr = SAS_DEVICE_UNKNOWN;
+                       else
+                               *status_ptr = SAM_STAT_TASK_ABORTED;
+
+                       clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       *complete_to_host_ptr =
+                               isci_perform_error_io_completion;
+               }
+
+               break;
+
+       case SCU_TASK_DONE_CRC_ERR:
+       case SCU_TASK_DONE_NAK_CMD_ERR:
+       case SCU_TASK_DONE_EXCESS_DATA:
+       case SCU_TASK_DONE_UNEXP_FIS:
+       /* Also SCU_TASK_DONE_UNEXP_RESP: */
+       case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
+       case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
+       case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
+               /* These are conditions in which the target
+                * has completed the task, so that no cleanup
+                * is necessary.
+                */
+               *response_ptr = SAS_TASK_COMPLETE;
+
+               /* See if the device has been/is being stopped. Note
+                * that we ignore the quiesce state, since we are
+                * concerned about the actual device state.
+                */
+               if (!idev)
+                       *status_ptr = SAS_DEVICE_UNKNOWN;
+               else
+                       *status_ptr = SAS_ABORTED_TASK;
+
+               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+               *complete_to_host_ptr = isci_perform_normal_io_completion;
+               break;
+
+       /* Note that the only open reject completion codes seen here will be
+        * abandon-class codes; all others are automatically retried in the SCU.
+        */
+       case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+
+               /* Note - the return of AB0 will change when
+                * libsas implements detection of zone violations.
+                */
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_RESV_AB0);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_RESV_AB1);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_RESV_AB2);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_RESV_AB3);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_BAD_DEST);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_STP_NORES);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_EPROTO);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_CONN_RATE);
+               break;
+
+       case SCU_TASK_DONE_LL_R_ERR:
+       /* Also SCU_TASK_DONE_ACK_NAK_TO: */
+       case SCU_TASK_DONE_LL_PERR:
+       case SCU_TASK_DONE_LL_SY_TERM:
+       /* Also SCU_TASK_DONE_NAK_ERR:*/
+       case SCU_TASK_DONE_LL_LF_TERM:
+       /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
+       case SCU_TASK_DONE_LL_ABORT_ERR:
+       case SCU_TASK_DONE_SEQ_INV_TYPE:
+       /* Also SCU_TASK_DONE_UNEXP_XR: */
+       case SCU_TASK_DONE_XR_IU_LEN_ERR:
+       case SCU_TASK_DONE_INV_FIS_LEN:
+       /* Also SCU_TASK_DONE_XR_WD_LEN: */
+       case SCU_TASK_DONE_SDMA_ERR:
+       case SCU_TASK_DONE_OFFSET_ERR:
+       case SCU_TASK_DONE_MAX_PLD_ERR:
+       case SCU_TASK_DONE_LF_ERR:
+       case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
+       case SCU_TASK_DONE_SMP_LL_RX_ERR:
+       case SCU_TASK_DONE_UNEXP_DATA:
+       case SCU_TASK_DONE_UNEXP_SDBFIS:
+       case SCU_TASK_DONE_REG_ERR:
+       case SCU_TASK_DONE_SDB_ERR:
+       case SCU_TASK_DONE_TASK_ABORT:
+       default:
+               /* Task in the target is not done. */
+               *response_ptr = SAS_TASK_UNDELIVERED;
+               *status_ptr = SAM_STAT_TASK_ABORTED;
+
+               if (task->task_proto == SAS_PROTOCOL_SMP) {
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       *complete_to_host_ptr = isci_perform_normal_io_completion;
+               } else {
+                       clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       *complete_to_host_ptr = isci_perform_error_io_completion;
+               }
+               break;
+       }
+}
+
+/**
+ * isci_task_save_for_upper_layer_completion() - This function saves the
+ *    request for later completion to the upper layer driver.
+ * @host: This parameter is a pointer to the host on which the request
+ *    should be queued (either as an error or success).
+ * @request: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ * @task_notification_selection: This parameter specifies how the completed
+ *    request should be reported to the upper layer.
+ *
+ * none.
+ */
+static void isci_task_save_for_upper_layer_completion(
+       struct isci_host *host,
+       struct isci_request *request,
+       enum service_response response,
+       enum exec_status status,
+       enum isci_completion_selection task_notification_selection)
+{
+       struct sas_task *task = isci_request_access_task(request);
+
+       task_notification_selection
+               = isci_task_set_completion_status(task, response, status,
+                                                 task_notification_selection);
+
+       /* Tasks aborted specifically by a call to the lldd_abort_task
+        * function should not be completed to the host in the regular path.
+        */
+       switch (task_notification_selection) {
+
+       case isci_perform_normal_io_completion:
+
+               /* Normal notification (task_done) */
+               dev_dbg(&host->pdev->dev,
+                       "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
+                       __func__,
+                       task,
+                       task->task_status.resp, response,
+                       task->task_status.stat, status);
+               /* Add to the completed list. */
+               list_add(&request->completed_node,
+                        &host->requests_to_complete);
+
+               /* Take the request off the device's pending request list. */
+               list_del_init(&request->dev_node);
+               break;
+
+       case isci_perform_aborted_io_completion:
+               /* No notification to libsas because this request is
+                * already in the abort path.
+                */
+               dev_dbg(&host->pdev->dev,
+                        "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
+                        __func__,
+                        task,
+                        task->task_status.resp, response,
+                        task->task_status.stat, status);
+
+               /* Wake up whatever process was waiting for this
+                * request to complete.
+                */
+               WARN_ON(request->io_request_completion == NULL);
+
+               if (request->io_request_completion != NULL) {
+
+                       /* Signal whoever is waiting that this
+                       * request is complete.
+                       */
+                       complete(request->io_request_completion);
+               }
+               break;
+
+       case isci_perform_error_io_completion:
+               /* Use sas_task_abort */
+               dev_dbg(&host->pdev->dev,
+                        "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
+                        __func__,
+                        task,
+                        task->task_status.resp, response,
+                        task->task_status.stat, status);
+               /* Add to the aborted list. */
+               list_add(&request->completed_node,
+                        &host->requests_to_errorback);
+               break;
+
+       default:
+               dev_dbg(&host->pdev->dev,
+                        "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
+                        __func__,
+                        task,
+                        task->task_status.resp, response,
+                        task->task_status.stat, status);
+
+               /* Add to the error to libsas list. */
+               list_add(&request->completed_node,
+                        &host->requests_to_errorback);
+               break;
+       }
+}
+
+static void isci_request_process_stp_response(struct sas_task *task,
+                                             void *response_buffer)
+{
+       struct dev_to_host_fis *d2h_reg_fis = response_buffer;
+       struct task_status_struct *ts = &task->task_status;
+       struct ata_task_resp *resp = (void *)&ts->buf[0];
+
+       resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
+       memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
+       ts->buf_valid_size = sizeof(*resp);
+
+       /* If the device fault bit is set in the status register, report a
+        * protocol response so that libsas examines the FIS; otherwise
+        * report good status.
+        */
+       if (d2h_reg_fis->status & ATA_DF)
+               ts->stat = SAS_PROTO_RESPONSE;
+       else
+               ts->stat = SAM_STAT_GOOD;
+
+       ts->resp = SAS_TASK_COMPLETE;
+}
+
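+/* Translate an SCI core completion into libsas response/status values and
+ * queue the request for completion back to the upper layer.
+ */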
+static void isci_request_io_request_complete(struct isci_host *ihost,
+                                            struct isci_request *request,
+                                            enum sci_io_status completion_status)
+{
+       struct sas_task *task = isci_request_access_task(request);
+       struct ssp_response_iu *resp_iu;
+       void *resp_buf;
+       unsigned long task_flags;
+       struct isci_remote_device *idev = isci_lookup_device(task->dev);
+       enum service_response response       = SAS_TASK_UNDELIVERED;
+       enum exec_status status         = SAS_ABORTED_TASK;
+       enum isci_request_status request_status;
+       enum isci_completion_selection complete_to_host
+               = isci_perform_normal_io_completion;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: request = %p, task = %p,\n"
+               "task->data_dir = %d completion_status = 0x%x\n",
+               __func__,
+               request,
+               task,
+               task->data_dir,
+               completion_status);
+
+       spin_lock(&request->state_lock);
+       request_status = request->status;
+
+       /* Decode the request status.  Note that if the request has been
+        * aborted by a task management function, we don't care
+        * what the status is.
+        */
+       switch (request_status) {
+
+       case aborted:
+               /* "aborted" indicates that the request was aborted by a task
+                * management function, since once a task management request is
+                * performed by the device, the request only completes because
+                * of the subsequent driver terminate.
+                *
+                * Aborted also means an external thread is explicitly managing
+                * this request, so that we do not complete it up the stack.
+                *
+                * The target is still there (since the TMF was successful).
+                */
+               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+               response = SAS_TASK_COMPLETE;
+
+               /* See if the device has been/is being stopped. Note
+                * that we ignore the quiesce state, since we are
+                * concerned about the actual device state.
+                */
+               if (!idev)
+                       status = SAS_DEVICE_UNKNOWN;
+               else
+                       status = SAS_ABORTED_TASK;
+
+               complete_to_host = isci_perform_aborted_io_completion;
+               /* This was an aborted request. */
+
+               spin_unlock(&request->state_lock);
+               break;
+
+       case aborting:
+               /* aborting means that the task management function tried and
+                * failed to abort the request. We need to note the request
+                * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
+                * target as down.
+                *
+                * Aborting also means an external thread is explicitly managing
+                * this request, so that we do not complete it up the stack.
+                */
+               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+               response = SAS_TASK_UNDELIVERED;
+
+               if (!idev)
+                       /* The device has been /is being stopped. Note that
+                        * we ignore the quiesce state, since we are
+                        * concerned about the actual device state.
+                        */
+                       status = SAS_DEVICE_UNKNOWN;
+               else
+                       status = SAS_PHY_DOWN;
+
+               complete_to_host = isci_perform_aborted_io_completion;
+
+               /* This was an aborted request. */
+
+               spin_unlock(&request->state_lock);
+               break;
+
+       case terminating:
+
+               /* This was a terminated request.  This happens when
+                * the I/O is being terminated because of an action on
+                * the device (reset, tear down, etc.), and the I/O needs
+                * to be completed up the stack.
+                */
+               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+               response = SAS_TASK_UNDELIVERED;
+
+               /* See if the device has been/is being stopped. Note
+                * that we ignore the quiesce state, since we are
+                * concerned about the actual device state.
+                */
+               if (!idev)
+                       status = SAS_DEVICE_UNKNOWN;
+               else
+                       status = SAS_ABORTED_TASK;
+
+               complete_to_host = isci_perform_aborted_io_completion;
+
+               /* This was a terminated request. */
+
+               spin_unlock(&request->state_lock);
+               break;
+
+       case dead:
+               /* This was a terminated request that timed-out during the
+                * termination process.  There is no task to complete to
+                * libsas.
+                */
+               complete_to_host = isci_perform_normal_io_completion;
+               spin_unlock(&request->state_lock);
+               break;
+
+       default:
+
+               /* The request is done from an SCU HW perspective. */
+               request->status = completed;
+
+               spin_unlock(&request->state_lock);
+
+               /* This is an active request being completed from the core. */
+               switch (completion_status) {
+
+               case SCI_IO_FAILURE_RESPONSE_VALID:
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+                               __func__,
+                               request,
+                               task);
+
+                       if (sas_protocol_ata(task->task_proto)) {
+                               resp_buf = &request->stp.rsp;
+                               isci_request_process_stp_response(task,
+                                                                 resp_buf);
+                       } else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+                               /* crack the iu response buffer. */
+                               resp_iu = &request->ssp.rsp;
+                               isci_request_process_response_iu(task, resp_iu,
+                                                                &ihost->pdev->dev);
+
+                       } else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+                               dev_err(&ihost->pdev->dev,
+                                       "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+                                       "SAS_PROTOCOL_SMP protocol\n",
+                                       __func__);
+
+                       } else
+                               dev_err(&ihost->pdev->dev,
+                                       "%s: unknown protocol\n", __func__);
+
+                       /* use the task status set in the task struct by the
+                        * response processing above.
+                        */
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                       response = task->task_status.resp;
+                       status = task->task_status.stat;
+                       break;
+
+               case SCI_IO_SUCCESS:
+               case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
+                       response = SAS_TASK_COMPLETE;
+                       status   = SAM_STAT_GOOD;
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       if (task->task_proto == SAS_PROTOCOL_SMP) {
+                               void *rsp = &request->smp.rsp;
+
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SMP protocol completion\n",
+                                       __func__);
+
+                               sg_copy_from_buffer(
+                                       &task->smp_task.smp_resp, 1,
+                                       rsp, sizeof(struct smp_resp));
+                       } else if (completion_status
+                                  == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+
+                               /* This was an SSP / STP / SATA transfer.
+                                * There is a possibility that less data than
+                                * the maximum was transferred.
+                                */
+                               u32 transferred_length = sci_req_tx_bytes(request);
+
+                               task->task_status.residual
+                                       = task->total_xfer_len - transferred_length;
+
+                               /* If there were residual bytes, call this an
+                                * underrun.
+                                */
+                               if (task->task_status.residual != 0)
+                                       status = SAS_DATA_UNDERRUN;
+
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+                                       __func__,
+                                       status);
+
+                       } else
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SCI_IO_SUCCESS\n",
+                                       __func__);
+
+                       break;
+
+               case SCI_IO_FAILURE_TERMINATED:
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+                               __func__,
+                               request,
+                               task);
+
+                       /* The request was terminated explicitly.  No handling
+                        * is needed in the SCSI error handler path.
+                        */
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                       response = SAS_TASK_UNDELIVERED;
+
+                       /* See if the device has been/is being stopped. Note
+                        * that we ignore the quiesce state, since we are
+                        * concerned about the actual device state.
+                        */
+                       if (!idev)
+                               status = SAS_DEVICE_UNKNOWN;
+                       else
+                               status = SAS_ABORTED_TASK;
+
+                       complete_to_host = isci_perform_normal_io_completion;
+                       break;
+
+               case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
+
+                       isci_request_handle_controller_specific_errors(
+                               idev, request, task, &response, &status,
+                               &complete_to_host);
+
+                       break;
+
+               case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+                       /* This is a special case, in that the I/O completion
+                        * is telling us that the device needs a reset.
+                        * In order for the device reset condition to be
+                        * noticed, the I/O has to be handled in the error
+                        * handler.  Set the reset flag and cause the
+                        * SCSI error thread to be scheduled.
+                        */
+                       spin_lock_irqsave(&task->task_state_lock, task_flags);
+                       task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+                       spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+                       /* Fail the I/O. */
+                       response = SAS_TASK_UNDELIVERED;
+                       status = SAM_STAT_TASK_ABORTED;
+
+                       complete_to_host = isci_perform_error_io_completion;
+                       clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                       break;
+
+               case SCI_FAILURE_RETRY_REQUIRED:
+
+                       /* Fail the I/O so it can be retried. */
+                       response = SAS_TASK_UNDELIVERED;
+                       if (!idev)
+                               status = SAS_DEVICE_UNKNOWN;
+                       else
+                               status = SAS_ABORTED_TASK;
+
+                       complete_to_host = isci_perform_normal_io_completion;
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                       break;
+
+               default:
+                       /* Catch any otherwise unhandled error codes here. */
+                       dev_dbg(&ihost->pdev->dev,
+                                "%s: invalid completion code: 0x%x - "
+                                "isci_request = %p\n",
+                                __func__, completion_status, request);
+
+                       response = SAS_TASK_UNDELIVERED;
+
+                       /* See if the device has been/is being stopped. Note
+                        * that we ignore the quiesce state, since we are
+                        * concerned about the actual device state.
+                        */
+                       if (!idev)
+                               status = SAS_DEVICE_UNKNOWN;
+                       else
+                               status = SAS_ABORTED_TASK;
+
+                       if (SAS_PROTOCOL_SMP == task->task_proto) {
+                               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                               complete_to_host = isci_perform_normal_io_completion;
+                       } else {
+                               clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                               complete_to_host = isci_perform_error_io_completion;
+                       }
+                       break;
+               }
+               break;
+       }
+
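+       /* Undo any DMA mappings that were set up when the request was built;
+        * SATA/STP payloads are mapped by libata before we see them, so only
+        * the SSP and SMP cases need handling here.
+        */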
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SSP:
+               if (task->data_dir == DMA_NONE)
+                       break;
+               if (task->num_scatter == 0)
+                       /* 0 indicates a single dma address */
+                       dma_unmap_single(&ihost->pdev->dev,
+                                        request->zero_scatter_daddr,
+                                        task->total_xfer_len, task->data_dir);
+               else  /* unmap the sgl dma addresses */
+                       dma_unmap_sg(&ihost->pdev->dev, task->scatter,
+                                    request->num_sg_entries, task->data_dir);
+               break;
+       case SAS_PROTOCOL_SMP: {
+               struct scatterlist *sg = &task->smp_task.smp_req;
+               struct smp_req *smp_req;
+               void *kaddr;
+
+               dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
+
+               /* need to swab it back in case the command buffer is re-used */
+               kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+               smp_req = kaddr + sg->offset;
+               sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+               kunmap_atomic(kaddr, KM_IRQ0);
+               break;
+       }
+       default:
+               break;
+       }
+
+       /* Put the completed request on the correct list */
+       isci_task_save_for_upper_layer_completion(ihost, request, response,
+                                                 status, complete_to_host);
+
+       /* complete the io request to the core. */
+       sci_controller_complete_io(ihost, request->target_device, request);
+       isci_put_device(idev);
+
+       /* set terminated handle so it cannot be completed or
+        * terminated again, and to cause any calls into abort
+        * task to recognize the already completed case.
+        */
+       set_bit(IREQ_TERMINATED, &request->flags);
+}
+
+static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+       struct domain_device *dev = ireq->target_device->domain_dev;
+       struct sas_task *task;
+
+       /* XXX as hch said always creating an internal sas_task for tmf
+        * requests would simplify the driver
+        */
+       task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
+
+       /* all unaccelerated request types (non ssp or ncq) handled with
+        * substates
+        */
+       if (!task && dev->dev_type == SAS_END_DEV) {
+               sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
+       } else if (!task &&
+                  (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
+                   isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
+               sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
+       } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
+               sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
+       } else if (task && sas_protocol_ata(task->task_proto) &&
+                  !task->ata_task.use_ncq) {
+               u32 state;
+
+               if (task->data_dir == DMA_NONE)
+                       state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
+               else if (task->ata_task.dma_xfer)
+                       state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
+               else /* PIO */
+                       state = SCI_REQ_STP_PIO_WAIT_H2D;
+
+               sci_change_state(sm, state);
+       }
+}
+
+static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+       struct isci_host *ihost = ireq->owning_controller;
+
+       /* Tell the SCI_USER that the IO request is complete */
+       if (!test_bit(IREQ_TMF, &ireq->flags))
+               isci_request_io_request_complete(ihost, ireq,
+                                                ireq->sci_status);
+       else
+               isci_task_request_complete(ihost, ireq, ireq->sci_status);
+}
+
+static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+       /* Setting the abort bit in the Task Context is required by the silicon. */
+       ireq->tc->abort = 1;
+}
+
+static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+       ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+       ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+       ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+       struct scu_task_context *tc = ireq->tc;
+       struct host_to_dev_fis *h2d_fis;
+       enum sci_status status;
+
+       /* Clear the SRST bit */
+       h2d_fis = &ireq->stp.cmd;
+       h2d_fis->control = 0;
+
+       /* Clear the TC control bit */
+       tc->control_frame = 0;
+
+       status = sci_controller_continue_io(ireq);
+       WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
+}
+
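+/* Per-state entry actions for the request state machine; states that need no
+ * entry action use an empty initializer so that every SCI_REQ_* state is
+ * covered by the table.
+ */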
+static const struct sci_base_state sci_request_state_table[] = {
+       [SCI_REQ_INIT] = { },
+       [SCI_REQ_CONSTRUCTED] = { },
+       [SCI_REQ_STARTED] = {
+               .enter_state = sci_request_started_state_enter,
+       },
+       [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
+               .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
+       },
+       [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
+       [SCI_REQ_STP_PIO_WAIT_H2D] = {
+               .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
+       },
+       [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
+       [SCI_REQ_STP_PIO_DATA_IN] = { },
+       [SCI_REQ_STP_PIO_DATA_OUT] = { },
+       [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
+       [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
+       [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
+               .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
+       },
+       [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
+               .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
+       },
+       [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
+       [SCI_REQ_TASK_WAIT_TC_COMP] = { },
+       [SCI_REQ_TASK_WAIT_TC_RESP] = { },
+       [SCI_REQ_SMP_WAIT_RESP] = { },
+       [SCI_REQ_SMP_WAIT_TC_COMP] = { },
+       [SCI_REQ_COMPLETED] = {
+               .enter_state = sci_request_completed_state_enter,
+       },
+       [SCI_REQ_ABORTING] = {
+               .enter_state = sci_request_aborting_state_enter,
+       },
+       [SCI_REQ_FINAL] = { },
+};
+
+static void
+sci_general_request_construct(struct isci_host *ihost,
+                                  struct isci_remote_device *idev,
+                                  struct isci_request *ireq)
+{
+       sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
+
+       ireq->target_device = idev;
+       ireq->protocol = SCIC_NO_PROTOCOL;
+       ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
+
+       ireq->sci_status   = SCI_SUCCESS;
+       ireq->scu_status   = 0;
+       ireq->post_context = 0xFFFFFFFF;
+}
+
+static enum sci_status
+sci_io_request_construct(struct isci_host *ihost,
+                         struct isci_remote_device *idev,
+                         struct isci_request *ireq)
+{
+       struct domain_device *dev = idev->domain_dev;
+       enum sci_status status = SCI_SUCCESS;
+
+       /* Build the common part of the request */
+       sci_general_request_construct(ihost, idev, ireq);
+
+       if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+               return SCI_FAILURE_INVALID_REMOTE_DEVICE;
+
+       if (dev->dev_type == SAS_END_DEV)
+               /* pass */;
+       else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+               memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+       else if (dev_is_expander(dev))
+               /* pass */;
+       else
+               return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
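+       /* Zero the task context up to (but not including) the embedded SGL
+        * pairs, which are presumably filled in later when the scatter-gather
+        * list for the payload is built.
+        */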
+       memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
+
+       return status;
+}
+
+enum sci_status sci_task_request_construct(struct isci_host *ihost,
+                                           struct isci_remote_device *idev,
+                                           u16 io_tag, struct isci_request *ireq)
+{
+       struct domain_device *dev = idev->domain_dev;
+       enum sci_status status = SCI_SUCCESS;
+
+       /* Build the common part of the request */
+       sci_general_request_construct(ihost, idev, ireq);
+
+       if (dev->dev_type == SAS_END_DEV ||
+           dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+               set_bit(IREQ_TMF, &ireq->flags);
+               memset(ireq->tc, 0, sizeof(struct scu_task_context));
+       } else
+               status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+       return status;
+}
+
+static enum sci_status isci_request_ssp_request_construct(
+       struct isci_request *request)
+{
+       enum sci_status status;
+
+       dev_dbg(&request->isci_host->pdev->dev,
+               "%s: request = %p\n",
+               __func__,
+               request);
+       status = sci_io_request_construct_basic_ssp(request);
+       return status;
+}
+
+static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
+{
+       struct sas_task *task = isci_request_access_task(ireq);
+       struct host_to_dev_fis *fis = &ireq->stp.cmd;
+       struct ata_queued_cmd *qc = task->uldd_task;
+       enum sci_status status;
+
+       dev_dbg(&ireq->isci_host->pdev->dev,
+               "%s: ireq = %p\n",
+               __func__,
+               ireq);
+
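+       /* Copy the H2D register FIS supplied by libsas.  Bit 7 of the FIS
+        * flags byte is the C bit (command vs. device-control update); the
+        * low nibble is the PM port field and is cleared here.
+        */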
+       memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+       if (!task->ata_task.device_control_reg_update)
+               fis->flags |= 0x80;
+       fis->flags &= 0xF0;
+
+       status = sci_io_request_construct_basic_sata(ireq);
+
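+       /* For NCQ (FPDMA) commands the queue tag is carried in bits 7:3 of
+        * the FIS sector count field and is also mirrored into the task
+        * context.
+        */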
+       if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+                  qc->tf.command == ATA_CMD_FPDMA_READ)) {
+               fis->sector_count = qc->tag << 3;
+               ireq->tc->type.stp.ncq_tag = qc->tag;
+       }
+
+       return status;
+}
+
+static enum sci_status
+sci_io_request_construct_smp(struct device *dev,
+                             struct isci_request *ireq,
+                             struct sas_task *task)
+{
+       struct scatterlist *sg = &task->smp_task.smp_req;
+       struct isci_remote_device *idev;
+       struct scu_task_context *task_context;
+       struct isci_port *iport;
+       struct smp_req *smp_req;
+       void *kaddr;
+       u8 req_len;
+       u32 cmd;
+
+       kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+       smp_req = kaddr + sg->offset;
+       /*
+        * Look at the SMP request's header fields; for certain SAS 1.x SMP
+        * functions under SAS 2.0, a zero request length really indicates
+        * a non-zero default length.
+        */
+       if (smp_req->req_len == 0) {
+               switch (smp_req->func) {
+               case SMP_DISCOVER:
+               case SMP_REPORT_PHY_ERR_LOG:
+               case SMP_REPORT_PHY_SATA:
+               case SMP_REPORT_ROUTE_INFO:
+                       smp_req->req_len = 2;
+                       break;
+               case SMP_CONF_ROUTE_INFO:
+               case SMP_PHY_CONTROL:
+               case SMP_PHY_TEST_FUNCTION:
+                       smp_req->req_len = 9;
+                       break;
+                       /* Default - zero is a valid default for 2.0. */
+               }
+       }
+       req_len = smp_req->req_len;
+       sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+       cmd = *(u32 *) smp_req;
+       kunmap_atomic(kaddr, KM_IRQ0);
+
+       if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
+               return SCI_FAILURE;
+
+       ireq->protocol = SCIC_SMP_PROTOCOL;
+
+       /* The SMP request payload was byte swapped above, before DMA mapping. */
+
+       task_context = ireq->tc;
+
+       idev = ireq->target_device;
+       iport = idev->owning_port;
+
+       /*
+        * Fill in the TC with its required data
+        * 00h
+        */
+       task_context->priority = 0;
+       task_context->initiator_request = 1;
+       task_context->connection_rate = idev->connection_rate;
+       task_context->protocol_engine_index = ISCI_PEG;
+       task_context->logical_port_index = iport->physical_port_index;
+       task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+       task_context->abort = 0;
+       task_context->valid = SCU_TASK_CONTEXT_VALID;
+       task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+       /* 04h */
+       task_context->remote_node_index = idev->rnc.remote_node_index;
+       task_context->command_code = 0;
+       task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+       /* 08h */
+       task_context->link_layer_control = 0;
+       task_context->do_not_dma_ssp_good_response = 1;
+       task_context->strict_ordering = 0;
+       task_context->control_frame = 1;
+       task_context->timeout_enable = 0;
+       task_context->block_guard_enable = 0;
+
+       /* 0ch */
+       task_context->address_modifier = 0;
+
+       /* 10h */
+       task_context->ssp_command_iu_length = req_len;
+
+       /* 14h */
+       task_context->transfer_length_bytes = 0;
+
+       /*
+        * 18h ~ 30h, protocol specific
+        * since the command IU has been built by the framework at this point,
+        * we just copy the first DWord from the command IU to this location. */
+       memcpy(&task_context->type.smp, &cmd, sizeof(u32));
+
+       /*
+        * 40h
+        * "For SMP you could program it to zero. We would prefer that way
+        * so that done code will be consistent." - Venki
+        */
+       task_context->task_phase = 0;
+
+       ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+                             (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+                              (iport->physical_port_index <<
+                               SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+                             ISCI_TAG_TCI(ireq->io_tag));
+       /*
+        * Copy the physical address of the command buffer to the SCU Task
+        * Context; the command buffer should not contain the command header.
+        */
+       task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
+       task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
+
+       /* SMP response comes as UF, so no need to set response IU address. */
+       task_context->response_iu_upper = 0;
+       task_context->response_iu_lower = 0;
+
+       sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return SCI_SUCCESS;
+}
+
+/*
+ * isci_smp_request_build() - This function builds the smp request.
+ * @ireq: This parameter points to the isci_request allocated in the
+ *    request construct function.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */
+static enum sci_status isci_smp_request_build(struct isci_request *ireq)
+{
+       struct sas_task *task = isci_request_access_task(ireq);
+       struct device *dev = &ireq->isci_host->pdev->dev;
+       enum sci_status status = SCI_FAILURE;
+
+       status = sci_io_request_construct_smp(dev, ireq, task);
+       if (status != SCI_SUCCESS)
+               dev_dbg(&ireq->isci_host->pdev->dev,
+                        "%s: failed with status = %d\n",
+                        __func__,
+                        status);
+
+       return status;
+}
+
+/**
+ * isci_io_request_build() - This function builds the io request object.
+ * @ihost: This parameter specifies the ISCI host object
+ * @request: This parameter points to the isci_request object allocated in the
+ *    request construct function.
+ * @idev: This parameter is the handle for the sci core's remote device
+ *    object that is the destination for this request.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */
+static enum sci_status isci_io_request_build(struct isci_host *ihost,
+                                            struct isci_request *request,
+                                            struct isci_remote_device *idev)
+{
+       enum sci_status status = SCI_SUCCESS;
+       struct sas_task *task = isci_request_access_task(request);
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: idev = 0x%p; request = %p, "
+               "num_scatter = %d\n",
+               __func__,
+               idev,
+               request,
+               task->num_scatter);
+
+       /* map the sgl addresses, if present.
+        * libata does the mapping for sata devices
+        * before we get the request.
+        */
+       if (task->num_scatter &&
+           !sas_protocol_ata(task->task_proto) &&
+           !(SAS_PROTOCOL_SMP & task->task_proto)) {
+
+               request->num_sg_entries = dma_map_sg(
+                       &ihost->pdev->dev,
+                       task->scatter,
+                       task->num_scatter,
+                       task->data_dir
+                       );
+
+               if (request->num_sg_entries == 0)
+                       return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+       }
+
+       status = sci_io_request_construct(ihost, idev, request);
+
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: failed request construct\n",
+                        __func__);
+               return SCI_FAILURE;
+       }
+
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SMP:
+               status = isci_smp_request_build(request);
+               break;
+       case SAS_PROTOCOL_SSP:
+               status = isci_request_ssp_request_construct(request);
+               break;
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+               status = isci_request_stp_request_construct(request);
+               break;
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: unknown protocol\n", __func__);
+               return SCI_FAILURE;
+       }
+
+       return SCI_SUCCESS;
+}
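+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch
+ * (the helper name is hypothetical).  It restates the scatter-gather mapping
+ * rule used in isci_io_request_build() above: SATA tasks are skipped because
+ * libata maps them before the request reaches the LLDD, and SMP payloads are
+ * handled by the SMP construct path instead.
+ */
+static inline bool isci_task_needs_sg_map(struct sas_task *task)
+{
+       return task->num_scatter &&
+              !sas_protocol_ata(task->task_proto) &&
+              !(SAS_PROTOCOL_SMP & task->task_proto);
+}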
+
+static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
+{
+       struct isci_request *ireq;
+
+       ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
+       ireq->io_tag = tag;
+       ireq->io_request_completion = NULL;
+       ireq->flags = 0;
+       ireq->num_sg_entries = 0;
+       INIT_LIST_HEAD(&ireq->completed_node);
+       INIT_LIST_HEAD(&ireq->dev_node);
+       isci_request_change_state(ireq, allocated);
+
+       return ireq;
+}
+
+static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+                                                    struct sas_task *task,
+                                                    u16 tag)
+{
+       struct isci_request *ireq;
+
+       ireq = isci_request_from_tag(ihost, tag);
+       ireq->ttype_ptr.io_task_ptr = task;
+       ireq->ttype = io_task;
+       task->lldd_task = ireq;
+
+       return ireq;
+}
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+                                              struct isci_tmf *isci_tmf,
+                                              u16 tag)
+{
+       struct isci_request *ireq;
+
+       ireq = isci_request_from_tag(ihost, tag);
+       ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
+       ireq->ttype = tmf_task;
+
+       return ireq;
+}
+
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+                        struct sas_task *task, u16 tag)
+{
+       enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+       struct isci_request *ireq;
+       unsigned long flags;
+       int ret = 0;
+
+       /* do common allocation and init of request object. */
+       ireq = isci_io_request_from_tag(ihost, task, tag);
+
+       status = isci_io_request_build(ihost, ireq, idev);
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: request_construct failed - status = 0x%x\n",
+                        __func__,
+                        status);
+               return status;
+       }
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
+
+               if (isci_task_is_ncq_recovery(task)) {
+
+                       /* The device is in an NCQ recovery state.  Issue the
+                        * request on the task side.  Note that it will
+                        * complete on the I/O request side because the
+                        * request was built that way (i.e.
+                        * ireq->is_task_management_request is false).
+                        */
+                       status = sci_controller_start_task(ihost,
+                                                           idev,
+                                                           ireq);
+               } else {
+                       status = SCI_FAILURE;
+               }
+       } else {
+               /* Send the request; the I/O tag was assigned by the caller. */
+               status = sci_controller_start_io(ihost, idev,
+                                                 ireq);
+       }
+
+       if (status != SCI_SUCCESS &&
+           status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: failed request start (0x%x)\n",
+                        __func__, status);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               return status;
+       }
+
+       /* Either I/O started OK, or the core has signaled that
+        * the device needs a target reset.
+        *
+        * In either case, hold onto the I/O for later.
+        *
+        * Update its status and add it to the list in the
+        * remote device object.
+        */
+       list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+       if (status == SCI_SUCCESS) {
+               isci_request_change_state(ireq, started);
+       } else {
+               /* The request did not really start in the
+                * hardware, so clear the request handle
+                * here so no terminations will be done.
+                */
+               set_bit(IREQ_TERMINATED, &ireq->flags);
+               isci_request_change_state(ireq, completed);
+       }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (status ==
+           SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+               /* Signal libsas that we need the SCSI error
+                * handler thread to work on this I/O and that
+                * we want a device reset.
+                */
+               spin_lock_irqsave(&task->task_state_lock, flags);
+               task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+               /* Cause this task to be scheduled in the SCSI error
+                * handler thread.
+                */
+               isci_execpath_callback(ihost, task,
+                                      sas_task_abort);
+
+               /* Change the status, since we are holding
+                * the I/O until it is managed by the SCSI
+                * error handler.
+                */
+               status = SCI_SUCCESS;
+       }
+
+       return ret;
+}
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
new file mode 100644 (file)
index 0000000..7a1d5a9
--- /dev/null
@@ -0,0 +1,448 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REQUEST_H_
+#define _ISCI_REQUEST_H_
+
+#include "isci.h"
+#include "host.h"
+#include "scu_task_context.h"
+
+/**
+ * enum isci_request_status - This enum defines the possible states of an I/O
+ *    request.
+ */
+enum isci_request_status {
+       unallocated = 0x00,
+       allocated   = 0x01,
+       started     = 0x02,
+       completed   = 0x03,
+       aborting    = 0x04,
+       aborted     = 0x05,
+       terminating = 0x06,
+       dead        = 0x07
+};
+
+enum task_type {
+       io_task  = 0,
+       tmf_task = 1
+};
+
+enum sci_request_protocol {
+       SCIC_NO_PROTOCOL,
+       SCIC_SMP_PROTOCOL,
+       SCIC_SSP_PROTOCOL,
+       SCIC_STP_PROTOCOL
+}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
+
+/**
+ * struct isci_stp_request - extra request infrastructure to handle pio/atapi
+ *    protocol
+ * @pio_len: number of bytes requested at PIO setup
+ * @status: pio setup ending status value to tell us if we need
+ *          to wait for another fis or if the transfer is complete.  Upon
+ *          receipt of a d2h fis this will be the status field of that fis.
+ * @sgl: track pio transfer progress as we iterate through the sgl
+ * @device_cdb_len: atapi device advertises its transfer constraints at setup
+ */
+struct isci_stp_request {
+       u32 pio_len;
+       u8 status;
+
+       struct isci_stp_pio_sgl {
+               int index;
+               u8 set;
+               u32 offset;
+       } sgl;
+       u32 device_cdb_len;
+};
+
+struct isci_request {
+       enum isci_request_status status;
+       #define IREQ_COMPLETE_IN_TARGET 0
+       #define IREQ_TERMINATED 1
+       #define IREQ_TMF 2
+       #define IREQ_ACTIVE 3
+       unsigned long flags;
+       /* XXX kill ttype and ttype_ptr, allocate full sas_task */
+       enum task_type ttype;
+       union ttype_ptr_union {
+               struct sas_task *io_task_ptr;   /* When ttype==io_task  */
+               struct isci_tmf *tmf_task_ptr;  /* When ttype==tmf_task */
+       } ttype_ptr;
+       struct isci_host *isci_host;
+       /* For use in the requests_to_{complete|abort} lists: */
+       struct list_head completed_node;
+       /* For use in the reqs_in_process list: */
+       struct list_head dev_node;
+       spinlock_t state_lock;
+       dma_addr_t request_daddr;
+       dma_addr_t zero_scatter_daddr;
+       unsigned int num_sg_entries;
+       /* Note: "io_request_completion" is completed in two different ways
+        * depending on whether this is a TMF or regular request.
+        * - TMF requests are completed in the thread that started them;
+        * - regular requests are completed in the request completion callback
+        *   function.
+        * This difference in operation allows the aborter of a TMF request
+        * to be sure that once the TMF request completes, the I/O that the
+        * TMF was aborting is guaranteed to have completed.
+        *
+        * XXX kill io_request_completion
+        */
+       struct completion *io_request_completion;
+       struct sci_base_state_machine sm;
+       struct isci_host *owning_controller;
+       struct isci_remote_device *target_device;
+       u16 io_tag;
+       enum sci_request_protocol protocol;
+       u32 scu_status; /* hardware result */
+       u32 sci_status; /* upper layer disposition */
+       u32 post_context;
+       struct scu_task_context *tc;
+       /* could be larger with sg chaining */
+       #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
+       struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
+       /* This field holds the index of the saved rx frame data.  It is used
+        * in STP internal requests and SMP response frames.  If this field
+        * holds a valid frame index, the saved frame must be released on IO
+        * request completion.
+        */
+       u32 saved_rx_frame_index;
+
+       union {
+               struct {
+                       union {
+                               struct ssp_cmd_iu cmd;
+                               struct ssp_task_iu tmf;
+                       };
+                       union {
+                               struct ssp_response_iu rsp;
+                               u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+                       };
+               } ssp;
+               struct {
+                       struct smp_resp rsp;
+               } smp;
+               struct {
+                       struct isci_stp_request req;
+                       struct host_to_dev_fis cmd;
+                       struct dev_to_host_fis rsp;
+               } stp;
+       };
+};
+
+static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
+{
+       struct isci_request *ireq;
+
+       ireq = container_of(stp_req, typeof(*ireq), stp.req);
+       return ireq;
+}
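+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch
+ * (the helper name is hypothetical).  to_ireq() is the usual container_of()
+ * round-trip: starting from the STP sub-request embedded in the payload
+ * union it recovers the enclosing isci_request.
+ */
+static inline void isci_stp_req_roundtrip_check(struct isci_request *ireq)
+{
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+
+       BUG_ON(to_ireq(stp_req) != ireq);
+}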
+
+/**
+ * enum sci_base_request_states - This enumeration depicts all the states for
+ *    the common request state machine.
+ */
+enum sci_base_request_states {
+       /*
+        * Simply the initial state for the base request state machine.
+        */
+       SCI_REQ_INIT,
+
+       /*
+        * This state indicates that the request has been constructed.
+        * This state is entered from the INITIAL state.
+        */
+       SCI_REQ_CONSTRUCTED,
+
+       /*
+        * This state indicates that the request has been started. This state
+        * is entered from the CONSTRUCTED state.
+        */
+       SCI_REQ_STARTED,
+
+       SCI_REQ_STP_UDMA_WAIT_TC_COMP,
+       SCI_REQ_STP_UDMA_WAIT_D2H,
+
+       SCI_REQ_STP_NON_DATA_WAIT_H2D,
+       SCI_REQ_STP_NON_DATA_WAIT_D2H,
+
+       SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
+       SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
+       SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
+
+       /*
+        * While in this state the IO request object is waiting for the TC
+        * completion notification for the H2D Register FIS
+        */
+       SCI_REQ_STP_PIO_WAIT_H2D,
+
+       /*
+        * While in this state the IO request object is waiting for either a
+        * PIO Setup FIS or a D2H register FIS.  The type of frame received is
+        * based on the result of the prior frame and line conditions.
+        */
+       SCI_REQ_STP_PIO_WAIT_FRAME,
+
+       /*
+        * While in this state the IO request object is waiting for a DATA
+        * frame from the device.
+        */
+       SCI_REQ_STP_PIO_DATA_IN,
+
+       /*
+        * While in this state the IO request object is waiting to transmit
+        * the next data frame to the device.
+        */
+       SCI_REQ_STP_PIO_DATA_OUT,
+
+       /*
+        * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
+        * task management request is waiting for the transmission of the
+        * initial frame (i.e. command, task, etc.).
+        */
+       SCI_REQ_TASK_WAIT_TC_COMP,
+
+       /*
+        * This sub-state indicates that the started task management request
+        * is waiting for the reception of an unsolicited frame
+        * (i.e. response IU).
+        */
+       SCI_REQ_TASK_WAIT_TC_RESP,
+
+       /*
+        * This sub-state indicates that the started SMP request is waiting
+        * for the reception of an unsolicited frame (i.e. response frame).
+        */
+       SCI_REQ_SMP_WAIT_RESP,
+
+       /*
+        * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
+        * request is waiting for the transmission of the initial frame
+        * (i.e. command, task, etc.).
+        */
+       SCI_REQ_SMP_WAIT_TC_COMP,
+
+       /*
+        * This state indicates that the request has completed.
+        * This state is entered from the STARTED state. This state is entered
+        * from the ABORTING state.
+        */
+       SCI_REQ_COMPLETED,
+
+       /*
+        * This state indicates that the request is in the process of being
+        * terminated/aborted.
+        * This state is entered from the CONSTRUCTED state.
+        * This state is entered from the STARTED state.
+        */
+       SCI_REQ_ABORTING,
+
+       /*
+        * Simply the final state for the base request state machine.
+        */
+       SCI_REQ_FINAL,
+};
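+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch
+ * (the helper name is hypothetical).  It groups the four PIO sub-states
+ * documented above into a single predicate.
+ */
+static inline bool sci_req_state_is_stp_pio(enum sci_base_request_states state)
+{
+       return state == SCI_REQ_STP_PIO_WAIT_H2D ||
+              state == SCI_REQ_STP_PIO_WAIT_FRAME ||
+              state == SCI_REQ_STP_PIO_DATA_IN ||
+              state == SCI_REQ_STP_PIO_DATA_OUT;
+}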
+
+enum sci_status sci_request_start(struct isci_request *ireq);
+enum sci_status sci_io_request_terminate(struct isci_request *ireq);
+enum sci_status
+sci_io_request_event_handler(struct isci_request *ireq,
+                                 u32 event_code);
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+                                 u32 frame_index);
+enum sci_status
+sci_task_request_terminate(struct isci_request *ireq);
+extern enum sci_status
+sci_request_complete(struct isci_request *ireq);
+extern enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
+
+/* XXX open code in caller */
+static inline dma_addr_t
+sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
+{
+
+       char *requested_addr = (char *)virt_addr;
+       char *base_addr = (char *)ireq;
+
+       BUG_ON(requested_addr < base_addr);
+       BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
+
+       return ireq->request_daddr + (requested_addr - base_addr);
+}
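+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch
+ * (the helper name is hypothetical).  A typical use of the helper above is
+ * to derive the bus address of a member embedded in the request payload,
+ * e.g. the SSP response IU, from the request's single DMA mapping.
+ */
+static inline dma_addr_t sci_ssp_rsp_iu_dma_addr(struct isci_request *ireq)
+{
+       return sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+}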
+
+/**
+ * isci_request_change_state() - This function sets the status of the request
+ *    object.
+ * @isci_request: This parameter points to the isci_request object
+ * @status: This parameter is the new status of the object
+ *
+ * Return: the state previous to the change.
+ */
+static inline enum isci_request_status
+isci_request_change_state(struct isci_request *isci_request,
+                         enum isci_request_status status)
+{
+       enum isci_request_status old_state;
+       unsigned long flags;
+
+       BUG_ON(isci_request == NULL);
+
+       dev_dbg(&isci_request->isci_host->pdev->dev,
+               "%s: isci_request = %p, state = 0x%x\n",
+               __func__,
+               isci_request,
+               status);
+
+       spin_lock_irqsave(&isci_request->state_lock, flags);
+       old_state = isci_request->status;
+       isci_request->status = status;
+       spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+       return old_state;
+}
+
+/**
+ * isci_request_change_started_to_newstate() - This function sets the status of
+ *    the request object.
+ * @isci_request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ *    signalled when the request completes
+ * @newstate: This parameter is the new status of the object
+ *
+ * Return: the state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_newstate(struct isci_request *isci_request,
+                                       struct completion *completion_ptr,
+                                       enum isci_request_status newstate)
+{
+       enum isci_request_status old_state;
+       unsigned long flags;
+
+       spin_lock_irqsave(&isci_request->state_lock, flags);
+
+       old_state = isci_request->status;
+
+       if (old_state == started || old_state == aborting) {
+               BUG_ON(isci_request->io_request_completion != NULL);
+
+               isci_request->io_request_completion = completion_ptr;
+               isci_request->status = newstate;
+       }
+
+       spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+       dev_dbg(&isci_request->isci_host->pdev->dev,
+               "%s: isci_request = %p, old_state = 0x%x\n",
+               __func__,
+               isci_request,
+               old_state);
+
+       return old_state;
+}
+
+/**
+ * isci_request_change_started_to_aborted() - This function sets the status of
+ *    the request object.
+ * @isci_request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ *    signalled when the old request completes.
+ *
+ * Return: the state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_aborted(struct isci_request *isci_request,
+                                      struct completion *completion_ptr)
+{
+       return isci_request_change_started_to_newstate(isci_request,
+                                                      completion_ptr,
+                                                      aborted);
+}
+
+#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
+
+#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+                                              struct isci_tmf *isci_tmf,
+                                              u16 tag);
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+                        struct sas_task *task, u16 tag);
+void isci_terminate_pending_requests(struct isci_host *ihost,
+                                    struct isci_remote_device *idev);
+enum sci_status
+sci_task_request_construct(struct isci_host *ihost,
+                           struct isci_remote_device *idev,
+                           u16 io_tag,
+                           struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_ssp(struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_sata(struct isci_request *ireq);
+void sci_smp_request_copy_response(struct isci_request *ireq);
+
+static inline int isci_task_is_ncq_recovery(struct sas_task *task)
+{
+       return (sas_protocol_ata(task->task_proto) &&
+               task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
+               task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
+}
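+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch
+ * (the function name is hypothetical).  The predicate above matches a
+ * READ LOG EXT of the NCQ error log issued over an ATA protocol, e.g.:
+ */
+static inline void isci_ncq_recovery_example(void)
+{
+       struct sas_task t = { .task_proto = SAS_PROTOCOL_STP, };
+
+       t.ata_task.fis.command = ATA_CMD_READ_LOG_EXT;
+       t.ata_task.fis.lbal = ATA_LOG_SATA_NCQ;
+
+       WARN_ON(!isci_task_is_ncq_recovery(&t));
+}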
+
+#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
new file mode 100644 (file)
index 0000000..462b151
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCI_SAS_H_
+#define _SCI_SAS_H_
+
+#include <linux/kernel.h>
+
+/*
+ * SATA FIS Types: these constants depict the various SATA FIS types defined
+ * in the serial ATA specification.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+#define FIS_REGH2D          0x27
+#define FIS_REGD2H          0x34
+#define FIS_SETDEVBITS      0xA1
+#define FIS_DMA_ACTIVATE    0x39
+#define FIS_DMA_SETUP       0x41
+#define FIS_BIST_ACTIVATE   0x58
+#define FIS_PIO_SETUP       0x5F
+#define FIS_DATA            0x46
+
+/**************************************************************************/
+#define SSP_RESP_IU_MAX_SIZE   280
+
+/*
+ * contents of the SSP COMMAND INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_cmd_iu {
+       u8 LUN[8];
+       u8 add_cdb_len:6;
+       u8 _r_a:2;
+       u8 _r_b;
+       u8 en_fburst:1;
+       u8 task_prio:4;
+       u8 task_attr:3;
+       u8 _r_c;
+
+       u8 cdb[16];
+}  __packed;
+
+/*
+ * contents of the SSP TASK INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_task_iu {
+       u8 LUN[8];
+       u8 _r_a;
+       u8 task_func;
+       u8 _r_b[4];
+       u16 task_tag;
+       u8 _r_c[12];
+}  __packed;
+
+
+/*
+ * struct smp_req_phy_id - This structure defines the contents of
+ *    an SMP request that is composed of the struct smp_req header and a
+ *    phy identifier.
+ *    Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phy_id {
+       u8 _r_a[4];             /* bytes 4-7 */
+
+       u8 ign_zone_grp:1;      /* byte 8 */
+       u8 _r_b:7;
+
+       u8 phy_id;              /* byte 9 */
+       u8 _r_c;                /* byte 10 */
+       u8 _r_d;                /* byte 11 */
+}  __packed;
+
+/*
+ * struct smp_req_config_route_info - This structure defines the
+ *    contents of an SMP Configure Route Information request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_conf_rtinfo {
+       u16 exp_change_cnt;             /* bytes 4-5 */
+       u8 exp_rt_idx_hi;               /* byte 6 */
+       u8 exp_rt_idx;                  /* byte 7 */
+
+       u8 _r_a;                        /* byte 8 */
+       u8 phy_id;                      /* byte 9 */
+       u16 _r_b;                       /* bytes 10-11 */
+
+       u8 _r_c:7;                      /* byte 12 */
+       u8 dis_rt_entry:1;
+       u8 _r_d[3];                     /* bytes 13-15 */
+
+       u8 rt_sas_addr[8];              /* bytes 16-23 */
+       u8 _r_e[16];                    /* bytes 24-39 */
+}  __packed;
+
+/*
+ * struct smp_req_phycntl - This structure defines the contents of an
+ *    SMP Phy Controller request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phycntl {
+       u16 exp_change_cnt;             /* byte 4-5 */
+
+       u8 _r_a[3];                     /* bytes 6-8 */
+
+       u8 phy_id;                      /* byte 9 */
+       u8 phy_op;                      /* byte 10 */
+
+       u8 upd_pathway:1;               /* byte 11 */
+       u8 _r_b:7;
+
+       u8 _r_c[12];                    /* byte 12-23 */
+
+       u8 att_dev_name[8];             /* byte 24-31 */
+
+       u8 _r_d:4;                      /* byte 32 */
+       u8 min_linkrate:4;
+
+       u8 _r_e:4;                      /* byte 33 */
+       u8 max_linkrate:4;
+
+       u8 _r_f[2];                     /* byte 34-35 */
+
+       u8 pathway:4;                   /* byte 36 */
+       u8 _r_g:4;
+
+       u8 _r_h[3];                     /* bytes 37-39 */
+}  __packed;
+
+/*
+ * struct smp_req - This structure defines the common SMP request header,
+ *    which is followed by the request-specific payload bytes.
+ *
+ * XXX: This data structure may need to go to scsi/sas.h
+ */
+struct smp_req {
+       u8 type;                /* byte 0 */
+       u8 func;                /* byte 1 */
+       u8 alloc_resp_len;      /* byte 2 */
+       u8 req_len;             /* byte 3 */
+       u8 req_data[0];
+}  __packed;
+
+#define SMP_RESP_HDR_SZ        4
+
+/*
+ * struct sci_sas_address - This structure depicts how a SAS address is
+ *    represented by SCI.
+ * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
+ *
+ */
+struct sci_sas_address {
+       u32 high;
+       u32 low;
+};
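+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch
+ * (the helper name is hypothetical).  Per the XXX above, libsas stores SAS
+ * addresses as a big-endian u8[SAS_ADDR_SIZE]; converting to this split form
+ * is a pair of 32-bit big-endian loads, assuming 'high' holds bytes 0-3 and
+ * 'low' holds bytes 4-7.
+ */
+static inline void sci_sas_address_from_buf(struct sci_sas_address *addr,
+                                           const u8 *sas_addr)
+{
+       addr->high = ((u32)sas_addr[0] << 24) | ((u32)sas_addr[1] << 16) |
+                    ((u32)sas_addr[2] << 8) | (u32)sas_addr[3];
+       addr->low = ((u32)sas_addr[4] << 24) | ((u32)sas_addr[5] << 16) |
+                   ((u32)sas_addr[6] << 8) | (u32)sas_addr[7];
+}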
+#endif
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
new file mode 100644 (file)
index 0000000..c8b329c
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_COMPLETION_CODES_HEADER_
+#define _SCU_COMPLETION_CODES_HEADER_
+
+/**
+ * This file contains the constants and macros for the SCU hardware completion
+ *    codes.
+ *
+ *
+ */
+
+#define SCU_COMPLETION_TYPE_SHIFT      28
+#define SCU_COMPLETION_TYPE_MASK       0x70000000
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * This macro constructs an SCU completion type
+ */
+#define SCU_COMPLETION_TYPE(type) \
+       ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * These macros define the SCU completion types built with SCU_COMPLETION_TYPE().
+ */
+#define SCU_COMPLETION_TYPE_TASK       SCU_COMPLETION_TYPE(0)
+#define SCU_COMPLETION_TYPE_SDMA       SCU_COMPLETION_TYPE(1)
+#define SCU_COMPLETION_TYPE_UFI        SCU_COMPLETION_TYPE(2)
+#define SCU_COMPLETION_TYPE_EVENT      SCU_COMPLETION_TYPE(3)
+#define SCU_COMPLETION_TYPE_NOTIFY     SCU_COMPLETION_TYPE(4)
+
+/**
+ *
+ *
+ * These constants provide the shift and mask values for the various parts of
+ * an SCU completion code.
+ */
+#define SCU_COMPLETION_STATUS_MASK       0x0FFC0000
+#define SCU_COMPLETION_TL_STATUS_MASK    0x0FC00000
+#define SCU_COMPLETION_TL_STATUS_SHIFT   22
+#define SCU_COMPLETION_SDMA_STATUS_MASK  0x003C0000
+#define SCU_COMPLETION_PEG_MASK          0x00010000
+#define SCU_COMPLETION_PORT_MASK         0x00007000
+#define SCU_COMPLETION_PE_MASK           SCU_COMPLETION_PORT_MASK
+#define SCU_COMPLETION_PE_SHIFT          12
+#define SCU_COMPLETION_INDEX_MASK        0x00000FFF
+
+/**
+ * SCU_GET_COMPLETION_TYPE() -
+ *
+ * This macro returns the SCU completion type.
+ */
+#define SCU_GET_COMPLETION_TYPE(completion_code) \
+       ((completion_code) & SCU_COMPLETION_TYPE_MASK)
+
+/**
+ * SCU_GET_COMPLETION_STATUS() -
+ *
+ * This macro returns the SCU completion status.
+ */
+#define SCU_GET_COMPLETION_STATUS(completion_code) \
+       ((completion_code) & SCU_COMPLETION_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_TL_STATUS() -
+ *
+ * This macro returns the transport layer completion status.
+ */
+#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
+       ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
+
+/**
+ * SCU_MAKE_COMPLETION_STATUS() -
+ *
+ * This macro takes a normalized completion code and shifts it into position
+ * so that it can be compared with the result of
+ * SCU_GET_COMPLETION_TL_STATUS().
+ */
+#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
+       ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
+
+/**
+ * SCU_NORMALIZE_COMPLETION_STATUS() -
+ *
+ * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
+ * return code.
+ */
+#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
+       (\
+               ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
+               >> SCU_COMPLETION_TL_STATUS_SHIFT \
+       )
+
+/**
+ * SCU_GET_COMPLETION_SDMA_STATUS() -
+ *
+ * This macro returns the SDMA completion status.
+ */
+#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code)        \
+       ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PEG() -
+ *
+ * This macro returns the Protocol Engine Group from the completion code.
+ */
+#define SCU_GET_COMPLETION_PEG(completion_code)        \
+       ((completion_code) & SCU_COMPLETION_PEG_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PORT() -
+ *
+ * This macro returns the logical port index from the completion code.
+ */
+#define SCU_GET_COMPLETION_PORT(completion_code) \
+       ((completion_code) & SCU_COMPLETION_PORT_MASK)
+
+/**
+ * SCU_GET_PROTOCOL_ENGINE_INDEX() -
+ *
+ * This macro returns the PE index from the completion code.
+ */
+#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
+       (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
+
+/**
+ * SCU_GET_COMPLETION_INDEX() -
+ *
+ * This macro returns the index of the completion which is either a TCi or an
+ * RNi depending on the completion type.
+ */
+#define SCU_GET_COMPLETION_INDEX(completion_code) \
+       ((completion_code) & SCU_COMPLETION_INDEX_MASK)
+
+#define SCU_UNSOLICITED_FRAME_MASK     0x0FFF0000
+#define SCU_UNSOLICITED_FRAME_SHIFT    16
+
+/**
+ * SCU_GET_FRAME_INDEX() -
+ *
+ * This macro returns a normalized frame index from an unsolicited frame
+ * completion.
+ */
+#define SCU_GET_FRAME_INDEX(completion_code) \
+       (\
+               ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
+               >> SCU_UNSOLICITED_FRAME_SHIFT \
+       )
+
+#define SCU_UNSOLICITED_FRAME_ERROR_MASK  0x00008000
+
+/**
+ * SCU_GET_FRAME_ERROR() -
+ *
+ * This macro returns a zero (0) value if there is no frame error; otherwise it
+ * returns non-zero (!0).
+ */
+#define SCU_GET_FRAME_ERROR(completion_code) \
+       ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
+
+/**
+ *
+ *
+ * These constants represent normalized completion codes which must be shifted
+ * 18 bits to match it with the hardware completion code. In a 16-bit compiler,
+ * immediate constants are 16-bit values (the size of an int). If we shift
+ * those by 18 bits, we completely lose the value. To ensure the value is a
+ * 32-bit value like we want, each immediate value must be cast to a u32.
+ */
+#define SCU_TASK_DONE_GOOD                                  ((u32)0x00)
+#define SCU_TASK_DONE_CRC_ERR                               ((u32)0x14)
+#define SCU_TASK_DONE_CHECK_RESPONSE                        ((u32)0x14)
+#define SCU_TASK_DONE_GEN_RESPONSE                          ((u32)0x15)
+#define SCU_TASK_DONE_NAK_CMD_ERR                           ((u32)0x16)
+#define SCU_TASK_DONE_CMD_LL_R_ERR                          ((u32)0x16)
+#define SCU_TASK_DONE_LL_R_ERR                              ((u32)0x17)
+#define SCU_TASK_DONE_ACK_NAK_TO                            ((u32)0x17)
+#define SCU_TASK_DONE_LL_PERR                               ((u32)0x18)
+#define SCU_TASK_DONE_LL_SY_TERM                            ((u32)0x19)
+#define SCU_TASK_DONE_NAK_ERR                               ((u32)0x19)
+#define SCU_TASK_DONE_LL_LF_TERM                            ((u32)0x1A)
+#define SCU_TASK_DONE_DATA_LEN_ERR                          ((u32)0x1A)
+#define SCU_TASK_DONE_LL_CL_TERM                            ((u32)0x1B)
+#define SCU_TASK_DONE_LL_ABORT_ERR                          ((u32)0x1B)
+#define SCU_TASK_DONE_SEQ_INV_TYPE                          ((u32)0x1C)
+#define SCU_TASK_DONE_UNEXP_XR                              ((u32)0x1C)
+#define SCU_TASK_DONE_INV_FIS_TYPE                          ((u32)0x1D)
+#define SCU_TASK_DONE_XR_IU_LEN_ERR                         ((u32)0x1D)
+#define SCU_TASK_DONE_INV_FIS_LEN                           ((u32)0x1E)
+#define SCU_TASK_DONE_XR_WD_LEN                             ((u32)0x1E)
+#define SCU_TASK_DONE_SDMA_ERR                              ((u32)0x1F)
+#define SCU_TASK_DONE_OFFSET_ERR                            ((u32)0x20)
+#define SCU_TASK_DONE_MAX_PLD_ERR                           ((u32)0x21)
+#define SCU_TASK_DONE_EXCESS_DATA                           ((u32)0x22)
+#define SCU_TASK_DONE_LF_ERR                                ((u32)0x23)
+#define SCU_TASK_DONE_UNEXP_FIS                             ((u32)0x24)
+#define SCU_TASK_DONE_UNEXP_RESP                            ((u32)0x24)
+#define SCU_TASK_DONE_EARLY_RESP                            ((u32)0x25)
+#define SCU_TASK_DONE_SMP_RESP_TO_ERR                       ((u32)0x26)
+#define SCU_TASK_DONE_DMASETUP_DIRERR                       ((u32)0x27)
+#define SCU_TASK_DONE_SMP_UFI_ERR                           ((u32)0x27)
+#define SCU_TASK_DONE_XFERCNT_ERR                           ((u32)0x28)
+#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR                      ((u32)0x28)
+#define SCU_TASK_DONE_SMP_LL_RX_ERR                         ((u32)0x29)
+#define SCU_TASK_DONE_RESP_LEN_ERR                          ((u32)0x2A)
+#define SCU_TASK_DONE_UNEXP_DATA                            ((u32)0x2B)
+#define SCU_TASK_DONE_OPEN_FAIL                             ((u32)0x2C)
+#define SCU_TASK_DONE_UNEXP_SDBFIS                          ((u32)0x2D)
+#define SCU_TASK_DONE_REG_ERR                               ((u32)0x2E)
+#define SCU_TASK_DONE_SDB_ERR                               ((u32)0x2F)
+#define SCU_TASK_DONE_TASK_ABORT                            ((u32)0x30)
+#define SCU_TASK_DONE_CMD_SDMA_ERR                          ((u32)0x32)
+#define SCU_TASK_DONE_CMD_LL_ABORT_ERR                      ((u32)0x33)
+#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION              ((u32)0x34)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1             ((u32)0x35)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2             ((u32)0x36)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3             ((u32)0x37)
+#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION                ((u32)0x38)
+#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION                 ((u32)0x39)
+#define SCU_TASK_DONE_VIIT_ENTRY_NV                         ((u32)0x3A)
+#define SCU_TASK_DONE_IIT_ENTRY_NV                          ((u32)0x3B)
+#define SCU_TASK_DONE_RNCNV_OUTBOUND                        ((u32)0x3C)
+#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY             ((u32)0x3D)
+#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED         ((u32)0x3E)
+#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED  ((u32)0x3F)
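+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch
+ * (the macro name is hypothetical).  A raw completion dword is decoded with
+ * the accessor macros above, and a normalized TASK_DONE code is compared
+ * only after SCU_MAKE_COMPLETION_STATUS() shifts it into position:
+ */
+#define SCU_COMPLETION_IS_TASK_DONE_GOOD(completion_code) \
+       (SCU_GET_COMPLETION_TYPE(completion_code) == SCU_COMPLETION_TYPE_TASK && \
+        SCU_GET_COMPLETION_TL_STATUS(completion_code) == \
+               SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD))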
+
+#endif /* _SCU_COMPLETION_CODES_HEADER_ */
diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
new file mode 100644 (file)
index 0000000..36a945a
--- /dev/null
@@ -0,0 +1,336 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_EVENT_CODES_HEADER__
+#define __SCU_EVENT_CODES_HEADER__
+
+/**
+ * This file contains the constants and macros for the SCU event codes.
+ *
+ *
+ */
+
+#define SCU_EVENT_TYPE_CODE_SHIFT      24
+#define SCU_EVENT_TYPE_CODE_MASK       0x0F000000
+
+#define SCU_EVENT_SPECIFIC_CODE_SHIFT  18
+#define SCU_EVENT_SPECIFIC_CODE_MASK   0x00FC0000
+
+#define SCU_EVENT_CODE_MASK \
+       (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * This macro constructs an SCU event type from the type value.
+ */
+#define SCU_EVENT_TYPE(type) \
+       ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_SPECIFIC() -
+ *
+ * This macro constructs an SCU event specifier from the code value.
+ */
+#define SCU_EVENT_SPECIFIC(code) \
+       ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_MESSAGE() -
+ *
+ * This macro combines an SCU event type and an SCU event specifier
+ * from the type and code values.
+ */
+#define SCU_EVENT_MESSAGE(type, code) \
+       ((type) | SCU_EVENT_SPECIFIC(code))
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * SCU_EVENT_TYPES
+ */
+#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR  SCU_EVENT_TYPE(0x08)
+#define SCU_EVENT_TYPE_SMU_PCQ_ERROR      SCU_EVENT_TYPE(0x09)
+#define SCU_EVENT_TYPE_SMU_ERROR          SCU_EVENT_TYPE(0x00)
+#define SCU_EVENT_TYPE_TRANSPORT_ERROR    SCU_EVENT_TYPE(0x01)
+#define SCU_EVENT_TYPE_BROADCAST_CHANGE   SCU_EVENT_TYPE(0x02)
+#define SCU_EVENT_TYPE_OSSP_EVENT         SCU_EVENT_TYPE(0x03)
+#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX     SCU_EVENT_TYPE(0x04)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX  SCU_EVENT_TYPE(0x05)
+#define SCU_EVENT_TYPE_RNC_OPS_MISC       SCU_EVENT_TYPE(0x06)
+#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
+#define SCU_EVENT_TYPE_ERR_CNT_EVENT      SCU_EVENT_TYPE(0x0A)
+
+/**
+ *
+ *
+ * SCU_EVENT_SPECIFIERS
+ */
+#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
+#define SCU_EVENT_SPECIFIER_RNC_RELEASE    0x00
+
+/**
+ *
+ *
+ * SMU_COMMAND_EVENTS
+ */
+#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_PCQ_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
+#define SCU_EVENT_PCIE_INTERFACE_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
+#define SCU_EVENT_FUNCTION_LEVEL_RESET \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
+
+/**
+ *
+ *
+ * TRANSPORT_LEVEL_ERRORS
+ */
+#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR        \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
+
+/**
+ *
+ *
+ * BROADCAST_CHANGE_EVENTS
+ */
+#define SCU_EVENT_BROADCAST_CHANGE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
+#define SCU_EVENT_BROADCAST_RESERVED0 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
+#define SCU_EVENT_BROADCAST_RESERVED1 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
+#define SCU_EVENT_BROADCAST_SES        \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
+#define SCU_EVENT_BROADCAST_EXPANDER \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
+#define SCU_EVENT_BROADCAST_AEN        \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
+#define SCU_EVENT_BROADCAST_RESERVED3 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
+#define SCU_EVENT_BROADCAST_RESERVED4 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
+#define SCU_EVENT_PE_SUSPENDED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
+
+/**
+ *
+ *
+ * OSSP_EVENTS
+ */
+#define SCU_EVENT_PORT_SELECTOR_DETECTED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
+#define SCU_EVENT_SENT_PORT_SELECTION \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
+#define SCU_EVENT_HARD_RESET_TRANSMITTED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
+#define SCU_EVENT_HARD_RESET_RECEIVED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
+#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
+#define SCU_EVENT_LINK_FAILURE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
+#define SCU_EVENT_SATA_SPINUP_HOLD \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
+#define SCU_EVENT_SAS_15_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
+#define SCU_EVENT_SAS_15 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
+#define SCU_EVENT_SAS_30_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
+#define SCU_EVENT_SAS_30 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
+#define SCU_EVENT_SAS_60_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
+#define SCU_EVENT_SAS_60 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
+#define SCU_EVENT_SATA_15_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
+#define SCU_EVENT_SATA_15 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
+#define SCU_EVENT_SATA_30_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
+#define SCU_EVENT_SATA_30 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
+#define SCU_EVENT_SATA_60_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
+#define SCU_EVENT_SATA_60 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
+#define SCU_EVENT_SAS_PHY_DETECTED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
+#define SCU_EVENT_SATA_PHY_DETECTED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
+
+/**
+ *
+ *
+ * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
+ */
+#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR,  0x00)
+#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR,  0x01)
+#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR,  0x02)
+
+/**
+ *
+ *
+ * REMOTE_NODE_SUSPEND_EVENTS
+ */
+#define SCU_EVENT_TL_RNC_SUSPEND_TX \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
+#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX        \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
+
+/**
+ *
+ *
+ * REMOTE_NODE_MISC_EVENTS
+ */
+#define SCU_EVENT_POST_RCN_RELEASE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
+#define SCU_EVENT_POST_RNC_COMPLETE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
+#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
+
+/**
+ *
+ *
+ * ERROR_COUNT_EVENT
+ */
+#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
+#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
+#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
+
+/**
+ * scu_get_event_type() -
+ *
+ * This macro returns the SCU event type from the event code.
+ */
+#define scu_get_event_type(event_code) \
+       ((event_code) & SCU_EVENT_TYPE_CODE_MASK)
+
+/**
+ * scu_get_event_specifier() -
+ *
+ * This macro returns the SCU event specifier from the event code.
+ */
+#define scu_get_event_specifier(event_code) \
+       ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * scu_get_event_code() -
+ *
+ * This macro returns the combined SCU event type and SCU event specifier from
+ * the event code.
+ */
+#define scu_get_event_code(event_code) \
+       ((event_code) & SCU_EVENT_CODE_MASK)
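+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch
+ * (the macro name is hypothetical).  An event dword from the hardware is
+ * matched against the message constants above by masking off everything but
+ * the type and specifier fields:
+ */
+#define scu_event_is_link_failure(event_code) \
+       (scu_get_event_code(event_code) == SCU_EVENT_LINK_FAILURE)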
+
+
+/**
+ *
+ *
+ * PTS_SCHEDULE_EVENT
+ */
+#define SCU_EVENT_SMP_RESPONSE_NO_PE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
+#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
+       scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
+
+#define SCU_EVENT_TASK_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
+#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT        \
+       scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
+
+#define SCU_EVENT_IT_NEXUS_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
+#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
+       scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
+
+
+#endif /* __SCU_EVENT_CODES_HEADER__ */
diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
new file mode 100644 (file)
index 0000000..33745ad
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
+#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
+
+/**
+ * This file contains the structures and constants used by the SCU hardware to
+ *    describe a remote node context.
+ *
+ *
+ */
+
+/**
+ * struct ssp_remote_node_context - This structure contains the SCU hardware
+ *    definition for an SSP remote node.
+ *
+ *
+ */
+struct ssp_remote_node_context {
+       /* WORD 0 */
+
+       /**
+        * This field is the remote node index assigned for this remote node. All
+        * remote nodes must have a unique remote node index. The value of the remote
+        * node index cannot exceed the maximum number of remote nodes reported in
+        * the SCU device context capacity register.
+        */
+       u32 remote_node_index:12;
+       u32 reserved0_1:4;
+
+       /**
+        * This field tells the SCU hardware how many simultaneous connections
+        * this remote node will support.
+        */
+       u32 remote_node_port_width:4;
+
+       /**
+        * This field tells the SCU hardware which logical port to associate with this
+        * remote node.
+        */
+       u32 logical_port_index:3;
+       u32 reserved0_2:5;
+
+       /**
+        * This field will enable the I_T nexus loss timer for this remote node.
+        */
+       u32 nexus_loss_timer_enable:1;
+
+       /**
+        * This field is for driver debug only and is not used.
+        */
+       u32 check_bit:1;
+
+       /**
+        * This field must be set to true when the hardware DMAs the remote node
+        * context to the hardware SRAM.  When the remote node is being invalidated
+        * this field must be set to false.
+        */
+       u32 is_valid:1;
+
+       /**
+        * This field must be set to true.
+        */
+       u32 is_remote_node_context:1;
+
+       /* WORD 1 - 2 */
+
+       /**
+        * This is the low word of the remote device SAS Address
+        */
+       u32 remote_sas_address_lo;
+
+       /**
+        * This field is the high word of the remote device SAS Address
+        */
+       u32 remote_sas_address_hi;
+
+       /* WORD 3 */
+       /**
+        * This field represents the function number assigned to this remote device.
+        * This value must match the virtual function number that is being used to
+        * communicate to the device.
+        */
+       u32 function_number:8;
+       u32 reserved3_1:8;
+
+       /**
+        * This field provides the driver a way to cheat on the arbitration wait time
+        * for this remote node.
+        */
+       u32 arbitration_wait_time:16;
+
+       /* WORD 4 */
+       /**
+        * This field tells the SCU hardware how long this device may occupy the
+        * connection before it must be closed.
+        */
+       u32 connection_occupancy_timeout:16;
+
+       /**
+        * This field tells the SCU hardware how long to maintain a connection when
+        * there are no frames being transmitted on the link.
+        */
+       u32 connection_inactivity_timeout:16;
+
+       /* WORD  5 */
+       /**
+        * This field allows the driver to cheat on the arbitration wait time for this
+        * remote node.
+        */
+       u32 initial_arbitration_wait_time:16;
+
+       /**
+        * This field tells the hardware what to program for the connection rate in
+        * the open address frame.  See the SAS spec for valid values.
+        */
+       u32 oaf_connection_rate:4;
+
+       /**
+        * This field tells the SCU hardware what to program for the features in the
+        * open address frame.  See the SAS spec for valid values.
+        */
+       u32 oaf_features:4;
+
+       /**
+        * This field tells the SCU hardware what to use for the source zone group in
+        * the open address frame.  See the SAS spec for more details on zoning.
+        */
+       u32 oaf_source_zone_group:8;
+
+       /* WORD 6 */
+       /**
+        * This field tells the SCU hardware what to use for the more compatibility
+        * features in the open address frame. See the SAS spec for details.
+        */
+       u32 oaf_more_compatibility_features;
+
+       /* WORD 7 */
+       u32 reserved7;
+
+};
+
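
For illustration, a minimal sketch (not part of this patch) of how the fields of struct ssp_remote_node_context relate to one another for a direct-attached SSP device; the initializer values below are hypothetical:

/* Editor's sketch: hypothetical values, shown only to relate the fields above. */
struct ssp_remote_node_context rnc = {
        .remote_node_index       = 5,           /* unique RNi, within the device context capacity */
        .remote_node_port_width  = 1,           /* one simultaneous connection (narrow port) */
        .logical_port_index      = 0,           /* logical port the device is reachable on */
        .nexus_loss_timer_enable = 1,           /* arm the I_T nexus loss timer */
        .is_remote_node_context  = 1,           /* always true per the field description */
        .is_valid                = 1,           /* context may be DMAed to the SCU SRAM */
        .remote_sas_address_hi   = 0x5000c500,  /* high word of the remote SAS address */
        .remote_sas_address_lo   = 0x1234abcd,  /* low word of the remote SAS address */
};
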
+/**
+ * struct stp_remote_node_context - This structure contains the SCU hardware
+ *    definition for a STP remote node.
+ *
+ * STP Targets are not yet supported so this definition is a placeholder until
+ * we do support them.
+ */
+struct stp_remote_node_context {
+       /**
+        * Placeholder data for the STP remote node.
+        */
+       u32 data[8];
+
+};
+
+/**
+ * This union combines the SAS and SATA remote node definitions.
+ *
+ * union scu_remote_node_context
+ */
+union scu_remote_node_context {
+       /**
+        * SSP Remote Node
+        */
+       struct ssp_remote_node_context ssp;
+
+       /**
+        * STP Remote Node
+        */
+       struct stp_remote_node_context stp;
+
+};
+
+#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
new file mode 100644 (file)
index 0000000..7df87d9
--- /dev/null
@@ -0,0 +1,942 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_TASK_CONTEXT_H_
+#define _SCU_TASK_CONTEXT_H_
+
+/**
+ * This file contains the structures and constants for the SCU hardware task
+ *    context.
+ *
+ *
+ */
+
+
+/**
+ * enum scu_ssp_task_type - This enumeration defines the various SSP task
+ *    types the SCU hardware will accept. The definition for the various task
+ *    types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+       SCU_TASK_TYPE_IOREAD,           /* /< IO READ direction or no direction */
+       SCU_TASK_TYPE_IOWRITE,          /* /< IO Write direction */
+       SCU_TASK_TYPE_SMP_REQUEST,      /* /< SMP Request type */
+       SCU_TASK_TYPE_RESPONSE,         /* /< Driver generated response frame (target mode) */
+       SCU_TASK_TYPE_RAW_FRAME,        /* /< Raw frame request type */
+       SCU_TASK_TYPE_PRIMITIVE         /* /< Request for a primitive to be transmitted */
+} scu_ssp_task_type;
+
+/**
+ * enum scu_sata_task_type - This enumeration defines the various SATA task
+ *    types the SCU hardware will accept. The definition for the various task
+ *    types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+       SCU_TASK_TYPE_DMA_IN,           /* /< Read request */
+       SCU_TASK_TYPE_FPDMAQ_READ,      /* /< NCQ read request */
+       SCU_TASK_TYPE_PACKET_DMA_IN,    /* /< Packet read request */
+       SCU_TASK_TYPE_SATA_RAW_FRAME,   /* /< Raw frame request */
+       RESERVED_4,
+       RESERVED_5,
+       RESERVED_6,
+       RESERVED_7,
+       SCU_TASK_TYPE_DMA_OUT,          /* /< Write request */
+       SCU_TASK_TYPE_FPDMAQ_WRITE,     /* /< NCQ write Request */
+       SCU_TASK_TYPE_PACKET_DMA_OUT    /* /< Packet write request */
+} scu_sata_task_type;
+
+
+/**
+ *
+ *
+ * SCU_CONTEXT_TYPE
+ */
+#define SCU_TASK_CONTEXT_TYPE  0
+#define SCU_RNC_CONTEXT_TYPE   1
+
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_VALIDITY
+ */
+#define SCU_TASK_CONTEXT_INVALID          0
+#define SCU_TASK_CONTEXT_VALID            1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CODE
+ */
+#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK   0
+#define SCU_COMMAND_CODE_ACTIVE_TASK          1
+#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK   2
+#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES    3
+
+/**
+ *
+ *
+ * SCU_TASK_PRIORITY
+ */
+/**
+ *
+ *
+ * This priority is used when no specific priority is requested for this request.
+ */
+#define SCU_TASK_PRIORITY_NORMAL          0
+
+/**
+ *
+ *
+ * This priority indicates that the task should be scheduled to the head of the
+ * queue.  The task will NOT be executed if the TX is suspended for the remote
+ * node.
+ */
+#define SCU_TASK_PRIORITY_HEAD_OF_Q       1
+
+/**
+ *
+ *
+ * This priority indicates that the task will be executed before all
+ * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
+ * WILL be executed if the TX is suspended for the remote node.
+ */
+#define SCU_TASK_PRIORITY_HIGH            2
+
+/**
+ *
+ *
+ * This task priority is reserved and should not be used.
+ */
+#define SCU_TASK_PRIORITY_RESERVED        3
+
+#define SCU_TASK_INITIATOR_MODE           1
+#define SCU_TASK_TARGET_MODE              0
+
+#define SCU_TASK_REGULAR                  0
+#define SCU_TASK_ABORTED                  1
+
+/* direction bit definition */
+/**
+ *
+ *
+ * SATA_DIRECTION
+ */
+#define SCU_SATA_WRITE_DATA_DIRECTION     0
+#define SCU_SATA_READ_DATA_DIRECTION      1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift
+ * operations to construct the various SCU commands
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT           21
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK            0x00E00000
+#define scu_get_command_request_type(x)        \
+       ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT        18
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK         0x001C0000
+#define scu_get_command_request_subtype(x) \
+       ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK       \
+       (\
+               SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK             \
+               | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK          \
+       )
+#define scu_get_command_request_full_type(x) \
+       ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT  16
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK   0x00010000
+#define scu_get_command_protocl_engine_group(x)        \
+       ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
+
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT           12
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK            0x00007000
+#define scu_get_command_reqeust_logical_port(x)        \
+       ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
+
+
+#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
+       ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
+
+/**
+ * MAKE_SCU_CONTEXT_COMMAND_TYPE() -
+ *
+ * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU
+ * command types.
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC    MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC    MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC   MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC   MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC  MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
+
+#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command)        \
+       ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
+
+/**
+ *
+ *
+ * SCU_REQUEST_TYPES These constants are the various request types that can be
+ * posted to the SCU hardware.
+ */
+#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
+
+#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_32        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_96        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_32        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_96        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
+
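
To make the layout of these command words concrete, here is a small worked example (not part of this patch; the function name is hypothetical) showing how one of the request values above decomposes under the accessor macros defined earlier:

/* Editor's illustration: SCU_CONTEXT_COMMAND_POST_RNC_96 is type POST_RNC
 * (2 << 21 = 0x00400000) with subtype 1 (1 << 18 = 0x00040000), i.e. 0x00440000.
 */
static inline void example_decode_post_rnc_96(void)
{
        u32 cmd = SCU_CONTEXT_COMMAND_POST_RNC_96;

        u32 type    = scu_get_command_request_type(cmd);      /* 0x00400000, still shifted */
        u32 subtype = scu_get_command_request_subtype(cmd);   /* 0x00040000, still shifted */
        u32 full    = scu_get_command_request_full_type(cmd); /* 0x00440000 */

        (void)type; (void)subtype; (void)full;  /* silence unused-variable warnings */
}
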
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_PROTOCOL SCU Task context protocol types; this is used to
+ * program the SCU Task context protocol field in word 0x00.
+ */
+#define SCU_TASK_CONTEXT_PROTOCOL_SMP    0x00
+#define SCU_TASK_CONTEXT_PROTOCOL_SSP    0x01
+#define SCU_TASK_CONTEXT_PROTOCOL_STP    0x02
+#define SCU_TASK_CONTEXT_PROTOCOL_NONE   0x07
+
+/**
+ * struct ssp_task_context - This is the SCU hardware definition for an SSP
+ *    request.
+ *
+ *
+ */
+struct ssp_task_context {
+       /* OFFSET 0x18 */
+       u32 reserved00:24;
+       u32 frame_type:8;
+
+       /* OFFSET 0x1C */
+       u32 reserved01;
+
+       /* OFFSET 0x20 */
+       u32 fill_bytes:2;
+       u32 reserved02:6;
+       u32 changing_data_pointer:1;
+       u32 retransmit:1;
+       u32 retry_data_frame:1;
+       u32 tlr_control:2;
+       u32 reserved03:19;
+
+       /* OFFSET 0x24 */
+       u32 uiRsvd4;
+
+       /* OFFSET 0x28 */
+       u32 target_port_transfer_tag:16;
+       u32 tag:16;
+
+       /* OFFSET 0x2C */
+       u32 data_offset;
+};
+
+/**
+ * struct stp_task_context - This is the SCU hardware definition for an STP
+ *    request.
+ *
+ *
+ */
+struct stp_task_context {
+       /* OFFSET 0x18 */
+       u32 fis_type:8;
+       u32 pm_port:4;
+       u32 reserved0:3;
+       u32 control:1;
+       u32 command:8;
+       u32 features:8;
+
+       /* OFFSET 0x1C */
+       u32 reserved1;
+
+       /* OFFSET 0x20 */
+       u32 reserved2;
+
+       /* OFFSET 0x24 */
+       u32 reserved3;
+
+       /* OFFSET 0x28 */
+       u32 ncq_tag:5;
+       u32 reserved4:27;
+
+       /* OFFSET 0x2C */
+       u32 data_offset; /* TODO: What is this used for? */
+};
+
+/**
+ * struct smp_task_context - This is the SCU hardware definition for an SMP
+ *    request.
+ *
+ *
+ */
+struct smp_task_context {
+       /* OFFSET 0x18 */
+       u32 response_length:8;
+       u32 function_result:8;
+       u32 function:8;
+       u32 frame_type:8;
+
+       /* OFFSET 0x1C */
+       u32 smp_response_ufi:12;
+       u32 reserved1:20;
+
+       /* OFFSET 0x20 */
+       u32 reserved2;
+
+       /* OFFSET 0x24 */
+       u32 reserved3;
+
+       /* OFFSET 0x28 */
+       u32 reserved4;
+
+       /* OFFSET 0x2C */
+       u32 reserved5;
+};
+
+/**
+ * struct primitive_task_context - This is the SCU hardware definition used
+ *    when the driver wants to send a primitive on the link.
+ *
+ *
+ */
+struct primitive_task_context {
+       /* OFFSET 0x18 */
+       /**
+        * This field is the control word and it must be 0.
+        */
+       u32 control; /* /< must be set to 0 */
+
+       /* OFFSET 0x1C */
+       /**
+        * This field specifies the primitive that is to be transmitted.
+        */
+       u32 sequence;
+
+       /* OFFSET 0x20 */
+       u32 reserved0;
+
+       /* OFFSET 0x24 */
+       u32 reserved1;
+
+       /* OFFSET 0x28 */
+       u32 reserved2;
+
+       /* OFFSET 0x2C */
+       u32 reserved3;
+};
+
+/**
+ * The union of the protocols that can be selected in the SCU task context
+ *    field.
+ *
+ * protocol_context
+ */
+union protocol_context {
+       struct ssp_task_context ssp;
+       struct stp_task_context stp;
+       struct smp_task_context smp;
+       struct primitive_task_context primitive;
+       u32 words[6];
+};
+
+/**
+ * struct scu_sgl_element - This structure represents a single SCU defined SGL
+ *    element. SCU SGLs contain a 64 bit address with the maximum data transfer
+ *    being 24 bits in size.  The SGL cannot cross a 4GB boundary.
+ *
+ * struct scu_sgl_element
+ */
+struct scu_sgl_element {
+       /**
+        * This field is the upper 32 bits of the 64 bit physical address.
+        */
+       u32 address_upper;
+
+       /**
+        * This field is the lower 32 bits of the 64 bit physical address.
+        */
+       u32 address_lower;
+
+       /**
+        * This field is the number of bytes to transfer.
+        */
+       u32 length:24;
+
+       /**
+        * This field is the address modifier to be used when a virtual function is
+        * requesting a data transfer.
+        */
+       u32 address_modifier:8;
+
+};
+
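
As a sketch (not part of this patch) of how a single element might be programmed, assuming the kernel's upper_32_bits()/lower_32_bits() helpers and a DMA address obtained from the normal mapping path:

/* Editor's sketch: 'dma' and 'len' are assumed inputs; 'len' must fit in the
 * 24 bit length field and the buffer must not cross a 4GB boundary.
 */
static void example_fill_sgl_element(struct scu_sgl_element *e,
                                     dma_addr_t dma, u32 len)
{
        e->address_upper    = upper_32_bits(dma);  /* upper 32 bits of the 64 bit address */
        e->address_lower    = lower_32_bits(dma);  /* lower 32 bits of the 64 bit address */
        e->length           = len;                 /* bytes to transfer, max 2^24 - 1 */
        e->address_modifier = 0;                   /* only used for virtual function requests */
}
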
+#define SCU_SGL_ELEMENT_PAIR_A   0
+#define SCU_SGL_ELEMENT_PAIR_B   1
+
+/**
+ * struct scu_sgl_element_pair - This structure is the SCU hardware definition
+ *    of a pair of SGL elements. The SCU hardware always works on SGL pairs.
+ *    They are referred to in the DS specification as SGL A and SGL B.  Each SGL
+ *    pair is followed by the address of the next pair.
+ *
+ *
+ */
+struct scu_sgl_element_pair {
+       /* OFFSET 0x60-0x68 */
+       /**
+        * This field is the SGL element A of the SGL pair.
+        */
+       struct scu_sgl_element A;
+
+       /* OFFSET 0x6C-0x74 */
+       /**
+        * This field is the SGL element B of the SGL pair.
+        */
+       struct scu_sgl_element B;
+
+       /* OFFSET 0x78-0x7C */
+       /**
+        * This field is the upper 32 bits of the 64 bit address to the next SGL
+        * element pair.
+        */
+       u32 next_pair_upper;
+
+       /**
+        * This field is the lower 32 bits of the 64 bit address to the next SGL
+        * element pair.
+        */
+       u32 next_pair_lower;
+
+};
+
+/**
+ * struct transport_snapshot - This structure is the SCU hardware scratch area
+ *    for the task context. This is set to 0 by the driver but can be read by
+ *    issuing a dump TC request to the SCU.
+ *
+ *
+ */
+struct transport_snapshot {
+       /* OFFSET 0x48 */
+       u32 xfer_rdy_write_data_length;
+
+       /* OFFSET 0x4C */
+       u32 data_offset;
+
+       /* OFFSET 0x50 */
+       u32 data_transfer_size:24;
+       u32 reserved_50_0:8;
+
+       /* OFFSET 0x54 */
+       u32 next_initiator_write_data_offset;
+
+       /* OFFSET 0x58 */
+       u32 next_initiator_write_data_xfer_size:24;
+       u32 reserved_58_0:8;
+};
+
+/**
+ * struct scu_task_context - This structure defines the contents of the SCU
+ *    silicon task context. It lays out all of the fields according to the
+ *    expected order and location for the Storage Controller unit.
+ *
+ *
+ */
+struct scu_task_context {
+       /* OFFSET 0x00 ------ */
+       /**
+        * This field must be encoded to one of the valid SCU task priority values
+        *    - SCU_TASK_PRIORITY_NORMAL
+        *    - SCU_TASK_PRIORITY_HEAD_OF_Q
+        *    - SCU_TASK_PRIORITY_HIGH
+        */
+       u32 priority:2;
+
+       /**
+        * This field must be set to true if this is an initiator generated request.
+        * Until target mode is supported all task requests are initiator requests.
+        */
+       u32 initiator_request:1;
+
+       /**
+        * This field must be set to one of the valid connection rates valid values
+        * are 0x8, 0x9, and 0xA.
+        */
+       u32 connection_rate:4;
+
+       /**
+        * This field must be programmed when generating an SMP response since the SMP
+        * connection remains open until the SMP response is generated.
+        */
+       u32 protocol_engine_index:3;
+
+       /**
+        * This field must contain the logical port for the task request.
+        */
+       u32 logical_port_index:3;
+
+       /**
+        * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
+        *    - SCU_TASK_CONTEXT_PROTOCOL_SMP
+        *    - SCU_TASK_CONTEXT_PROTOCOL_SSP
+        *    - SCU_TASK_CONTEXT_PROTOCOL_STP
+        *    - SCU_TASK_CONTEXT_PROTOCOL_NONE
+        */
+       u32 protocol_type:3;
+
+       /**
+        * This field must be set to the TCi allocated for this task
+        */
+       u32 task_index:12;
+
+       /**
+        * This field is reserved and must be set to 0x00
+        */
+       u32 reserved_00_0:1;
+
+       /**
+        * For a normal task request this must be set to 0.  If this is an abort of
+        * this task request it must be set to 1.
+        */
+       u32 abort:1;
+
+       /**
+        * This field must be set to true for the SCU hardware to process the task.
+        */
+       u32 valid:1;
+
+       /**
+        * This field must be set to SCU_TASK_CONTEXT_TYPE
+        */
+       u32 context_type:1;
+
+       /* OFFSET 0x04 */
+       /**
+        * This field contains the RNi that is the target of this request.
+        */
+       u32 remote_node_index:12;
+
+       /**
+        * This field is programmed only for mirrored requests, which we are not
+        * using; in that case it is the RNi of the mirrored target.
+        */
+       u32 mirrored_node_index:12;
+
+       /**
+        * This field is programmed with the direction of the SATA request
+        *    - SCU_SATA_WRITE_DATA_DIRECTION
+        *    - SCU_SATA_READ_DATA_DIRECTION
+        */
+       u32 sata_direction:1;
+
+       /**
+        * This field is programmed with one of the following SCU_COMMAND_CODE values
+        *    - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
+        *    - SCU_COMMAND_CODE_ACTIVE_TASK
+        *    - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
+        *    - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
+        */
+       u32 command_code:2;
+
+       /**
+        * This field is set to true if the remote node should be suspended.
+        * This bit is only valid for SSP & SMP target devices.
+        */
+       u32 suspend_node:1;
+
+       /**
+        * This field is programmed with one of the following command type codes
+        *
+        * For SAS requests use the scu_ssp_task_type
+        *    - SCU_TASK_TYPE_IOREAD
+        *    - SCU_TASK_TYPE_IOWRITE
+        *    - SCU_TASK_TYPE_SMP_REQUEST
+        *    - SCU_TASK_TYPE_RESPONSE
+        *    - SCU_TASK_TYPE_RAW_FRAME
+        *    - SCU_TASK_TYPE_PRIMITIVE
+        *
+        * For SATA requests use the scu_sata_task_type
+        *    - SCU_TASK_TYPE_DMA_IN
+        *    - SCU_TASK_TYPE_FPDMAQ_READ
+        *    - SCU_TASK_TYPE_PACKET_DMA_IN
+        *    - SCU_TASK_TYPE_SATA_RAW_FRAME
+        *    - SCU_TASK_TYPE_DMA_OUT
+        *    - SCU_TASK_TYPE_FPDMAQ_WRITE
+        *    - SCU_TASK_TYPE_PACKET_DMA_OUT
+        */
+       u32 task_type:4;
+
+       /* OFFSET 0x08 */
+       /**
+        * This field is reserved and must be set to 0x00
+        */
+       u32 link_layer_control:8; /* presently all reserved */
+
+       /**
+        * This field is set to true when TLR is to be enabled
+        */
+       u32 ssp_tlr_enable:1;
+
+       /**
+        * This field specifies whether the SCU DMAs a response frame to host
+        * memory for good response frames when operating in target mode.
+        */
+       u32 dma_ssp_target_good_response:1;
+
+       /**
+        * This field indicates if the SCU should DMA the response frame to
+        * host memory.
+        */
+       u32 do_not_dma_ssp_good_response:1;
+
+       /**
+        * This field is set to true when strict ordering is to be enabled
+        */
+       u32 strict_ordering:1;
+
+       /**
+        * This field indicates the type of endianness to be utilized for the
+        * frame.  Command, task, and response frames use control_frame
+        * set to 1.
+        */
+       u32 control_frame:1;
+
+       /**
+        * This field is reserved and the driver should set it to 0x00
+        */
+       u32 tl_control_reserved:3;
+
+       /**
+        * This field is set to true when the SCU hardware task timeout control is to
+        * be enabled
+        */
+       u32 timeout_enable:1;
+
+       /**
+        * This field is reserved and the driver should set it to 0x00
+        */
+       u32 pts_control_reserved:7;
+
+       /**
+        * This field should be set to true when block guard is to be enabled
+        */
+       u32 block_guard_enable:1;
+
+       /**
+        * This field is reserved and the driver should set it to 0x00
+        */
+       u32 sdma_control_reserved:7;
+
+       /* OFFSET 0x0C */
+       /**
+        * This field is the address modifier for this I/O request; it should be
+        * programmed with the virtual function that is making the request.
+        */
+       u32 address_modifier:16;
+
+       /**
+        * @todo Do we support mirrored SMP response frames?
+        */
+       u32 mirrored_protocol_engine:3;  /* mirrored protocol Engine Index */
+
+       /**
+        * If this is a mirrored request the logical port index for the mirrored RNi
+        * must be programmed.
+        */
+       u32 mirrored_logical_port:4;  /* mirrored local port index */
+
+       /**
+        * This field is reserved and the driver must set it to 0x00
+        */
+       u32 reserved_0C_0:8;
+
+       /**
+        * This field must be set to true if the mirrored request processing is to be
+        * enabled.
+        */
+       u32 mirror_request_enable:1;  /* Mirrored request Enable */
+
+       /* OFFSET 0x10 */
+       /**
+        * This field is the command iu length in dwords
+        */
+       u32 ssp_command_iu_length:8;
+
+       /**
+        * This is the target TLR enable bit; it must be set to 0 when creating the
+        * task context.
+        */
+       u32 xfer_ready_tlr_enable:1;
+
+       /**
+        * This field is reserved and the driver must set it to 0x00
+        */
+       u32 reserved_10_0:7;
+
+       /**
+        * This is the maximum burst size that the SCU hardware will send in one
+        * connection; its value is (N x 512) and N must be a multiple of 2.  If the
+        * value is 0x00 then the maximum burst size is disabled.
+        */
+       u32 ssp_max_burst_size:16;
+
+       /* OFFSET 0x14 */
+       /**
+        * This field is set to the number of bytes to be transferred in the request.
+        */
+       u32 transfer_length_bytes:24; /* In terms of bytes */
+
+       /**
+        * This field is reserved and the driver should set it to 0x00
+        */
+       u32 reserved_14_0:8;
+
+       /* OFFSET 0x18-0x2C */
+       /**
+        * This union provides the protocol specific part of the SCU Task Context.
+        */
+       union protocol_context type;
+
+       /* OFFSET 0x30-0x34 */
+       /**
+        * This field is the upper 32 bits of the 64 bit physical address of the
+        * command iu buffer
+        */
+       u32 command_iu_upper;
+
+       /**
+        * This field is the lower 32 bits of the 64 bit physical address of the
+        * command iu buffer
+        */
+       u32 command_iu_lower;
+
+       /* OFFSET 0x38-0x3C */
+       /**
+        * This field is the upper 32 bits of the 64 bit physical address of the
+        * response iu buffer
+        */
+       u32 response_iu_upper;
+
+       /**
+        * This field is the lower 32 bits of the 64 bit physical address of the
+        * response iu buffer
+        */
+       u32 response_iu_lower;
+
+       /* OFFSET 0x40 */
+       /**
+        * This field is set to the task phase of the SCU hardware. The driver must
+        * set this to 0x01
+        */
+       u32 task_phase:8;
+
+       /**
+        * This field is set to the transport layer task status.  The driver must set
+        * this to 0x00
+        */
+       u32 task_status:8;
+
+       /**
+        * This field is used during initiator write TLR
+        */
+       u32 previous_extended_tag:4;
+
+       /**
+        * This field is set to the maximum number of retries for a STP non-data FIS
+        */
+       u32 stp_retry_count:2;
+
+       /**
+        * This field is reserved and the driver must set it to 0x00
+        */
+       u32 reserved_40_1:2;
+
+       /**
+        * This field is used by the SCU TL to determine when to take a snapshot when
+        * transmitting read data frames.
+        *    - 0x00 The entire IO
+        *    - 0x01 32k
+        *    - 0x02 64k
+        *    - 0x04 128k
+        *    - 0x08 256k
+        */
+       u32 ssp_tlr_threshold:4;
+
+       /**
+        * This field is reserved and the driver must set it to 0x00
+        */
+       u32 reserved_40_2:4;
+
+       /* OFFSET 0x44 */
+       u32 write_data_length; /* read only set to 0 */
+
+       /* OFFSET 0x48-0x58 */
+       struct transport_snapshot snapshot; /* read only set to 0 */
+
+       /* OFFSET 0x5C */
+       u32 block_protection_enable:1;
+       u32 block_size:2;
+       u32 block_protection_function:2;
+       u32 reserved_5C_0:9;
+       u32 active_sgl_element:2;  /* read only set to 0 */
+       u32 sgl_exhausted:1;  /* read only set to 0 */
+       u32 payload_data_transfer_error:4;  /* read only set to 0 */
+       u32 frame_buffer_offset:11; /* read only set to 0 */
+
+       /* OFFSET 0x60-0x7C */
+       /**
+        * This field is the first SGL element pair found in the TC data structure.
+        */
+       struct scu_sgl_element_pair sgl_pair_ab;
+       /* OFFSET 0x80-0x9C */
+       /**
+        * This field is the second SGL element pair found in the TC data structure.
+        */
+       struct scu_sgl_element_pair sgl_pair_cd;
+
+       /* OFFSET 0xA0-BC */
+       struct scu_sgl_element_pair sgl_snapshot_ac;
+
+       /* OFFSET 0xC0 */
+       u32 active_sgl_element_pair; /* read only set to 0 */
+
+       /* OFFSET 0xC4-0xCC */
+       u32 reserved_C4_CC[3];
+
+       /* OFFSET 0xD0 */
+       u32 intermediate_crc_value:16;
+       u32 initial_crc_seed:16;
+
+       /* OFFSET 0xD4 */
+       u32 application_tag_for_verify:16;
+       u32 application_tag_for_generate:16;
+
+       /* OFFSET 0xD8 */
+       u32 reference_tag_seed_for_verify_function;
+
+       /* OFFSET 0xDC */
+       u32 reserved_DC;
+
+       /* OFFSET 0xE0 */
+       u32 reserved_E0_0:16;
+       u32 application_tag_mask_for_generate:16;
+
+       /* OFFSET 0xE4 */
+       u32 block_protection_control:16;
+       u32 application_tag_mask_for_verify:16;
+
+       /* OFFSET 0xE8 */
+       u32 block_protection_error:8;
+       u32 reserved_E8_0:24;
+
+       /* OFFSET 0xEC */
+       u32 reference_tag_seed_for_verify;
+
+       /* OFFSET 0xF0 */
+       u32 intermediate_crc_valid_snapshot:16;
+       u32 reserved_F0_0:16;
+
+       /* OFFSET 0xF4 */
+       u32 reference_tag_seed_for_verify_function_snapshot;
+
+       /* OFFSET 0xF8 */
+       u32 snapshot_of_reserved_dword_DC_of_tc;
+
+       /* OFFSET 0xFC */
+       u32 reference_tag_seed_for_generate_function_snapshot;
+
+};
+
+#endif /* _SCU_TASK_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
new file mode 100644 (file)
index 0000000..d6bcdd0
--- /dev/null
@@ -0,0 +1,1676 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/completion.h>
+#include <linux/irqflags.h>
+#include "sas.h"
+#include <scsi/libsas.h>
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "isci.h"
+#include "request.h"
+#include "task.h"
+#include "host.h"
+
+/**
+* isci_task_refuse() - complete the request to the upper layer driver in
+*     the case where an I/O needs to be completed back in the submit path.
+* @ihost: host on which the request was queued
+* @task: request to complete
+* @response: response code for the completed task.
+* @status: status code for the completed task.
+*
+*/
+static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
+                            enum service_response response,
+                            enum exec_status status)
+
+{
+       enum isci_completion_selection disposition;
+
+       disposition = isci_perform_normal_io_completion;
+       disposition = isci_task_set_completion_status(task, response, status,
+                                                     disposition);
+
+       /* Tasks aborted specifically by a call to the lldd_abort_task
+        * function should not be completed to the host in the regular path.
+        */
+       switch (disposition) {
+       case isci_perform_normal_io_completion:
+               /* Normal notification (task_done) */
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: Normal - task = %p, response=%d, "
+                       "status=%d\n",
+                       __func__, task, response, status);
+
+               task->lldd_task = NULL;
+
+               isci_execpath_callback(ihost, task, task->task_done);
+               break;
+
+       case isci_perform_aborted_io_completion:
+               /*
+                * No notification because this request is already in the
+                * abort path.
+                */
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: Aborted - task = %p, response=%d, "
+                       "status=%d\n",
+                       __func__, task, response, status);
+               break;
+
+       case isci_perform_error_io_completion:
+               /* Use sas_task_abort */
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: Error - task = %p, response=%d, "
+                       "status=%d\n",
+                       __func__, task, response, status);
+
+               isci_execpath_callback(ihost, task, sas_task_abort);
+               break;
+
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: isci task notification default case!",
+                       __func__);
+               sas_task_abort(task);
+               break;
+       }
+}
+
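+/* Iterate over 'num' consecutive sas_tasks, starting at 'task' and advancing
+ * through the struct sas_task 'list' links on each pass.
+ */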
+#define for_each_sas_task(num, task) \
+       for (; num > 0; num--,\
+            task = list_entry(task->list.next, struct sas_task, list))
+
+
+static inline int isci_device_io_ready(struct isci_remote_device *idev,
+                                      struct sas_task *task)
+{
+       return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
+                     (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
+                      isci_task_is_ncq_recovery(task))
+                   : 0;
+}
+/**
+ * isci_task_execute_task() - This function is one of the SAS Domain Template
+ *    functions. This function is called by libsas to send a task down to
+ *    hardware.
+ * @task: This parameter specifies the SAS task to send.
+ * @num: This parameter specifies the number of tasks to queue.
+ * @gfp_flags: This parameter specifies the context of this call.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
+{
+       struct isci_host *ihost = dev_to_ihost(task->dev);
+       struct isci_remote_device *idev;
+       unsigned long flags;
+       bool io_ready;
+       u16 tag;
+
+       dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
+
+       for_each_sas_task(num, task) {
+               enum sci_status status = SCI_FAILURE;
+
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               idev = isci_lookup_device(task->dev);
+               io_ready = isci_device_io_ready(idev, task);
+               tag = isci_alloc_tag(ihost);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+               dev_dbg(&ihost->pdev->dev,
+                       "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
+                       task, num, task->dev, idev, idev ? idev->flags : 0,
+                       task->uldd_task);
+
+               if (!idev) {
+                       isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
+                                        SAS_DEVICE_UNKNOWN);
+               } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+                       /* Indicate QUEUE_FULL so that the scsi midlayer
+                        * retries.
+                         */
+                       isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
+                                        SAS_QUEUE_FULL);
+               } else {
+                       /* There is a device and it's ready for I/O. */
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+
+                       if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+                               /* The I/O was aborted. */
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                      flags);
+
+                               isci_task_refuse(ihost, task,
+                                                SAS_TASK_UNDELIVERED,
+                                                SAM_STAT_TASK_ABORTED);
+                       } else {
+                               task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+                               spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+                               /* build and send the request. */
+                               status = isci_request_execute(ihost, idev, task, tag);
+
+                               if (status != SCI_SUCCESS) {
+
+                                       spin_lock_irqsave(&task->task_state_lock, flags);
+                                       /* Did not really start this command. */
+                                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+                                       /* Indicate QUEUE_FULL so that the scsi
+                                       * midlayer retries. If the request
+                                       * failed for remote device reasons,
+                                       * it gets returned as
+                                       * SAS_TASK_UNDELIVERED next time
+                                       * through.
+                                       */
+                                       isci_task_refuse(ihost, task,
+                                                        SAS_TASK_COMPLETE,
+                                                        SAS_QUEUE_FULL);
+                               }
+                       }
+               }
+               if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+                       spin_lock_irqsave(&ihost->scic_lock, flags);
+                       /* command never hit the device, so just free
+                        * the tci and skip the sequence increment
+                        */
+                       isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+                       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               }
+               isci_put_device(idev);
+       }
+       return 0;
+}
+
+static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
+{
+       struct isci_tmf *isci_tmf;
+       enum sci_status status;
+
+       if (tmf_task != ireq->ttype)
+               return SCI_FAILURE;
+
+       isci_tmf = isci_request_access_tmf(ireq);
+
+       switch (isci_tmf->tmf_code) {
+
+       case isci_tmf_sata_srst_high:
+       case isci_tmf_sata_srst_low: {
+               struct host_to_dev_fis *fis = &ireq->stp.cmd;
+
+               memset(fis, 0, sizeof(*fis));
+
+               fis->fis_type  =  0x27;
+               fis->flags     &= ~0x80;
+               fis->flags     &= 0xF0;
+               if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
+                       fis->control |= ATA_SRST;
+               else
+                       fis->control &= ~ATA_SRST;
+               break;
+       }
+       /* other management commands go here... */
+       default:
+               return SCI_FAILURE;
+       }
+
+       /* core builds the protocol specific request
+        *  based on the h2d fis.
+        */
+       status = sci_task_request_construct_sata(ireq);
+
+       return status;
+}
+
+static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+                                                   struct isci_remote_device *idev,
+                                                   u16 tag, struct isci_tmf *isci_tmf)
+{
+       enum sci_status status = SCI_FAILURE;
+       struct isci_request *ireq = NULL;
+       struct domain_device *dev;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: isci_tmf = %p\n", __func__, isci_tmf);
+
+       dev = idev->domain_dev;
+
+       /* do common allocation and init of request object. */
+       ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
+       if (!ireq)
+               return NULL;
+
+       /* let the core do its construction. */
+       status = sci_task_request_construct(ihost, idev, tag,
+                                            ireq);
+
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: sci_task_request_construct failed - "
+                        "status = 0x%x\n",
+                        __func__,
+                        status);
+               return NULL;
+       }
+
+       /* XXX convert to get this from task->tproto like other drivers */
+       if (dev->dev_type == SAS_END_DEV) {
+               isci_tmf->proto = SAS_PROTOCOL_SSP;
+               status = sci_task_request_construct_ssp(ireq);
+               if (status != SCI_SUCCESS)
+                       return NULL;
+       }
+
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+               isci_tmf->proto = SAS_PROTOCOL_SATA;
+               status = isci_sata_management_task_request_build(ireq);
+
+               if (status != SCI_SUCCESS)
+                       return NULL;
+       }
+       return ireq;
+}
+
+static int isci_task_execute_tmf(struct isci_host *ihost,
+                                struct isci_remote_device *idev,
+                                struct isci_tmf *tmf, unsigned long timeout_ms)
+{
+       DECLARE_COMPLETION_ONSTACK(completion);
+       enum sci_task_status status = SCI_TASK_FAILURE;
+       struct isci_request *ireq;
+       int ret = TMF_RESP_FUNC_FAILED;
+       unsigned long flags;
+       unsigned long timeleft;
+       u16 tag;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       tag = isci_alloc_tag(ihost);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+               return ret;
+
+       /* sanity check, return TMF_RESP_FUNC_FAILED
+        * if the device is not there and ready.
+        */
+       if (!idev ||
+           (!test_bit(IDEV_IO_READY, &idev->flags) &&
+            !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: idev = %p not ready (%#lx)\n",
+                       __func__,
+                       idev, idev ? idev->flags : 0);
+               goto err_tci;
+       } else
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: idev = %p\n",
+                       __func__, idev);
+
+       /* Assign the pointer to the TMF's completion kernel wait structure. */
+       tmf->complete = &completion;
+
+       ireq = isci_task_request_build(ihost, idev, tag, tmf);
+       if (!ireq)
+               goto err_tci;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       /* start the TMF io. */
+       status = sci_controller_start_task(ihost, idev, ireq);
+
+       if (status != SCI_TASK_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: start_io failed - status = 0x%x, request = %p\n",
+                        __func__,
+                        status,
+                        ireq);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               goto err_tci;
+       }
+
+       if (tmf->cb_state_func != NULL)
+               tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
+
+       isci_request_change_state(ireq, started);
+
+       /* add the request to the remote device request list. */
+       list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* Wait for the TMF to complete, or a timeout. */
+       timeleft = wait_for_completion_timeout(&completion,
+                                              msecs_to_jiffies(timeout_ms));
+
+       if (timeleft == 0) {
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+
+               if (tmf->cb_state_func != NULL)
+                       tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+
+               sci_controller_terminate_request(ihost,
+                                                 idev,
+                                                 ireq);
+
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+               wait_for_completion(tmf->complete);
+       }
+
+       isci_print_tmf(tmf);
+
+       if (tmf->status == SCI_SUCCESS)
+               ret =  TMF_RESP_FUNC_COMPLETE;
+       else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: tmf.status == "
+                       "SCI_FAILURE_IO_RESPONSE_VALID\n",
+                       __func__);
+               ret =  TMF_RESP_FUNC_COMPLETE;
+       }
+       /* Else - leave the default "failed" status alone. */
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: completed request = %p\n",
+               __func__,
+               ireq);
+
+       return ret;
+
+ err_tci:
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       return ret;
+}
+
+static void isci_task_build_tmf(struct isci_tmf *tmf,
+                               enum isci_tmf_function_codes code,
+                               void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+                                                   struct isci_tmf *,
+                                                   void *),
+                               void *cb_data)
+{
+       memset(tmf, 0, sizeof(*tmf));
+
+       tmf->tmf_code      = code;
+       tmf->cb_state_func = tmf_sent_cb;
+       tmf->cb_data       = cb_data;
+}
+
+static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
+                                          enum isci_tmf_function_codes code,
+                                          void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+                                                              struct isci_tmf *,
+                                                              void *),
+                                          struct isci_request *old_request)
+{
+       isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
+       tmf->io_tag = old_request->io_tag;
+}
+
+/**
+ * isci_task_validate_request_to_abort() - This function checks the given I/O
+ *    against the "started" state.  If the request is still "started", its
+ *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
+ *    BEFORE CALLING THIS FUNCTION.
+ * @isci_request: This parameter specifies the request object to control.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_device: This is the device to which the request is pending.
+ * @aborted_io_completion: This is a completion structure that will be added to
+ *    the request in case it is changed to aborting; this completion is
+ *    triggered when the request is fully completed.
+ *
+ * Either "started" on successful change of the task status to "aborted", or
+ * "unallocated" if the task cannot be controlled.
+ */
+static enum isci_request_status isci_task_validate_request_to_abort(
+       struct isci_request *isci_request,
+       struct isci_host *isci_host,
+       struct isci_remote_device *isci_device,
+       struct completion *aborted_io_completion)
+{
+       enum isci_request_status old_state = unallocated;
+
+       /* Only abort the task if it's in the
+        *  device's request_in_process list
+        */
+       if (isci_request && !list_empty(&isci_request->dev_node)) {
+               old_state = isci_request_change_started_to_aborted(
+                       isci_request, aborted_io_completion);
+
+       }
+
+       return old_state;
+}
+
+/**
+* isci_request_cleanup_completed_loiterer() - This function will take care of
+*    the final cleanup on any request which has been explicitly terminated.
+* @isci_host: This parameter specifies the ISCI host object
+* @isci_device: This is the device to which the request is pending.
+* @isci_request: This parameter specifies the terminated request object.
+* @task: This parameter is the libsas I/O request.
+*/
+static void isci_request_cleanup_completed_loiterer(
+       struct isci_host          *isci_host,
+       struct isci_remote_device *isci_device,
+       struct isci_request       *isci_request,
+       struct sas_task           *task)
+{
+       unsigned long flags;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_device=%p, request=%p, task=%p\n",
+               __func__, isci_device, isci_request, task);
+
+       if (task != NULL) {
+
+               spin_lock_irqsave(&task->task_state_lock, flags);
+               task->lldd_task = NULL;
+
+               task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+
+               isci_set_task_doneflags(task);
+
+               /* If this task is not in the abort path, call task_done. */
+               if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+                       task->task_done(task);
+               } else
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+       }
+
+       if (isci_request != NULL) {
+               spin_lock_irqsave(&isci_host->scic_lock, flags);
+               list_del_init(&isci_request->dev_node);
+               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+       }
+}
+
+/**
+ * isci_terminate_request_core() - This function will terminate the given
+ *    request, and wait for it to complete.  This function must only be called
+ *    from a thread that can wait.  Note that the request is terminated and
+ *    completed (back to the host, if started there).
+ * @ihost: This SCU.
+ * @idev: The target.
+ * @isci_request: The I/O request to be terminated.
+ *
+ */
+static void isci_terminate_request_core(struct isci_host *ihost,
+                                       struct isci_remote_device *idev,
+                                       struct isci_request *isci_request)
+{
+       enum sci_status status      = SCI_SUCCESS;
+       bool was_terminated         = false;
+       bool needs_cleanup_handling = false;
+       enum isci_request_status request_status;
+       unsigned long     flags;
+       unsigned long     termination_completed = 1;
+       struct completion *io_request_completion;
+       struct sas_task   *task;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: device = %p; request = %p\n",
+               __func__, idev, isci_request);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       io_request_completion = isci_request->io_request_completion;
+
+       task = (isci_request->ttype == io_task)
+               ? isci_request_access_task(isci_request)
+               : NULL;
+
+       /* Note that we are not going to control
+        * the target to abort the request.
+        */
+       set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
+
+       /* Make sure the request wasn't just sitting around signalling
+        * device condition (if the request handle is NULL, then the
+        * request completed but needed additional handling here).
+        */
+       if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+               was_terminated = true;
+               needs_cleanup_handling = true;
+               status = sci_controller_terminate_request(ihost,
+                                                          idev,
+                                                          isci_request);
+       }
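+       /* If the request had already been terminated, was_terminated stays
+        * false and termination_completed keeps its default of 1, so the wait
+        * below is skipped and only the completion/cleanup handling runs.
+        */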
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /*
+        * The only time the request to terminate will
+        * fail is when the io request is completed and
+        * being aborted.
+        */
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: sci_controller_terminate_request"
+                       " returned = 0x%x\n",
+                       __func__, status);
+
+               isci_request->io_request_completion = NULL;
+
+       } else {
+               if (was_terminated) {
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: before completion wait (%p/%p)\n",
+                               __func__, isci_request, io_request_completion);
+
+                       /* Wait here for the request to complete. */
+                       #define TERMINATION_TIMEOUT_MSEC 500
+                       termination_completed
+                               = wait_for_completion_timeout(
+                                  io_request_completion,
+                                  msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
+
+                       if (!termination_completed) {
+
+                               /* The request to terminate has timed out.  */
+                               spin_lock_irqsave(&ihost->scic_lock,
+                                                 flags);
+
+                               /* Check for state changes. */
+                               if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+
+                                       /* The best we can do is to have the
+                                        * request die a silent death if it
+                                        * ever really completes.
+                                        *
+                                        * Set the request state to "dead",
+                                        * and clear the task pointer so that
+                                        * an actual completion event callback
+                                        * doesn't do anything.
+                                        */
+                                       isci_request->status = dead;
+                                       isci_request->io_request_completion
+                                               = NULL;
+
+                                       if (isci_request->ttype == io_task) {
+
+                                               /* Break links with the
+                                               * sas_task.
+                                               */
+                                               isci_request->ttype_ptr.io_task_ptr
+                                                       = NULL;
+                                       }
+                               } else
+                                       termination_completed = 1;
+
+                               spin_unlock_irqrestore(&ihost->scic_lock,
+                                                      flags);
+
+                               if (!termination_completed) {
+
+                                       dev_dbg(&ihost->pdev->dev,
+                                               "%s: *** Timeout waiting for "
+                                               "termination(%p/%p)\n",
+                                               __func__, io_request_completion,
+                                               isci_request);
+
+                                       /* The request can no longer be referenced
+                                        * safely since it may go away if the
+                                        * termination ever really does complete.
+                                        */
+                                       isci_request = NULL;
+                               }
+                       }
+                       if (termination_completed)
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: after completion wait (%p/%p)\n",
+                                       __func__, isci_request, io_request_completion);
+               }
+
+               if (termination_completed) {
+
+                       isci_request->io_request_completion = NULL;
+
+                       /* Peek at the status of the request.  This will tell
+                        * us if there was special handling on the request such that it
+                        * needs to be detached and freed here.
+                        */
+                       spin_lock_irqsave(&isci_request->state_lock, flags);
+                       request_status = isci_request->status;
+
+                       if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
+                           && ((request_status == aborted)
+                               || (request_status == aborting)
+                               || (request_status == terminating)
+                               || (request_status == completed)
+                               || (request_status == dead)
+                               )
+                           ) {
+
+                               /* The completion routine won't free a request in
+                                * the aborted/aborting/etc. states, so we do
+                                * it here.
+                                */
+                               needs_cleanup_handling = true;
+                       }
+                       spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+               }
+               if (needs_cleanup_handling)
+                       isci_request_cleanup_completed_loiterer(
+                               ihost, idev, isci_request, task);
+       }
+}
+
+/**
+ * isci_terminate_pending_requests() - This function will change all of the
+ *    requests on the given device to the "terminating" state, will terminate
+ *    the requests, and wait for them to complete.  This function must only be
+ *    called from a thread that can wait.  Note that the requests are all
+ *    terminated and completed (back to the host, if started there).
+ * @ihost: This parameter specifies the SCU.
+ * @idev: This parameter specifies the target.
+ *
+ */
+void isci_terminate_pending_requests(struct isci_host *ihost,
+                                    struct isci_remote_device *idev)
+{
+       struct completion request_completion;
+       enum isci_request_status old_state;
+       unsigned long flags;
+       LIST_HEAD(list);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
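+       /* Move the device's pending requests onto a private list so that each
+        * one can be terminated with the scic_lock dropped around the wait.
+        */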
+       list_splice_init(&idev->reqs_in_process, &list);
+
+       /* assumes that isci_terminate_request_core deletes from the list */
+       while (!list_empty(&list)) {
+               struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
+
+               /* Change state to "terminating" if it is currently
+                * "started".
+                */
+               old_state = isci_request_change_started_to_newstate(ireq,
+                                                                   &request_completion,
+                                                                   terminating);
+               switch (old_state) {
+               case started:
+               case completed:
+               case aborting:
+                       break;
+               default:
+                       /* termination in progress, or otherwise dispositioned.
+                        * We know the request was on 'list' so should be safe
+                        * to move it back to reqs_in_process
+                        */
+                       list_move(&ireq->dev_node, &idev->reqs_in_process);
+                       ireq = NULL;
+                       break;
+               }
+
+               if (!ireq)
+                       continue;
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+               init_completion(&request_completion);
+
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: idev=%p request=%p; task=%p old_state=%d\n",
+                        __func__, idev, ireq,
+                       ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+                       old_state);
+
+               /* If the old_state is started:
+                * This request was not already being aborted. If it had been,
+                * then the aborting I/O (ie. the TMF request) would not be in
+                * the aborting state, and thus would be terminated here.  Note
+                * that since the TMF completion's call to the kernel function
+                * "complete()" does not happen until the pending I/O request
+                * terminate fully completes, we do not have to implement a
+                * special wait here for already aborting requests - the
+                * termination of the TMF request will force the request
+                * to finish its already-started terminate.
+                *
+                * If old_state == completed:
+                * This request completed from the SCU hardware perspective
+                * and now just needs cleaning up in terms of freeing the
+                * request and potentially calling up to libsas.
+                *
+                * If old_state == aborting:
+                * This request has already gone through a TMF timeout, but may
+                * not have been terminated; needs cleaning up at least.
+                */
+               isci_terminate_request_core(ihost, idev, ireq);
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+       }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * isci_task_send_lu_reset_sas() - This function is called by one of the SAS
+ *    Domain Template functions.
+ * @isci_host: This parameter specifies the ISCI host object.
+ * @isci_device: This parameter specifies the device to be reset.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+static int isci_task_send_lu_reset_sas(
+       struct isci_host *isci_host,
+       struct isci_remote_device *isci_device,
+       u8 *lun)
+{
+       struct isci_tmf tmf;
+       int ret = TMF_RESP_FUNC_FAILED;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_host = %p, isci_device = %p\n",
+               __func__, isci_host, isci_device);
+       /* Send the LUN reset to the target.  By the time the call returns,
+        * the TMF has fully executed in the target (in which case the return
+        * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or
+        * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
+        */
+       isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
+
+       #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
+       ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+
+       if (ret == TMF_RESP_FUNC_COMPLETE)
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s: %p: TMF_LU_RESET passed\n",
+                       __func__, isci_device);
+       else
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s: %p: TMF_LU_RESET failed (%x)\n",
+                       __func__, isci_device, ret);
+
+       return ret;
+}
+
+static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
+                                struct isci_remote_device *idev, u8 *lun)
+{
+       int ret = TMF_RESP_FUNC_FAILED;
+       struct isci_tmf tmf;
+
+       /* Send the soft reset to the target */
+       #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
+       isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
+
+       ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
+
+       if (ret != TMF_RESP_FUNC_COMPLETE) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: Assert SRST failed (%p) = %x",
+                        __func__, idev, ret);
+
+               /* Return the failure so that the LUN reset is escalated
+                * to a target reset.
+                */
+       }
+       return ret;
+}
+
+/**
+ * isci_task_lu_reset() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas,
+ *    to reset the given lun. Note the assumption that while this call is
+ *    executing, no I/O will be sent by the host to the device.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
+{
+       struct isci_host *isci_host = dev_to_ihost(domain_device);
+       struct isci_remote_device *isci_device;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+       isci_device = isci_lookup_device(domain_device);
+       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
+                __func__, domain_device, isci_host, isci_device);
+
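+       /* Flag the device for error handler processing before checking
+        * whether a device reset is already pending.
+        */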
+       if (isci_device)
+               set_bit(IDEV_EH, &isci_device->flags);
+
+       /* If there is a device reset pending on any request in the
+        * device's list, fail this LUN reset request in order to
+        * escalate to the device reset.
+        */
+       if (!isci_device ||
+           isci_device_is_reset_pending(isci_host, isci_device)) {
+               dev_dbg(&isci_host->pdev->dev,
+                        "%s: No dev (%p), or "
+                        "RESET PENDING: domain_device=%p\n",
+                        __func__, isci_device, domain_device);
+               ret = TMF_RESP_FUNC_FAILED;
+               goto out;
+       }
+
+       /* Send the task management part of the reset. */
+       if (sas_protocol_ata(domain_device->tproto))
+               ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
+       else
+               ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
+
+       /* If the LUN reset worked, all the I/O can now be terminated. */
+       if (ret == TMF_RESP_FUNC_COMPLETE)
+               /* Terminate all I/O now. */
+               isci_terminate_pending_requests(isci_host,
+                                               isci_device);
+
+ out:
+       isci_put_device(isci_device);
+       return ret;
+}
+
+
+/*      int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
+int isci_task_clear_nexus_port(struct asd_sas_port *port)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+/* Task Management Functions. Must be called from process context.      */
+
+/**
+ * isci_abort_task_process_cb() - This is a helper function for the abort task
+ *    TMF command.  It manages the request state with respect to the successful
+ *    transmission / completion of the abort task request.
+ * @cb_state: This parameter specifies when this function was called - after
+ *    the TMF request has been started or after it has timed out.
+ * @tmf: This parameter specifies the TMF in progress.
+ * @cb_data: This parameter is the callback data; here, the request being
+ *    aborted.
+ *
+ */
+static void isci_abort_task_process_cb(
+       enum isci_tmf_cb_state cb_state,
+       struct isci_tmf *tmf,
+       void *cb_data)
+{
+       struct isci_request *old_request;
+
+       old_request = (struct isci_request *)cb_data;
+
+       dev_dbg(&old_request->isci_host->pdev->dev,
+               "%s: tmf=%p, old_request=%p\n",
+               __func__, tmf, old_request);
+
+       switch (cb_state) {
+
+       case isci_tmf_started:
+               /* The TMF has been started.  Nothing to do here, since the
+                * request state was already set to "aborted" by the abort
+                * task function.
+                */
+               if ((old_request->status != aborted)
+                       && (old_request->status != completed))
+                       dev_dbg(&old_request->isci_host->pdev->dev,
+                               "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
+                               __func__, old_request->status, tmf, old_request);
+               break;
+
+       case isci_tmf_timed_out:
+
+               /* Set the task's state to "aborting", since the abort task
+                * function thread set it to "aborted" (above) in anticipation
+                * of the task management request working correctly.  Since the
+                * timeout has now fired, the TMF request failed.  We set the
+                * state such that the request completion will indicate the
+                * device is no longer present.
+                */
+               isci_request_change_state(old_request, aborting);
+               break;
+
+       default:
+               dev_dbg(&old_request->isci_host->pdev->dev,
+                       "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
+                       __func__, cb_state, tmf, old_request);
+               break;
+       }
+}
+
+/**
+ * isci_task_abort_task() - This function is one of the SAS Domain Template
+ *    functions. This function is called by libsas to abort a specified task.
+ * @task: This parameter specifies the SAS task to abort.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task(struct sas_task *task)
+{
+       struct isci_host *isci_host = dev_to_ihost(task->dev);
+       DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
+       struct isci_request       *old_request = NULL;
+       enum isci_request_status  old_state;
+       struct isci_remote_device *isci_device = NULL;
+       struct isci_tmf           tmf;
+       int                       ret = TMF_RESP_FUNC_FAILED;
+       unsigned long             flags;
+       bool                      any_dev_reset = false;
+
+       /* Get the isci_request reference from the task.  Note that
+        * this check does not depend on the pending request list
+        * in the device, because tasks driving resets may land here
+        * after completion in the core.
+        */
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+       spin_lock(&task->task_state_lock);
+
+       old_request = task->lldd_task;
+
+       /* If task is already done, the request isn't valid */
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+           (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+           old_request)
+               isci_device = isci_lookup_device(task->dev);
+
+       spin_unlock(&task->task_state_lock);
+       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: task = %p\n", __func__, task);
+
+       if (!isci_device || !old_request)
+               goto out;
+
+       set_bit(IDEV_EH, &isci_device->flags);
+
+       /* This version of the driver will fail abort requests for
+        * SATA/STP.  Failing the abort request this way will cause the
+        * SCSI error handler thread to escalate to LUN reset
+        */
+       if (sas_protocol_ata(task->task_proto)) {
+               dev_dbg(&isci_host->pdev->dev,
+                           " task %p is for a STP/SATA device;"
+                           " returning TMF_RESP_FUNC_FAILED\n"
+                           " to cause a LUN reset...\n", task);
+               goto out;
+       }
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: old_request == %p\n", __func__, old_request);
+
+       any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
+
+       spin_lock_irqsave(&task->task_state_lock, flags);
+
+       any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+
+       /* If the extraction of the request reference from the task
+        * failed, then the request has been completed (or if there is a
+        * pending reset then this abort request function must be failed
+        * in order to escalate to the target reset).
+        */
+       if ((old_request == NULL) || any_dev_reset) {
+
+               /* If the device reset task flag is set, fail the task
+                * management request.  Otherwise, the original request
+                * has completed.
+                */
+               if (any_dev_reset) {
+
+                       /* Turn off the task's DONE to make sure this
+                        * task is escalated to a target reset.
+                        */
+                       task->task_state_flags &= ~SAS_TASK_STATE_DONE;
+
+                       /* Make the reset happen as soon as possible. */
+                       task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+                       /* Fail the task management request in order to
+                        * escalate to the target reset.
+                        */
+                       ret = TMF_RESP_FUNC_FAILED;
+
+                       dev_dbg(&isci_host->pdev->dev,
+                               "%s: Failing task abort in order to "
+                               "escalate to target reset because\n"
+                               "SAS_TASK_NEED_DEV_RESET is set for "
+                               "task %p on dev %p\n",
+                               __func__, task, isci_device);
+
+
+               } else {
+                       /* The request has already completed and there
+                        * is nothing to do here other than to set the task
+                        * done bit, and indicate that the task abort function
+                        * was successful.
+                        */
+                       isci_set_task_doneflags(task);
+
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+                       ret = TMF_RESP_FUNC_COMPLETE;
+
+                       dev_dbg(&isci_host->pdev->dev,
+                               "%s: abort task not needed for %p\n",
+                               __func__, task);
+               }
+               goto out;
+       } else {
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
+       }
+
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+       /* Check the request status and change to "aborted" if currently
+        * "starting"; if true then set the I/O kernel completion
+        * struct that will be triggered when the request completes.
+        */
+       old_state = isci_task_validate_request_to_abort(
+                               old_request, isci_host, isci_device,
+                               &aborted_io_completion);
+       if ((old_state != started) &&
+           (old_state != completed) &&
+           (old_state != aborting)) {
+
+               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+               /* The request was already being handled by someone else (because
+                * they got to set the state away from started).
+                */
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s:  device = %p; old_request %p already being aborted\n",
+                       __func__,
+                       isci_device, old_request);
+               ret = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       }
+       if (task->task_proto == SAS_PROTOCOL_SMP ||
+           test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+
+               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s: SMP request (%d)"
+                       " or complete_in_target (%d), thus no TMF\n",
+                       __func__, (task->task_proto == SAS_PROTOCOL_SMP),
+                       test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
+
+               /* Set the state on the task. */
+               isci_task_all_done(task);
+
+               ret = TMF_RESP_FUNC_COMPLETE;
+
+               /* Stopping and SMP devices are not sent a TMF, and are not
+                * reset, but the outstanding I/O request is terminated below.
+                */
+       } else {
+               /* Fill in the tmf structure */
+               isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
+                                              isci_abort_task_process_cb,
+                                              old_request);
+
+               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+               #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
+               ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+                                           ISCI_ABORT_TASK_TIMEOUT_MS);
+
+               if (ret != TMF_RESP_FUNC_COMPLETE)
+                       dev_dbg(&isci_host->pdev->dev,
+                               "%s: isci_task_send_tmf failed\n",
+                               __func__);
+       }
+       if (ret == TMF_RESP_FUNC_COMPLETE) {
+               set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
+
+               /* Clean up the request on our side, and wait for the aborted
+                * I/O to complete.
+                */
+               isci_terminate_request_core(isci_host, isci_device, old_request);
+       }
+
+       /* Make sure we do not leave a reference to aborted_io_completion */
+       old_request->io_request_completion = NULL;
+ out:
+       isci_put_device(isci_device);
+       return ret;
+}
+
+/**
+ * isci_task_abort_task_set() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas,
+ *    to abort all tasks for the given lun.
+ * @d_device: This parameter specifies the domain device associated with this
+ *    request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task_set(
+       struct domain_device *d_device,
+       u8 *lun)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_clear_aca() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ *    request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_aca(
+       struct domain_device *d_device,
+       u8 *lun)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+/**
+ * isci_task_clear_task_set() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ *    request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_task_set(
+       struct domain_device *d_device,
+       u8 *lun)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_query_task() - This function is implemented to cause libsas to
+ *    correctly escalate the failed abort to a LUN or target reset (this is
+ *    because the sas_scsi_find_task libsas function does not correctly
+ *    interpret all return codes from the abort task call).  When
+ *    TMF_RESP_FUNC_SUCC is returned, libsas turns this into a LUN reset; when
+ *    FUNC_FAILED is returned, libsas will turn this into a target reset.
+ * @task: This parameter specifies the sas task being queried.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_query_task(
+       struct sas_task *task)
+{
+       /* See if there is a pending device reset for this device. */
+       if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+               return TMF_RESP_FUNC_FAILED;
+       else
+               return TMF_RESP_FUNC_SUCC;
+}
+
+/*
+ * isci_task_request_complete() - This function is called by the sci core when
+ *    a task request completes.
+ * @ihost: This parameter specifies the ISCI host object
+ * @ireq: This parameter is the completed isci_request object.
+ * @completion_status: This parameter specifies the completion status from the
+ *    sci core.
+ *
+ * none.
+ */
+void
+isci_task_request_complete(struct isci_host *ihost,
+                          struct isci_request *ireq,
+                          enum sci_task_status completion_status)
+{
+       struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+       struct completion *tmf_complete;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: request = %p, status=%d\n",
+               __func__, ireq, completion_status);
+
+       isci_request_change_state(ireq, completed);
+
+       tmf->status = completion_status;
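+       /* Mark the request as complete in the target so that a later abort
+        * path will not try to send another TMF for it.
+        */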
+       set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
+
+       if (tmf->proto == SAS_PROTOCOL_SSP) {
+               memcpy(&tmf->resp.resp_iu,
+                      &ireq->ssp.rsp,
+                      SSP_RESP_IU_MAX_SIZE);
+       } else if (tmf->proto == SAS_PROTOCOL_SATA) {
+               memcpy(&tmf->resp.d2h_fis,
+                      &ireq->stp.rsp,
+                      sizeof(struct dev_to_host_fis));
+       }
+
+       /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
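+       /* Capture the completion pointer now; it is signalled last, after the
+        * request has been handed back to the core and unlinked.
+        */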
+       tmf_complete = tmf->complete;
+
+       sci_controller_complete_io(ihost, ireq->target_device, ireq);
+       /* set the 'terminated' flag so the request cannot be terminated
+        * or completed again.
+        */
+       set_bit(IREQ_TERMINATED, &ireq->flags);
+
+       isci_request_change_state(ireq, unallocated);
+       list_del_init(&ireq->dev_node);
+
+       /* The task management part completes last. */
+       complete(tmf_complete);
+}
+
+static void isci_smp_task_timedout(unsigned long _task)
+{
+       struct sas_task *task = (void *) _task;
+       unsigned long flags;
+
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+               task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+       complete(&task->completion);
+}
+
+static void isci_smp_task_done(struct sas_task *task)
+{
+       if (!del_timer(&task->timer))
+               return;
+       complete(&task->completion);
+}
+
+static struct sas_task *isci_alloc_task(void)
+{
+       struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
+
+       if (task) {
+               INIT_LIST_HEAD(&task->list);
+               spin_lock_init(&task->task_state_lock);
+               task->task_state_flags = SAS_TASK_STATE_PENDING;
+               init_timer(&task->timer);
+               init_completion(&task->completion);
+       }
+
+       return task;
+}
+
+static void isci_free_task(struct isci_host *ihost, struct sas_task  *task)
+{
+       if (task) {
+               BUG_ON(!list_empty(&task->list));
+               kfree(task);
+       }
+}
+
+static int isci_smp_execute_task(struct isci_host *ihost,
+                                struct domain_device *dev, void *req,
+                                int req_size, void *resp, int resp_size)
+{
+       int res, retry;
+       struct sas_task *task = NULL;
+
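+       /* Attempt the SMP request up to three times; the sas_task is rebuilt
+        * for each attempt.
+        */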
+       for (retry = 0; retry < 3; retry++) {
+               task = isci_alloc_task();
+               if (!task)
+                       return -ENOMEM;
+
+               task->dev = dev;
+               task->task_proto = dev->tproto;
+               sg_init_one(&task->smp_task.smp_req, req, req_size);
+               sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
+
+               task->task_done = isci_smp_task_done;
+
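+               /* Arm a 10 second timeout; isci_smp_task_timedout() marks the
+                * task aborted and completes it if no response arrives.
+                */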
+               task->timer.data = (unsigned long) task;
+               task->timer.function = isci_smp_task_timedout;
+               task->timer.expires = jiffies + 10*HZ;
+               add_timer(&task->timer);
+
+               res = isci_task_execute_task(task, 1, GFP_KERNEL);
+
+               if (res) {
+                       del_timer(&task->timer);
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: executing SMP task failed:%d\n",
+                               __func__, res);
+                       goto ex_err;
+               }
+
+               wait_for_completion(&task->completion);
+               res = -ECOMM;
+               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: smp task timed out or aborted\n",
+                               __func__);
+                       isci_task_abort_task(task);
+                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SMP task aborted and not done\n",
+                                       __func__);
+                               goto ex_err;
+                       }
+               }
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                   task->task_status.stat == SAM_STAT_GOOD) {
+                       res = 0;
+                       break;
+               }
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_UNDERRUN) {
+                       /* no error, but return the number of bytes of
+                        * underrun */
+                       res = task->task_status.residual;
+                       break;
+               }
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_OVERRUN) {
+                       res = -EMSGSIZE;
+                       break;
+               } else {
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: task to dev %016llx response: 0x%x "
+                               "status 0x%x\n", __func__,
+                               SAS_ADDR(dev->sas_addr),
+                               task->task_status.resp,
+                               task->task_status.stat);
+                       isci_free_task(ihost, task);
+                       task = NULL;
+               }
+       }
+ex_err:
+       BUG_ON(retry == 3 && task != NULL);
+       isci_free_task(ihost, task);
+       return res;
+}
+
+#define DISCOVER_REQ_SIZE  16
+#define DISCOVER_RESP_SIZE 56
+
+int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
+                                      struct domain_device *dev,
+                                      int phy_id, int *adt)
+{
+       struct smp_resp *disc_resp;
+       u8 *disc_req;
+       int res;
+
+       disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
+       if (!disc_resp)
+               return -ENOMEM;
+
+       disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
+       if (disc_req) {
+               disc_req[0] = SMP_REQUEST;
+               disc_req[1] = SMP_DISCOVER;
+               disc_req[9] = phy_id;
+       } else {
+               kfree(disc_resp);
+               return -ENOMEM;
+       }
+       res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
+                                   disc_resp, DISCOVER_RESP_SIZE);
+       if (!res) {
+               if (disc_resp->result != SMP_RESP_FUNC_ACC)
+                       res = disc_resp->result;
+               else
+                       *adt = disc_resp->disc.attached_dev_type;
+       }
+       kfree(disc_req);
+       kfree(disc_resp);
+
+       return res;
+}
+
+static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
+{
+       struct domain_device *dev = idev->domain_dev;
+       struct isci_port *iport = idev->isci_port;
+       struct isci_host *ihost = iport->isci_host;
+       int res, iteration = 0, attached_device_type;
+       #define STP_WAIT_MSECS 25000
+       unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
+       unsigned long deadline = jiffies + tmo;
+       enum {
+               SMP_PHYWAIT_PHYDOWN,
+               SMP_PHYWAIT_PHYUP,
+               SMP_PHYWAIT_DONE
+       } phy_state = SMP_PHYWAIT_PHYDOWN;
+
+       /* While there is time, wait for the phy to go away and come back */
+       while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
+               int event = atomic_read(&iport->event);
+
+               ++iteration;
+
+               tmo = wait_event_timeout(ihost->eventq,
+                                        event != atomic_read(&iport->event) ||
+                                        !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
+                                        tmo);
+               /* link down, stop polling */
+               if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
+                       break;
+
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: iport %p, iteration %d,"
+                       " phase %d: time_remaining %lu, bcns = %d\n",
+                       __func__, iport, iteration, phy_state,
+                       tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
+
+               res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
+                                                        &attached_device_type);
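+               /* Recompute the remaining wait budget for the next poll. */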
+               tmo = deadline - jiffies;
+
+               if (res) {
+                       dev_dbg(&ihost->pdev->dev,
+                                "%s: iteration %d, phase %d:"
+                                " SMP error=%d, time_remaining=%lu\n",
+                                __func__, iteration, phy_state, res, tmo);
+                       break;
+               }
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: iport %p, iteration %d,"
+                       " phase %d: time_remaining %lu, bcns = %d, "
+                       "attdevtype = %x\n",
+                       __func__, iport, iteration, phy_state,
+                       tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
+                       attached_device_type);
+
+               switch (phy_state) {
+               case SMP_PHYWAIT_PHYDOWN:
+                       /* Has the device gone away? */
+                       if (!attached_device_type)
+                               phy_state = SMP_PHYWAIT_PHYUP;
+
+                       break;
+
+               case SMP_PHYWAIT_PHYUP:
+                       /* Has the device come back? */
+                       if (attached_device_type)
+                               phy_state = SMP_PHYWAIT_DONE;
+                       break;
+
+               case SMP_PHYWAIT_DONE:
+                       break;
+               }
+
+       }
+       dev_dbg(&ihost->pdev->dev, "%s: done\n",  __func__);
+}
+
+static int isci_reset_device(struct isci_host *ihost,
+                            struct isci_remote_device *idev)
+{
+       struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
+       struct isci_port *iport = idev->isci_port;
+       enum sci_status status;
+       unsigned long flags;
+       int rc;
+
+       dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       status = sci_remote_device_reset(idev);
+       if (status != SCI_SUCCESS) {
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: sci_remote_device_reset(%p) returned %d!\n",
+                        __func__, idev, status);
+
+               return TMF_RESP_FUNC_FAILED;
+       }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* Make sure all pending requests are able to be fully terminated. */
+       isci_device_clear_reset_pending(ihost, idev);
+
+       /* If this is a device on an expander, disable BCN processing. */
+       if (!scsi_is_sas_phy_local(phy))
+               set_bit(IPORT_BCN_BLOCKED, &iport->flags);
+
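+       /* Ask libsas for a hard reset of the attached phy; for an expander
+        * attached device this is an SMP phy-control driven reset.
+        */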
+       rc = sas_phy_reset(phy, true);
+
+       /* Terminate in-progress I/O now. */
+       isci_remote_device_nuke_requests(ihost, idev);
+
+       /* Since all pending TCs have been cleaned, resume the RNC. */
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       status = sci_remote_device_reset_complete(idev);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* If this is a device on an expander, bring the phy back up. */
+       if (!scsi_is_sas_phy_local(phy)) {
+               /* A phy reset will cause the device to go away then reappear.
+                * Since libsas will take action on incoming BCNs (eg. remove
+                * a device going through an SMP phy-control driven reset),
+                * we need to wait until the phy comes back up before letting
+                * discovery proceed in libsas.
+                */
+               isci_wait_for_smp_phy_reset(idev, phy->number);
+
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               isci_port_bcn_enable(ihost, idev->isci_port);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+       }
+
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: sci_remote_device_reset_complete(%p) "
+                        "returned %d!\n", __func__, idev, status);
+       }
+
+       dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
+
+       return rc;
+}
+
+int isci_task_I_T_nexus_reset(struct domain_device *dev)
+{
+       struct isci_host *ihost = dev_to_ihost(dev);
+       struct isci_remote_device *idev;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       idev = isci_lookup_device(dev);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+               ret = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       }
+
+       ret = isci_reset_device(ihost, idev);
+ out:
+       isci_put_device(idev);
+       return ret;
+}
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+       struct domain_device *dev = sdev_to_domain_dev(cmd->device);
+       struct isci_host *ihost = dev_to_ihost(dev);
+       struct isci_remote_device *idev;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       idev = isci_lookup_device(dev);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (!idev) {
+               ret = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       }
+
+       ret = isci_reset_device(ihost, idev);
+ out:
+       isci_put_device(idev);
+       return ret;
+}
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
new file mode 100644 (file)
index 0000000..4a7fa90
--- /dev/null
@@ -0,0 +1,367 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_TASK_H_
+#define _ISCI_TASK_H_
+
+#include <scsi/sas_ata.h>
+#include "host.h"
+
+struct isci_request;
+
+/**
+ * enum isci_tmf_cb_state - This enum defines the possible states in which the
+ *    TMF callback function is invoked during the TMF execution process.
+ *
+ *
+ */
+enum isci_tmf_cb_state {
+
+       isci_tmf_init_state = 0,
+       isci_tmf_started,
+       isci_tmf_timed_out
+};
+
+/**
+ * enum isci_tmf_function_codes - This enum defines the possible preparations
+ *    of task management requests.
+ *
+ *
+ */
+enum isci_tmf_function_codes {
+
+       isci_tmf_func_none      = 0,
+       isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
+       isci_tmf_ssp_lun_reset  = TMF_LU_RESET,
+       isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
+       isci_tmf_sata_srst_low  = TMF_LU_RESET + 0x101  /* Non SCSI */
+};
+
+/**
+ * struct isci_tmf - This class represents the task management object which
+ *    acts as an interface to libsas for processing task management requests
+ *
+ *
+ */
+struct isci_tmf {
+
+       struct completion *complete;
+       enum sas_protocol proto;
+       union {
+               struct ssp_response_iu resp_iu;
+               struct dev_to_host_fis d2h_fis;
+               u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+       } resp;
+       unsigned char lun[8];
+       u16 io_tag;
+       struct isci_remote_device *device;
+       enum isci_tmf_function_codes tmf_code;
+       int status;
+
+       /* The optional callback function allows the user process to
+        * track the TMF transmit / timeout conditions.
+        */
+       void (*cb_state_func)(
+               enum isci_tmf_cb_state,
+               struct isci_tmf *, void *);
+       void *cb_data;
+
+};
+
+static inline void isci_print_tmf(struct isci_tmf *tmf)
+{
+       if (SAS_PROTOCOL_SATA == tmf->proto)
+               dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+                       "%s: status = %x\n"
+                       "tmf->resp.d2h_fis.status = %x\n"
+                       "tmf->resp.d2h_fis.error = %x\n",
+                       __func__,
+                       tmf->status,
+                       tmf->resp.d2h_fis.status,
+                       tmf->resp.d2h_fis.error);
+       else
+               dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+                       "%s: status = %x\n"
+                       "tmf->resp.resp_iu.datapres = %x\n"
+                       "tmf->resp.resp_iu.status = %x\n"
+                       "tmf->resp.resp_iu.response_data_len = %x\n"
+                       "tmf->resp.resp_iu.resp_data[0] = %x\n"
+                       "tmf->resp.resp_iu.resp_data[1] = %x\n"
+                       "tmf->resp.resp_iu.resp_data[2] = %x\n"
+                       "tmf->resp.resp_iu.resp_data[3] = %x\n",
+                       __func__,
+                       tmf->status,
+                       tmf->resp.resp_iu.datapres,
+                       tmf->resp.resp_iu.status,
+                       be32_to_cpu(tmf->resp.resp_iu.response_data_len),
+                       tmf->resp.resp_iu.resp_data[0],
+                       tmf->resp.resp_iu.resp_data[1],
+                       tmf->resp.resp_iu.resp_data[2],
+                       tmf->resp.resp_iu.resp_data[3]);
+}
+
+
+int isci_task_execute_task(
+       struct sas_task *task,
+       int num,
+       gfp_t gfp_flags);
+
+int isci_task_abort_task(
+       struct sas_task *task);
+
+int isci_task_abort_task_set(
+       struct domain_device *d_device,
+       u8 *lun);
+
+int isci_task_clear_aca(
+       struct domain_device *d_device,
+       u8 *lun);
+
+int isci_task_clear_task_set(
+       struct domain_device *d_device,
+       u8 *lun);
+
+int isci_task_query_task(
+       struct sas_task *task);
+
+int isci_task_lu_reset(
+       struct domain_device *d_device,
+       u8 *lun);
+
+int isci_task_clear_nexus_port(
+       struct asd_sas_port *port);
+
+int isci_task_clear_nexus_ha(
+       struct sas_ha_struct *ha);
+
+int isci_task_I_T_nexus_reset(
+       struct domain_device *d_device);
+
+void isci_task_request_complete(
+       struct isci_host *isci_host,
+       struct isci_request *request,
+       enum sci_task_status completion_status);
+
+u16 isci_task_ssp_request_get_io_tag_to_manage(
+       struct isci_request *request);
+
+u8 isci_task_ssp_request_get_function(
+       struct isci_request *request);
+
+
+void *isci_task_ssp_request_get_response_data_address(
+       struct isci_request *request);
+
+u32 isci_task_ssp_request_get_response_data_length(
+       struct isci_request *request);
+
+int isci_queuecommand(
+       struct scsi_cmnd *scsi_cmd,
+       void (*donefunc)(struct scsi_cmnd *));
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd);
+
+/**
+ * enum isci_completion_selection - This enum defines the possible actions to
+ *    take with respect to a given request's notification back to libsas.
+ *
+ *
+ */
+enum isci_completion_selection {
+
+       isci_perform_normal_io_completion,      /* Normal notify (task_done) */
+       isci_perform_aborted_io_completion,     /* No notification.   */
+       isci_perform_error_io_completion        /* Use sas_task_abort */
+};
+
+static inline void isci_set_task_doneflags(
+       struct sas_task *task)
+{
+       /* Since no further action will be taken on this task,
+        * make sure to mark it complete from the lldd perspective.
+        */
+       task->task_state_flags |= SAS_TASK_STATE_DONE;
+       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+}
+/**
+ * isci_task_all_done() - This function clears the task bits to indicate the
+ *    LLDD is done with the task.
+ *
+ *
+ */
+static inline void isci_task_all_done(
+       struct sas_task *task)
+{
+       unsigned long flags;
+
+       /* Since no further action will be taken on this task,
+        * make sure to mark it complete from the lldd perspective.
+        */
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       isci_set_task_doneflags(task);
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+}
+
+/**
+ * isci_task_set_completion_status() - This function sets the completion status
+ *    for the request.
+ * @task: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ * @task_notification_selection: This parameter is the currently requested
+ *    notification mode for the request.
+ *
+ * Return: The new notification mode for the request.
+ */
+static inline enum isci_completion_selection
+isci_task_set_completion_status(
+       struct sas_task *task,
+       enum service_response response,
+       enum exec_status status,
+       enum isci_completion_selection task_notification_selection)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&task->task_state_lock, flags);
+
+       /* If a device reset is being indicated, make sure the I/O
+       * is in the error path.
+       */
+       if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
+               /* Fail the I/O to make sure it goes into the error path. */
+               response = SAS_TASK_UNDELIVERED;
+               status = SAM_STAT_TASK_ABORTED;
+
+               task_notification_selection = isci_perform_error_io_completion;
+       }
+       task->task_status.resp = response;
+       task->task_status.stat = status;
+
+       switch (task_notification_selection) {
+
+       case isci_perform_error_io_completion:
+
+               if (task->task_proto == SAS_PROTOCOL_SMP) {
+                       /* There is no error escalation in the SMP case.
+                        * Convert to a normal completion to avoid the
+                        * timeout in the discovery path and to let the
+                        * next action take place quickly.
+                        */
+                       task_notification_selection
+                               = isci_perform_normal_io_completion;
+
+                       /* Fall through to the normal case... */
+               } else {
+                       /* Use sas_task_abort */
+                       /* Leave SAS_TASK_STATE_DONE clear
+                        * Leave SAS_TASK_AT_INITIATOR set.
+                        */
+                       break;
+               }
+
+       case isci_perform_aborted_io_completion:
+               /* This path can occur with task-managed requests as well as
+                * requests terminated because of LUN or device resets.
+                */
+               /* Fall through to the normal case... */
+       case isci_perform_normal_io_completion:
+               /* Normal notification (task_done) */
+               isci_set_task_doneflags(task);
+               break;
+       default:
+               WARN_ONCE(1, "unknown task_notification_selection: %d\n",
+                        task_notification_selection);
+               break;
+       }
+
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+       return task_notification_selection;
+
+}
+/**
+ * isci_execpath_callback() - This function is called from the task
+ *    execute path when the task needs to callback libsas about the submit-time
+ *    task failure.  The callback occurs either through the task's done function
+ *    or through sas_task_abort.  In the case of regular non-discovery SATA/STP
+ *    I/O requests, libsas takes the host lock before calling execute task.
+ *    Therefore in this situation the host lock must be managed before calling
+ *    the func.
+ * @ihost: This parameter is the controller to which the I/O request was sent.
+ * @task: This parameter is the I/O request.
+ * @func: This parameter is the function to call in the correct context.
+ *
+ */
+static inline void isci_execpath_callback(struct isci_host *ihost,
+                                         struct sas_task  *task,
+                                         void (*func)(struct sas_task *))
+{
+       struct domain_device *dev = task->dev;
+
+       if (dev_is_sata(dev) && task->uldd_task) {
+               unsigned long flags;
+
+               /* Since we are still in the submit path, and since
+                * libsas takes the host lock on behalf of SATA
+                * devices before I/O starts (in the non-discovery case),
+                * we need to unlock before we can call the callback function.
+                */
+               raw_local_irq_save(flags);
+               spin_unlock(dev->sata_dev.ap->lock);
+               func(task);
+               spin_lock(dev->sata_dev.ap->lock);
+               raw_local_irq_restore(flags);
+       } else
+               func(task);
+}
+#endif /* !defined(_SCI_TASK_H_) */
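
As a usage sketch for the helper above (the wrapper name is hypothetical; sas_task_abort is simply one valid completion function for the SATA case), a submit-time failure would be handed back to libsas like this:

/* Hypothetical call-site sketch for isci_execpath_callback(). */
static void isci_refuse_task_sketch(struct isci_host *ihost,
                                    struct sas_task *task)
{
        /* ... record the submit-time failure in task->task_status ... */
        isci_execpath_callback(ihost, task, sas_task_abort);
}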
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
new file mode 100644 (file)
index 0000000..e9e1e2a
--- /dev/null
@@ -0,0 +1,225 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "unsolicited_frame_control.h"
+#include "registers.h"
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+{
+       struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
+       struct sci_unsolicited_frame *uf;
+       u32 buf_len, header_len, i;
+       dma_addr_t dma;
+       size_t size;
+       void *virt;
+
+       /*
+        * Prepare all of the memory sizes for the UF headers, UF address
+        * table, and UF buffers themselves.
+        */
+       buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+       header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
+       size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
+
+       /*
+        * The Unsolicited Frame buffers are set at the start of the UF
+        * memory descriptor entry. The headers and address table will be
+        * placed after the buffers.
+        */
+       virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
+       if (!virt)
+               return -ENOMEM;
+
+       /*
+        * Program the location of the UF header table into the SCU.
+        * Notes:
+        * - The address must be aligned on a 64-byte boundary.  This is
+        *   guaranteed here because the preceding unsolicited frame buffers
+        *   are each 1KB in size, so the header table starts on a 1KB boundary.
+        * - Program unused header entries to overlap with the last
+        *   unsolicited frame.  The silicon will never DMA to these unused
+        *   headers, since we program the UF address table pointers to
+        *   NULL.
+        */
+       uf_control->headers.physical_address = dma + buf_len;
+       uf_control->headers.array = virt + buf_len;
+
+       /*
+        * Program the location of the UF address table into the SCU.
+        * Notes:
+        * - The address must be aligned on a 64-bit boundary.  This is
+        *   guaranteed here because the header table programmed above is
+        *   64-byte aligned and each header is 64 bytes in size, so the
+        *   address table that follows is also 64-bit aligned.
+        */
+       uf_control->address_table.physical_address = dma + buf_len + header_len;
+       uf_control->address_table.array = virt + buf_len + header_len;
+       uf_control->get = 0;
+
+       /*
+        * UF buffer requirements are:
+        * - The last entry in the UF queue is not NULL.
+        * - There is a power of 2 number of entries (NULL or not-NULL)
+        *   programmed into the queue.
+        * - Aligned on a 1KB boundary.
+        */
+
+       /*
+        * Program the actual used UF buffers into the UF address table and
+        * the controller's array of UFs.
+        */
+       for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
+               uf = &uf_control->buffers.array[i];
+
+               uf_control->address_table.array[i] = dma;
+
+               uf->buffer = virt;
+               uf->header = &uf_control->headers.array[i];
+               uf->state  = UNSOLICITED_FRAME_EMPTY;
+
+               /*
+                * Increment the address of the physical and virtual memory
+                * pointers.  Everything is aligned on a 1KB boundary and
+                * advances in 1KB increments.
+                */
+               virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+               dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+       }
+
+       return 0;
+}
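
For orientation, a sketch of how the single dmam_alloc_coherent() allocation above is carved up; offsets are relative to virt/dma and follow directly from the sizes computed in this function:

/*
 *   [0, buf_len)                        UF buffers, one 1KB buffer per frame
 *   [buf_len, buf_len + header_len)     scu_unsolicited_frame_header array
 *   [buf_len + header_len, size)        UF address table (one dma_addr_t
 *                                       entry per frame)
 *
 *   headers.physical_address       = dma + buf_len
 *   address_table.physical_address = dma + buf_len + header_len
 */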
+
+enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
+                                                        u32 frame_index,
+                                                        void **frame_header)
+{
+       if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+               /* Skip the first word in the frame since this is a control word used
+                * by the hardware.
+                */
+               *frame_header = &uf_control->buffers.array[frame_index].header->data;
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
+                                                        u32 frame_index,
+                                                        void **frame_buffer)
+{
+       if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+               *frame_buffer = uf_control->buffers.array[frame_index].buffer;
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
+                                                u32 frame_index)
+{
+       u32 frame_get;
+       u32 frame_cycle;
+
+       frame_get   = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
+       frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
+
+       /*
+        * In the event there are NULL entries in the UF table, we need to
+        * advance the get pointer in order to find out if this frame should
+        * be released (i.e. update the get pointer)
+        */
+       while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+              upper_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+              frame_get < SCU_MAX_UNSOLICITED_FRAMES)
+               frame_get++;
+
+       /*
+        * The table has a NULL entry as its last element.  This is
+        * illegal.
+        */
+       BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
+       if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
+               return false;
+
+       uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
+
+       if (frame_get != frame_index) {
+               /*
+                * Frames remain in use until we advance the get pointer
+                * so there is nothing we can do here
+                */
+               return false;
+       }
+
+       /*
+        * The frame index is equal to the current get pointer, so we
+        * can now free up all of the consecutive frame entries that
+        * have been released.
+        */
+       while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
+               uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
+
+               if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
+                       frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
+                       frame_get = 0;
+               } else
+                       frame_get++;
+       }
+
+       uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
+
+       return true;
+}
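
To illustrate the get/cycle encoding used above, assume for the sake of example that SCU_MAX_UNSOLICITED_FRAMES is 64 (the masks require it to be a power of two):

/*
 *   uf_control->get = 0x47:
 *       frame_get   = 0x47 & (64 - 1) = 0x07    index into the table
 *       frame_cycle = 0x47 & 64       = 0x40    cycle bit, toggled on wrap
 *
 *   When frame_get wraps back to the start of the table, frame_cycle is
 *   XORed with SCU_MAX_UNSOLICITED_FRAMES, and the value written back to
 *   uf_control->get also carries SCU_UFQGP_GEN_BIT(ENABLE_BIT).
 */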
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
new file mode 100644 (file)
index 0000000..31cb950
--- /dev/null
@@ -0,0 +1,278 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+
+#include "isci.h"
+
+#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
+
+/**
+ * struct scu_unsolicited_frame_header -
+ *
+ * This structure delineates the format of an unsolicited frame header. The
+ * first DWORD contains UF attributes defined by the silicon architecture.  The
+ * remaining data holds the actual header information received on the link.
+ */
+struct scu_unsolicited_frame_header {
+       /**
+        * This field indicates if there is an Initiator Index Table entry with
+        * which this header is associated.
+        */
+       u32 iit_exists:1;
+
+       /**
+        * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
+        */
+       u32 protocol_type:3;
+
+       /**
+        * This field indicates if the frame is an address frame (IAF or OAF)
+        * or if it is an information unit frame.
+        */
+       u32 is_address_frame:1;
+
+       /**
+        * This field simply indicates the connection rate at which the frame
+        * was received.
+        */
+       u32 connection_rate:4;
+
+       u32 reserved:23;
+
+       /**
+        * This field represents the actual header data received on the link.
+        */
+       u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
+
+};
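
Assuming the usual little-endian bitfield layout, the attribute DWORD declared above breaks down as follows (a reading of the structure, not a quote from the hardware specification):

/*
 *   bit  0       iit_exists
 *   bits 1-3     protocol_type (SSP, STP, SMP)
 *   bit  4       is_address_frame
 *   bits 5-8     connection_rate
 *   bits 9-31    reserved
 *
 * followed by data[15], the header dwords received on the link.
 */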
+
+
+
+/**
+ * enum unsolicited_frame_state -
+ *
+ * This enumeration represents the current unsolicited frame state.  The
+ * controller object cannot update the hardware unsolicited frame put pointer
+ * unless it has already processed the prior unsolicited frames.
+ */
+enum unsolicited_frame_state {
+       /**
+        * This state is when the frame is empty and not in use.  It is
+        * different from the released state in that the hardware could DMA
+        * data to this frame buffer.
+        */
+       UNSOLICITED_FRAME_EMPTY,
+
+       /**
+        * This state is set when the frame buffer is in use by some
+        * object in the system.
+        */
+       UNSOLICITED_FRAME_IN_USE,
+
+       /**
+        * This state is set when the frame is returned to the free pool
+        * but one or more frames prior to this one are still in use.
+        * Once all of the frames before this one are freed, it will go to
+        * the empty state.
+        */
+       UNSOLICITED_FRAME_RELEASED,
+
+       UNSOLICITED_FRAME_MAX_STATES
+};
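
A rough sketch of the per-frame lifecycle these states imply (the transition into IN_USE happens outside this file and is inferred from the field descriptions):

/*
 *   EMPTY    --(hardware DMAs a frame / a consumer claims it)-->   IN_USE
 *   IN_USE   --(released while earlier frames are still busy)-->   RELEASED
 *   RELEASED --(all earlier frames released, get pointer moves)--> EMPTY
 */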
+
+/**
+ * struct sci_unsolicited_frame -
+ *
+ * This is the unsolicited frame data structure; it acts as the container for
+ * the current frame state, frame header, and frame buffer.
+ */
+struct sci_unsolicited_frame {
+       /**
+        * This field contains the current frame state
+        */
+       enum unsolicited_frame_state state;
+
+       /**
+        * This field points to the frame header data.
+        */
+       struct scu_unsolicited_frame_header *header;
+
+       /**
+        * This field points to the frame buffer data.
+        */
+       void *buffer;
+
+};
+
+/**
+ * struct sci_uf_header_array -
+ *
+ * This structure contains all of the unsolicited frame header information.
+ */
+struct sci_uf_header_array {
+       /**
+        * This field represents a virtual pointer to the start
+        * address of the UF header array.  Each element is one
+        * hardware-format unsolicited frame header.
+        */
+       struct scu_unsolicited_frame_header *array;
+
+       /**
+        * This field specifies the physical address location for the UF
+        * header array.
+        */
+       dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_uf_buffer_array -
+ *
+ * This structure contains all of the unsolicited frame buffer (actual payload)
+ * information.
+ */
+struct sci_uf_buffer_array {
+       /**
+        * This field is the unsolicited frame data; it is used to manage
+        * the data for the unsolicited frame requests.  It also represents
+        * the virtual address location that corresponds to the
+        * physical_address field.
+        */
+       struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
+
+       /**
+        * This field specifies the physical address location for the UF
+        * buffer array.
+        */
+       dma_addr_t physical_address;
+};
+
+/**
+ * struct sci_uf_address_table_array -
+ *
+ * This object maintains all of the unsolicited frame address table specific
+ * data.  The address table is a collection of 64-bit pointers that point to
+ * 1KB buffers into which the silicon will DMA unsolicited frames.
+ */
+struct sci_uf_address_table_array {
+       /**
+        * This field represents a virtual pointer that refers to the
+        * starting address of the UF address table.
+        * 64-bit pointers are required by the hardware.
+        */
+       dma_addr_t *array;
+
+       /**
+        * This field specifies the physical address location for the UF
+        * address table.
+        */
+       dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_unsolicited_frame_control -
+ *
+ * This object contains all of the data necessary to handle unsolicited frames.
+ */
+struct sci_unsolicited_frame_control {
+       /**
+        * This field is the software copy of the unsolicited frame queue
+        * get pointer.  The controller object writes this value to the
+        * hardware to let it know that more unsolicited frame entries may
+        * be posted.
+        */
+       u32 get;
+
+       /**
+        * This field contains all of the unsolicited frame header
+        * specific fields.
+        */
+       struct sci_uf_header_array headers;
+
+       /**
+        * This field contains all of the unsolicited frame buffer
+        * specific fields.
+        */
+       struct sci_uf_buffer_array buffers;
+
+       /**
+        * This field contains all of the unsolicited frame address table
+        * specific fields.
+        */
+       struct sci_uf_address_table_array address_table;
+
+};
+
+struct isci_host;
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+
+enum sci_status sci_unsolicited_frame_control_get_header(
+       struct sci_unsolicited_frame_control *uf_control,
+       u32 frame_index,
+       void **frame_header);
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(
+       struct sci_unsolicited_frame_control *uf_control,
+       u32 frame_index,
+       void **frame_buffer);
+
+bool sci_unsolicited_frame_control_release_frame(
+       struct sci_unsolicited_frame_control *uf_control,
+       u32 frame_index);
+
+#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
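
A minimal sketch of how the accessors declared above might be used together; the consumer function name is hypothetical, error handling is reduced to early returns, and the write-back of uf_control->get to the hardware register is only indicated by a comment:

static void isci_consume_uf_sketch(struct isci_host *ihost, u32 frame_index)
{
        struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
        void *header, *buffer;

        if (sci_unsolicited_frame_control_get_header(uf_control, frame_index,
                                                     &header) != SCI_SUCCESS)
                return;
        if (sci_unsolicited_frame_control_get_buffer(uf_control, frame_index,
                                                     &buffer) != SCI_SUCCESS)
                return;

        /* ... interpret the frame header and payload here ... */

        if (sci_unsolicited_frame_control_release_frame(uf_control, frame_index)) {
                /* The get pointer advanced: write uf_control->get to the
                 * hardware unsolicited frame get-pointer register here.
                 */
        }
}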
index f4cf9b2..379cf16 100644 (file)
@@ -7,6 +7,7 @@ config BRCMSMAC
        default n
        depends on PCI
        depends on WLAN && MAC80211
+       depends on X86 || MIPS
        select BRCMUTIL
        select FW_LOADER
        select CRC_CCITT
@@ -20,6 +21,7 @@ config BRCMFMAC
        default n
        depends on MMC
        depends on WLAN && CFG80211
+       depends on X86 || MIPS
        select BRCMUTIL
        select FW_LOADER
        select WIRELESS_EXT
index 1502d80..20008a4 100644 (file)
@@ -2,6 +2,7 @@ config COMEDI
        tristate "Data acquisition support (comedi)"
        default N
        depends on m
+       depends on BROKEN || FRV || M32R || MN10300 || SUPERH || TILE || X86
        ---help---
          Enable support a wide range of data acquisition devices
          for Linux.
@@ -160,6 +161,7 @@ config COMEDI_PCL730
 
 config COMEDI_PCL812
        tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
@@ -171,6 +173,7 @@ config COMEDI_PCL812
 
 config COMEDI_PCL816
        tristate "Advantech PCL-814 and PCL-816 ISA card support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -180,6 +183,7 @@ config COMEDI_PCL816
 
 config COMEDI_PCL818
        tristate "Advantech PCL-718 and PCL-818 ISA card support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for Advantech PCL-818 ISA cards
@@ -269,6 +273,7 @@ config COMEDI_DAS800
 
 config COMEDI_DAS1800
        tristate "DAS1800 and compatible ISA card support"
+       depends on VIRT_TO_BUS
        select COMEDI_FC
        default N
        ---help---
@@ -340,6 +345,7 @@ config COMEDI_DT2817
 config COMEDI_DT282X
        tristate "Data Translation DT2821 series and DT-EZ ISA card support"
        select COMEDI_FC
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for Data Translation DT2821 series including DT-EZ
@@ -419,6 +425,7 @@ config COMEDI_ADQ12B
 config COMEDI_NI_AT_A2150
        tristate "NI AT-A2150 ISA card support"
        depends on COMEDI_NI_COMMON
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for National Instruments AT-A2150 cards
@@ -536,6 +543,7 @@ if COMEDI_PCI_DRIVERS && PCI
 
 config COMEDI_ADDI_APCI_035
        tristate "ADDI-DATA APCI_035 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_035 cards
@@ -545,6 +553,7 @@ config COMEDI_ADDI_APCI_035
 
 config COMEDI_ADDI_APCI_1032
        tristate "ADDI-DATA APCI_1032 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_1032 cards
@@ -554,6 +563,7 @@ config COMEDI_ADDI_APCI_1032
 
 config COMEDI_ADDI_APCI_1500
        tristate "ADDI-DATA APCI_1500 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_1500 cards
@@ -563,6 +573,7 @@ config COMEDI_ADDI_APCI_1500
 
 config COMEDI_ADDI_APCI_1516
        tristate "ADDI-DATA APCI_1516 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_1516 cards
@@ -572,6 +583,7 @@ config COMEDI_ADDI_APCI_1516
 
 config COMEDI_ADDI_APCI_1564
        tristate "ADDI-DATA APCI_1564 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_1564 cards
@@ -581,6 +593,7 @@ config COMEDI_ADDI_APCI_1564
 
 config COMEDI_ADDI_APCI_16XX
        tristate "ADDI-DATA APCI_16xx support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_16xx cards
@@ -590,6 +603,7 @@ config COMEDI_ADDI_APCI_16XX
 
 config COMEDI_ADDI_APCI_2016
        tristate "ADDI-DATA APCI_2016 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_2016 cards
@@ -599,6 +613,7 @@ config COMEDI_ADDI_APCI_2016
 
 config COMEDI_ADDI_APCI_2032
        tristate "ADDI-DATA APCI_2032 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_2032 cards
@@ -608,6 +623,7 @@ config COMEDI_ADDI_APCI_2032
 
 config COMEDI_ADDI_APCI_2200
        tristate "ADDI-DATA APCI_2200 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_2200 cards
@@ -617,6 +633,7 @@ config COMEDI_ADDI_APCI_2200
 
 config COMEDI_ADDI_APCI_3001
        tristate "ADDI-DATA APCI_3001 support"
+       depends on VIRT_TO_BUS
        select COMEDI_FC
        default N
        ---help---
@@ -627,6 +644,7 @@ config COMEDI_ADDI_APCI_3001
 
 config COMEDI_ADDI_APCI_3120
        tristate "ADDI-DATA APCI_3520 support"
+       depends on VIRT_TO_BUS
        select COMEDI_FC
        default N
        ---help---
@@ -637,6 +655,7 @@ config COMEDI_ADDI_APCI_3120
 
 config COMEDI_ADDI_APCI_3501
        tristate "ADDI-DATA APCI_3501 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_3501 cards
@@ -646,6 +665,7 @@ config COMEDI_ADDI_APCI_3501
 
 config COMEDI_ADDI_APCI_3XXX
        tristate "ADDI-DATA APCI_3xxx support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_3xxx cards
@@ -712,6 +732,7 @@ config COMEDI_ADL_PCI9111
 config COMEDI_ADL_PCI9118
        tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
        select COMEDI_FC
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@@ -1287,6 +1308,7 @@ config COMEDI_NI_LABPC
        depends on COMEDI_MITE
        select COMEDI_8255
        select COMEDI_FC
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for National Instruments Lab-PC and compatibles
index f96d5b5..d329635 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig IIO
        tristate "Industrial I/O support"
-       depends on !S390
+       depends on GENERIC_HARDIRQS
        help
          The industrial I/O subsystem provides a unified framework for
          drivers for many different types of embedded sensors using a
index 5310a42..1690c0d 100644 (file)
@@ -84,7 +84,6 @@ struct adis16204_state {
 
 int adis16204_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
 enum adis16204_scan {
        ADIS16204_SCAN_SUPPLY,
        ADIS16204_SCAN_ACC_X,
@@ -93,6 +92,7 @@ enum adis16204_scan {
        ADIS16204_SCAN_TEMP,
 };
 
+#ifdef CONFIG_IIO_RING_BUFFER
 void adis16204_remove_trigger(struct iio_dev *indio_dev);
 int adis16204_probe_trigger(struct iio_dev *indio_dev);
 
index 58d08db..3153cbe 100644 (file)
@@ -121,8 +121,6 @@ struct adis16209_state {
 
 int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
-
 #define ADIS16209_SCAN_SUPPLY  0
 #define ADIS16209_SCAN_ACC_X   1
 #define ADIS16209_SCAN_ACC_Y   2
@@ -132,6 +130,8 @@ int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
 #define ADIS16209_SCAN_INCLI_Y 6
 #define ADIS16209_SCAN_ROT     7
 
+#ifdef CONFIG_IIO_RING_BUFFER
+
 void adis16209_remove_trigger(struct iio_dev *indio_dev);
 int adis16209_probe_trigger(struct iio_dev *indio_dev);
 
index 702dc98..24bf70e 100644 (file)
@@ -104,7 +104,6 @@ struct adis16260_state {
 
 int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
 /* At the moment triggers are only used for ring buffer
  * filling. This may change!
  */
@@ -115,6 +114,7 @@ int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
 #define ADIS16260_SCAN_TEMP    3
 #define ADIS16260_SCAN_ANGL    4
 
+#ifdef CONFIG_IIO_RING_BUFFER
 void adis16260_remove_trigger(struct iio_dev *indio_dev);
 int adis16260_probe_trigger(struct iio_dev *indio_dev);
 
index db184d1..e87715b 100644 (file)
@@ -158,7 +158,6 @@ struct adis16400_state {
 
 int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
 /* At the moment triggers are only used for ring buffer
  * filling. This may change!
  */
@@ -182,6 +181,7 @@ int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
 #define ADIS16300_SCAN_INCLI_X 12
 #define ADIS16300_SCAN_INCLI_Y 13
 
+#ifdef CONFIG_IIO_RING_BUFFER
 void adis16400_remove_trigger(struct iio_dev *indio_dev);
 int adis16400_probe_trigger(struct iio_dev *indio_dev);
 
index d1ffa32..685fcf6 100644 (file)
@@ -189,7 +189,7 @@ int mei_hw_init(struct mei_device *dev)
                mutex_lock(&dev->device_lock);
        }
 
-       if (!err && !dev->recvd_msg) {
+       if (err <= 0 && !dev->recvd_msg) {
                dev->mei_state = MEI_DISABLED;
                dev_dbg(&dev->pdev->dev,
                        "wait_event_interruptible_timeout failed"
index 2564b03..fff53d0 100644 (file)
@@ -169,10 +169,15 @@ int mei_wd_stop(struct mei_device *dev, bool preserve)
        ret = wait_event_interruptible_timeout(dev->wait_stop_wd,
                                        dev->wd_stopped, 10 * HZ);
        mutex_lock(&dev->device_lock);
-       if (!dev->wd_stopped)
-               dev_dbg(&dev->pdev->dev, "stop wd failed to complete.\n");
-       else
-               dev_dbg(&dev->pdev->dev, "stop wd complete.\n");
+       if (dev->wd_stopped) {
+               dev_dbg(&dev->pdev->dev, "stop wd complete ret=%d.\n", ret);
+               ret = 0;
+       } else {
+               if (!ret)
+                       ret = -ETIMEDOUT;
+               dev_warn(&dev->pdev->dev,
+                       "stop wd failed to complete ret=%d.\n", ret);
+       }
 
        if (preserve)
                dev->wd_timeout = wd_timeout;
index dee2a2c..70c2e7f 100644 (file)
@@ -386,7 +386,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
         */
        se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
                                TMR_LUN_RESET);
-       if (!se_cmd->se_tmr_req)
+       if (IS_ERR(se_cmd->se_tmr_req))
                goto release;
        /*
         * Locate the underlying TCM struct se_lun from sc->device->lun
@@ -1017,6 +1017,7 @@ static int tcm_loop_make_nexus(
        struct se_portal_group *se_tpg;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
        struct tcm_loop_nexus *tl_nexus;
+       int ret = -ENOMEM;
 
        if (tl_tpg->tl_hba->tl_nexus) {
                printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
@@ -1033,8 +1034,10 @@ static int tcm_loop_make_nexus(
         * Initialize the struct se_session pointer
         */
        tl_nexus->se_sess = transport_init_session();
-       if (!tl_nexus->se_sess)
+       if (IS_ERR(tl_nexus->se_sess)) {
+               ret = PTR_ERR(tl_nexus->se_sess);
                goto out;
+       }
        /*
         * Since we are running in 'demo mode' this call with generate a
         * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
@@ -1060,7 +1063,7 @@ static int tcm_loop_make_nexus(
 
 out:
        kfree(tl_nexus);
-       return -ENOMEM;
+       return ret;
 }
 
 static int tcm_loop_drop_nexus(
@@ -1140,7 +1143,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
         * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
         * tcm_loop_make_nexus()
         */
-       if (strlen(page) > TL_WWN_ADDR_LEN) {
+       if (strlen(page) >= TL_WWN_ADDR_LEN) {
                printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
                                " max: %d\n", page, TL_WWN_ADDR_LEN);
                return -EINVAL;
@@ -1321,7 +1324,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
        return ERR_PTR(-EINVAL);
 
 check_len:
-       if (strlen(name) > TL_WWN_ADDR_LEN) {
+       if (strlen(name) >= TL_WWN_ADDR_LEN) {
                printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
                        " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
                        TL_WWN_ADDR_LEN);
index ee6fad9..25c1f49 100644 (file)
@@ -304,7 +304,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
                printk(KERN_ERR "Unable to locate passed fabric name\n");
                return NULL;
        }
-       if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
+       if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
                printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
                        "_NAME_SIZE\n", name);
                return NULL;
@@ -312,7 +312,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
 
        tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
        if (!(tf))
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        INIT_LIST_HEAD(&tf->tf_list);
        atomic_set(&tf->tf_access_cnt, 0);
@@ -851,7 +851,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
                return -EOPNOTSUPP;
        }
 
-       if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
+       if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
                printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
                " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
                return -EOVERFLOW;
@@ -917,7 +917,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
 
                transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
 
-               if ((len + strlen(buf) > PAGE_SIZE))
+               if ((len + strlen(buf) >= PAGE_SIZE))
                        break;
 
                len += sprintf(page+len, "%s", buf);
@@ -962,19 +962,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name(                     \
                                                                        \
                memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
                transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);   \
-               if ((len + strlen(buf) > PAGE_SIZE))                    \
+               if ((len + strlen(buf) >= PAGE_SIZE))                   \
                        break;                                          \
                len += sprintf(page+len, "%s", buf);                    \
                                                                        \
                memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
                transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
-               if ((len + strlen(buf) > PAGE_SIZE))                    \
+               if ((len + strlen(buf) >= PAGE_SIZE))                   \
                        break;                                          \
                len += sprintf(page+len, "%s", buf);                    \
                                                                        \
                memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
                transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
-               if ((len + strlen(buf) > PAGE_SIZE))                    \
+               if ((len + strlen(buf) >= PAGE_SIZE))                   \
                        break;                                          \
                len += sprintf(page+len, "%s", buf);                    \
        }                                                               \
@@ -1299,7 +1299,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
                        &i_buf[0] : "", pr_reg->pr_res_key,
                        pr_reg->pr_res_generation);
 
-               if ((len + strlen(buf) > PAGE_SIZE))
+               if ((len + strlen(buf) >= PAGE_SIZE))
                        break;
 
                len += sprintf(page+len, "%s", buf);
@@ -1496,7 +1496,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                                ret = -ENOMEM;
                                goto out;
                        }
-                       if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
+                       if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
                                printk(KERN_ERR "APTPL metadata initiator_node="
                                        " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
                                        PR_APTPL_MAX_IPORT_LEN);
@@ -1510,7 +1510,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                                ret = -ENOMEM;
                                goto out;
                        }
-                       if (strlen(isid) > PR_REG_ISID_LEN) {
+                       if (strlen(isid) >= PR_REG_ISID_LEN) {
                                printk(KERN_ERR "APTPL metadata initiator_isid"
                                        "= exceeds PR_REG_ISID_LEN: %d\n",
                                        PR_REG_ISID_LEN);
@@ -1571,7 +1571,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                                ret = -ENOMEM;
                                goto out;
                        }
-                       if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
+                       if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
                                printk(KERN_ERR "APTPL metadata target_node="
                                        " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
                                        PR_APTPL_MAX_TPORT_LEN);
@@ -3052,7 +3052,7 @@ static struct config_group *target_core_call_addhbatotarget(
        int ret;
 
        memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
-       if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
+       if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
                printk(KERN_ERR "Passed *name strlen(): %d exceeds"
                        " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
                        TARGET_CORE_NAME_MAX_LEN);
index 8407f9c..ba698ea 100644 (file)
@@ -192,7 +192,7 @@ int transport_get_lun_for_tmr(
                        &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
-               dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+               dev = se_lun->lun_se_dev;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
@@ -216,6 +216,7 @@ int transport_get_lun_for_tmr(
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }
+       se_tmr->tmr_dev = dev;
 
        spin_lock(&dev->se_tmr_lock);
        list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
@@ -1430,7 +1431,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_lun_acl *lacl;
        struct se_node_acl *nacl;
 
-       if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
+       if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
                printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
                        TPG_TFO(tpg)->get_fabric_name());
                *ret = -EOVERFLOW;
index a79f518..b662db3 100644 (file)
@@ -1916,7 +1916,7 @@ static int __core_scsi3_update_aptpl_buf(
                                pr_reg->pr_res_mapped_lun);
                }
 
-               if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+               if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
                        printk(KERN_ERR "Unable to update renaming"
                                " APTPL metadata\n");
                        spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1934,7 +1934,7 @@ static int __core_scsi3_update_aptpl_buf(
                        TPG_TFO(tpg)->tpg_get_tag(tpg),
                        lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 
-               if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+               if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
                        printk(KERN_ERR "Unable to update renaming"
                                " APTPL metadata\n");
                        spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1986,7 +1986,7 @@ static int __core_scsi3_write_aptpl_to_file(
        memset(iov, 0, sizeof(struct iovec));
        memset(path, 0, 512);
 
-       if (strlen(&wwn->unit_serial[0]) > 512) {
+       if (strlen(&wwn->unit_serial[0]) >= 512) {
                printk(KERN_ERR "WWN value for struct se_device does not fit"
                        " into path buffer\n");
                return -1;
index 59b8b9c..179063d 100644 (file)
@@ -75,10 +75,16 @@ void core_tmr_release_req(
 {
        struct se_device *dev = tmr->tmr_dev;
 
+       if (!dev) {
+               kmem_cache_free(se_tmr_req_cache, tmr);
+               return;
+       }
+
        spin_lock(&dev->se_tmr_lock);
        list_del(&tmr->tmr_list);
-       kmem_cache_free(se_tmr_req_cache, tmr);
        spin_unlock(&dev->se_tmr_lock);
+
+       kmem_cache_free(se_tmr_req_cache, tmr);
 }
 
 static void core_tmr_handle_tas_abort(
index 4dafeb8..4b9b716 100644 (file)
@@ -536,13 +536,13 @@ EXPORT_SYMBOL(transport_register_session);
 void transport_deregister_session_configfs(struct se_session *se_sess)
 {
        struct se_node_acl *se_nacl;
-
+       unsigned long flags;
        /*
         * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
         */
        se_nacl = se_sess->se_node_acl;
        if ((se_nacl)) {
-               spin_lock_irq(&se_nacl->nacl_sess_lock);
+               spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
                list_del(&se_sess->sess_acl_list);
                /*
                 * If the session list is empty, then clear the pointer.
@@ -556,7 +556,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
                                        se_nacl->acl_sess_list.prev,
                                        struct se_session, sess_acl_list);
                }
-               spin_unlock_irq(&se_nacl->nacl_sess_lock);
+               spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
        }
 }
 EXPORT_SYMBOL(transport_deregister_session_configfs);
index defff32..7b82f1b 100644 (file)
@@ -144,7 +144,7 @@ enum ft_cmd_state {
  */
 struct ft_cmd {
        enum ft_cmd_state state;
-       u16 lun;                        /* LUN from request */
+       u32 lun;                        /* LUN from request */
        struct ft_sess *sess;           /* session held for cmd */
        struct fc_seq *seq;             /* sequence in exchange mgr */
        struct se_cmd se_cmd;           /* Local TCM I/O descriptor */
index c056a11..b2a1067 100644 (file)
@@ -94,29 +94,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
                16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
 }
 
-/*
- * Get LUN from CDB.
- */
-static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
-{
-       u64 lun;
-
-       lun = lunp[1];
-       switch (lunp[0] >> 6) {
-       case 0:
-               break;
-       case 1:
-               lun |= (lunp[0] & 0x3f) << 8;
-               break;
-       default:
-               return -1;
-       }
-       if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
-               return -1;
-       cmd->lun = lun;
-       return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
-}
-
 static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
 {
        struct se_queue_obj *qobj;
@@ -418,6 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
 {
        struct se_tmr_req *tmr;
        struct fcp_cmnd *fcp;
+       struct ft_sess *sess;
        u8 tm_func;
 
        fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
@@ -425,13 +403,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
        switch (fcp->fc_tm_flags) {
        case FCP_TMF_LUN_RESET:
                tm_func = TMR_LUN_RESET;
-               if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
-                       ft_dump_cmd(cmd, __func__);
-                       transport_send_check_condition_and_sense(&cmd->se_cmd,
-                               cmd->se_cmd.scsi_sense_reason, 0);
-                       ft_sess_put(cmd->sess);
-                       return;
-               }
                break;
        case FCP_TMF_TGT_RESET:
                tm_func = TMR_TARGET_WARM_RESET;
@@ -463,6 +434,36 @@ static void ft_send_tm(struct ft_cmd *cmd)
                return;
        }
        cmd->se_cmd.se_tmr_req = tmr;
+
+       switch (fcp->fc_tm_flags) {
+       case FCP_TMF_LUN_RESET:
+               cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+               if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
+                       /*
+                        * Make sure to clean up the newly allocated TMR
+                        * request, since we were unable to handle the TMR
+                        * request because we failed to get the LUN.
+                        */
+                       FT_TM_DBG("Failed to get LUN for TMR func %d, "
+                                 "se_cmd %p, unpacked_lun %d\n",
+                                 tm_func, &cmd->se_cmd, cmd->lun);
+                       ft_dump_cmd(cmd, __func__);
+                       sess = cmd->sess;
+                       transport_send_check_condition_and_sense(&cmd->se_cmd,
+                               cmd->se_cmd.scsi_sense_reason, 0);
+                       transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+                       ft_sess_put(sess);
+                       return;
+               }
+               break;
+       case FCP_TMF_TGT_RESET:
+       case FCP_TMF_CLR_TASK_SET:
+       case FCP_TMF_ABT_TASK_SET:
+       case FCP_TMF_CLR_ACA:
+               break;
+       default:
+               return;
+       }
        transport_generic_handle_tmr(&cmd->se_cmd);
 }
 
@@ -635,7 +636,8 @@ static void ft_send_cmd(struct ft_cmd *cmd)
 
        fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
 
-       ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
+       cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+       ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
        if (ret < 0) {
                ft_dump_cmd(cmd, __func__);
                transport_send_check_condition_and_sense(&cmd->se_cmd,
index 4c3c0ef..8c4a240 100644 (file)
@@ -203,7 +203,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
                        /* XXX For now, initiator will retry */
                        if (printk_ratelimit())
                                printk(KERN_ERR "%s: Failed to send frame %p, "
-                                               "xid <0x%x>, remaining <0x%x>, "
+                                               "xid <0x%x>, remaining %zu, "
                                                "lso_max <0x%x>\n",
                                                __func__, fp, ep->xid,
                                                remaining, lport->lso_max);
index a3bd57f..7491e21 100644 (file)
@@ -229,7 +229,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
                return NULL;
 
        sess->se_sess = transport_init_session();
-       if (!sess->se_sess) {
+       if (IS_ERR(sess->se_sess)) {
                kfree(sess);
                return NULL;
        }
@@ -332,7 +332,7 @@ void ft_sess_close(struct se_session *se_sess)
        lport = sess->tport->lport;
        port_id = sess->port_id;
        if (port_id == -1) {
-               mutex_lock(&ft_lport_lock);
+               mutex_unlock(&ft_lport_lock);
                return;
        }
        FT_SESS_DBG("port_id %x\n", port_id);
index 09e8c7d..19b4ae0 100644 (file)
@@ -875,7 +875,8 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
                *dp++ = last << 7 | first << 6 | 1;     /* EA */
                len--;
        }
-       memcpy(dp, skb_pull(dlci->skb, len), len);
+       memcpy(dp, dlci->skb->data, len);
+       skb_pull(dlci->skb, len);
        __gsm_data_queue(dlci, msg);
        if (last)
                dlci->skb = NULL;
@@ -984,10 +985,22 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
  */
 
 static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
-                                                       u32 modem)
+                                                       u32 modem, int clen)
 {
        int  mlines = 0;
-       u8 brk = modem >> 6;
+       u8 brk = 0;
+
+       /* The modem status command can either contain one octet (v.24 signals)
+          or two octets (v.24 signals + break signals). The length field will
+          either be 2 or 3 respectively. This is specified in section
+          5.4.6.3.7 of the 27.010 mux spec. */
+
+       if (clen == 2)
+               modem = modem & 0x7f;
+       else {
+               brk = modem & 0x7f;
+               modem = (modem >> 7) & 0x7f;
+       }
 
        /* Flow control/ready to communicate */
        if (modem & MDM_FC) {
@@ -1061,7 +1074,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
                        return;
        }
        tty = tty_port_tty_get(&dlci->port);
-       gsm_process_modem(tty, dlci, modem);
+       gsm_process_modem(tty, dlci, modem, clen);
        if (tty) {
                tty_wakeup(tty);
                tty_kref_put(tty);
@@ -1482,12 +1495,13 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
  *     open we shovel the bits down it, if not we drop them.
  */
 
-static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
+static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
 {
        /* krefs .. */
        struct tty_port *port = &dlci->port;
        struct tty_struct *tty = tty_port_tty_get(port);
        unsigned int modem = 0;
+       int len = clen;
 
        if (debug & 16)
                pr_debug("%d bytes for tty %p\n", len, tty);
@@ -1507,7 +1521,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
                                if (len == 0)
                                        return;
                        }
-                       gsm_process_modem(tty, dlci, modem);
+                       gsm_process_modem(tty, dlci, modem, clen);
                /* Line state will go via DLCI 0 controls only */
                case 1:
                default:
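
A worked example of the length-dependent decode added in gsm_process_modem() above (the value is illustrative):

/*
 *   clen == 3, modem = 0xB2:  brk   = 0xB2 & 0x7f        = 0x32
 *                             modem = (0xB2 >> 7) & 0x7f = 0x01
 *   clen == 2, modem = 0xB2:  modem = 0xB2 & 0x7f        = 0x32, no break
 */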
index 0ad3288..c3954fb 100644 (file)
@@ -1815,6 +1815,7 @@ do_it_again:
                        /* FIXME: does n_tty_set_room need locking ? */
                        n_tty_set_room(tty);
                        timeout = schedule_timeout(timeout);
+                       BUG_ON(!tty->read_buf);
                        continue;
                }
                __set_current_state(TASK_RUNNING);
index b40f7b9..b4129f5 100644 (file)
@@ -3318,6 +3318,7 @@ void serial8250_unregister_port(int line)
                uart->port.flags &= ~UPF_BOOT_AUTOCONF;
                uart->port.type = PORT_UNKNOWN;
                uart->port.dev = &serial8250_isa_devs->dev;
+               uart->capabilities = uart_config[uart->port.type].flags;
                uart_add_one_port(&serial8250_reg, &uart->port);
        } else {
                uart->port.dev = NULL;
index 4b4968a..f41b425 100644 (file)
@@ -973,7 +973,7 @@ ce4100_serial_setup(struct serial_private *priv,
 
 static int
 pci_omegapci_setup(struct serial_private *priv,
-                     struct pciserial_board *board,
+                     const struct pciserial_board *board,
                      struct uart_port *port, int idx)
 {
        return setup_port(priv, port, 2, idx * 8, 0);
@@ -994,6 +994,15 @@ static int skip_tx_en_setup(struct serial_private *priv,
        return pci_default_setup(priv, board, port, idx);
 }
 
+static int pci_eg20t_init(struct pci_dev *dev)
+{
+#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
+       return -ENODEV;
+#else
+       return 0;
+#endif
+}
+
 /* This should be in linux/pci_ids.h */
 #define PCI_VENDOR_ID_SBSMODULARIO     0x124B
 #define PCI_SUBVENDOR_ID_SBSMODULARIO  0x124B
@@ -1446,6 +1455,56 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .init                   = pci_oxsemi_tornado_init,
                .setup          = pci_default_setup,
        },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = 0x8811,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = 0x8812,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = 0x8813,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = 0x8814,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x8027,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x8028,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x8029,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x800C,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x800D,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x800D,
+               .init           = pci_eg20t_init,
+       },
        /*
         * Cronyx Omega PCI (PLX-chip based)
         */
index 8dc0541..f5f6831 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
+#include <linux/delay.h>
 
 #include <asm/io.h>
 #include <asm/sizes.h>
 #define UART_DR_ERROR          (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
 #define UART_DUMMY_DR_RX       (1 << 16)
 
+
+#define UART_WA_SAVE_NR 14
+
+static void pl011_lockup_wa(unsigned long data);
+static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
+       ST_UART011_DMAWM,
+       ST_UART011_TIMEOUT,
+       ST_UART011_LCRH_RX,
+       UART011_IBRD,
+       UART011_FBRD,
+       ST_UART011_LCRH_TX,
+       UART011_IFLS,
+       ST_UART011_XFCR,
+       ST_UART011_XON1,
+       ST_UART011_XON2,
+       ST_UART011_XOFF1,
+       ST_UART011_XOFF2,
+       UART011_CR,
+       UART011_IMSC
+};
+
+static u32 uart_wa_regdata[UART_WA_SAVE_NR];
+static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
+
 /* There is by now at least one vendor with differing details, so handle it */
 struct vendor_data {
        unsigned int            ifls;
@@ -72,6 +97,7 @@ struct vendor_data {
        unsigned int            lcrh_tx;
        unsigned int            lcrh_rx;
        bool                    oversampling;
+       bool                    interrupt_may_hang;   /* vendor-specific */
        bool                    dma_threshold;
 };
 
@@ -90,9 +116,12 @@ static struct vendor_data vendor_st = {
        .lcrh_tx                = ST_UART011_LCRH_TX,
        .lcrh_rx                = ST_UART011_LCRH_RX,
        .oversampling           = true,
+       .interrupt_may_hang     = true,
        .dma_threshold          = true,
 };
 
+static struct uart_amba_port *amba_ports[UART_NR];
+
 /* Deals with DMA transactions */
 
 struct pl011_sgbuf {
@@ -132,6 +161,7 @@ struct uart_amba_port {
        unsigned int            lcrh_rx;        /* vendor-specific */
        bool                    autorts;
        char                    type[12];
+       bool                    interrupt_may_hang; /* vendor-specific */
 #ifdef CONFIG_DMA_ENGINE
        /* DMA stuff */
        bool                    using_tx_dma;
@@ -1008,6 +1038,68 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
 #endif
 
 
+/*
+ * pl011_lockup_wa
+ * This workaround aims to break the deadlock that can occur after a
+ * long transfer over the uart with hardware flow control, when the
+ * uart interrupt registers cannot be cleared and the transfer stalls.
+ *
+ * In such a deadlock the ICR does not clear even after multiple
+ * writes, so pass_counter keeps decreasing until it finally reaches
+ * zero.  That is taken as the trigger point to run this workaround
+ * (UART_BT_WA).
+ *
+ */
+static void pl011_lockup_wa(unsigned long data)
+{
+       struct uart_amba_port *uap = amba_ports[0];
+       void __iomem *base = uap->port.membase;
+       struct circ_buf *xmit = &uap->port.state->xmit;
+       struct tty_struct *tty = uap->port.state->port.tty;
+       int buf_empty_retries = 200;
+       int loop;
+
+       /* Stop HCI layer from submitting data for tx */
+       tty->hw_stopped = 1;
+       while (!uart_circ_empty(xmit)) {
+               if (buf_empty_retries-- == 0)
+                       break;
+               udelay(100);
+       }
+
+       /* Backup registers */
+       for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+               uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);
+
+       /* Disable UART so that FIFO data is flushed out */
+       writew(0x00, uap->port.membase + UART011_CR);
+
+       /* Soft reset UART module */
+       if (uap->port.dev->platform_data) {
+               struct amba_pl011_data *plat;
+
+               plat = uap->port.dev->platform_data;
+               if (plat->reset)
+                       plat->reset();
+       }
+
+       /* Restore registers */
+       for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+               writew(uart_wa_regdata[loop],
+                               uap->port.membase + uart_wa_reg[loop]);
+
+       /* Initialise the old status of the modem signals */
+       uap->old_status = readw(uap->port.membase + UART01x_FR) &
+               UART01x_FR_MODEM_ANY;
+
+       if (readl(base + UART011_MIS) & 0x2)
+               printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");
+
+       /* Start Tx/Rx */
+       tty->hw_stopped = 0;
+}
+
 static void pl011_stop_tx(struct uart_port *port)
 {
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1158,8 +1250,11 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
                        if (status & UART011_TXIS)
                                pl011_tx_chars(uap);
 
-                       if (pass_counter-- == 0)
+                       if (pass_counter-- == 0) {
+                               if (uap->interrupt_may_hang)
+                                       tasklet_schedule(&pl011_lockup_tlet);
                                break;
+                       }
 
                        status = readw(uap->port.membase + UART011_MIS);
                } while (status != 0);
@@ -1339,6 +1434,14 @@ static int pl011_startup(struct uart_port *port)
        writew(uap->im, uap->port.membase + UART011_IMSC);
        spin_unlock_irq(&uap->port.lock);
 
+       if (uap->port.dev->platform_data) {
+               struct amba_pl011_data *plat;
+
+               plat = uap->port.dev->platform_data;
+               if (plat->init)
+                       plat->init();
+       }
+
        return 0;
 
  clk_dis:
@@ -1394,6 +1497,15 @@ static void pl011_shutdown(struct uart_port *port)
         * Shut down the clock producer
         */
        clk_disable(uap->clk);
+
+       if (uap->port.dev->platform_data) {
+               struct amba_pl011_data *plat;
+
+               plat = uap->port.dev->platform_data;
+               if (plat->exit)
+                       plat->exit();
+       }
+
 }
 
 static void
@@ -1700,6 +1812,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
        if (!uap)
                return -ENODEV;
 
+       if (uap->port.dev->platform_data) {
+               struct amba_pl011_data *plat;
+
+               plat = uap->port.dev->platform_data;
+               if (plat->init)
+                       plat->init();
+       }
+
        uap->port.uartclk = clk_get_rate(uap->clk);
 
        if (options)
@@ -1774,6 +1894,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
        uap->lcrh_rx = vendor->lcrh_rx;
        uap->lcrh_tx = vendor->lcrh_tx;
        uap->fifosize = vendor->fifosize;
+       uap->interrupt_may_hang = vendor->interrupt_may_hang;
        uap->port.dev = &dev->dev;
        uap->port.mapbase = dev->res.start;
        uap->port.membase = base;
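The init/exit/reset hooks dereferenced in the pl011 hunks above come from the AMBA platform data. A minimal sketch of the board side, assuming only the three callbacks this diff uses (the my_uart_* names and their bodies are illustrative, not taken from this merge):

	#include <linux/amba/serial.h>

	/* Hypothetical board callbacks: init/exit run from process context in
	 * pl011_startup()/pl011_shutdown(), reset runs from the lockup-workaround
	 * tasklet and therefore must not sleep. */
	static void my_uart_init(void)
	{
		/* e.g. enable a board regulator or pin mux for the UART */
	}

	static void my_uart_exit(void)
	{
		/* undo whatever my_uart_init() enabled */
	}

	static void my_uart_reset(void)
	{
		/* pulse the SoC's UART soft-reset line */
	}

	static struct amba_pl011_data my_uart_plat = {
		.init	= my_uart_init,
		.exit	= my_uart_exit,
		.reset	= my_uart_reset,
	};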
index 6d5d6e6..af9b781 100644 (file)
@@ -1709,12 +1709,13 @@ static int atmel_serial_resume(struct platform_device *pdev)
 static int __devinit atmel_serial_probe(struct platform_device *pdev)
 {
        struct atmel_uart_port *port;
+       struct atmel_uart_data *pdata = pdev->dev.platform_data;
        void *data;
        int ret;
 
        BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
 
-       port = &atmel_ports[pdev->id];
+       port = &atmel_ports[pdata->num];
        port->backup_imr = 0;
 
        atmel_init_port(port, pdev);
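Since atmel_ports[] is now indexed by pdata->num rather than pdev->id, the board file has to supply that field in its struct atmel_uart_data. A hedged sketch (the DMA fields follow the AT91 board header from memory and should be treated as assumptions):

	#include <mach/board.h>		/* struct atmel_uart_data on AT91 */

	/* Hypothetical platform data for the board's second UART: .num picks
	 * the atmel_ports[] slot that pdev->id used to select. */
	static struct atmel_uart_data board_uart1_data = {
		.num		= 1,
		.use_dma_tx	= 1,
		.use_dma_rx	= 1,
	};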
index a1a0e55..c0b68b9 100644 (file)
@@ -250,6 +250,20 @@ static void bcm_uart_do_rx(struct uart_port *port)
                /* get overrun/fifo empty information from ier
                 * register */
                iestat = bcm_uart_readl(port, UART_IR_REG);
+
+               if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
+                       unsigned int val;
+
+                       /* fifo reset is required to clear
+                        * interrupt */
+                       val = bcm_uart_readl(port, UART_CTL_REG);
+                       val |= UART_CTL_RSTRXFIFO_MASK;
+                       bcm_uart_writel(port, val, UART_CTL_REG);
+
+                       port->icount.overrun++;
+                       tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+               }
+
                if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
                        break;
 
@@ -284,10 +298,6 @@ static void bcm_uart_do_rx(struct uart_port *port)
                if (uart_handle_sysrq_char(port, c))
                        continue;
 
-               if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
-                       port->icount.overrun++;
-                       tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-               }
 
                if ((cstat & port->ignore_status_mask) == 0)
                        tty_insert_flip_char(tty, c, flag);
index 18f5484..96da178 100644 (file)
@@ -125,7 +125,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
        brd->bd_uart_offset = 0x200;
        brd->bd_dividend = 921600;
 
-       brd->re_map_membase = ioremap(brd->membase, 0x1000);
+       brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0));
        if (!brd->re_map_membase) {
                dev_err(&pdev->dev,
                        "card has no PCI Memory resources, "
index 1bd2845..a764bf9 100644 (file)
@@ -421,7 +421,6 @@ static int max3110_main_thread(void *_max)
        int ret = 0;
        struct circ_buf *xmit = &max->con_xmit;
 
-       init_waitqueue_head(wq);
        pr_info(PR_FMT "start main thread\n");
 
        do {
@@ -823,7 +822,7 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
        res = RC_TAG;
        ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
        if (ret < 0 || res == 0 || res == 0xffff) {
-               printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)",
+               dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
                                                                        res);
                ret = -ENODEV;
                goto err_get_page;
@@ -838,6 +837,8 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
        max->con_xmit.head = 0;
        max->con_xmit.tail = 0;
 
+       init_waitqueue_head(&max->wq);
+
        max->main_thread = kthread_run(max3110_main_thread,
                                        max, "max3110_main");
        if (IS_ERR(max->main_thread)) {
index fb2619f..dd194dc 100644 (file)
@@ -30,7 +30,7 @@ static int s5pv210_serial_setsource(struct uart_port *port,
        struct s3c2410_uartcfg *cfg = port->dev->platform_data;
        unsigned long ucon = rd_regl(port, S3C2410_UCON);
 
-       if ((cfg->clocks_size) == 1)
+       if (cfg->flags & NO_NEED_CHECK_CLKSRC)
                return 0;
 
        if (strcmp(clk->name, "pclk") == 0)
@@ -55,7 +55,7 @@ static int s5pv210_serial_getsource(struct uart_port *port,
 
        clk->divisor = 1;
 
-       if ((cfg->clocks_size) == 1)
+       if (cfg->flags & NO_NEED_CHECK_CLKSRC)
                return 0;
 
        switch (ucon & S5PV210_UCON_CLKMASK) {
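Both s5pv210 hunks above now test a NO_NEED_CHECK_CLKSRC flag in cfg->flags instead of inferring a fixed clock source from clocks_size. A sketch of the board side under that assumption (the ucon/ulcon/ufcon values are conventional placeholders, not taken from this merge):

	#include <plat/regs-serial.h>	/* struct s3c2410_uartcfg, NO_NEED_CHECK_CLKSRC */

	static struct s3c2410_uartcfg board_uartcfgs[] __initdata = {
		[0] = {
			.hwport	= 0,
			.flags	= NO_NEED_CHECK_CLKSRC,	/* single, fixed clock source */
			.ucon	= 0x3c5,		/* placeholder control value */
			.ulcon	= 0x03,			/* 8N1 */
			.ufcon	= 0x51,			/* FIFOs enabled */
		},
	};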
index 5d01d32..ef925d5 100644 (file)
@@ -555,7 +555,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
 static int tty_ldisc_wait_idle(struct tty_struct *tty)
 {
        int ret;
-       ret = wait_event_interruptible_timeout(tty_ldisc_idle,
+       ret = wait_event_timeout(tty_ldisc_idle,
                        atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
        if (ret < 0)
                return ret;
@@ -763,6 +763,8 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
        if (IS_ERR(ld))
                return -1;
 
+       WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
+
        tty_ldisc_close(tty, tty->ldisc);
        tty_ldisc_put(tty->ldisc);
        tty->ldisc = NULL;
index e35a176..34e3da5 100644 (file)
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
                 * Just re-enable it without affecting the endpoint toggles.
                 */
                usb_enable_interface(udev, intf, false);
-       } else if (!error && !intf->dev.power.in_suspend) {
+       } else if (!error && !intf->dev.power.is_prepared) {
                r = usb_set_interface(udev, intf->altsetting[0].
                                desc.bInterfaceNumber, 0);
                if (r < 0)
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
        }
 
        /* Try to rebind the interface */
-       if (!intf->dev.power.in_suspend) {
+       if (!intf->dev.power.is_prepared) {
                intf->needs_binding = 0;
                rc = device_attach(&intf->dev);
                if (rc < 0)
@@ -1107,7 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
        if (intf->condition == USB_INTERFACE_UNBOUND) {
 
                /* Carry out a deferred switch to altsetting 0 */
-               if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) {
+               if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) {
                        usb_set_interface(udev, intf->altsetting[0].
                                        desc.bInterfaceNumber, 0);
                        intf->needs_altsetting0 = 0;
@@ -1187,13 +1187,22 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
                for (i = n - 1; i >= 0; --i) {
                        intf = udev->actconfig->interface[i];
                        status = usb_suspend_interface(udev, intf, msg);
+
+                       /* Ignore errors during system sleep transitions */
+                       if (!(msg.event & PM_EVENT_AUTO))
+                               status = 0;
                        if (status != 0)
                                break;
                }
        }
-       if (status == 0)
+       if (status == 0) {
                status = usb_suspend_device(udev, msg);
 
+               /* Again, ignore errors during system sleep transitions */
+               if (!(msg.event & PM_EVENT_AUTO))
+                       status = 0;
+       }
+
        /* If the suspend failed, resume interfaces that did get suspended */
        if (status != 0) {
                msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
index 90ae175..a428aa0 100644 (file)
@@ -1634,6 +1634,7 @@ void usb_disconnect(struct usb_device **pdev)
 {
        struct usb_device       *udev = *pdev;
        int                     i;
+       struct usb_hcd          *hcd = bus_to_hcd(udev->bus);
 
        if (!udev) {
                pr_debug ("%s nodev\n", __func__);
@@ -1661,7 +1662,9 @@ void usb_disconnect(struct usb_device **pdev)
         * so that the hardware is now fully quiesced.
         */
        dev_dbg (&udev->dev, "unregistering device\n");
+       mutex_lock(hcd->bandwidth_mutex);
        usb_disable_device(udev, 0);
+       mutex_unlock(hcd->bandwidth_mutex);
        usb_hcd_synchronize_unlinks(udev);
 
        usb_remove_ep_devs(&udev->ep0);
@@ -2362,6 +2365,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
                                USB_DEVICE_REMOTE_WAKEUP, 0,
                                NULL, 0,
                                USB_CTRL_SET_TIMEOUT);
+
+               /* System sleep transitions should never fail */
+               if (!(msg.event & PM_EVENT_AUTO))
+                       status = 0;
        } else {
                /* device has up to 10 msec to fully suspend */
                dev_dbg(&udev->dev, "usb %ssuspend\n",
@@ -2611,16 +2618,15 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
        struct usb_device       *hdev = hub->hdev;
        unsigned                port1;
 
-       /* fail if children aren't already suspended */
+       /* Warn if children aren't already suspended */
        for (port1 = 1; port1 <= hdev->maxchild; port1++) {
                struct usb_device       *udev;
 
                udev = hdev->children [port1-1];
                if (udev && udev->can_submit) {
-                       if (!(msg.event & PM_EVENT_AUTO))
-                               dev_dbg(&intf->dev, "port %d nyet suspended\n",
-                                               port1);
-                       return -EBUSY;
+                       dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
+                       if (msg.event & PM_EVENT_AUTO)
+                               return -EBUSY;
                }
        }
 
index 5701e85..64c7ab4 100644 (file)
@@ -1135,10 +1135,13 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
  * Deallocates hcd/hardware state for the endpoints (nuking all or most
  * pending urbs) and usbcore state for the interfaces, so that usbcore
  * must usb_set_configuration() before any interfaces could be used.
+ *
+ * Must be called with hcd->bandwidth_mutex held.
  */
 void usb_disable_device(struct usb_device *dev, int skip_ep0)
 {
        int i;
+       struct usb_hcd *hcd = bus_to_hcd(dev->bus);
 
        /* getting rid of interfaces will disconnect
         * any drivers bound to them (a key side effect)
@@ -1172,6 +1175,16 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
 
        dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
                skip_ep0 ? "non-ep0" : "all");
+       if (hcd->driver->check_bandwidth) {
+               /* First pass: Cancel URBs, leave endpoint pointers intact. */
+               for (i = skip_ep0; i < 16; ++i) {
+                       usb_disable_endpoint(dev, i, false);
+                       usb_disable_endpoint(dev, i + USB_DIR_IN, false);
+               }
+               /* Remove endpoints from the host controller internal state */
+               usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+               /* Second pass: remove endpoint pointers */
+       }
        for (i = skip_ep0; i < 16; ++i) {
                usb_disable_endpoint(dev, i, true);
                usb_disable_endpoint(dev, i + USB_DIR_IN, true);
@@ -1727,6 +1740,7 @@ free_interfaces:
        /* if it's already configured, clear out old state first.
         * getting rid of old interfaces means unbinding their drivers.
         */
+       mutex_lock(hcd->bandwidth_mutex);
        if (dev->state != USB_STATE_ADDRESS)
                usb_disable_device(dev, 1);     /* Skip ep0 */
 
@@ -1739,7 +1753,6 @@ free_interfaces:
         * host controller will not allow submissions to dropped endpoints.  If
         * this call fails, the device state is unchanged.
         */
-       mutex_lock(hcd->bandwidth_mutex);
        ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
        if (ret < 0) {
                mutex_unlock(hcd->bandwidth_mutex);
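The added kernel-doc line makes hcd->bandwidth_mutex part of usb_disable_device()'s calling convention, which is what the hub.c hunk earlier in this merge does around usb_disconnect(). A reduced sketch of the pattern for a caller inside usbcore (the helper name is hypothetical):

	#include <linux/mutex.h>
	#include <linux/usb.h>
	#include <linux/usb/hcd.h>	/* bus_to_hcd() */
	#include "usb.h"		/* usb_disable_device() is usbcore-private */

	/* Hypothetical helper: quiesce every endpoint of a device while
	 * holding the bandwidth lock, as usb_disable_device() now requires. */
	static void demo_quiesce_device(struct usb_device *udev)
	{
		struct usb_hcd *hcd = bus_to_hcd(udev->bus);

		mutex_lock(hcd->bandwidth_mutex);
		usb_disable_device(udev, 0);	/* skip_ep0 == 0: ep0 included */
		mutex_unlock(hcd->bandwidth_mutex);
	}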
index 98cc8a1..aa248c2 100644 (file)
@@ -44,7 +44,6 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
        struct platform_device *pdev = to_platform_device(hcd->self.controller);
        const struct platform_device_id *id;
-       int hclength;
        int ret;
 
        id = platform_get_device_id(pdev);
@@ -53,20 +52,23 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
                return -EINVAL;
        }
 
-       hclength = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
        switch (id->driver_data) {
        case EHCI_ATH79_IP_V1:
                ehci->has_synopsys_hc_bug = 1;
 
                ehci->caps = hcd->regs;
-               ehci->regs = hcd->regs + hclength;
+               ehci->regs = hcd->regs +
+                       HC_LENGTH(ehci,
+                                 ehci_readl(ehci, &ehci->caps->hc_capbase));
                break;
 
        case EHCI_ATH79_IP_V2:
                hcd->has_tt = 1;
 
                ehci->caps = hcd->regs + 0x100;
-               ehci->regs = hcd->regs + 0x100 + hclength;
+               ehci->regs = hcd->regs + 0x100 +
+                       HC_LENGTH(ehci,
+                                 ehci_readl(ehci, &ehci->caps->hc_capbase));
                break;
 
        default:
index b435ed6..f8030ee 100644 (file)
@@ -1,4 +1,8 @@
 /*
+ * Enhanced Host Controller Interface (EHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
  * Copyright (c) 2000-2004 by David Brownell
  *
  * This program is free software; you can redistribute it and/or modify it
index c9e6e45..55d3d58 100644 (file)
@@ -1555,7 +1555,7 @@ static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
 
        /* We need to forcefully reclaim the slot since some transfers never
           return, e.g. interrupt transfers and NAKed bulk transfers. */
-       if (usb_pipebulk(urb->pipe)) {
+       if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
                skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
                skip_map |= (1 << qh->slot);
                reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
index 9aa10bd..f9cf3f0 100644 (file)
@@ -1,5 +1,7 @@
 /*
- * OHCI HCD (Host Controller Driver) for USB.
+ * Open Host Controller Interface (OHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
  *
  * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
  * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
index db6f8b9..4586369 100644 (file)
@@ -2517,6 +2517,7 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&r8a66597->child_device);
 
        hcd->rsrc_start = res->start;
+       hcd->has_tt = 1;
 
        ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
        if (ret != 0) {
index 0f8e1d2..fcb7f7e 100644 (file)
@@ -1215,8 +1215,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
                /* dig out max burst from ep companion desc */
                max_packet = ep->ss_ep_comp.bMaxBurst;
-               if (!max_packet)
-                       xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
                break;
        case USB_SPEED_HIGH:
index 17541d0..cb16de2 100644 (file)
@@ -29,6 +29,9 @@
 #define PCI_VENDOR_ID_FRESCO_LOGIC     0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
 
+#define PCI_VENDOR_ID_ETRON            0x1b6f
+#define PCI_DEVICE_ID_ASROCK_P67       0x7023
+
 static const char hcd_name[] = "xhci_hcd";
 
 /* called after powerup, by probe or system-pm "wakeup" */
@@ -134,6 +137,11 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
                xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
                xhci->limit_active_eps = 64;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+                       pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+               xhci->quirks |= XHCI_RESET_ON_RESUME;
+               xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+       }
 
        /* Make sure the HC is halted. */
        retval = xhci_halt(xhci);
index 800f417..70cacbb 100644 (file)
@@ -1733,6 +1733,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                frame->status = -EOVERFLOW;
                skip_td = true;
                break;
+       case COMP_DEV_ERR:
        case COMP_STALL:
                frame->status = -EPROTO;
                skip_td = true;
@@ -1767,9 +1768,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                }
        }
 
-       if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
-               *status = 0;
-
        return finish_td(xhci, td, event_trb, event, ep, status, false);
 }
 
@@ -1787,8 +1785,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        idx = urb_priv->td_cnt;
        frame = &td->urb->iso_frame_desc[idx];
 
-       /* The transfer is partly done */
-       *status = -EXDEV;
+       /* The transfer is partly done. */
        frame->status = -EXDEV;
 
        /* calc actual length */
@@ -2016,6 +2013,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
                                 ep_index);
                goto cleanup;
+       case COMP_DEV_ERR:
+               xhci_warn(xhci, "WARN: detect an incompatible device");
+               status = -EPROTO;
+               break;
        case COMP_MISSED_INT:
                /*
                 * When encounter missed service error, one or more isoc tds
@@ -2063,6 +2064,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                /* Is this a TRB in the currently executing TD? */
                event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
                                td->last_trb, event_dma);
+
+               /*
+                * Skip the Force Stopped Event.  The event_trb (event_dma) of
+                * the FSE is not in the TD pointed to by ep_ring->dequeue,
+                * because the hardware dequeue pointer is still at the TRB
+                * before the current TD; that may be a Link TRB or the last
+                * TRB of the previous TD.  The command completion handler
+                * will take care of the rest.
+                */
+               if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
+                       ret = 0;
+                       goto cleanup;
+               }
+
                if (!event_seg) {
                        if (!ep->skip ||
                            !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
@@ -2158,6 +2173,11 @@ cleanup:
                                                urb->transfer_buffer_length,
                                                status);
                        spin_unlock(&xhci->lock);
+                       /* EHCI, UHCI, and OHCI always unconditionally set the
+                        * urb->status of an isochronous endpoint to 0.
+                        */
+                       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+                               status = 0;
                        usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
                        spin_lock(&xhci->lock);
                }
index 06e7023..f5fe1ac 100644 (file)
@@ -759,6 +759,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                msleep(100);
 
        spin_lock_irq(&xhci->lock);
+       if (xhci->quirks & XHCI_RESET_ON_RESUME)
+               hibernated = true;
 
        if (!hibernated) {
                /* step 1: restore register */
@@ -1401,6 +1403,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
        u32 added_ctxs;
        unsigned int last_ctx;
        u32 new_add_flags, new_drop_flags, new_slot_info;
+       struct xhci_virt_device *virt_dev;
        int ret = 0;
 
        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
@@ -1425,11 +1428,25 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                return 0;
        }
 
-       in_ctx = xhci->devs[udev->slot_id]->in_ctx;
-       out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+       virt_dev = xhci->devs[udev->slot_id];
+       in_ctx = virt_dev->in_ctx;
+       out_ctx = virt_dev->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+
+       /* If this endpoint is already in use, and the upper layers are trying
+        * to add it again without dropping it, reject the addition.
+        */
+       if (virt_dev->eps[ep_index].ring &&
+                       !(le32_to_cpu(ctrl_ctx->drop_flags) &
+                               xhci_get_endpoint_flag(&ep->desc))) {
+               xhci_warn(xhci, "Trying to add endpoint 0x%x "
+                               "without dropping it.\n",
+                               (unsigned int) ep->desc.bEndpointAddress);
+               return -EINVAL;
+       }
+
        /* If the HCD has already noted the endpoint is enabled,
         * ignore this request.
         */
@@ -1445,8 +1462,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
         * process context, not interrupt context (or so documentation
         * for usb_set_interface() and usb_set_configuration() claim).
         */
-       if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
-                               udev, ep, GFP_NOIO) < 0) {
+       if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
                dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
                                __func__, ep->desc.bEndpointAddress);
                return -ENOMEM;
@@ -1537,6 +1553,11 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
                                "and endpoint is not disabled.\n");
                ret = -EINVAL;
                break;
+       case COMP_DEV_ERR:
+               dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
+                               "configure command.\n");
+               ret = -ENODEV;
+               break;
        case COMP_SUCCESS:
                dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
                ret = 0;
@@ -1571,6 +1592,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
                ret = -EINVAL;
                break;
+       case COMP_DEV_ERR:
+               dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
+                               "context command.\n");
+               ret = -ENODEV;
+               break;
        case COMP_MEL_ERR:
                /* Max Exit Latency too large error */
                dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
@@ -2853,6 +2879,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
                dev_warn(&udev->dev, "Device not responding to set address.\n");
                ret = -EPROTO;
                break;
+       case COMP_DEV_ERR:
+               dev_warn(&udev->dev, "ERROR: Incompatible device for address "
+                               "device command.\n");
+               ret = -ENODEV;
+               break;
        case COMP_SUCCESS:
                xhci_dbg(xhci, "Successful Address Device command\n");
                break;
index 7d1ea3b..d8bbf5c 100644 (file)
@@ -874,6 +874,8 @@ struct xhci_transfer_event {
 #define COMP_PING_ERR  20
 /* Event Ring is full */
 #define COMP_ER_FULL   21
+/* Incompatible Device Error */
+#define COMP_DEV_ERR   22
 /* Missed Service Error - HC couldn't service an isoc ep within interval */
 #define COMP_MISSED_INT        23
 /* Successfully stopped command ring */
@@ -1308,6 +1310,7 @@ struct xhci_hcd {
  */
 #define XHCI_EP_LIMIT_QUIRK    (1 << 5)
 #define XHCI_BROKEN_MSI                (1 << 6)
+#define XHCI_RESET_ON_RESUME   (1 << 7)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
index 0a50a35..6aeb363 100644 (file)
@@ -1524,6 +1524,12 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
                csr = musb_readw(epio, MUSB_TXCSR);
                if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
                        csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+                       /*
+                        * Setting both TXPKTRDY and FLUSHFIFO makes the
+                        * controller interrupt the FIFO load in progress
+                        * but does not flush the data already loaded.
+                        */
+                       csr &= ~MUSB_TXCSR_TXPKTRDY;
                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
                        musb_writew(epio, MUSB_TXCSR, csr);
index 7295e31..8b2473f 100644 (file)
@@ -1575,7 +1575,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                        /* even if there was an error, we did the dma
                         * for iso_frame_desc->length
                         */
-                       if (d->status != EILSEQ && d->status != -EOVERFLOW)
+                       if (d->status != -EILSEQ && d->status != -EOVERFLOW)
                                d->status = 0;
 
                        if (++qh->iso_idx >= urb->number_of_packets)
index 1627289..2e06b90 100644 (file)
@@ -179,6 +179,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -848,7 +849,8 @@ static const char *ftdi_chip_name[] = {
        [FT2232C] = "FT2232C",
        [FT232RL] = "FT232RL",
        [FT2232H] = "FT2232H",
-       [FT4232H] = "FT4232H"
+       [FT4232H] = "FT4232H",
+       [FT232H]  = "FT232H"
 };
 
 
@@ -1168,6 +1170,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
                break;
        case FT2232H: /* FT2232H chip */
        case FT4232H: /* FT4232H chip */
+       case FT232H:  /* FT232H chip */
                if ((baud <= 12000000) & (baud >= 1200)) {
                        div_value = ftdi_2232h_baud_to_divisor(baud);
                } else if (baud < 1200) {
@@ -1429,9 +1432,12 @@ static void ftdi_determine_type(struct usb_serial_port *port)
        } else if (version < 0x600) {
                /* Assume it's an FT232BM (or FT245BM) */
                priv->chip_type = FT232BM;
-       } else {
-               /* Assume it's an FT232R */
+       } else if (version < 0x900) {
+               /* Assume it's an FT232RL */
                priv->chip_type = FT232RL;
+       } else {
+               /* Assume it's an FT232H */
+               priv->chip_type = FT232H;
        }
        dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
 }
@@ -1559,7 +1565,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
                     priv->chip_type == FT2232C ||
                     priv->chip_type == FT232RL ||
                     priv->chip_type == FT2232H ||
-                    priv->chip_type == FT4232H)) {
+                    priv->chip_type == FT4232H ||
+                    priv->chip_type == FT232H)) {
                        retval = device_create_file(&port->dev,
                                                    &dev_attr_latency_timer);
                }
@@ -1580,7 +1587,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
                    priv->chip_type == FT2232C ||
                    priv->chip_type == FT232RL ||
                    priv->chip_type == FT2232H ||
-                   priv->chip_type == FT4232H) {
+                   priv->chip_type == FT4232H ||
+                    priv->chip_type == FT232H) {
                        device_remove_file(&port->dev, &dev_attr_latency_timer);
                }
        }
@@ -2212,6 +2220,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
        case FT232RL:
        case FT2232H:
        case FT4232H:
+       case FT232H:
                len = 2;
                break;
        default:
index 213fe3d..19584fa 100644 (file)
@@ -156,7 +156,8 @@ enum ftdi_chip_type {
        FT2232C = 4,
        FT232RL = 5,
        FT2232H = 6,
-       FT4232H = 7
+       FT4232H = 7,
+       FT232H  = 8
 };
 
 enum ftdi_sio_baudrate {
index ab1fcdf..19156d1 100644 (file)
@@ -22,6 +22,7 @@
 #define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
 #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
 #define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
+#define FTDI_232H_PID  0x6014 /* Single channel hi-speed device */
 #define FTDI_SIO_PID   0x8372  /* Product Id SIO application of 8U100AX */
 #define FTDI_232RL_PID  0xFBFA  /* Product ID for FT232RL */
 
index c6d92a5..ea84456 100644 (file)
@@ -1745,6 +1745,7 @@ static int ti_download_firmware(struct ti_device *tdev)
        }
        if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
                dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
+               release_firmware(fw_p);
                return -ENOENT;
        }
 
index 5fc983c..cf03ad0 100644 (file)
@@ -447,6 +447,8 @@ static int clcdfb_register(struct clcd_fb *fb)
                goto out;
        }
 
+       fb->fb.device           = &fb->dev->dev;
+
        fb->fb.fix.mmio_start   = fb->dev->res.start;
        fb->fb.fix.mmio_len     = resource_size(&fb->dev->res);
 
index bedf5be..0acc7d6 100644 (file)
@@ -555,8 +555,6 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
 static int fsl_diu_check_var(struct fb_var_screeninfo *var,
                                struct fb_info *info)
 {
-       unsigned long htotal, vtotal;
-
        pr_debug("check_var xres: %d\n", var->xres);
        pr_debug("check_var yres: %d\n", var->yres);
 
@@ -635,20 +633,6 @@ static int fsl_diu_check_var(struct fb_var_screeninfo *var,
 
                break;
        }
-       /* If the pixclock is below the minimum spec'd value then set to
-        * refresh rate for 60Hz since this is supported by most monitors.
-        * Refer to Documentation/fb/ for calculations.
-        */
-       if ((var->pixclock < MIN_PIX_CLK) || (var->pixclock > MAX_PIX_CLK)) {
-               htotal = var->xres + var->right_margin + var->hsync_len +
-                   var->left_margin;
-               vtotal = var->yres + var->lower_margin + var->vsync_len +
-                   var->upper_margin;
-               var->pixclock = (vtotal * htotal * 6UL) / 100UL;
-               var->pixclock = KHZ2PICOS(var->pixclock);
-               pr_debug("pixclock set for 60Hz refresh = %u ps\n",
-                       var->pixclock);
-       }
 
        var->height = -1;
        var->width = -1;
index c6b554f..5a5d092 100644 (file)
@@ -29,7 +29,7 @@ static int  crt_option = 1;
 static char panel_option[32] = "";
 
 /* Modes relevant to the GX1 (taken from modedb.c) */
-static const struct fb_videomode __initdata gx1_modedb[] = {
+static const struct fb_videomode __devinitdata gx1_modedb[] = {
        /* 640x480-60 VESA */
        { NULL, 60, 640, 480, 39682,  48, 16, 33, 10, 96, 2,
          0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -195,7 +195,7 @@ static int gx1fb_blank(int blank_mode, struct fb_info *info)
        return par->vid_ops->blank_display(info, blank_mode);
 }
 
-static int __init gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
+static int __devinit gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
 {
        struct geodefb_par *par = info->par;
        unsigned gx_base;
@@ -268,7 +268,7 @@ static struct fb_ops gx1fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
+static struct fb_info * __devinit gx1fb_init_fbinfo(struct device *dev)
 {
        struct geodefb_par *par;
        struct fb_info *info;
@@ -318,7 +318,7 @@ static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
        return info;
 }
 
-static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct geodefb_par *par;
        struct fb_info *info;
@@ -382,7 +382,7 @@ static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *
        return ret;
 }
 
-static void gx1fb_remove(struct pci_dev *pdev)
+static void __devexit gx1fb_remove(struct pci_dev *pdev)
 {
        struct fb_info *info = pci_get_drvdata(pdev);
        struct geodefb_par *par = info->par;
@@ -441,7 +441,7 @@ static struct pci_driver gx1fb_driver = {
        .name           = "gx1fb",
        .id_table       = gx1fb_id_table,
        .probe          = gx1fb_probe,
-       .remove         = gx1fb_remove,
+       .remove         = __devexit_p(gx1fb_remove),
 };
 
 static int __init gx1fb_init(void)
@@ -456,7 +456,7 @@ static int __init gx1fb_init(void)
        return pci_register_driver(&gx1fb_driver);
 }
 
-static void __exit gx1fb_cleanup(void)
+static void __devexit gx1fb_cleanup(void)
 {
        pci_unregister_driver(&gx1fb_driver);
 }
index fbef15f..614251a 100644 (file)
@@ -233,7 +233,7 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
 
        videomemory = vzalloc(videomemorysize);
        if (!videomemory)
-               return retval;
+               goto err_videomem_alloc;
 
        info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
        if (!info)
@@ -275,6 +275,7 @@ err_fbreg:
        framebuffer_release(info);
 err_fballoc:
        vfree(videomemory);
+err_videomem_alloc:
        module_put(board->owner);
        return retval;
 }
index 9170c82..cc7d732 100644 (file)
@@ -218,7 +218,7 @@ static inline void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
                icb_offset = 0xc0000000 | (cfg->current_reg << 23);
 
        *icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24);
-       if ((*icb_addr_c) && is_nvcolor(cfg->pixelformat))
+       if (is_nvcolor(cfg->pixelformat))
                *icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24);
 }
 
index 87f0be1..6294dca 100644 (file)
@@ -1664,7 +1664,7 @@ static void sm501fb_stop(struct sm501fb_info *info)
                           resource_size(info->regs_res));
 }
 
-static int sm501fb_init_fb(struct fb_info *fb,
+static int __devinit sm501fb_init_fb(struct fb_info *fb,
                           enum sm501_controller head,
                           const char *fbname)
 {
index 52b0f3e..816a4fd 100644 (file)
@@ -1233,8 +1233,12 @@ static int dlfb_setup_modes(struct dlfb_data *dev,
                        if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
                                fb_add_videomode(&info->monspecs.modedb[i],
                                        &info->modelist);
-                       else /* if we've removed top/best mode */
-                               info->monspecs.misc &= ~FB_MISC_1ST_DETAIL;
+                       else {
+                               if (i == 0)
+                                       /* if we've removed top/best mode */
+                                       info->monspecs.misc
+                                               &= ~FB_MISC_1ST_DETAIL;
+                       }
                }
 
                default_vmode = fb_find_best_display(&info->monspecs,
index a99bbe8..501b340 100644 (file)
@@ -175,6 +175,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static void vesafb_destroy(struct fb_info *info)
 {
+       fb_dealloc_cmap(&info->cmap);
        if (info->screen_base)
                iounmap(info->screen_base);
        release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
index 022f9eb..9536d38 100644 (file)
@@ -535,8 +535,7 @@ config I6300ESB_WDT
 
 config INTEL_SCU_WATCHDOG
        bool "Intel SCU Watchdog for Mobile Platforms"
-       depends on WATCHDOG
-       depends on INTEL_SCU_IPC
+       depends on X86_MRST
        ---help---
          Hardware driver for the watchdog time built into the Intel SCU
          for Intel Mobile Platforms.
index 750bc52..4ca5d40 100644 (file)
@@ -448,7 +448,7 @@ static void __exit at32_wdt_exit(void)
 }
 module_exit(at32_wdt_exit);
 
-MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
 MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
index 29a7cd4..b146082 100644 (file)
@@ -329,4 +329,4 @@ MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
 MODULE_DESCRIPTION("GE watchdog driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS("platform: gef_wdt");
+MODULE_ALIAS("platform:gef_wdt");
index 919bdd1..ba43860 100644 (file)
@@ -42,7 +42,6 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/sfi.h>
-#include <linux/types.h>
 #include <asm/irq.h>
 #include <asm/atomic.h>
 #include <asm/intel_scu_ipc.h>
index 1479dc4..0430e09 100644 (file)
@@ -66,23 +66,18 @@ static struct {
        int default_ticks;
        unsigned long inuse;
        unsigned gpio;
-       int gstate;
+       unsigned int gstate;
 } mtx1_wdt_device;
 
 static void mtx1_wdt_trigger(unsigned long unused)
 {
-       u32 tmp;
-
        spin_lock(&mtx1_wdt_device.lock);
        if (mtx1_wdt_device.running)
                ticks--;
 
        /* toggle wdt gpio */
-       mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate;
-       if (mtx1_wdt_device.gstate)
-               gpio_direction_output(mtx1_wdt_device.gpio, 1);
-       else
-               gpio_direction_input(mtx1_wdt_device.gpio);
+       mtx1_wdt_device.gstate = !mtx1_wdt_device.gstate;
+       gpio_set_value(mtx1_wdt_device.gpio, mtx1_wdt_device.gstate);
 
        if (mtx1_wdt_device.queue && ticks)
                mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
@@ -105,7 +100,7 @@ static void mtx1_wdt_start(void)
        if (!mtx1_wdt_device.queue) {
                mtx1_wdt_device.queue = 1;
                mtx1_wdt_device.gstate = 1;
-               gpio_direction_output(mtx1_wdt_device.gpio, 1);
+               gpio_set_value(mtx1_wdt_device.gpio, 1);
                mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
        }
        mtx1_wdt_device.running++;
@@ -120,7 +115,7 @@ static int mtx1_wdt_stop(void)
        if (mtx1_wdt_device.queue) {
                mtx1_wdt_device.queue = 0;
                mtx1_wdt_device.gstate = 0;
-               gpio_direction_output(mtx1_wdt_device.gpio, 0);
+               gpio_set_value(mtx1_wdt_device.gpio, 0);
        }
        ticks = mtx1_wdt_device.default_ticks;
        spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);
@@ -214,6 +209,12 @@ static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
        int ret;
 
        mtx1_wdt_device.gpio = pdev->resource[0].start;
+       ret = gpio_request_one(mtx1_wdt_device.gpio,
+                               GPIOF_OUT_INIT_HIGH, "mtx1-wdt");
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to request gpio");
+               return ret;
+       }
 
        spin_lock_init(&mtx1_wdt_device.lock);
        init_completion(&mtx1_wdt_device.stop);
@@ -239,11 +240,13 @@ static int __devexit mtx1_wdt_remove(struct platform_device *pdev)
                mtx1_wdt_device.queue = 0;
                wait_for_completion(&mtx1_wdt_device.stop);
        }
+
+       gpio_free(mtx1_wdt_device.gpio);
        misc_deregister(&mtx1_wdt_misc);
        return 0;
 }
 
-static struct platform_driver mtx1_wdt = {
+static struct platform_driver mtx1_wdt_driver = {
        .probe = mtx1_wdt_probe,
        .remove = __devexit_p(mtx1_wdt_remove),
        .driver.name = "mtx1-wdt",
@@ -252,12 +255,12 @@ static struct platform_driver mtx1_wdt = {
 
 static int __init mtx1_wdt_init(void)
 {
-       return platform_driver_register(&mtx1_wdt);
+       return platform_driver_register(&mtx1_wdt_driver);
 }
 
 static void __exit mtx1_wdt_exit(void)
 {
-       platform_driver_unregister(&mtx1_wdt);
+       platform_driver_unregister(&mtx1_wdt_driver);
 }
 
 module_init(mtx1_wdt_init);
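The mtx1_wdt hunks above claim the watchdog GPIO once with gpio_request_one() and then only toggle it with gpio_set_value(), instead of re-programming the direction on every tick. The same request/toggle/free pattern in isolation (GPIO number and label are made up):

	#include <linux/gpio.h>

	#define DEMO_WDT_GPIO	215	/* made-up GPIO number */

	static int demo_wdt_gpio_selftest(void)
	{
		int ret;

		/* Claim the line and drive it high in a single call, as the
		 * new probe function does. */
		ret = gpio_request_one(DEMO_WDT_GPIO, GPIOF_OUT_INIT_HIGH,
				       "demo-wdt");
		if (ret < 0)
			return ret;

		/* With the direction already set, toggling needs only
		 * gpio_set_value(). */
		gpio_set_value(DEMO_WDT_GPIO, 0);
		gpio_set_value(DEMO_WDT_GPIO, 1);

		gpio_free(DEMO_WDT_GPIO);
		return 0;
	}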
index 8c4b2d5..871caea 100644 (file)
@@ -320,6 +320,11 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
        struct wm831x_watchdog_pdata *pdata;
        int reg, ret;
 
+       if (wm831x) {
+               dev_err(&pdev->dev, "wm831x watchdog already registered\n");
+               return -EBUSY;
+       }
+
        wm831x = dev_get_drvdata(pdev->dev.parent);
 
        ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
index 0d15a3d..5f43bfb 100644 (file)
@@ -82,6 +82,7 @@ fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
 fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
 fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
                                      advansys/3550.bin advansys/38C0800.bin
+fw-shipped-$(CONFIG_SCSI_ISCI) += isci/isci_firmware.bin
 fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
                                         qlogic/12160.bin
 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
diff --git a/firmware/isci/isci_firmware.bin.ihex b/firmware/isci/isci_firmware.bin.ihex
new file mode 100644 (file)
index 0000000..2e66195
--- /dev/null
@@ -0,0 +1,16 @@
+:10000000495343554F454D42E80018100002000087
+:1000100000000000000000000101000000000000DE
+:10002000FFFFCF5F0100000008DD0B0000FC0F00A8
+:10003000097C0B006EFC0A00FFFFCF5F010000008F
+:1000400008DD0B0000FC0F00097C0B006EFC0A00B1
+:10005000FFFFCF5F0100000008DD0B0000FC0F0078
+:10006000097C0B006EFC0A00FFFFCF5F010000005F
+:1000700008DD0B0000FC0F00097C0B006EFC0A0081
+:100080000101000000000000FFFFCF5F0200000040
+:1000900008DD0B0000FC0F00097C0B006EFC0A0061
+:1000A000FFFFCF5F0200000008DD0B0000FC0F0027
+:1000B000097C0B006EFC0A00FFFFCF5F020000000E
+:1000C00008DD0B0000FC0F00097C0B006EFC0A0031
+:1000D000FFFFCF5F0200000008DD0B0000FC0F00F7
+:0800E000097C0B006EFC0A0014
+:00000001FF
index 1a2421f..610e8e0 100644 (file)
@@ -762,7 +762,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
        if (!disk)
                return ERR_PTR(-ENXIO);
 
-       whole = bdget_disk(disk, 0);
+       /*
+        * Normally, @bdev should equal what's returned from bdget_disk()
+        * if partno is 0; however, some drivers (floppy) use multiple
+        * bdev's for the same physical device and @bdev may be one of the
+        * aliases.  Keep @bdev if partno is 0.  This means claimer
+        * tracking is broken for those devices but it has always been that
+        * way.
+        */
+       if (partno)
+               whole = bdget_disk(disk, 0);
+       else
+               whole = bdgrab(bdev);
+
        module_put(disk->fops->owner);
        put_disk(disk);
        if (!whole)
index 3006287..f30ac05 100644 (file)
@@ -19,7 +19,6 @@
 #ifndef __BTRFS_CTREE__
 #define __BTRFS_CTREE__
 
-#include <linux/version.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/fs.h>
index f1cbd02..98c68e6 100644 (file)
@@ -82,19 +82,16 @@ static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
        return root->fs_info->delayed_root;
 }
 
-static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
-                                                       struct inode *inode)
+static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
 {
-       struct btrfs_delayed_node *node;
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(inode);
-       int ret;
+       struct btrfs_delayed_node *node;
 
-again:
        node = ACCESS_ONCE(btrfs_inode->delayed_node);
        if (node) {
-               atomic_inc(&node->refs);        /* can be accessed */
+               atomic_inc(&node->refs);
                return node;
        }
 
@@ -102,8 +99,10 @@ again:
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
        if (node) {
                if (btrfs_inode->delayed_node) {
+                       atomic_inc(&node->refs);        /* can be accessed */
+                       BUG_ON(btrfs_inode->delayed_node != node);
                        spin_unlock(&root->inode_lock);
-                       goto again;
+                       return node;
                }
                btrfs_inode->delayed_node = node;
                atomic_inc(&node->refs);        /* can be accessed */
@@ -113,6 +112,23 @@ again:
        }
        spin_unlock(&root->inode_lock);
 
+       return NULL;
+}
+
+static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
+                                                       struct inode *inode)
+{
+       struct btrfs_delayed_node *node;
+       struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+       struct btrfs_root *root = btrfs_inode->root;
+       u64 ino = btrfs_ino(inode);
+       int ret;
+
+again:
+       node = btrfs_get_delayed_node(inode);
+       if (node)
+               return node;
+
        node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
@@ -548,19 +564,6 @@ struct btrfs_delayed_item *__btrfs_next_delayed_item(
        return next;
 }
 
-static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
-                                                       struct inode *inode)
-{
-       struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
-       struct btrfs_delayed_node *delayed_node;
-
-       delayed_node = btrfs_inode->delayed_node;
-       if (delayed_node)
-               atomic_inc(&delayed_node->refs);
-
-       return delayed_node;
-}
-
 static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
                                                   u64 root_id)
 {
@@ -1404,8 +1407,7 @@ end:
 
 int btrfs_inode_delayed_dir_index_count(struct inode *inode)
 {
-       struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
-       int ret = 0;
+       struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
 
        if (!delayed_node)
                return -ENOENT;
@@ -1415,11 +1417,14 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
         * a new directory index is added into the delayed node and index_cnt
         * is updated now. So we needn't lock the delayed node.
         */
-       if (!delayed_node->index_cnt)
+       if (!delayed_node->index_cnt) {
+               btrfs_release_delayed_node(delayed_node);
                return -EINVAL;
+       }
 
        BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
-       return ret;
+       btrfs_release_delayed_node(delayed_node);
+       return 0;
 }
 
 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
@@ -1613,6 +1618,57 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                      inode->i_ctime.tv_nsec);
 }
 
+int btrfs_fill_inode(struct inode *inode, u32 *rdev)
+{
+       struct btrfs_delayed_node *delayed_node;
+       struct btrfs_inode_item *inode_item;
+       struct btrfs_timespec *tspec;
+
+       delayed_node = btrfs_get_delayed_node(inode);
+       if (!delayed_node)
+               return -ENOENT;
+
+       mutex_lock(&delayed_node->mutex);
+       if (!delayed_node->inode_dirty) {
+               mutex_unlock(&delayed_node->mutex);
+               btrfs_release_delayed_node(delayed_node);
+               return -ENOENT;
+       }
+
+       inode_item = &delayed_node->inode_item;
+
+       inode->i_uid = btrfs_stack_inode_uid(inode_item);
+       inode->i_gid = btrfs_stack_inode_gid(inode_item);
+       btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
+       inode->i_mode = btrfs_stack_inode_mode(inode_item);
+       inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
+       inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
+       BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
+       BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
+       inode->i_rdev = 0;
+       *rdev = btrfs_stack_inode_rdev(inode_item);
+       BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
+
+       tspec = btrfs_inode_atime(inode_item);
+       inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
+       inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+       tspec = btrfs_inode_mtime(inode_item);
+       inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
+       inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+       tspec = btrfs_inode_ctime(inode_item);
+       inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
+       inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+       inode->i_generation = BTRFS_I(inode)->generation;
+       BTRFS_I(inode)->index_cnt = (u64)-1;
+
+       mutex_unlock(&delayed_node->mutex);
+       btrfs_release_delayed_node(delayed_node);
+       return 0;
+}
+
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, struct inode *inode)
 {
index d1a6a29..8d27af4 100644 (file)
@@ -119,6 +119,7 @@ void btrfs_kill_delayed_inode_items(struct inode *inode);
 
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, struct inode *inode);
+int btrfs_fill_inode(struct inode *inode, u32 *rdev);
 
 /* Used for drop dead root */
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
index 1f61bf5..71cd456 100644 (file)
@@ -4842,7 +4842,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                                     u64 num_bytes, u64 empty_size,
                                     u64 search_start, u64 search_end,
                                     u64 hint_byte, struct btrfs_key *ins,
-                                    int data)
+                                    u64 data)
 {
        int ret = 0;
        struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -4869,7 +4869,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 
        space_info = __find_space_info(root->fs_info, data);
        if (!space_info) {
-               printk(KERN_ERR "No space info for %d\n", data);
+               printk(KERN_ERR "No space info for %llu\n", data);
                return -ENOSPC;
        }
 
index 9f985a4..bf0d615 100644 (file)
@@ -1893,9 +1893,12 @@ void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
 
        while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, offset_index);
-               unlink_free_space(ctl, info);
-               kfree(info->bitmap);
-               kmem_cache_free(btrfs_free_space_cachep, info);
+               if (!info->bitmap) {
+                       unlink_free_space(ctl, info);
+                       kmem_cache_free(btrfs_free_space_cachep, info);
+               } else {
+                       free_bitmap(ctl, info);
+               }
                if (need_resched()) {
                        spin_unlock(&ctl->tree_lock);
                        cond_resched();
index 0a9b10c..d340f63 100644 (file)
@@ -2509,6 +2509,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
        int maybe_acls;
        u32 rdev;
        int ret;
+       bool filled = false;
+
+       ret = btrfs_fill_inode(inode, &rdev);
+       if (!ret)
+               filled = true;
 
        path = btrfs_alloc_path();
        BUG_ON(!path);
@@ -2520,6 +2525,10 @@ static void btrfs_read_locked_inode(struct inode *inode)
                goto make_bad;
 
        leaf = path->nodes[0];
+
+       if (filled)
+               goto cache_acl;
+
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        if (!leaf->map_token)
@@ -2556,7 +2565,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
 
        BTRFS_I(inode)->index_cnt = (u64)-1;
        BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
-
+cache_acl:
        /*
         * try to precache a NULL acl entry for files that don't have
         * any xattrs or acls
@@ -2572,7 +2581,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
        }
 
        btrfs_free_path(path);
-       inode_item = NULL;
 
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
@@ -4520,6 +4528,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        inode_tree_add(inode);
 
        trace_btrfs_inode_new(inode);
+       btrfs_set_inode_last_trans(trans, inode);
 
        return inode;
 fail:
index 53ed1ad..f66cc16 100644 (file)
@@ -156,6 +156,6 @@ config CIFS_ACL
 
 config CIFS_NFSD_EXPORT
          bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)"
-         depends on CIFS && EXPERIMENTAL
+         depends on CIFS && EXPERIMENTAL && BROKEN
          help
           Allows NFS server to export a CIFS mounted share (nfsd over cifs)
index ffb1459..7260e11 100644 (file)
@@ -42,6 +42,7 @@
 #define CIFS_MOUNT_MULTIUSER   0x20000 /* multiuser mount */
 #define CIFS_MOUNT_STRICT_IO   0x40000 /* strict cache mode */
 #define CIFS_MOUNT_RWPIDFORWARD        0x80000 /* use pid forwarding for rw */
+#define CIFS_MOUNT_POSIXACL    0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
 
 struct cifs_sb_info {
        struct rb_root tlink_tree;
index 2f0c586..35f9154 100644 (file)
@@ -104,8 +104,7 @@ cifs_sb_deactive(struct super_block *sb)
 }
 
 static int
-cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
-               const char *devname, int silent)
+cifs_read_super(struct super_block *sb)
 {
        struct inode *inode;
        struct cifs_sb_info *cifs_sb;
@@ -113,22 +112,16 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
 
        cifs_sb = CIFS_SB(sb);
 
-       spin_lock_init(&cifs_sb->tlink_tree_lock);
-       cifs_sb->tlink_tree = RB_ROOT;
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
+               sb->s_flags |= MS_POSIXACL;
 
-       rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
-       if (rc)
-               return rc;
-
-       cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
+       if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
+               sb->s_maxbytes = MAX_LFS_FILESIZE;
+       else
+               sb->s_maxbytes = MAX_NON_LFS;
 
-       rc = cifs_mount(sb, cifs_sb, volume_info, devname);
-
-       if (rc) {
-               if (!silent)
-                       cERROR(1, "cifs_mount failed w/return code = %d", rc);
-               goto out_mount_failed;
-       }
+       /* BB FIXME fix time_gran to be larger for LANMAN sessions */
+       sb->s_time_gran = 100;
 
        sb->s_magic = CIFS_MAGIC_NUMBER;
        sb->s_op = &cifs_super_ops;
@@ -170,37 +163,14 @@ out_no_root:
        if (inode)
                iput(inode);
 
-       cifs_umount(sb, cifs_sb);
-
-out_mount_failed:
-       bdi_destroy(&cifs_sb->bdi);
        return rc;
 }
 
-static void
-cifs_put_super(struct super_block *sb)
+static void cifs_kill_sb(struct super_block *sb)
 {
-       int rc = 0;
-       struct cifs_sb_info *cifs_sb;
-
-       cFYI(1, "In cifs_put_super");
-       cifs_sb = CIFS_SB(sb);
-       if (cifs_sb == NULL) {
-               cFYI(1, "Empty cifs superblock info passed to unmount");
-               return;
-       }
-
-       rc = cifs_umount(sb, cifs_sb);
-       if (rc)
-               cERROR(1, "cifs_umount failed with return code %d", rc);
-       if (cifs_sb->mountdata) {
-               kfree(cifs_sb->mountdata);
-               cifs_sb->mountdata = NULL;
-       }
-
-       unload_nls(cifs_sb->local_nls);
-       bdi_destroy(&cifs_sb->bdi);
-       kfree(cifs_sb);
+       struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+       kill_anon_super(sb);
+       cifs_umount(cifs_sb);
 }
 
 static int
@@ -548,7 +518,6 @@ static int cifs_drop_inode(struct inode *inode)
 }
 
 static const struct super_operations cifs_super_ops = {
-       .put_super = cifs_put_super,
        .statfs = cifs_statfs,
        .alloc_inode = cifs_alloc_inode,
        .destroy_inode = cifs_destroy_inode,
@@ -585,7 +554,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
        full_path = cifs_build_path_to_root(vol, cifs_sb,
                                            cifs_sb_master_tcon(cifs_sb));
        if (full_path == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        cFYI(1, "Get root dentry for %s", full_path);
 
@@ -614,7 +583,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
                        dchild = d_alloc(dparent, &name);
                        if (dchild == NULL) {
                                dput(dparent);
-                               dparent = NULL;
+                               dparent = ERR_PTR(-ENOMEM);
                                goto out;
                        }
                }
@@ -632,7 +601,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
                        if (rc) {
                                dput(dchild);
                                dput(dparent);
-                               dparent = NULL;
+                               dparent = ERR_PTR(rc);
                                goto out;
                        }
                        alias = d_materialise_unique(dchild, inode);
@@ -640,7 +609,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
                                dput(dchild);
                                if (IS_ERR(alias)) {
                                        dput(dparent);
-                                       dparent = NULL;
+                                       dparent = ERR_PTR(-EINVAL); /* XXX */
                                        goto out;
                                }
                                dchild = alias;
@@ -660,6 +629,13 @@ out:
        return dparent;
 }
 
+static int cifs_set_super(struct super_block *sb, void *data)
+{
+       struct cifs_mnt_data *mnt_data = data;
+       sb->s_fs_info = mnt_data->cifs_sb;
+       return set_anon_super(sb, NULL);
+}
+
 static struct dentry *
 cifs_do_mount(struct file_system_type *fs_type,
              int flags, const char *dev_name, void *data)
@@ -680,75 +656,73 @@ cifs_do_mount(struct file_system_type *fs_type,
        cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
        if (cifs_sb == NULL) {
                root = ERR_PTR(-ENOMEM);
-               goto out;
+               goto out_nls;
+       }
+
+       cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
+       if (cifs_sb->mountdata == NULL) {
+               root = ERR_PTR(-ENOMEM);
+               goto out_cifs_sb;
        }
 
        cifs_setup_cifs_sb(volume_info, cifs_sb);
 
+       rc = cifs_mount(cifs_sb, volume_info);
+       if (rc) {
+               if (!(flags & MS_SILENT))
+                       cERROR(1, "cifs_mount failed w/return code = %d", rc);
+               root = ERR_PTR(rc);
+               goto out_mountdata;
+       }
+
        mnt_data.vol = volume_info;
        mnt_data.cifs_sb = cifs_sb;
        mnt_data.flags = flags;
 
-       sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data);
+       sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
        if (IS_ERR(sb)) {
                root = ERR_CAST(sb);
-               goto out_cifs_sb;
+               cifs_umount(cifs_sb);
+               goto out;
        }
 
-       if (sb->s_fs_info) {
+       if (sb->s_root) {
                cFYI(1, "Use existing superblock");
-               goto out_shared;
-       }
-
-       /*
-        * Copy mount params for use in submounts. Better to do
-        * the copy here and deal with the error before cleanup gets
-        * complicated post-mount.
-        */
-       cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
-       if (cifs_sb->mountdata == NULL) {
-               root = ERR_PTR(-ENOMEM);
-               goto out_super;
-       }
-
-       sb->s_flags = flags;
-       /* BB should we make this contingent on mount parm? */
-       sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
-       sb->s_fs_info = cifs_sb;
+               cifs_umount(cifs_sb);
+       } else {
+               sb->s_flags = flags;
+               /* BB should we make this contingent on mount parm? */
+               sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
+
+               rc = cifs_read_super(sb);
+               if (rc) {
+                       root = ERR_PTR(rc);
+                       goto out_super;
+               }
 
-       rc = cifs_read_super(sb, volume_info, dev_name,
-                            flags & MS_SILENT ? 1 : 0);
-       if (rc) {
-               root = ERR_PTR(rc);
-               goto out_super;
+               sb->s_flags |= MS_ACTIVE;
        }
 
-       sb->s_flags |= MS_ACTIVE;
-
        root = cifs_get_root(volume_info, sb);
-       if (root == NULL)
+       if (IS_ERR(root))
                goto out_super;
 
        cFYI(1, "dentry root is: %p", root);
        goto out;
 
-out_shared:
-       root = cifs_get_root(volume_info, sb);
-       if (root)
-               cFYI(1, "dentry root is: %p", root);
-       goto out;
-
 out_super:
-       kfree(cifs_sb->mountdata);
        deactivate_locked_super(sb);
-
-out_cifs_sb:
-       unload_nls(cifs_sb->local_nls);
-       kfree(cifs_sb);
-
 out:
        cifs_cleanup_volume_info(&volume_info);
        return root;
+
+out_mountdata:
+       kfree(cifs_sb->mountdata);
+out_cifs_sb:
+       kfree(cifs_sb);
+out_nls:
+       unload_nls(volume_info->local_nls);
+       goto out;
 }
 
 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -837,7 +811,7 @@ struct file_system_type cifs_fs_type = {
        .owner = THIS_MODULE,
        .name = "cifs",
        .mount = cifs_do_mount,
-       .kill_sb = kill_anon_super,
+       .kill_sb = cifs_kill_sb,
        /*  .fs_flags */
 };
 const struct inode_operations cifs_dir_inode_ops = {
index 953f844..257f312 100644 (file)
@@ -157,9 +157,8 @@ extern int cifs_match_super(struct super_block *, void *);
 extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
 extern int cifs_setup_volume_info(struct smb_vol **pvolume_info,
                                  char *mount_data, const char *devname);
-extern int cifs_mount(struct super_block *, struct cifs_sb_info *,
-                     struct smb_vol *, const char *);
-extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
+extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
+extern void cifs_umount(struct cifs_sb_info *);
 extern void cifs_dfs_release_automount_timer(void);
 void cifs_proc_init(void);
 void cifs_proc_clean(void);
@@ -218,7 +217,8 @@ extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
                        struct dfs_info3_param **preferrals,
                        int remap);
 extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
-                                struct super_block *sb, struct smb_vol *vol);
+                                struct cifs_sb_info *cifs_sb,
+                                struct smb_vol *vol);
 extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
                        struct kstatfs *FSData);
 extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
index 12cf72d..c8cb83e 100644 (file)
@@ -2474,14 +2474,6 @@ generic_ip_connect(struct TCP_Server_Info *server)
        if (rc < 0)
                return rc;
 
-       rc = socket->ops->connect(socket, saddr, slen, 0);
-       if (rc < 0) {
-               cFYI(1, "Error %d connecting to server", rc);
-               sock_release(socket);
-               server->ssocket = NULL;
-               return rc;
-       }
-
        /*
         * Eventually check for other socket options to change from
         * the default. sock_setsockopt not used because it expects
@@ -2510,6 +2502,14 @@ generic_ip_connect(struct TCP_Server_Info *server)
                 socket->sk->sk_sndbuf,
                 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
 
+       rc = socket->ops->connect(socket, saddr, slen, 0);
+       if (rc < 0) {
+               cFYI(1, "Error %d connecting to server", rc);
+               sock_release(socket);
+               server->ssocket = NULL;
+               return rc;
+       }
+
        if (sport == htons(RFC1001_PORT))
                rc = ip_rfc1001_connect(server);
 
@@ -2546,7 +2546,7 @@ ip_connect(struct TCP_Server_Info *server)
 }
 
 void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
-                         struct super_block *sb, struct smb_vol *vol_info)
+                         struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info)
 {
        /* if we are reconnecting then should we check to see if
         * any requested capabilities changed locally e.g. via
@@ -2600,22 +2600,23 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
                        cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
                else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
                        cFYI(1, "negotiated posix acl support");
-                       if (sb)
-                               sb->s_flags |= MS_POSIXACL;
+                       if (cifs_sb)
+                               cifs_sb->mnt_cifs_flags |=
+                                       CIFS_MOUNT_POSIXACL;
                }
 
                if (vol_info && vol_info->posix_paths == 0)
                        cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
                else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
                        cFYI(1, "negotiate posix pathnames");
-                       if (sb)
-                               CIFS_SB(sb)->mnt_cifs_flags |=
+                       if (cifs_sb)
+                               cifs_sb->mnt_cifs_flags |=
                                        CIFS_MOUNT_POSIX_PATHS;
                }
 
-               if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
+               if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
                        if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
-                               CIFS_SB(sb)->rsize = 127 * 1024;
+                               cifs_sb->rsize = 127 * 1024;
                                cFYI(DBG2, "larger reads not supported by srv");
                        }
                }
@@ -2662,6 +2663,9 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 {
        INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
 
+       spin_lock_init(&cifs_sb->tlink_tree_lock);
+       cifs_sb->tlink_tree = RB_ROOT;
+
        if (pvolume_info->rsize > CIFSMaxBufSize) {
                cERROR(1, "rsize %d too large, using MaxBufSize",
                        pvolume_info->rsize);
@@ -2750,21 +2754,21 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 
 /*
  * When the server supports very large writes via POSIX extensions, we can
- * allow up to 2^24 - PAGE_CACHE_SIZE.
+ * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including
+ * the RFC1001 length.
  *
  * Note that this might make for "interesting" allocation problems during
- * writeback however (as we have to allocate an array of pointers for the
- * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * writeback however as we have to allocate an array of pointers for the
+ * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
  */
-#define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE)
+#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
 
 /*
- * When the server doesn't allow large posix writes, default to a wsize of
- * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size
- * described in RFC1001. This allows space for the header without going over
- * that by default.
+ * When the server doesn't allow large posix writes, only allow a wsize of
+ * 128k minus the size of the WRITE_AND_X header. That allows for a write up
+ * to the maximum size described by RFC1002.
  */
-#define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE)
+#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4)
 
 /*
  * The default wsize is 1M. find_get_pages seems to return a maximum of 256
@@ -2783,11 +2787,18 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 
        /* can server support 24-bit write sizes? (via UNIX extensions) */
        if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
-               wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE);
+               wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE);
 
-       /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */
-       if (!(server->capabilities & CAP_LARGE_WRITE_X))
-               wsize = min_t(unsigned int, wsize, USHRT_MAX);
+       /*
+        * No CAP_LARGE_WRITE_X, or signing enabled without CAP_UNIX set?
+        * Limit it to the max buffer offered by the server, minus the size of
+        * the WRITEX header, not including the 4 byte RFC1001 length.
+        */
+       if (!(server->capabilities & CAP_LARGE_WRITE_X) ||
+           (!(server->capabilities & CAP_UNIX) &&
+            (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED))))
+               wsize = min_t(unsigned int, wsize,
+                               server->maxBuf - sizeof(WRITE_REQ) + 4);
 
        /* hard limit of CIFS_MAX_WSIZE */
        wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
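
For illustration only, here is a small userspace sketch of the clamping order above; the WRITE_AND_X header size is a made-up 63 bytes standing in for sizeof(WRITE_REQ), and the helper names are hypothetical rather than the kernel's:

#include <stdio.h>

/* Hypothetical header size; the kernel uses sizeof(WRITE_REQ) from cifspdu.h. */
#define HYP_WRITEX_HDR   63u
/* Mirrors CIFS_MAX_WSIZE and CIFS_MAX_RFC1002_WSIZE from the hunk above. */
#define MAX_WSIZE        ((1u << 24) - 1 - HYP_WRITEX_HDR + 4)
#define RFC1002_WSIZE    (128u * 1024 - HYP_WRITEX_HDR + 4)

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static unsigned int negotiate_wsize(unsigned int requested,
                                    int large_posix_writes,
                                    int large_write_x,
                                    unsigned int server_max_buf)
{
        unsigned int wsize = requested;

        /* no large POSIX writes: cap at the RFC1002-derived limit */
        if (!large_posix_writes)
                wsize = min_u(wsize, RFC1002_WSIZE);
        /* no CAP_LARGE_WRITE_X: cap at the server's advertised buffer */
        if (!large_write_x)
                wsize = min_u(wsize, server_max_buf - HYP_WRITEX_HDR + 4);
        /* hard limit */
        return min_u(wsize, MAX_WSIZE);
}

int main(void)
{
        printf("negotiated wsize: %u\n",
               negotiate_wsize(1u << 20, 0, 1, 16644));
        return 0;
}
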
@@ -2937,7 +2948,11 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
 
        if (volume_info->nullauth) {
                cFYI(1, "null user");
-               volume_info->username = "";
+               volume_info->username = kzalloc(1, GFP_KERNEL);
+               if (volume_info->username == NULL) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
        } else if (volume_info->username) {
                /* BB fixme parse for domain name here */
                cFYI(1, "Username: %s", volume_info->username);
@@ -2971,8 +2986,7 @@ out:
 }
 
 int
-cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
-          struct smb_vol *volume_info, const char *devname)
+cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
 {
        int rc = 0;
        int xid;
@@ -2983,6 +2997,13 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
        struct tcon_link *tlink;
 #ifdef CONFIG_CIFS_DFS_UPCALL
        int referral_walks_count = 0;
+
+       rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
+       if (rc)
+               return rc;
+
+       cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
+
 try_mount_again:
        /* cleanup activities if we're chasing a referral */
        if (referral_walks_count) {
@@ -3007,6 +3028,7 @@ try_mount_again:
        srvTcp = cifs_get_tcp_session(volume_info);
        if (IS_ERR(srvTcp)) {
                rc = PTR_ERR(srvTcp);
+               bdi_destroy(&cifs_sb->bdi);
                goto out;
        }
 
@@ -3018,14 +3040,6 @@ try_mount_again:
                goto mount_fail_check;
        }
 
-       if (pSesInfo->capabilities & CAP_LARGE_FILES)
-               sb->s_maxbytes = MAX_LFS_FILESIZE;
-       else
-               sb->s_maxbytes = MAX_NON_LFS;
-
-       /* BB FIXME fix time_gran to be larger for LANMAN sessions */
-       sb->s_time_gran = 100;
-
        /* search for existing tcon to this server share */
        tcon = cifs_get_tcon(pSesInfo, volume_info);
        if (IS_ERR(tcon)) {
@@ -3038,7 +3052,7 @@ try_mount_again:
        if (tcon->ses->capabilities & CAP_UNIX) {
                /* reset of caps checks mount to see if unix extensions
                   disabled for just this mount */
-               reset_cifs_unix_caps(xid, tcon, sb, volume_info);
+               reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info);
                if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
                    (le64_to_cpu(tcon->fsUnixInfo.Capability) &
                     CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
@@ -3161,6 +3175,7 @@ mount_fail_check:
                        cifs_put_smb_ses(pSesInfo);
                else
                        cifs_put_tcp_session(srvTcp);
+               bdi_destroy(&cifs_sb->bdi);
                goto out;
        }
 
@@ -3335,8 +3350,8 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
        return rc;
 }
 
-int
-cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
+void
+cifs_umount(struct cifs_sb_info *cifs_sb)
 {
        struct rb_root *root = &cifs_sb->tlink_tree;
        struct rb_node *node;
@@ -3357,7 +3372,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
        }
        spin_unlock(&cifs_sb->tlink_tree_lock);
 
-       return 0;
+       bdi_destroy(&cifs_sb->bdi);
+       kfree(cifs_sb->mountdata);
+       unload_nls(cifs_sb->local_nls);
+       kfree(cifs_sb);
 }
 
 int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
index 1525d5e..1c5b770 100644 (file)
@@ -90,12 +90,10 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
        sg_init_one(&sgout, out, 8);
 
        rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8);
-       if (rc) {
+       if (rc)
                cERROR(1, "could not encrypt crypt key rc: %d\n", rc);
-               crypto_free_blkcipher(tfm_des);
-               goto smbhash_err;
-       }
 
+       crypto_free_blkcipher(tfm_des);
 smbhash_err:
        return rc;
 }
index 2e29abb..095c36f 100644 (file)
@@ -125,7 +125,7 @@ struct ext4_ext_path {
  * positive retcode - signal for ext4_ext_walk_space(), see below
  * callback must return valid extent (passed or newly created)
  */
-typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
+typedef int (*ext_prepare_callback)(struct inode *, ext4_lblk_t,
                                        struct ext4_ext_cache *,
                                        struct ext4_extent *, void *);
 
@@ -133,8 +133,11 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
 #define EXT_BREAK      1
 #define EXT_REPEAT     2
 
-/* Maximum logical block in a file; ext4_extent's ee_block is __le32 */
-#define EXT_MAX_BLOCK  0xffffffff
+/*
+ * Maximum number of logical blocks in a file; ext4_extent's ee_block is
+ * __le32.
+ */
+#define EXT_MAX_BLOCKS 0xffffffff
 
 /*
  * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
index 5199bac..f815cc8 100644 (file)
@@ -1408,7 +1408,7 @@ got_index:
 
 /*
  * ext4_ext_next_allocated_block:
- * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
+ * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
  * NOTE: it considers block number from index entry as
  * allocated block. Thus, index entries have to be consistent
  * with leaves.
@@ -1422,7 +1422,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
        depth = path->p_depth;
 
        if (depth == 0 && path->p_ext == NULL)
-               return EXT_MAX_BLOCK;
+               return EXT_MAX_BLOCKS;
 
        while (depth >= 0) {
                if (depth == path->p_depth) {
@@ -1439,12 +1439,12 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
                depth--;
        }
 
-       return EXT_MAX_BLOCK;
+       return EXT_MAX_BLOCKS;
 }
 
 /*
  * ext4_ext_next_leaf_block:
- * returns first allocated block from next leaf or EXT_MAX_BLOCK
+ * returns first allocated block from next leaf or EXT_MAX_BLOCKS
  */
 static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
                                        struct ext4_ext_path *path)
@@ -1456,7 +1456,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
 
        /* zero-tree has no leaf blocks at all */
        if (depth == 0)
-               return EXT_MAX_BLOCK;
+               return EXT_MAX_BLOCKS;
 
        /* go to index block */
        depth--;
@@ -1469,7 +1469,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
                depth--;
        }
 
-       return EXT_MAX_BLOCK;
+       return EXT_MAX_BLOCKS;
 }
 
 /*
@@ -1677,13 +1677,13 @@ static unsigned int ext4_ext_check_overlap(struct inode *inode,
         */
        if (b2 < b1) {
                b2 = ext4_ext_next_allocated_block(path);
-               if (b2 == EXT_MAX_BLOCK)
+               if (b2 == EXT_MAX_BLOCKS)
                        goto out;
        }
 
        /* check for wrap through zero on extent logical start block*/
        if (b1 + len1 < b1) {
-               len1 = EXT_MAX_BLOCK - b1;
+               len1 = EXT_MAX_BLOCKS - b1;
                newext->ee_len = cpu_to_le16(len1);
                ret = 1;
        }
@@ -1767,7 +1767,7 @@ repeat:
        fex = EXT_LAST_EXTENT(eh);
        next = ext4_ext_next_leaf_block(inode, path);
        if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
-           && next != EXT_MAX_BLOCK) {
+           && next != EXT_MAX_BLOCKS) {
                ext_debug("next leaf block - %d\n", next);
                BUG_ON(npath != NULL);
                npath = ext4_ext_find_extent(inode, next, NULL);
@@ -1887,7 +1887,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
        BUG_ON(func == NULL);
        BUG_ON(inode == NULL);
 
-       while (block < last && block != EXT_MAX_BLOCK) {
+       while (block < last && block != EXT_MAX_BLOCKS) {
                num = last - block;
                /* find extent for this block */
                down_read(&EXT4_I(inode)->i_data_sem);
@@ -1958,7 +1958,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
                        err = -EIO;
                        break;
                }
-               err = func(inode, path, &cbex, ex, cbdata);
+               err = func(inode, next, &cbex, ex, cbdata);
                ext4_ext_drop_refs(path);
 
                if (err < 0)
@@ -2020,7 +2020,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
        if (ex == NULL) {
                /* there is no extent yet, so gap is [0;-] */
                lblock = 0;
-               len = EXT_MAX_BLOCK;
+               len = EXT_MAX_BLOCKS;
                ext_debug("cache gap(whole file):");
        } else if (block < le32_to_cpu(ex->ee_block)) {
                lblock = block;
@@ -2350,7 +2350,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                         * never happen because at least one of the end points
                         * needs to be on the edge of the extent.
                         */
-                       if (end == EXT_MAX_BLOCK) {
+                       if (end == EXT_MAX_BLOCKS - 1) {
                                ext_debug("  bad truncate %u:%u\n",
                                                start, end);
                                block = 0;
@@ -2398,7 +2398,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                         * If this is a truncate, this condition
                         * should never happen
                         */
-                       if (end == EXT_MAX_BLOCK) {
+                       if (end == EXT_MAX_BLOCKS - 1) {
                                ext_debug("  bad truncate %u:%u\n",
                                        start, end);
                                err = -EIO;
@@ -2478,7 +2478,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                 * we need to remove it from the leaf
                 */
                if (num == 0) {
-                       if (end != EXT_MAX_BLOCK) {
+                       if (end != EXT_MAX_BLOCKS - 1) {
                                /*
                                 * For hole punching, we need to scoot all the
                                 * extents up when an extent is removed so that
@@ -3699,7 +3699,7 @@ void ext4_ext_truncate(struct inode *inode)
 
        last_block = (inode->i_size + sb->s_blocksize - 1)
                        >> EXT4_BLOCK_SIZE_BITS(sb);
-       err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK);
+       err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
 
        /* In a multi-transaction truncate, we only make the final
         * transaction synchronous.
@@ -3914,14 +3914,13 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 /*
  * Callback function called for each extent to gather FIEMAP information.
  */
-static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
+static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
                       struct ext4_ext_cache *newex, struct ext4_extent *ex,
                       void *data)
 {
        __u64   logical;
        __u64   physical;
        __u64   length;
-       loff_t  size;
        __u32   flags = 0;
        int             ret = 0;
        struct fiemap_extent_info *fieinfo = data;
@@ -4103,8 +4102,7 @@ found_delayed_extent:
        if (ex && ext4_ext_is_uninitialized(ex))
                flags |= FIEMAP_EXTENT_UNWRITTEN;
 
-       size = i_size_read(inode);
-       if (logical + length >= size)
+       if (next == EXT_MAX_BLOCKS)
                flags |= FIEMAP_EXTENT_LAST;
 
        ret = fiemap_fill_next_extent(fieinfo, logical, physical,
@@ -4347,8 +4345,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
                start_blk = start >> inode->i_sb->s_blocksize_bits;
                last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
-               if (last_blk >= EXT_MAX_BLOCK)
-                       last_blk = EXT_MAX_BLOCK-1;
+               if (last_blk >= EXT_MAX_BLOCKS)
+                       last_blk = EXT_MAX_BLOCKS-1;
                len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
 
                /*
index a5763e3..e3126c0 100644 (file)
@@ -2634,7 +2634,7 @@ static int ext4_writepage(struct page *page,
        struct buffer_head *page_bufs = NULL;
        struct inode *inode = page->mapping->host;
 
-       trace_ext4_writepage(inode, page);
+       trace_ext4_writepage(page);
        size = i_size_read(inode);
        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
index 859f2ae..6ed859d 100644 (file)
@@ -3578,8 +3578,8 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
                free += next - bit;
 
                trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
-               trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa,
-                                              grp_blk_start + bit, next - bit);
+               trace_ext4_mb_release_inode_pa(pa, grp_blk_start + bit,
+                                              next - bit);
                mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
                bit = next + 1;
        }
@@ -3608,7 +3608,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
        ext4_group_t group;
        ext4_grpblk_t bit;
 
-       trace_ext4_mb_release_group_pa(sb, pa);
+       trace_ext4_mb_release_group_pa(pa);
        BUG_ON(pa->pa_deleted == 0);
        ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
        BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
@@ -4448,7 +4448,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
  * @inode:             inode
  * @block:             start physical block to free
  * @count:             number of blocks to count
- * @metadata:          Are these metadata blocks
+ * @flags:             flags used by ext4_free_blocks
  */
 void ext4_free_blocks(handle_t *handle, struct inode *inode,
                      struct buffer_head *bh, ext4_fsblk_t block,
index 2b8304b..f57455a 100644 (file)
@@ -1002,12 +1002,12 @@ mext_check_arguments(struct inode *orig_inode,
                return -EINVAL;
        }
 
-       if ((orig_start > EXT_MAX_BLOCK) ||
-           (donor_start > EXT_MAX_BLOCK) ||
-           (*len > EXT_MAX_BLOCK) ||
-           (orig_start + *len > EXT_MAX_BLOCK))  {
+       if ((orig_start >= EXT_MAX_BLOCKS) ||
+           (donor_start >= EXT_MAX_BLOCKS) ||
+           (*len > EXT_MAX_BLOCKS) ||
+           (orig_start + *len >= EXT_MAX_BLOCKS))  {
                ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
-                       "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCK,
+                       "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }
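
As a quick illustration of the tightened bounds check above (logical blocks run from 0 to EXT_MAX_BLOCKS - 1, so a start equal to EXT_MAX_BLOCKS must now be rejected), a standalone sketch with hypothetical helper names:

#include <stdio.h>

#define EXT_MAX_BLOCKS 0xffffffffULL    /* 2^32 - 1, as in the header change */

/* Mirrors the corrected check: valid block numbers are 0 .. EXT_MAX_BLOCKS - 1. */
static int range_ok(unsigned long long start, unsigned long long len)
{
        return !(start >= EXT_MAX_BLOCKS || len > EXT_MAX_BLOCKS ||
                 start + len >= EXT_MAX_BLOCKS);
}

int main(void)
{
        printf("start 0, len 16: %s\n", range_ok(0, 16) ? "ok" : "rejected");
        printf("start 2^32-1, len 1: %s\n",
               range_ok(EXT_MAX_BLOCKS, 1) ? "ok" : "rejected");
        return 0;
}
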
index cc5c157..9ea71aa 100644 (file)
@@ -2243,6 +2243,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
  * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
  * so that won't be a limiting factor.
  *
+ * However, there is another limiting factor: we store extents as a starting
+ * block plus a length, so the length of the extent covering the maximum
+ * file size must also fit into the on-disk format containers. Since a
+ * length is always one unit larger than the highest offset it reaches
+ * (block 0 counts too), we have to lower s_maxbytes by one fs block.
+ *
  * Note, this does *not* consider any metadata overhead for vfs i_blocks.
  */
 static loff_t ext4_max_size(int blkbits, int has_huge_files)
@@ -2264,10 +2270,13 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
                upper_limit <<= blkbits;
        }
 
-       /* 32-bit extent-start container, ee_block */
-       res = 1LL << 32;
+       /*
+        * 32-bit extent-start container, ee_block. We lower the maxbytes
+        * by one fs block so that ee_len can cover the extent for the
+        * maximum file size.
+        */
+       res = (1LL << 32) - 1;
        res <<= blkbits;
-       res -= 1;
 
        /* Sanity check against vm- & vfs- imposed limits */
        if (res > upper_limit)
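
To make the arithmetic above concrete, a standalone sketch (not kernel code) of what the corrected bound evaluates to with 4 KiB blocks, assuming the VM/VFS upper limit does not kick in:

#include <stdio.h>

int main(void)
{
        int blkbits = 12;                             /* 4096-byte blocks */
        long long upper_limit = 0x7fffffffffffffffLL; /* assume huge files */
        long long res = (1LL << 32) - 1;              /* EXT_MAX_BLOCKS */

        res <<= blkbits;
        if (res > upper_limit)
                res = upper_limit;
        /* prints 17592186040320, i.e. one 4 KiB block short of 16 TiB */
        printf("extent-mapped s_maxbytes: %lld bytes\n", res);
        return 0;
}
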
index 0f7e88a..43566d1 100644 (file)
@@ -423,7 +423,14 @@ EXPORT_SYMBOL(remove_inode_hash);
 void end_writeback(struct inode *inode)
 {
        might_sleep();
+       /*
+        * We have to cycle tree_lock here because reclaim can still be in the
+        * process of removing the last page (in __delete_from_page_cache()),
+        * and we must not free the mapping under it.
+        */
+       spin_lock_irq(&inode->i_data.tree_lock);
        BUG_ON(inode->i_data.nrpages);
+       spin_unlock_irq(&inode->i_data.tree_lock);
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
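
The comment added above relies on a lock-cycling idiom: briefly taking and releasing the lock that a concurrent remover holds guarantees the remover has finished before the check runs. A hedged userspace sketch of that idiom, with hypothetical types standing in for the kernel's address_space:

#include <assert.h>
#include <pthread.h>

struct demo_mapping {
        pthread_mutex_t lock;   /* stands in for i_data.tree_lock */
        int nrpages;
};

/* A concurrent remover drops the last page while holding the lock. */
static void demo_remove_last_page(struct demo_mapping *m)
{
        pthread_mutex_lock(&m->lock);
        m->nrpages--;
        pthread_mutex_unlock(&m->lock);
}

/* Cycling the lock waits out any in-flight remover, so the check
 * below sees the mapping's final state before it is freed. */
static void demo_end_writeback(struct demo_mapping *m)
{
        pthread_mutex_lock(&m->lock);
        assert(m->nrpages == 0);
        pthread_mutex_unlock(&m->lock);
}

int main(void)
{
        struct demo_mapping m = { PTHREAD_MUTEX_INITIALIZER, 1 };

        demo_remove_last_page(&m);
        demo_end_writeback(&m);
        return 0;
}
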
index 6a79fd0..2c62c5a 100644 (file)
@@ -97,10 +97,14 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
 
        if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
            !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
+               /*
+                * Get our reference so that bh cannot be freed before
+                * we unlock it
+                */
+               get_bh(bh);
                JBUFFER_TRACE(jh, "remove from checkpoint list");
                ret = __jbd2_journal_remove_checkpoint(jh) + 1;
                jbd_unlock_bh_state(bh);
-               jbd2_journal_remove_journal_head(bh);
                BUFFER_TRACE(bh, "release");
                __brelse(bh);
        } else {
@@ -223,8 +227,8 @@ restart:
                        spin_lock(&journal->j_list_lock);
                        goto restart;
                }
+               get_bh(bh);
                if (buffer_locked(bh)) {
-                       atomic_inc(&bh->b_count);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        wait_on_buffer(bh);
@@ -243,7 +247,6 @@ restart:
                 */
                released = __jbd2_journal_remove_checkpoint(jh);
                jbd_unlock_bh_state(bh);
-               jbd2_journal_remove_journal_head(bh);
                __brelse(bh);
        }
 
@@ -284,7 +287,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
        int ret = 0;
 
        if (buffer_locked(bh)) {
-               atomic_inc(&bh->b_count);
+               get_bh(bh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
                wait_on_buffer(bh);
@@ -316,12 +319,12 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
                ret = 1;
                if (unlikely(buffer_write_io_error(bh)))
                        ret = -EIO;
+               get_bh(bh);
                J_ASSERT_JH(jh, !buffer_jbddirty(bh));
                BUFFER_TRACE(bh, "remove from checkpoint");
                __jbd2_journal_remove_checkpoint(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
-               jbd2_journal_remove_journal_head(bh);
                __brelse(bh);
        } else {
                /*
@@ -554,7 +557,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
 /*
  * journal_clean_one_cp_list
  *
- * Find all the written-back checkpoint buffers in the given list and release them.
+ * Find all the written-back checkpoint buffers in the given list and
+ * release them.
  *
  * Called with the journal locked.
  * Called with j_list_lock held.
@@ -663,8 +667,8 @@ out:
  * checkpoint lists.
  *
  * The function returns 1 if it frees the transaction, 0 otherwise.
+ * The function can free jh and bh.
  *
- * This function is called with the journal locked.
  * This function is called with j_list_lock held.
  * This function is called with jbd_lock_bh_state(jh2bh(jh))
  */
@@ -684,13 +688,14 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
        }
        journal = transaction->t_journal;
 
+       JBUFFER_TRACE(jh, "removing from transaction");
        __buffer_unlink(jh);
        jh->b_cp_transaction = NULL;
+       jbd2_journal_put_journal_head(jh);
 
        if (transaction->t_checkpoint_list != NULL ||
            transaction->t_checkpoint_io_list != NULL)
                goto out;
-       JBUFFER_TRACE(jh, "transaction has no more buffers");
 
        /*
         * There is one special case to worry about: if we have just pulled the
@@ -701,10 +706,8 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
         * The locking here around t_state is a bit sleazy.
         * See the comment at the end of jbd2_journal_commit_transaction().
         */
-       if (transaction->t_state != T_FINISHED) {
-               JBUFFER_TRACE(jh, "belongs to running/committing transaction");
+       if (transaction->t_state != T_FINISHED)
                goto out;
-       }
 
        /* OK, that was the last buffer for the transaction: we can now
           safely remove this transaction from the log */
@@ -723,7 +726,6 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
        wake_up(&journal->j_wait_logspace);
        ret = 1;
 out:
-       JBUFFER_TRACE(jh, "exit");
        return ret;
 }
 
@@ -742,6 +744,8 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
        J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
        J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
 
+       /* Get reference for checkpointing transaction */
+       jbd2_journal_grab_journal_head(jh2bh(jh));
        jh->b_cp_transaction = transaction;
 
        if (!transaction->t_checkpoint_list) {
index 7f21cf3..eef6979 100644 (file)
@@ -848,10 +848,16 @@ restart_loop:
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;
+               int try_to_free = 0;
 
                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
+               /*
+                * Get a reference so that bh cannot be freed before we are
+                * done with it.
+                */
+               get_bh(bh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
 
@@ -914,28 +920,27 @@ restart_loop:
                        __jbd2_journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
-                       JBUFFER_TRACE(jh, "refile for checkpoint writeback");
-                       __jbd2_journal_refile_buffer(jh);
-                       jbd_unlock_bh_state(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
-                       /* The buffer on BJ_Forget list and not jbddirty means
+                       /*
+                        * The buffer on BJ_Forget list and not jbddirty means
                         * it has been freed by this transaction and hence it
                         * could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on BJ_Forget
-                        * list. */
-                       JBUFFER_TRACE(jh, "refile or unfile freed buffer");
-                       __jbd2_journal_refile_buffer(jh);
-                       if (!jh->b_transaction) {
-                               jbd_unlock_bh_state(bh);
-                                /* needs a brelse */
-                               jbd2_journal_remove_journal_head(bh);
-                               release_buffer_page(bh);
-                       } else
-                               jbd_unlock_bh_state(bh);
+                        * list.
+                        */
+                       if (!jh->b_next_transaction)
+                               try_to_free = 1;
                }
+               JBUFFER_TRACE(jh, "refile or unfile buffer");
+               __jbd2_journal_refile_buffer(jh);
+               jbd_unlock_bh_state(bh);
+               if (try_to_free)
+                       release_buffer_page(bh);        /* Drops bh reference */
+               else
+                       __brelse(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
index 9a78269..0dfa5b5 100644 (file)
@@ -2078,10 +2078,9 @@ static void journal_free_journal_head(struct journal_head *jh)
  * When a buffer has its BH_JBD bit set it is immune from being released by
  * core kernel code, mainly via ->b_count.
  *
- * A journal_head may be detached from its buffer_head when the journal_head's
- * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
- * Various places in JBD call jbd2_journal_remove_journal_head() to indicate that the
- * journal_head can be dropped if needed.
+ * A journal_head is detached from its buffer_head when the journal_head's
+ * b_jcount reaches zero. The running transaction (b_transaction) and the
+ * checkpoint transaction (b_cp_transaction) each hold a reference in b_jcount.
  *
  * Various places in the kernel want to attach a journal_head to a buffer_head
  * _before_ attaching the journal_head to a transaction.  To protect the
@@ -2094,17 +2093,16 @@ static void journal_free_journal_head(struct journal_head *jh)
  *     (Attach a journal_head if needed.  Increments b_jcount)
  *     struct journal_head *jh = jbd2_journal_add_journal_head(bh);
  *     ...
+ *      (Get another reference for transaction)
+ *     jbd2_journal_grab_journal_head(bh);
  *     jh->b_transaction = xxx;
+ *     (Put original reference)
  *     jbd2_journal_put_journal_head(jh);
- *
- * Now, the journal_head's b_jcount is zero, but it is safe from being released
- * because it has a non-zero b_transaction.
  */
 
 /*
  * Give a buffer_head a journal_head.
  *
- * Doesn't need the journal lock.
  * May sleep.
  */
 struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
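
As a rough model of the counting rule described above, where each user of a journal_head owns one b_jcount reference and teardown happens only when the count reaches zero, here is a hypothetical simplified sketch; it is not the jbd2 API:

#include <stdio.h>
#include <stdlib.h>

struct demo_jh {
        int jcount;             /* plays the role of b_jcount */
};

static struct demo_jh *demo_add_journal_head(void)
{
        struct demo_jh *jh = calloc(1, sizeof(*jh));

        if (!jh)
                return NULL;
        jh->jcount = 1;         /* caller's temporary reference */
        return jh;
}

static void demo_grab(struct demo_jh *jh)
{
        jh->jcount++;           /* e.g. taken when filing on a transaction */
}

static void demo_put(struct demo_jh *jh)
{
        if (--jh->jcount == 0) {
                printf("freeing journal_head\n");
                free(jh);
        }
}

int main(void)
{
        struct demo_jh *jh = demo_add_journal_head();

        if (!jh)
                return 1;
        demo_grab(jh);          /* transaction takes its own reference */
        demo_put(jh);           /* caller drops the temporary reference */
        demo_put(jh);           /* transaction done: last reference, freed */
        return 0;
}
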
@@ -2168,61 +2166,29 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
        struct journal_head *jh = bh2jh(bh);
 
        J_ASSERT_JH(jh, jh->b_jcount >= 0);
-
-       get_bh(bh);
-       if (jh->b_jcount == 0) {
-               if (jh->b_transaction == NULL &&
-                               jh->b_next_transaction == NULL &&
-                               jh->b_cp_transaction == NULL) {
-                       J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
-                       J_ASSERT_BH(bh, buffer_jbd(bh));
-                       J_ASSERT_BH(bh, jh2bh(jh) == bh);
-                       BUFFER_TRACE(bh, "remove journal_head");
-                       if (jh->b_frozen_data) {
-                               printk(KERN_WARNING "%s: freeing "
-                                               "b_frozen_data\n",
-                                               __func__);
-                               jbd2_free(jh->b_frozen_data, bh->b_size);
-                       }
-                       if (jh->b_committed_data) {
-                               printk(KERN_WARNING "%s: freeing "
-                                               "b_committed_data\n",
-                                               __func__);
-                               jbd2_free(jh->b_committed_data, bh->b_size);
-                       }
-                       bh->b_private = NULL;
-                       jh->b_bh = NULL;        /* debug, really */
-                       clear_buffer_jbd(bh);
-                       __brelse(bh);
-                       journal_free_journal_head(jh);
-               } else {
-                       BUFFER_TRACE(bh, "journal_head was locked");
-               }
+       J_ASSERT_JH(jh, jh->b_transaction == NULL);
+       J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+       J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
+       J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
+       J_ASSERT_BH(bh, buffer_jbd(bh));
+       J_ASSERT_BH(bh, jh2bh(jh) == bh);
+       BUFFER_TRACE(bh, "remove journal_head");
+       if (jh->b_frozen_data) {
+               printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
+               jbd2_free(jh->b_frozen_data, bh->b_size);
        }
+       if (jh->b_committed_data) {
+               printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
+               jbd2_free(jh->b_committed_data, bh->b_size);
+       }
+       bh->b_private = NULL;
+       jh->b_bh = NULL;        /* debug, really */
+       clear_buffer_jbd(bh);
+       journal_free_journal_head(jh);
 }
 
 /*
- * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction
- * and has a zero b_jcount then remove and release its journal_head.   If we did
- * see that the buffer is not used by any transaction we also "logically"
- * decrement ->b_count.
- *
- * We in fact take an additional increment on ->b_count as a convenience,
- * because the caller usually wants to do additional things with the bh
- * after calling here.
- * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some
- * time.  Once the caller has run __brelse(), the buffer is eligible for
- * reaping by try_to_free_buffers().
- */
-void jbd2_journal_remove_journal_head(struct buffer_head *bh)
-{
-       jbd_lock_bh_journal_head(bh);
-       __journal_remove_journal_head(bh);
-       jbd_unlock_bh_journal_head(bh);
-}
-
-/*
- * Drop a reference on the passed journal_head.  If it fell to zero then try to
+ * Drop a reference on the passed journal_head.  If it fell to zero then
  * release the journal_head from the buffer_head.
  */
 void jbd2_journal_put_journal_head(struct journal_head *jh)
@@ -2232,11 +2198,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
        jbd_lock_bh_journal_head(bh);
        J_ASSERT_JH(jh, jh->b_jcount > 0);
        --jh->b_jcount;
-       if (!jh->b_jcount && !jh->b_transaction) {
+       if (!jh->b_jcount) {
                __journal_remove_journal_head(bh);
+               jbd_unlock_bh_journal_head(bh);
                __brelse(bh);
-       }
-       jbd_unlock_bh_journal_head(bh);
+       } else
+               jbd_unlock_bh_journal_head(bh);
 }
 
 /*
index 3eec82d..2d71094 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 
 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
+static void __jbd2_journal_unfile_buffer(struct journal_head *jh);
 
 /*
  * jbd2_get_transaction: obtain a new transaction_t object.
@@ -764,7 +765,6 @@ repeat:
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
-               jh->b_transaction = transaction;
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
@@ -814,7 +814,6 @@ out:
  * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
  * @handle: transaction to add buffer modifications to
  * @bh:     bh to be used for metadata writes
- * @credits: variable that will receive credits for the buffer
  *
  * Returns an error code or 0 on success.
  *
@@ -896,8 +895,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
                 * committed and so it's safe to clear the dirty bit.
                 */
                clear_buffer_dirty(jh2bh(jh));
-               jh->b_transaction = transaction;
-
                /* first access by this transaction */
                jh->b_modified = 0;
 
@@ -932,7 +929,6 @@ out:
  *     non-rewindable consequences
  * @handle: transaction
  * @bh: buffer to undo
- * @credits: store the number of taken credits here (if not NULL)
  *
  * Sometimes there is a need to distinguish between metadata which has
  * been committed to disk and that which has not.  The ext3fs code uses
@@ -1232,8 +1228,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
                        __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                } else {
                        __jbd2_journal_unfile_buffer(jh);
-                       jbd2_journal_remove_journal_head(bh);
-                       __brelse(bh);
                        if (!buffer_jbd(bh)) {
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
@@ -1556,19 +1550,32 @@ void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
                mark_buffer_dirty(bh);  /* Expose it to the VM */
 }
 
-void __jbd2_journal_unfile_buffer(struct journal_head *jh)
+/*
+ * Remove buffer from all transactions.
+ *
+ * Called with the bh_state lock and j_list_lock held.
+ *
+ * jh and bh may already be freed when this function returns.
+ */
+static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
 {
        __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = NULL;
+       jbd2_journal_put_journal_head(jh);
 }
 
 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
 {
-       jbd_lock_bh_state(jh2bh(jh));
+       struct buffer_head *bh = jh2bh(jh);
+
+       /* Get reference so that buffer cannot be freed before we unlock it */
+       get_bh(bh);
+       jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_unfile_buffer(jh);
        spin_unlock(&journal->j_list_lock);
-       jbd_unlock_bh_state(jh2bh(jh));
+       jbd_unlock_bh_state(bh);
+       __brelse(bh);
 }
 
 /*
@@ -1595,8 +1602,6 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
                if (jh->b_jlist == BJ_None) {
                        JBUFFER_TRACE(jh, "remove from checkpoint list");
                        __jbd2_journal_remove_checkpoint(jh);
-                       jbd2_journal_remove_journal_head(bh);
-                       __brelse(bh);
                }
        }
        spin_unlock(&journal->j_list_lock);
@@ -1659,7 +1664,6 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
                /*
                 * We take our own ref against the journal_head here to avoid
                 * having to add tons of locking around each instance of
-                * jbd2_journal_remove_journal_head() and
                 * jbd2_journal_put_journal_head().
                 */
                jh = jbd2_journal_grab_journal_head(bh);
@@ -1697,10 +1701,9 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
        int may_free = 1;
        struct buffer_head *bh = jh2bh(jh);
 
-       __jbd2_journal_unfile_buffer(jh);
-
        if (jh->b_cp_transaction) {
                JBUFFER_TRACE(jh, "on running+cp transaction");
+               __jbd2_journal_temp_unlink_buffer(jh);
                /*
                 * We don't want to write the buffer anymore, clear the
                 * bit so that we don't confuse checks in
@@ -1711,8 +1714,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
                may_free = 0;
        } else {
                JBUFFER_TRACE(jh, "on running transaction");
-               jbd2_journal_remove_journal_head(bh);
-               __brelse(bh);
+               __jbd2_journal_unfile_buffer(jh);
        }
        return may_free;
 }
@@ -1990,6 +1992,8 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
 
        if (jh->b_transaction)
                __jbd2_journal_temp_unlink_buffer(jh);
+       else
+               jbd2_journal_grab_journal_head(bh);
        jh->b_transaction = transaction;
 
        switch (jlist) {
@@ -2041,9 +2045,10 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
  * already started to be used by a subsequent transaction, refile the
  * buffer on that transaction's metadata list.
  *
- * Called under journal->j_list_lock
- *
+ * Called under j_list_lock
  * Called under jbd_lock_bh_state(jh2bh(jh))
+ *
+ * jh and bh may already be freed when this function returns
  */
 void __jbd2_journal_refile_buffer(struct journal_head *jh)
 {
@@ -2067,6 +2072,11 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
 
        was_dirty = test_clear_buffer_jbddirty(bh);
        __jbd2_journal_temp_unlink_buffer(jh);
+       /*
+        * We set b_transaction here because b_next_transaction will inherit
+        * our jh reference and thus __jbd2_journal_file_buffer() must not
+        * take a new one.
+        */
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
        if (buffer_freed(bh))
@@ -2083,30 +2093,21 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
 }
 
 /*
- * For the unlocked version of this call, also make sure that any
- * hanging journal_head is cleaned up if necessary.
- *
- * __jbd2_journal_refile_buffer is usually called as part of a single locked
- * operation on a buffer_head, in which the caller is probably going to
- * be hooking the journal_head onto other lists.  In that case it is up
- * to the caller to remove the journal_head if necessary.  For the
- * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
- * doing anything else to the buffer so we need to do the cleanup
- * ourselves to avoid a jh leak.
- *
- * *** The journal_head may be freed by this call! ***
+ * __jbd2_journal_refile_buffer() with necessary locking added. We take our
+ * bh reference so that we can safely unlock bh.
+ *
+ * The jh and bh may be freed by this call.
  */
 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
 {
        struct buffer_head *bh = jh2bh(jh);
 
+       /* Get reference so that buffer cannot be freed before we unlock it */
+       get_bh(bh);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
-
        __jbd2_journal_refile_buffer(jh);
        jbd_unlock_bh_state(bh);
-       jbd2_journal_remove_journal_head(bh);
-
        spin_unlock(&journal->j_list_lock);
        __brelse(bh);
 }
index c5ce6c1..2f3f531 100644 (file)
@@ -66,9 +66,9 @@ static int jfs_open(struct inode *inode, struct file *file)
                struct jfs_inode_info *ji = JFS_IP(inode);
                spin_lock_irq(&ji->ag_lock);
                if (ji->active_ag == -1) {
-                       ji->active_ag = ji->agno;
-                       atomic_inc(
-                           &JFS_SBI(inode->i_sb)->bmap->db_active[ji->agno]);
+                       struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
+                       ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
+                       atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]);
                }
                spin_unlock_irq(&ji->ag_lock);
        }
index ed53a47..b78b2f9 100644 (file)
@@ -397,7 +397,7 @@ int diRead(struct inode *ip)
        release_metapage(mp);
 
        /* set the ag for the inode */
-       JFS_IP(ip)->agno = BLKTOAG(agstart, sbi);
+       JFS_IP(ip)->agstart = agstart;
        JFS_IP(ip)->active_ag = -1;
 
        return (rc);
@@ -901,7 +901,7 @@ int diFree(struct inode *ip)
 
        /* get the allocation group for this ino.
         */
-       agno = JFS_IP(ip)->agno;
+       agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb));
 
        /* Lock the AG specific inode map information
         */
@@ -1315,12 +1315,11 @@ int diFree(struct inode *ip)
 static inline void
 diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
 {
-       struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
        struct jfs_inode_info *jfs_ip = JFS_IP(ip);
 
        ip->i_ino = (iagno << L2INOSPERIAG) + ino;
        jfs_ip->ixpxd = iagp->inoext[extno];
-       jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
+       jfs_ip->agstart = le64_to_cpu(iagp->agstart);
        jfs_ip->active_ag = -1;
 }
 
@@ -1379,7 +1378,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
         */
 
        /* get the ag number of this iag */
-       agno = JFS_IP(pip)->agno;
+       agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
 
        if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
                /*
@@ -2921,10 +2920,9 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
                        continue;
                }
 
-               /* agstart that computes to the same ag is treated as same; */
                agstart = le64_to_cpu(iagp->agstart);
-               /* iagp->agstart = agstart & ~(mp->db_agsize - 1); */
                n = agstart >> mp->db_agl2size;
+               iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size);
 
                /* compute backed inodes */
                numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts))
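
The jfs hunks in this series stop caching a small ag number in the in-core inode and instead keep the raw agstart block, recomputing the allocation group with BLKTOAG() at each use; after jfs_extendfs() grows the filesystem and changes the AG geometry, the recomputed value stays correct while a cached agno would go stale. A standalone sketch of that recomputation, assuming BLKTOAG() is essentially a right shift by the log2 AG size (db_agl2size):

#include <stdio.h>
#include <stdint.h>

/* Model of BLKTOAG(): block address >> log2(blocks per allocation group).
 * The shift amount stands in for sbi->bmap->db_agl2size. */
static unsigned blktoag(uint64_t agstart, unsigned agl2size)
{
	return (unsigned)(agstart >> agl2size);
}

int main(void)
{
	uint64_t agstart = 0x30000;	/* IAG anchored at block 0x30000 */

	/* Before a resize: AGs of 2^16 blocks -> group 3. */
	printf("ag before resize: %u\n", blktoag(agstart, 16));
	/* After jfs_extendfs() doubles the AG size to 2^17 blocks the same
	 * block now belongs to group 1; a cached agno would still say 3. */
	printf("ag after  resize: %u\n", blktoag(agstart, 17));
	return 0;
}
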
index 1439f11..584a4a1 100644 (file)
@@ -50,8 +50,9 @@ struct jfs_inode_info {
        short   btindex;        /* btpage entry index*/
        struct inode *ipimap;   /* inode map                    */
        unsigned long cflag;    /* commit flags         */
+       u64     agstart;        /* agstart of the containing IAG */
        u16     bxflag;         /* xflag of pseudo buffer?      */
-       unchar  agno;           /* ag number                    */
+       unchar  pad;
        signed char active_ag;  /* ag currently allocating from */
        lid_t   blid;           /* lid of pseudo buffer?        */
        lid_t   atlhead;        /* anonymous tlock list head    */
index 8ea5efb..8d0c1c7 100644 (file)
@@ -80,7 +80,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
        int log_formatted = 0;
        struct inode *iplist[1];
        struct jfs_superblock *j_sb, *j_sb2;
-       uint old_agsize;
+       s64 old_agsize;
        int agsizechanged = 0;
        struct buffer_head *bh, *bh2;
 
index adb45ec..e374050 100644 (file)
@@ -708,7 +708,13 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
 
        if (task->tk_status < 0) {
                dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
-               goto retry_rebind;
+               switch (task->tk_status) {
+               case -EACCES:
+               case -EIO:
+                       goto die;
+               default:
+                       goto retry_rebind;
+               }
        }
        if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
                rpc_delay(task, NLMCLNT_GRACE_WAIT);
index 144f2a3..6f4850d 100644 (file)
@@ -256,7 +256,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
 
        nfs_attr_check_mountpoint(sb, fattr);
 
-       if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0 && (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0)
+       if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
+           !nfs_attr_use_mounted_on_fileid(fattr))
                goto out_no_inode;
        if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
                goto out_no_inode;
@@ -1294,7 +1295,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                if (new_isize != cur_isize) {
                        /* Do we perhaps have any outstanding writes, or has
                         * the file grown beyond our last write? */
-                       if (nfsi->npages == 0 || new_isize > cur_isize) {
+                       if ((nfsi->npages == 0 && !test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) ||
+                            new_isize > cur_isize) {
                                i_size_write(inode, new_isize);
                                invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
                        }
index b9056cb..2a55347 100644 (file)
@@ -45,6 +45,17 @@ static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct
                fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT;
 }
 
+static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
+{
+       if (((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) == 0) ||
+           (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
+            ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
+               return 0;
+
+       fattr->fileid = fattr->mounted_on_fileid;
+       return 1;
+}
+
 struct nfs_clone_mount {
        const struct super_block *sb;
        const struct dentry *dentry;
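
The new helper above only substitutes mounted_on_fileid for a missing fileid when the attributes describe a mountpoint crossing or a v4 referral; for an ordinary inode it returns 0, so nfs_fhget() still refuses to build an inode without a real fileid. A tiny standalone model of that flag test (the flag values here are made up for illustration, not the kernel's NFS_ATTR_FATTR_* masks):

#include <stdio.h>

/* Illustrative flag values only; the real masks live in linux/nfs_fs.h. */
#define F_FILEID		0x1
#define F_MOUNTED_ON_FILEID	0x2
#define F_MOUNTPOINT		0x4
#define F_V4_REFERRAL		0x8

static int use_mounted_on_fileid(unsigned valid)
{
	if (!(valid & F_MOUNTED_ON_FILEID))
		return 0;		/* nothing to substitute */
	if (!(valid & (F_MOUNTPOINT | F_V4_REFERRAL)))
		return 0;		/* ordinary inode: demand the real fileid */
	return 1;
}

int main(void)
{
	printf("%d\n", use_mounted_on_fileid(F_MOUNTED_ON_FILEID | F_MOUNTPOINT)); /* 1 */
	printf("%d\n", use_mounted_on_fileid(F_MOUNTED_ON_FILEID));                /* 0 */
	printf("%d\n", use_mounted_on_fileid(F_FILEID));                           /* 0 */
	return 0;
}
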
index 4269088..0bafcc9 100644 (file)
@@ -30,6 +30,7 @@
  */
 
 #include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
 
 #include "internal.h"
 #include "nfs4filelayout.h"
@@ -552,13 +553,18 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                __func__, nfl_util, fl->num_fh, fl->first_stripe_index,
                fl->pattern_offset);
 
-       if (!fl->num_fh)
+       /* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
+        * Further checking is done in filelayout_check_layout */
+       if (fl->num_fh < 0 || fl->num_fh >
+           max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
                goto out_err;
 
-       fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
-                              gfp_flags);
-       if (!fl->fh_array)
-               goto out_err;
+       if (fl->num_fh > 0) {
+               fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
+                                      gfp_flags);
+               if (!fl->fh_array)
+                       goto out_err;
+       }
 
        for (i = 0; i < fl->num_fh; i++) {
                /* Do we want to use a mempool here? */
@@ -661,8 +667,9 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
        u64 p_stripe, r_stripe;
        u32 stripe_unit;
 
-       if (!pnfs_generic_pg_test(pgio, prev, req))
-               return 0;
+       if (!pnfs_generic_pg_test(pgio, prev, req) ||
+           !nfs_generic_pg_test(pgio, prev, req))
+               return false;
 
        if (!pgio->pg_lseg)
                return 1;
index d2c4b59..5879b23 100644 (file)
@@ -2265,12 +2265,14 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
        return nfs4_map_errors(status);
 }
 
+static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
 /*
  * Get locations and (maybe) other attributes of a referral.
  * Note that we'll actually follow the referral later when
  * we detect fsid mismatch in inode revalidation
  */
-static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle)
+static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
+                            struct nfs_fattr *fattr, struct nfs_fh *fhandle)
 {
        int status = -ENOMEM;
        struct page *page = NULL;
@@ -2288,15 +2290,16 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct
                goto out;
        /* Make sure server returned a different fsid for the referral */
        if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
-               dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name);
+               dprintk("%s: server did not return a different fsid for"
+                       " a referral at %s\n", __func__, name->name);
                status = -EIO;
                goto out;
        }
+       /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
+       nfs_fixup_referral_attributes(&locations->fattr);
 
+       /* replace the lookup nfs_fattr with the locations nfs_fattr */
        memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
-       fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL;
-       if (!fattr->mode)
-               fattr->mode = S_IFDIR;
        memset(fhandle, 0, sizeof(struct nfs_fh));
 out:
        if (page)
@@ -4667,11 +4670,15 @@ static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
        return len;
 }
 
+/*
+ * nfs_fhget will use either the mounted_on_fileid or the fileid
+ */
 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
 {
-       if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) &&
-               (fattr->valid & NFS_ATTR_FATTR_FSID) &&
-               (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
+       if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
+              (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
+             (fattr->valid & NFS_ATTR_FATTR_FSID) &&
+             (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
                return;
 
        fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
@@ -4686,7 +4693,6 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
        struct nfs_server *server = NFS_SERVER(dir);
        u32 bitmask[2] = {
                [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
-               [1] = FATTR4_WORD1_MOUNTED_ON_FILEID,
        };
        struct nfs4_fs_locations_arg args = {
                .dir_fh = NFS_FH(dir),
@@ -4705,11 +4711,18 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
        int status;
 
        dprintk("%s: start\n", __func__);
+
+       /* Ask for the fileid of the absent filesystem if mounted_on_fileid
+        * is not supported */
+       if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
+               bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
+       else
+               bitmask[0] |= FATTR4_WORD0_FILEID;
+
        nfs_fattr_init(&fs_locations->fattr);
        fs_locations->server = server;
        fs_locations->nlocations = 0;
        status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
-       nfs_fixup_referral_attributes(&fs_locations->fattr);
        dprintk("%s: returned status = %d\n", __func__, status);
        return status;
 }
@@ -5098,7 +5111,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
        if (mxresp_sz == 0)
                mxresp_sz = NFS_MAX_FILE_IO_SIZE;
        /* Fore channel attributes */
-       args->fc_attrs.headerpadsz = 0;
        args->fc_attrs.max_rqst_sz = mxrqst_sz;
        args->fc_attrs.max_resp_sz = mxresp_sz;
        args->fc_attrs.max_ops = NFS4_MAX_OPS;
@@ -5111,7 +5123,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
                args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
 
        /* Back channel attributes */
-       args->bc_attrs.headerpadsz = 0;
        args->bc_attrs.max_rqst_sz = PAGE_SIZE;
        args->bc_attrs.max_resp_sz = PAGE_SIZE;
        args->bc_attrs.max_resp_sz_cached = 0;
@@ -5131,8 +5142,6 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args
        struct nfs4_channel_attrs *sent = &args->fc_attrs;
        struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
 
-       if (rcvd->headerpadsz > sent->headerpadsz)
-               return -EINVAL;
        if (rcvd->max_resp_sz > sent->max_resp_sz)
                return -EINVAL;
        /*
@@ -5697,6 +5706,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
 {
        struct nfs4_layoutreturn *lrp = calldata;
        struct nfs_server *server;
+       struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
 
        dprintk("--> %s\n", __func__);
 
@@ -5708,16 +5718,15 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
                nfs_restart_rpc(task, lrp->clp);
                return;
        }
+       spin_lock(&lo->plh_inode->i_lock);
        if (task->tk_status == 0) {
-               struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
-
                if (lrp->res.lrs_present) {
-                       spin_lock(&lo->plh_inode->i_lock);
                        pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
-                       spin_unlock(&lo->plh_inode->i_lock);
                } else
                        BUG_ON(!list_empty(&lo->plh_segs));
        }
+       lo->plh_block_lgets--;
+       spin_unlock(&lo->plh_inode->i_lock);
        dprintk("<-- %s\n", __func__);
 }
 
index d869a5e..6870bc6 100644 (file)
@@ -255,7 +255,7 @@ static int nfs4_stat_to_errno(int);
 #define decode_fs_locations_maxsz \
                                (0)
 #define encode_secinfo_maxsz   (op_encode_hdr_maxsz + nfs4_name_maxsz)
-#define decode_secinfo_maxsz   (op_decode_hdr_maxsz + 4 + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)))
+#define decode_secinfo_maxsz   (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4))
 
 #if defined(CONFIG_NFS_V4_1)
 #define NFS4_MAX_MACHINE_NAME_LEN (64)
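
The maxsz constants in this file are reply-size budgets counted in 32-bit XDR words rather than bytes, which is why the per-flavor byte estimate above is divided by four and the flavor count itself only needs a single word. A hedged back-of-the-envelope check of the arithmetic (the numeric values below are assumptions for illustration, not copied from the headers):

#include <stdio.h>

int main(void)
{
	/* Assumed example values, purely for the arithmetic: */
	int op_decode_hdr_maxsz = 2;	/* words */
	int nfs_max_secflavors  = 12;	/* flavors the client will look at */
	int gss_oid_max_len     = 32;	/* bytes */

	int bytes_per_flavor = 16 + gss_oid_max_len;	/* on-the-wire bytes */
	int old_words = op_decode_hdr_maxsz + 4 + nfs_max_secflavors * bytes_per_flavor;
	int new_words = op_decode_hdr_maxsz + 1 + (nfs_max_secflavors * bytes_per_flavor) / 4;

	/* The old value mixed a byte count into a word budget, ~4x too large. */
	printf("old budget: %d words, new budget: %d words\n", old_words, new_words);
	return 0;
}
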
@@ -1725,7 +1725,7 @@ static void encode_create_session(struct xdr_stream *xdr,
        *p++ = cpu_to_be32(args->flags);                        /*flags */
 
        /* Fore Channel */
-       *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+       *p++ = cpu_to_be32(0);                          /* header padding size */
        *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
        *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
        *p++ = cpu_to_be32(max_resp_sz_cached);         /* Max resp sz cached */
@@ -1734,7 +1734,7 @@ static void encode_create_session(struct xdr_stream *xdr,
        *p++ = cpu_to_be32(0);                          /* rdmachannel_attrs */
 
        /* Back Channel */
-       *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+       *p++ = cpu_to_be32(0);                          /* header padding size */
        *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
        *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
        *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached);  /* Max resp sz cached */
@@ -3098,7 +3098,7 @@ out_overflow:
        return -EIO;
 }
 
-static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
+static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res)
 {
        __be32 *p;
 
@@ -3109,7 +3109,7 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
                if (unlikely(!p))
                        goto out_overflow;
                bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
-               return -be32_to_cpup(p);
+               *res = -be32_to_cpup(p);
        }
        return 0;
 out_overflow:
@@ -4070,6 +4070,7 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
        int status;
        umode_t fmode = 0;
        uint32_t type;
+       int32_t err;
 
        status = decode_attr_type(xdr, bitmap, &type);
        if (status < 0)
@@ -4095,13 +4096,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
                goto xdr_error;
        fattr->valid |= status;
 
-       status = decode_attr_error(xdr, bitmap);
-       if (status == -NFS4ERR_WRONGSEC) {
-               nfs_fixup_secinfo_attributes(fattr, fh);
-               status = 0;
-       }
+       err = 0;
+       status = decode_attr_error(xdr, bitmap, &err);
        if (status < 0)
                goto xdr_error;
+       if (err == -NFS4ERR_WRONGSEC)
+               nfs_fixup_secinfo_attributes(fattr, fh);
 
        status = decode_attr_filehandle(xdr, bitmap, fh);
        if (status < 0)
@@ -4997,12 +4997,14 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
                             struct nfs4_channel_attrs *attrs)
 {
        __be32 *p;
-       u32 nr_attrs;
+       u32 nr_attrs, val;
 
        p = xdr_inline_decode(xdr, 28);
        if (unlikely(!p))
                goto out_overflow;
-       attrs->headerpadsz = be32_to_cpup(p++);
+       val = be32_to_cpup(p++);        /* headerpadsz */
+       if (val)
+               return -EINVAL;         /* no support for header padding yet */
        attrs->max_rqst_sz = be32_to_cpup(p++);
        attrs->max_resp_sz = be32_to_cpup(p++);
        attrs->max_resp_sz_cached = be32_to_cpup(p++);
index 9cf208d..8ff2ea3 100644 (file)
@@ -108,7 +108,6 @@ _dev_list_add(const struct nfs_server *nfss,
                de = n;
        }
 
-       atomic_inc(&de->id_node.ref);
        return de;
 }
 
@@ -1001,6 +1000,9 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
        if (!pnfs_generic_pg_test(pgio, prev, req))
                return false;
 
+       if (pgio->pg_lseg == NULL)
+               return true;
+
        return pgio->pg_count + req->wb_bytes <=
                        OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
 }
index dc3956c..1d06f8e 100644 (file)
@@ -291,7 +291,7 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
        struct nfs_read_data *rdata;
 
        state->status = status;
-       dprintk("%s: Begin status=%ld eof=%d\n", __func__, status, eof);
+       dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof);
        rdata = state->rpcdata;
        rdata->task.tk_status = status;
        if (status >= 0) {
index 7913961..0098557 100644 (file)
@@ -204,7 +204,7 @@ nfs_wait_on_request(struct nfs_page *req)
                        TASK_UNINTERRUPTIBLE);
 }
 
-static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
+bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
 {
        /*
         * FIXME: ideally we should be able to coalesce all requests
@@ -218,6 +218,7 @@ static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_p
 
        return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
 }
+EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
 
 /**
  * nfs_pageio_init - initialise a page io descriptor
index 8c1309d..29c0ca7 100644 (file)
@@ -634,14 +634,16 @@ _pnfs_return_layout(struct inode *ino)
 
        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
-       if (!lo || !mark_matching_lsegs_invalid(lo, &tmp_list, NULL)) {
+       if (!lo) {
                spin_unlock(&ino->i_lock);
-               dprintk("%s: no layout segments to return\n", __func__);
-               goto out;
+               dprintk("%s: no layout to return\n", __func__);
+               return status;
        }
        stateid = nfsi->layout->plh_stateid;
        /* Reference matched in nfs4_layoutreturn_release */
        get_layout_hdr(lo);
+       mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+       lo->plh_block_lgets++;
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
 
@@ -650,6 +652,9 @@ _pnfs_return_layout(struct inode *ino)
        lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
+               set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
+               set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
+               put_layout_hdr(lo);
                goto out;
        }
 
@@ -887,7 +892,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
                        ret = get_lseg(lseg);
                        break;
                }
-               if (cmp_layout(range, &lseg->pls_range) > 0)
+               if (lseg->pls_range.offset > range->offset)
                        break;
        }
 
@@ -1059,23 +1064,36 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                gfp_flags = GFP_NOFS;
        }
 
-       if (pgio->pg_count == prev->wb_bytes) {
+       if (pgio->pg_lseg == NULL) {
+               if (pgio->pg_count != prev->wb_bytes)
+                       return true;
                /* This is the first coalesce call for a series of nfs_pages */
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   prev->wb_context,
-                                                  req_offset(req),
+                                                  req_offset(prev),
                                                   pgio->pg_count,
                                                   access_type,
                                                   gfp_flags);
-               return true;
+               if (pgio->pg_lseg == NULL)
+                       return true;
        }
 
-       if (pgio->pg_lseg &&
-           req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset,
-                                        pgio->pg_lseg->pls_range.length))
-               return false;
-
-       return true;
+       /*
+        * Test if a nfs_page is fully contained in the pnfs_layout_range.
+        * Note that this test makes several assumptions:
+        * - that the previous nfs_page in the struct nfs_pageio_descriptor
+        *   is known to lie within the range.
+        * - that the nfs_page being tested is known to be contiguous with the
+        *   previous nfs_page.
+        * - Layout ranges are page aligned, so we only have to test the
+        *   start offset of the request.
+        *
+        * Please also note that 'end_offset' is actually the offset of the
+        * first byte that lies outside the pnfs_layout_range. FIXME?
+        *
+        */
+       return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
+                                        pgio->pg_lseg->pls_range.length);
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
 
index 48d0a8e..96bf4e6 100644 (file)
@@ -186,6 +186,7 @@ int pnfs_ld_read_done(struct nfs_read_data *);
 /* pnfs_dev.c */
 struct nfs4_deviceid_node {
        struct hlist_node               node;
+       struct hlist_node               tmpnode;
        const struct pnfs_layoutdriver_type *ld;
        const struct nfs_client         *nfs_client;
        struct nfs4_deviceid            deviceid;
index c65e133..f0f8e1e 100644 (file)
@@ -174,6 +174,7 @@ nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
                        const struct nfs4_deviceid *id)
 {
        INIT_HLIST_NODE(&d->node);
+       INIT_HLIST_NODE(&d->tmpnode);
        d->ld = ld;
        d->nfs_client = nfs_client;
        d->deviceid = *id;
@@ -208,6 +209,7 @@ nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
 
        hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
        spin_unlock(&nfs4_deviceid_lock);
+       atomic_inc(&new->ref);
 
        return new;
 }
@@ -238,24 +240,29 @@ static void
 _deviceid_purge_client(const struct nfs_client *clp, long hash)
 {
        struct nfs4_deviceid_node *d;
-       struct hlist_node *n, *next;
+       struct hlist_node *n;
        HLIST_HEAD(tmp);
 
+       spin_lock(&nfs4_deviceid_lock);
        rcu_read_lock();
        hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
                if (d->nfs_client == clp && atomic_read(&d->ref)) {
                        hlist_del_init_rcu(&d->node);
-                       hlist_add_head(&d->node, &tmp);
+                       hlist_add_head(&d->tmpnode, &tmp);
                }
        rcu_read_unlock();
+       spin_unlock(&nfs4_deviceid_lock);
 
        if (hlist_empty(&tmp))
                return;
 
        synchronize_rcu();
-       hlist_for_each_entry_safe(d, n, next, &tmp, node)
+       while (!hlist_empty(&tmp)) {
+               d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
+               hlist_del(&d->tmpnode);
                if (atomic_dec_and_test(&d->ref))
                        d->ld->free_deviceid_node(d);
+       }
 }
 
 void
@@ -263,8 +270,8 @@ nfs4_deviceid_purge_client(const struct nfs_client *clp)
 {
        long h;
 
-       spin_lock(&nfs4_deviceid_lock);
+       if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
+               return;
        for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
                _deviceid_purge_client(clp, h);
-       spin_unlock(&nfs4_deviceid_lock);
 }
index d738a7e..2c6d952 100644 (file)
@@ -4,7 +4,6 @@
  * Released under GPL v2.
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
index 8a84210..fc5bc27 100644 (file)
@@ -2708,6 +2708,9 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
        struct task_io_accounting acct = task->ioac;
        unsigned long flags;
 
+       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+               return -EACCES;
+
        if (whole && lock_task_sighand(task, &flags)) {
                struct task_struct *t = task;
 
@@ -2839,7 +2842,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
 #endif
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-       INF("io",       S_IRUGO, proc_tgid_io_accounting),
+       INF("io",       S_IRUSR, proc_tgid_io_accounting),
 #endif
 #ifdef CONFIG_HARDWALL
        INF("hardwall",   S_IRUGO, proc_pid_hardwall),
@@ -3181,7 +3184,7 @@ static const struct pid_entry tid_base_stuff[] = {
        REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
 #endif
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-       INF("io",       S_IRUGO, proc_tid_io_accounting),
+       INF("io",       S_IRUSR, proc_tid_io_accounting),
 #endif
 #ifdef CONFIG_HARDWALL
        INF("hardwall",   S_IRUGO, proc_pid_hardwall),
index f0511e8..eed9942 100644 (file)
@@ -27,14 +27,18 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
 {
        struct inode *inode = file->f_mapping->host;
        struct mtd_info *mtd = inode->i_sb->s_mtd;
-       unsigned long isize, offset;
+       unsigned long isize, offset, maxpages, lpages;
 
        if (!mtd)
                goto cant_map_directly;
 
+       /* the mapping mustn't extend beyond the EOF */
+       lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        isize = i_size_read(inode);
        offset = pgoff << PAGE_SHIFT;
-       if (offset > isize || len > isize || offset > isize - len)
+
+       maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if ((pgoff >= maxpages) || (maxpages - pgoff < lpages))
                return (unsigned long) -EINVAL;
 
        /* we need to call down to the MTD layer to do the actual mapping */
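
Rounding both the mapping length and the file size up to whole pages and comparing page counts, as the hunk above does, avoids the pitfalls of the old byte-based "offset > isize - len" test around the partial final page. A standalone sketch of the same arithmetic with example numbers (PAGE_SIZE/PAGE_SHIFT fixed at 4 KiB here just for the demo):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Model of the new romfs check: may pages pgoff..pgoff+lpages-1 be mapped
 * from a file of 'isize' bytes? */
static int map_ok(unsigned long pgoff, unsigned long len, unsigned long isize)
{
	unsigned long lpages   = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (pgoff >= maxpages || maxpages - pgoff < lpages)
		return 0;	/* would extend past EOF */
	return 1;
}

int main(void)
{
	/* A 6000-byte file occupies 2 pages: a one-page map of page 1 is fine,
	 * page 2 is not, and an oversized length is rejected as well. */
	printf("%d\n", map_ok(1, PAGE_SIZE, 6000));             /* 1 */
	printf("%d\n", map_ok(2, PAGE_SIZE, 6000));             /* 0 */
	printf("%d\n", map_ok(0, 100UL * 1024 * 1024, 6000));   /* 0 */
	return 0;
}
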
index c863753..01d2072 100644 (file)
@@ -489,6 +489,13 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
        args.total = 0;
        args.whichfork = XFS_ATTR_FORK;
 
+       /*
+        * we have no control over the attribute names that userspace passes us
+        * to remove, so we have to allow the name lookup prior to attribute
+        * removal to fail.
+        */
+       args.op_flags = XFS_DA_OP_OKNOENT;
+
        /*
         * Attach the dquots to the inode.
         */
index cb9b6d1..3631783 100644 (file)
@@ -253,16 +253,21 @@ xfs_iget_cache_hit(
                        rcu_read_lock();
                        spin_lock(&ip->i_flags_lock);
 
-                       ip->i_flags &= ~XFS_INEW;
-                       ip->i_flags |= XFS_IRECLAIMABLE;
-                       __xfs_inode_set_reclaim_tag(pag, ip);
+                       ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+                       ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
                        trace_xfs_iget_reclaim_fail(ip);
                        goto out_error;
                }
 
                spin_lock(&pag->pag_ici_lock);
                spin_lock(&ip->i_flags_lock);
-               ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
+
+               /*
+                * Clear the per-lifetime state in the inode as we are now
+                * effectively a new inode and need to return to the initial
+                * state before reuse occurs.
+                */
+               ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
                ip->i_flags |= XFS_INEW;
                __xfs_inode_clear_reclaim_tag(mp, pag, ip);
                inode->i_state = I_NEW;
index 3ae6d58..964cfea 100644 (file)
@@ -383,6 +383,16 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
 #define XFS_ITRUNCATED         0x0020  /* truncated down so flush-on-close */
 #define XFS_IDIRTY_RELEASE     0x0040  /* dirty release already seen */
 
+/*
+ * Per-lifetime flags need to be reset when re-using a reclaimable inode during
+ * inode lookup. This prevents unintended behaviour on the new inode from
+ * occurring.
+ */
+#define XFS_IRECLAIM_RESET_FLAGS       \
+       (XFS_IRECLAIMABLE | XFS_IRECLAIM | \
+        XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | \
+        XFS_IFILESTREAM)
+
 /*
  * Flags for inode locking.
  * Bit ranges: 1<<1  - 1<<16-1 -- iolock/ilock modes (bitfield)
index b7a5fe7..6197207 100644 (file)
@@ -960,8 +960,11 @@ xfs_release(
                 * be exposed to that problem.
                 */
                truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
-               if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
-                       xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
+               if (truncated) {
+                       xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
+                       if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
+                               xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
+               }
        }
 
        if (ip->i_d.di_nlink == 0)
index 5479fdc..514ed45 100644 (file)
@@ -201,6 +201,9 @@ struct amba_pl011_data {
        bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
        void *dma_rx_param;
        void *dma_tx_param;
+       void (*init) (void);
+       void (*exit) (void);
+       void (*reset) (void);
 };
 #endif
 
index 2a7cea5..6395692 100644 (file)
@@ -167,7 +167,7 @@ enum rq_flag_bits {
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
        (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
-        REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+        REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
 #define REQ_CLONE_MASK         REQ_COMMON_MASK
 
 #define REQ_RAHEAD             (1 << __REQ_RAHEAD)
index b22fb0d..8c7c2de 100644 (file)
@@ -169,7 +169,8 @@ extern void blk_trace_shutdown(struct request_queue *);
 extern int do_blk_trace_setup(struct request_queue *q, char *name,
                              dev_t dev, struct block_device *bdev,
                              struct blk_user_trace_setup *buts);
-extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
+extern __attribute__((format(printf, 2, 3)))
+void __trace_note_message(struct blk_trace *, const char *fmt, ...);
 
 /**
  * blk_add_trace_msg - Add a (simple) message to the blktrace stream
index ddcb7db..846bb17 100644 (file)
@@ -467,6 +467,8 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
                                      char __user *optval, unsigned int optlen);
 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
                                   unsigned flags);
+asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+                                   unsigned vlen, unsigned int flags);
 asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
                                   unsigned int flags);
 asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len,
index 7c60d09..f696bcc 100644 (file)
@@ -44,7 +44,7 @@
 #define CN_VAL_DRBD                    0x1
 #define CN_KVP_IDX                     0x9     /* HyperV KVP */
 
-#define CN_NETLINK_USERS               9
+#define CN_NETLINK_USERS               10      /* Highest index + 1 */
 
 /*
  * Maximum connector's message size.
index c66111a..e4f62d8 100644 (file)
@@ -530,7 +530,6 @@ struct device_dma_parameters {
  * @dma_mem:   Internal for coherent mem override.
  * @archdata:  For arch-specific additions.
  * @of_node:   Associated device tree node.
- * @of_match:  Matching of_device_id from driver.
  * @devt:      For creating the sysfs "dev".
  * @devres_lock: Spinlock to protect the resource of the device.
  * @devres_head: The resources list of the device.
@@ -654,13 +653,13 @@ static inline int device_is_registered(struct device *dev)
 
 static inline void device_enable_async_suspend(struct device *dev)
 {
-       if (!dev->power.in_suspend)
+       if (!dev->power.is_prepared)
                dev->power.async_suspend = true;
 }
 
 static inline void device_disable_async_suspend(struct device *dev)
 {
-       if (!dev->power.in_suspend)
+       if (!dev->power.is_prepared)
                dev->power.async_suspend = false;
 }
 
index 6e73e2e..b5b9792 100644 (file)
@@ -639,6 +639,7 @@ struct address_space {
        struct prio_tree_root   i_mmap;         /* tree of private and shared mappings */
        struct list_head        i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
        struct mutex            i_mmap_mutex;   /* protect tree, count, list */
+       /* Protected by tree_lock together with the radix tree */
        unsigned long           nrpages;        /* number of total pages */
        pgoff_t                 writeback_index;/* writeback starts here */
        const struct address_space_operations *a_ops;   /* methods */
index 781d467..daa9952 100644 (file)
  * See mpc8610fb_set_par(), map_video_memory(), and unmap_video_memory()
  */
 #define MEM_ALLOC_THRESHOLD (1024*768*4+32)
-/* Minimum value that the pixel clock can be set to in pico seconds
- * This is determined by platform clock/3 where the minimum platform
- * clock is 533MHz. This gives 5629 pico seconds.
- */
-#define MIN_PIX_CLK 5629
-#define MAX_PIX_CLK 96096
 
 #include <linux/types.h>
 
index 51932e5..fd0dc30 100644 (file)
@@ -135,6 +135,7 @@ struct hrtimer_sleeper {
  * @cpu_base:          per cpu clock base
  * @index:             clock type index for per_cpu support when moving a
  *                     timer to a base on another cpu.
+ * @clockid:           clock id for per_cpu support
  * @active:            red black tree root node for the active timers
  * @resolution:                the resolution of the clock, in nanoseconds
  * @get_time:          function to retrieve the current time of the clock
index 4ecb7b1..d087c2e 100644 (file)
@@ -1024,7 +1024,6 @@ struct journal_s
 
 /* Filing buffers */
 extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __jbd2_journal_unfile_buffer(struct journal_head *);
 extern void __jbd2_journal_refile_buffer(struct journal_head *);
 extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
 extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
@@ -1165,7 +1164,6 @@ extern void          jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_in
  */
 struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
 struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
-void jbd2_journal_remove_journal_head(struct buffer_head *bh);
 void jbd2_journal_put_journal_head(struct journal_head *jh);
 
 /*
index c928dac..9f7c3eb 100644 (file)
@@ -647,6 +647,13 @@ typedef struct pglist_data {
 #endif
 #define nid_page_nr(nid, pagenr)       pgdat_page_nr(NODE_DATA(nid),(pagenr))
 
+#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
+
+#define node_end_pfn(nid) ({\
+       pg_data_t *__pgdat = NODE_DATA(nid);\
+       __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
+})
+
 #include <linux/memory_hotplug.h>
 
 extern struct mutex zonelists_mutex;
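
node_start_pfn()/node_end_pfn() give the half-open PFN span of a node, so callers can walk it with an ordinary loop. A userspace model of the two helpers (the field names mirror pglist_data; a real caller would additionally filter with pfn_valid()):

#include <stdio.h>

/* Model: a node's PFN range is [node_start_pfn, node_start_pfn + node_spanned_pages). */
struct pgdat_model {
	unsigned long node_start_pfn;
	unsigned long node_spanned_pages;
};

#define node_start_pfn_m(p)	((p)->node_start_pfn)
#define node_end_pfn_m(p)	((p)->node_start_pfn + (p)->node_spanned_pages)

int main(void)
{
	struct pgdat_model node = { .node_start_pfn = 0x80000,
				    .node_spanned_pages = 0x40000 };
	unsigned long pfn, count = 0;

	for (pfn = node_start_pfn_m(&node); pfn < node_end_pfn_m(&node); pfn++)
		count++;

	printf("pfns spanned: %lu\n", count);	/* 0x40000 = 262144 */
	return 0;
}
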
index 3a34e80..25311b3 100644 (file)
@@ -92,6 +92,9 @@ extern        int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
                                   struct nfs_page *);
 extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
 extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
+extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
+                               struct nfs_page *prev,
+                               struct nfs_page *req);
 extern  int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern int nfs_set_page_tag_locked(struct nfs_page *req);
index 5e8444a..00848d8 100644 (file)
@@ -158,7 +158,6 @@ struct nfs_seqid;
 
 /* nfs41 sessions channel attributes */
 struct nfs4_channel_attrs {
-       u32                     headerpadsz;
        u32                     max_rqst_sz;
        u32                     max_resp_sz;
        u32                     max_resp_sz_cached;
index a311008..f8910e1 100644 (file)
 #define PCI_DEVICE_ID_RICOH_RL5C476    0x0476
 #define PCI_DEVICE_ID_RICOH_RL5C478    0x0478
 #define PCI_DEVICE_ID_RICOH_R5C822     0x0822
+#define PCI_DEVICE_ID_RICOH_R5CE823    0xe823
 #define PCI_DEVICE_ID_RICOH_R5C832     0x0832
 #define PCI_DEVICE_ID_RICOH_R5C843     0x0843
 
index 3160648..411e4f4 100644 (file)
@@ -425,7 +425,8 @@ struct dev_pm_info {
        pm_message_t            power_state;
        unsigned int            can_wakeup:1;
        unsigned int            async_suspend:1;
-       unsigned int            in_suspend:1;   /* Owned by the PM core */
+       bool                    is_prepared:1;  /* Owned by the PM core */
+       bool                    is_suspended:1; /* Ditto */
        spinlock_t              lock;
 #ifdef CONFIG_PM_SLEEP
        struct list_head        entry;
index 2b7fec8..aa08fa8 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/swap.h>
 #include <linux/mempolicy.h>
+#include <linux/pagemap.h>
 #include <linux/percpu_counter.h>
 
 /* inode in-kernel data */
@@ -45,7 +46,27 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
        return container_of(inode, struct shmem_inode_info, vfs_inode);
 }
 
+/*
+ * Functions in mm/shmem.c called directly from elsewhere:
+ */
 extern int init_tmpfs(void);
 extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
+extern struct file *shmem_file_setup(const char *name,
+                                       loff_t size, unsigned long flags);
+extern int shmem_zero_setup(struct vm_area_struct *);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+                                       pgoff_t index, gfp_t gfp_mask);
+extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+extern int shmem_unuse(swp_entry_t entry, struct page *page);
+extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+                                       struct page **pagep, swp_entry_t *ent);
+
+static inline struct page *shmem_read_mapping_page(
+                               struct address_space *mapping, pgoff_t index)
+{
+       return shmem_read_mapping_page_gfp(mapping, index,
+                                       mapping_gfp_mask(mapping));
+}
 
 #endif
index f73c482..fe2d8e6 100644 (file)
@@ -84,7 +84,8 @@ struct rpc_task {
 #endif
        unsigned char           tk_priority : 2,/* Task priority */
                                tk_garb_retry : 2,
-                               tk_cred_retry : 2;
+                               tk_cred_retry : 2,
+                               tk_rebind_retry : 2;
 };
 #define tk_xprt                        tk_client->cl_xprt
 
index e705646..a273468 100644 (file)
@@ -300,16 +300,6 @@ static inline void scan_unevictable_unregister_node(struct node *node)
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
 
-#ifdef CONFIG_MMU
-/* linux/mm/shmem.c */
-extern int shmem_unuse(swp_entry_t entry, struct page *page);
-#endif /* CONFIG_MMU */
-
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
-                                       struct page **pagep, swp_entry_t *ent);
-#endif
-
 #ifdef CONFIG_SWAP
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct page *);
index f2046e4..c0b938c 100644 (file)
@@ -178,7 +178,6 @@ struct sock_common {
   *    @sk_dst_cache: destination cache
   *    @sk_dst_lock: destination cache lock
   *    @sk_policy: flow policy
-  *    @sk_rmem_alloc: receive queue bytes committed
   *    @sk_receive_queue: incoming packets
   *    @sk_wmem_alloc: transmit queue bytes committed
   *    @sk_write_queue: Packet sending queue
index 736eac7..af1b49e 100644 (file)
@@ -99,7 +99,14 @@ struct snd_sb_csp_info {
 /* get CSP information */
 #define SNDRV_SB_CSP_IOCTL_INFO                _IOR('H', 0x10, struct snd_sb_csp_info)
 /* load microcode to CSP */
-#define SNDRV_SB_CSP_IOCTL_LOAD_CODE   _IOW('H', 0x11, struct snd_sb_csp_microcode)
+/* NOTE: struct snd_sb_csp_microcode overflows the max size (13 bits)
+ * defined for some architectures like MIPS, and it leads to build errors.
+ * (x86 and co have 14-bit size, thus it's valid, though.)
+ * As a workaround for skipping the size-limit check, here we don't use the
+ * normal _IOW() macro but _IOC() with the manual argument.
+ */
+#define SNDRV_SB_CSP_IOCTL_LOAD_CODE   \
+       _IOC(_IOC_WRITE, 'H', 0x11, sizeof(struct snd_sb_csp_microcode))
 /* unload microcode from CSP */
 #define SNDRV_SB_CSP_IOCTL_UNLOAD_CODE _IO('H', 0x12)
 /* start CSP */
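
The workaround above exists because the ioctl size bit-field is only 13 bits wide on some architectures such as MIPS, so a payload of 8 KiB or more trips _IOW()'s compile-time size check there, while the 14-bit field on x86 still fits it. A standalone sketch of the limit (the dummy struct is a stand-in for snd_sb_csp_microcode; only the field widths are taken from the comment above):

#include <stdio.h>

/* Largest payload encodable in an _IOC()-style size field of 'sizebits' bits. */
static unsigned long max_ioctl_payload(unsigned sizebits)
{
	return (1UL << sizebits) - 1;
}

struct dummy_microcode {		/* stand-in for struct snd_sb_csp_microcode */
	unsigned char header[56];
	unsigned char data[0x3000];	/* 12 KiB of DSP code */
};

int main(void)
{
	printf("payload    : %zu bytes\n", sizeof(struct dummy_microcode));
	printf("x86  limit : %lu bytes\n", max_ioctl_payload(14));	/* 16383 */
	printf("MIPS limit : %lu bytes\n", max_ioctl_payload(13));	/*  8191 */
	return 0;
}
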
index f1de3e0..3a4bd3a 100644 (file)
@@ -248,8 +248,7 @@ typedef int (*hw_write_t)(void *,const char* ,int);
 extern struct snd_ac97_bus_ops soc_ac97_ops;
 
 enum snd_soc_control_type {
-       SND_SOC_CUSTOM = 1,
-       SND_SOC_I2C,
+       SND_SOC_I2C = 1,
        SND_SOC_SPI,
 };
 
index e09592d..5ce2b2f 100644 (file)
@@ -26,7 +26,7 @@ TRACE_EVENT(ext4_free_inode,
                __field(        umode_t, mode                   )
                __field(        uid_t,  uid                     )
                __field(        gid_t,  gid                     )
-               __field(        blkcnt_t, blocks                )
+               __field(        __u64, blocks                   )
        ),
 
        TP_fast_assign(
@@ -40,9 +40,8 @@ TRACE_EVENT(ext4_free_inode,
 
        TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 __entry->mode, __entry->uid, __entry->gid,
-                 (unsigned long long) __entry->blocks)
+                 (unsigned long) __entry->ino, __entry->mode,
+                 __entry->uid, __entry->gid, __entry->blocks)
 );
 
 TRACE_EVENT(ext4_request_inode,
@@ -178,7 +177,7 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
        TP_printk("dev %d,%d ino %lu new_size %lld",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (long long) __entry->new_size)
+                 __entry->new_size)
 );
 
 DECLARE_EVENT_CLASS(ext4__write_begin,
@@ -204,7 +203,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
                __entry->flags  = flags;
        ),
 
-       TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
+       TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->pos, __entry->len, __entry->flags)
@@ -248,7 +247,7 @@ DECLARE_EVENT_CLASS(ext4__write_end,
                __entry->copied = copied;
        ),
 
-       TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
+       TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->pos, __entry->len, __entry->copied)
@@ -286,29 +285,6 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
        TP_ARGS(inode, pos, len, copied)
 );
 
-TRACE_EVENT(ext4_writepage,
-       TP_PROTO(struct inode *inode, struct page *page),
-
-       TP_ARGS(inode, page),
-
-       TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
-               __field(        ino_t,  ino                     )
-               __field(        pgoff_t, index                  )
-
-       ),
-
-       TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
-               __entry->ino    = inode->i_ino;
-               __entry->index  = page->index;
-       ),
-
-       TP_printk("dev %d,%d ino %lu page_index %lu",
-                 MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino, __entry->index)
-);
-
 TRACE_EVENT(ext4_da_writepages,
        TP_PROTO(struct inode *inode, struct writeback_control *wbc),
 
@@ -341,7 +317,7 @@ TRACE_EVENT(ext4_da_writepages,
        ),
 
        TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
-                 "range_start %llu range_end %llu sync_mode %d"
+                 "range_start %lld range_end %lld sync_mode %d "
                  "for_kupdate %d range_cyclic %d writeback_index %lu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino, __entry->nr_to_write,
@@ -449,7 +425,14 @@ DECLARE_EVENT_CLASS(ext4__page_op,
        TP_printk("dev %d,%d ino %lu page_index %lu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->index)
+                 (unsigned long) __entry->index)
+);
+
+DEFINE_EVENT(ext4__page_op, ext4_writepage,
+
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page)
 );
 
 DEFINE_EVENT(ext4__page_op, ext4_readpage,
@@ -489,7 +472,7 @@ TRACE_EVENT(ext4_invalidatepage,
        TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->index, __entry->offset)
+                 (unsigned long) __entry->index, __entry->offset)
 );
 
 TRACE_EVENT(ext4_discard_blocks,
@@ -562,12 +545,10 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
 );
 
 TRACE_EVENT(ext4_mb_release_inode_pa,
-       TP_PROTO(struct super_block *sb,
-                struct inode *inode,
-                struct ext4_prealloc_space *pa,
+       TP_PROTO(struct ext4_prealloc_space *pa,
                 unsigned long long block, unsigned int count),
 
-       TP_ARGS(sb, inode, pa, block, count),
+       TP_ARGS(pa, block, count),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -578,8 +559,8 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
        ),
 
        TP_fast_assign(
-               __entry->dev            = sb->s_dev;
-               __entry->ino            = inode->i_ino;
+               __entry->dev            = pa->pa_inode->i_sb->s_dev;
+               __entry->ino            = pa->pa_inode->i_ino;
                __entry->block          = block;
                __entry->count          = count;
        ),
@@ -591,10 +572,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
 );
 
 TRACE_EVENT(ext4_mb_release_group_pa,
-       TP_PROTO(struct super_block *sb,
-                struct ext4_prealloc_space *pa),
+       TP_PROTO(struct ext4_prealloc_space *pa),
 
-       TP_ARGS(sb, pa),
+       TP_ARGS(pa),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -604,7 +584,7 @@ TRACE_EVENT(ext4_mb_release_group_pa,
        ),
 
        TP_fast_assign(
-               __entry->dev            = sb->s_dev;
+               __entry->dev            = pa->pa_inode->i_sb->s_dev;
                __entry->pa_pstart      = pa->pa_pstart;
                __entry->pa_len         = pa->pa_len;
        ),
@@ -666,10 +646,10 @@ TRACE_EVENT(ext4_request_blocks,
                __field(        ino_t,  ino                     )
                __field(        unsigned int, flags             )
                __field(        unsigned int, len               )
-               __field(        __u64,  logical                 )
+               __field(        __u32,  logical                 )
+               __field(        __u32,  lleft                   )
+               __field(        __u32,  lright                  )
                __field(        __u64,  goal                    )
-               __field(        __u64,  lleft                   )
-               __field(        __u64,  lright                  )
                __field(        __u64,  pleft                   )
                __field(        __u64,  pright                  )
        ),
@@ -687,17 +667,13 @@ TRACE_EVENT(ext4_request_blocks,
                __entry->pright = ar->pright;
        ),
 
-       TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu "
-                 "lleft %llu lright %llu pleft %llu pright %llu ",
+       TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
+                 "lleft %u lright %u pleft %llu pright %llu ",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 __entry->flags, __entry->len,
-                 (unsigned long long) __entry->logical,
-                 (unsigned long long) __entry->goal,
-                 (unsigned long long) __entry->lleft,
-                 (unsigned long long) __entry->lright,
-                 (unsigned long long) __entry->pleft,
-                 (unsigned long long) __entry->pright)
+                 (unsigned long) __entry->ino, __entry->flags,
+                 __entry->len, __entry->logical, __entry->goal,
+                 __entry->lleft, __entry->lright, __entry->pleft,
+                 __entry->pright)
 );
 
 TRACE_EVENT(ext4_allocate_blocks,
@@ -711,10 +687,10 @@ TRACE_EVENT(ext4_allocate_blocks,
                __field(        __u64,  block                   )
                __field(        unsigned int, flags             )
                __field(        unsigned int, len               )
-               __field(        __u64,  logical                 )
+               __field(        __u32,  logical                 )
+               __field(        __u32,  lleft                   )
+               __field(        __u32,  lright                  )
                __field(        __u64,  goal                    )
-               __field(        __u64,  lleft                   )
-               __field(        __u64,  lright                  )
                __field(        __u64,  pleft                   )
                __field(        __u64,  pright                  )
        ),
@@ -733,17 +709,13 @@ TRACE_EVENT(ext4_allocate_blocks,
                __entry->pright = ar->pright;
        ),
 
-       TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu "
-                 "goal %llu lleft %llu lright %llu pleft %llu pright %llu",
+       TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
+                 "goal %llu lleft %u lright %u pleft %llu pright %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 __entry->flags, __entry->len, __entry->block,
-                 (unsigned long long) __entry->logical,
-                 (unsigned long long) __entry->goal,
-                 (unsigned long long) __entry->lleft,
-                 (unsigned long long) __entry->lright,
-                 (unsigned long long) __entry->pleft,
-                 (unsigned long long) __entry->pright)
+                 (unsigned long) __entry->ino, __entry->flags,
+                 __entry->len, __entry->block, __entry->logical,
+                 __entry->goal,  __entry->lleft, __entry->lright,
+                 __entry->pleft, __entry->pright)
 );
 
 TRACE_EVENT(ext4_free_blocks,
@@ -755,10 +727,10 @@ TRACE_EVENT(ext4_free_blocks,
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(        ino_t,  ino                     )
-               __field(      umode_t, mode                     )
+               __field(        umode_t, mode                   )
                __field(        __u64,  block                   )
                __field(        unsigned long,  count           )
-               __field(         int,   flags                   )
+               __field(        int,    flags                   )
        ),
 
        TP_fast_assign(
@@ -798,7 +770,7 @@ TRACE_EVENT(ext4_sync_file_enter,
                __entry->parent         = dentry->d_parent->d_inode->i_ino;
        ),
 
-       TP_printk("dev %d,%d ino %ld parent %ld datasync %d ",
+       TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  (unsigned long) __entry->parent, __entry->datasync)
@@ -821,7 +793,7 @@ TRACE_EVENT(ext4_sync_file_exit,
                __entry->dev            = inode->i_sb->s_dev;
        ),
 
-       TP_printk("dev %d,%d ino %ld ret %d",
+       TP_printk("dev %d,%d ino %lu ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->ret)
@@ -1005,7 +977,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
                __entry->result_len     = len;
        ),
 
-       TP_printk("dev %d,%d inode %lu extent %u/%d/%u ",
+       TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->result_group, __entry->result_start,
@@ -1093,7 +1065,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
                  "allocated_meta_blocks %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->mode,  (unsigned long long) __entry->i_blocks,
+                 __entry->mode, __entry->i_blocks,
                  __entry->used_blocks, __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
 );
@@ -1127,7 +1099,7 @@ TRACE_EVENT(ext4_da_reserve_space,
                  "reserved_data_blocks %d reserved_meta_blocks %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->mode, (unsigned long long) __entry->i_blocks,
+                 __entry->mode, __entry->i_blocks,
                  __entry->md_needed, __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks)
 );
@@ -1164,7 +1136,7 @@ TRACE_EVENT(ext4_da_release_space,
                  "allocated_meta_blocks %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->mode, (unsigned long long) __entry->i_blocks,
+                 __entry->mode, __entry->i_blocks,
                  __entry->freed_blocks, __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
 );
@@ -1239,14 +1211,15 @@ TRACE_EVENT(ext4_direct_IO_enter,
                __entry->rw     = rw;
        ),
 
-       TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
+       TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned long long) __entry->pos, __entry->len, __entry->rw)
+                 __entry->pos, __entry->len, __entry->rw)
 );
 
 TRACE_EVENT(ext4_direct_IO_exit,
-       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw, int ret),
+       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
+                int rw, int ret),
 
        TP_ARGS(inode, offset, len, rw, ret),
 
@@ -1268,10 +1241,10 @@ TRACE_EVENT(ext4_direct_IO_exit,
                __entry->ret    = ret;
        ),
 
-       TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
+       TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned long long) __entry->pos, __entry->len,
+                 __entry->pos, __entry->len,
                  __entry->rw, __entry->ret)
 );
 
@@ -1296,15 +1269,15 @@ TRACE_EVENT(ext4_fallocate_enter,
                __entry->mode   = mode;
        ),
 
-       TP_printk("dev %d,%d ino %ld pos %llu len %llu mode %d",
+       TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 (unsigned long long) __entry->pos,
-                 (unsigned long long) __entry->len, __entry->mode)
+                 (unsigned long) __entry->ino, __entry->pos,
+                 __entry->len, __entry->mode)
 );
 
 TRACE_EVENT(ext4_fallocate_exit,
-       TP_PROTO(struct inode *inode, loff_t offset, unsigned int max_blocks, int ret),
+       TP_PROTO(struct inode *inode, loff_t offset,
+                unsigned int max_blocks, int ret),
 
        TP_ARGS(inode, offset, max_blocks, ret),
 
@@ -1312,7 +1285,7 @@ TRACE_EVENT(ext4_fallocate_exit,
                __field(        ino_t,  ino                     )
                __field(        dev_t,  dev                     )
                __field(        loff_t, pos                     )
-               __field(        unsigned,       blocks          )
+               __field(        unsigned int,   blocks          )
                __field(        int,    ret                     )
        ),
 
@@ -1324,10 +1297,10 @@ TRACE_EVENT(ext4_fallocate_exit,
                __entry->ret    = ret;
        ),
 
-       TP_printk("dev %d,%d ino %ld pos %llu blocks %d ret %d",
+       TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned long long) __entry->pos, __entry->blocks,
+                 __entry->pos, __entry->blocks,
                  __entry->ret)
 );
 
@@ -1350,7 +1323,7 @@ TRACE_EVENT(ext4_unlink_enter,
                __entry->dev            = dentry->d_inode->i_sb->s_dev;
        ),
 
-       TP_printk("dev %d,%d ino %ld size %lld parent %ld",
+       TP_printk("dev %d,%d ino %lu size %lld parent %lu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino, __entry->size,
                  (unsigned long) __entry->parent)
@@ -1373,7 +1346,7 @@ TRACE_EVENT(ext4_unlink_exit,
                __entry->ret            = ret;
        ),
 
-       TP_printk("dev %d,%d ino %ld ret %d",
+       TP_printk("dev %d,%d ino %lu ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->ret)
@@ -1387,7 +1360,7 @@ DECLARE_EVENT_CLASS(ext4__truncate,
        TP_STRUCT__entry(
                __field(        ino_t,          ino             )
                __field(        dev_t,          dev             )
-               __field(        blkcnt_t,       blocks          )
+               __field(        __u64,          blocks          )
        ),
 
        TP_fast_assign(
@@ -1396,9 +1369,9 @@ DECLARE_EVENT_CLASS(ext4__truncate,
                __entry->blocks = inode->i_blocks;
        ),
 
-       TP_printk("dev %d,%d ino %lu blocks %lu",
+       TP_printk("dev %d,%d ino %lu blocks %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
+                 (unsigned long) __entry->ino, __entry->blocks)
 );
 
 DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
@@ -1417,7 +1390,7 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
 
 DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
        TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
-                unsigned len, unsigned flags),
+                unsigned int len, unsigned int flags),
 
        TP_ARGS(inode, lblk, len, flags),
 
@@ -1425,8 +1398,8 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
                __field(        ino_t,          ino             )
                __field(        dev_t,          dev             )
                __field(        ext4_lblk_t,    lblk            )
-               __field(        unsigned,       len             )
-               __field(        unsigned,       flags           )
+               __field(        unsigned int,   len             )
+               __field(        unsigned int,   flags           )
        ),
 
        TP_fast_assign(
@@ -1440,7 +1413,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
        TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned) __entry->lblk, __entry->len, __entry->flags)
+                 __entry->lblk, __entry->len, __entry->flags)
 );
 
 DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
@@ -1459,7 +1432,7 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
 
 DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
        TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
-                ext4_fsblk_t pblk, unsigned len, int ret),
+                ext4_fsblk_t pblk, unsigned int len, int ret),
 
        TP_ARGS(inode, lblk, pblk, len, ret),
 
@@ -1468,7 +1441,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
                __field(        dev_t,          dev             )
                __field(        ext4_lblk_t,    lblk            )
                __field(        ext4_fsblk_t,   pblk            )
-               __field(        unsigned,       len             )
+               __field(        unsigned int,   len             )
                __field(        int,            ret             )
        ),
 
@@ -1484,7 +1457,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
        TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
+                 __entry->lblk, __entry->pblk,
                  __entry->len, __entry->ret)
 );
 
@@ -1524,7 +1497,7 @@ TRACE_EVENT(ext4_ext_load_extent,
        TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk)
+                 __entry->lblk, __entry->pblk)
 );
 
 TRACE_EVENT(ext4_load_inode,
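The format-string changes in the hunks above pair each TP_printk() specifier with the printed type: values cast to unsigned long use %lu rather than %ld, loff_t offsets print as signed 64-bit with %lld, and plain unsigned fields use %u, which also lets the redundant casts go away where the field type was widened. A minimal userspace sketch of why the pairing matters (values are made up, not from the trace code):

#include <stdio.h>

int main(void)
{
        /* 3000000000 fits in unsigned long but exceeds LONG_MAX on 32-bit,
         * so %ld would render it as a large negative number. */
        unsigned long ino = 3000000000UL;
        long long pos = -1;             /* loff_t is a signed 64-bit offset */
        unsigned int blocks = 16;

        printf("ino %lu pos %lld blocks %u\n", ino, pos, blocks);
        return 0;
}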
index 2568d22..aae2f40 100644 (file)
@@ -245,30 +245,32 @@ recalibrate:
 
 void __cpuinit calibrate_delay(void)
 {
+       unsigned long lpj;
        static bool printed;
 
        if (preset_lpj) {
-               loops_per_jiffy = preset_lpj;
+               lpj = preset_lpj;
                if (!printed)
                        pr_info("Calibrating delay loop (skipped) "
                                "preset value.. ");
        } else if ((!printed) && lpj_fine) {
-               loops_per_jiffy = lpj_fine;
+               lpj = lpj_fine;
                pr_info("Calibrating delay loop (skipped), "
                        "value calculated using timer frequency.. ");
-       } else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
+       } else if ((lpj = calibrate_delay_direct()) != 0) {
                if (!printed)
                        pr_info("Calibrating delay using timer "
                                "specific routine.. ");
        } else {
                if (!printed)
                        pr_info("Calibrating delay loop... ");
-               loops_per_jiffy = calibrate_delay_converge();
+               lpj = calibrate_delay_converge();
        }
        if (!printed)
                pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
-                       loops_per_jiffy/(500000/HZ),
-                       (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
+                       lpj/(500000/HZ),
+                       (lpj/(5000/HZ)) % 100, lpj);
 
+       loops_per_jiffy = lpj;
        printed = true;
 }
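The calibrate_delay() change works on a local lpj and stores it into loops_per_jiffy once, at the very end; presumably the point is that anything reading the global concurrently never sees a half-calibrated intermediate value. The same compute-locally, publish-once shape in a standalone C sketch (names hypothetical):

#include <stdio.h>

static unsigned long loops_per_jiffy;   /* globally visible result */

static unsigned long calibrate(void)
{
        unsigned long lpj = 1;

        /* All trial-and-error happens on the local variable... */
        for (int i = 0; i < 10; i++)
                lpj *= 2;
        return lpj;
}

int main(void)
{
        /* ...and the global is updated with a single final store. */
        loops_per_jiffy = calibrate();
        printf("lpj=%lu\n", loops_per_jiffy);
        return 0;
}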
index 7d02d33..42ddbc6 100644 (file)
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
                if (error)
                        pm_notifier_call_chain(PM_POST_RESTORE);
        }
-       if (error)
+       if (error) {
+               free_basic_memory_bitmaps();
                atomic_inc(&snapshot_device_available);
+       }
        data->frozen = 0;
        data->ready = 0;
        data->platform_support = 0;
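The snapshot_open() hunk extends the error path so that the memory bitmaps allocated earlier are freed together with restoring the device-available count: every step taken before the failure gets undone. A small userspace sketch of the same unwind-everything-on-error shape (the resources and names are stand-ins):

#include <stdio.h>
#include <stdlib.h>

static int open_session(const char *path)
{
        char *bitmap = malloc(4096);            /* step 1 */
        if (!bitmap)
                return -1;

        FILE *f = fopen(path, "r");             /* step 2 */
        if (!f) {
                free(bitmap);                   /* undo step 1 */
                return -1;
        }

        if (fgetc(f) == EOF) {                  /* stand-in for the call that may fail */
                fclose(f);                      /* undo step 2 */
                free(bitmap);                   /* undo step 1 */
                return -1;
        }

        fclose(f);
        free(bitmap);
        return 0;
}

int main(void)
{
        return open_session("/etc/hostname") ? 1 : 0;
}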
index 9ffea36..fc0f220 100644 (file)
@@ -285,16 +285,18 @@ ret:
 static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
 {
        struct listener_list *listeners;
-       struct listener *s, *tmp;
+       struct listener *s, *tmp, *s2;
        unsigned int cpu;
 
        if (!cpumask_subset(mask, cpu_possible_mask))
                return -EINVAL;
 
+       s = NULL;
        if (isadd == REGISTER) {
                for_each_cpu(cpu, mask) {
-                       s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
-                                        cpu_to_node(cpu));
+                       if (!s)
+                               s = kmalloc_node(sizeof(struct listener),
+                                                GFP_KERNEL, cpu_to_node(cpu));
                        if (!s)
                                goto cleanup;
                        s->pid = pid;
@@ -303,9 +305,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
 
                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
+                       list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
+                               if (s2->pid == pid)
+                                       goto next_cpu;
+                       }
                        list_add(&s->list, &listeners->list);
+                       s = NULL;
+next_cpu:
                        up_write(&listeners->sem);
                }
+               kfree(s);
                return 0;
        }
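The taskstats hunk allocates one listener at a time, checks each per-CPU list for an entry with the same pid before linking the new one, keeps the spare allocation for the next CPU when a duplicate is found, and frees whatever is left over at the end. A userspace sketch of that reuse-or-skip pattern on a single list (not a line-for-line copy of the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct listener {
        int pid;
        struct listener *next;
};

/* Insert pid unless it is already on the list; *spare is allocated on
 * demand and reused across calls so skipped duplicates do not leak. */
static int add_listener(struct listener **head, struct listener **spare, int pid)
{
        for (struct listener *l = *head; l; l = l->next)
                if (l->pid == pid)
                        return 0;               /* already registered, keep spare */

        if (!*spare) {
                *spare = malloc(sizeof(**spare));
                if (!*spare)
                        return -1;
        }
        (*spare)->pid = pid;
        (*spare)->next = *head;
        *head = *spare;
        *spare = NULL;                          /* consumed */
        return 0;
}

int main(void)
{
        struct listener *head = NULL, *spare = NULL;

        for (int cpu = 0; cpu < 3; cpu++)       /* same pid, several CPUs */
                add_listener(&head, &spare, 1234);
        free(spare);                            /* drop the unused preallocation */
        while (head) {
                struct listener *n = head->next;
                free(head);
                head = n;
        }
        puts("done");
        return 0;
}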
 
index 2d96624..59f369f 100644 (file)
@@ -42,15 +42,75 @@ static struct alarm_base {
        clockid_t               base_clockid;
 } alarm_bases[ALARM_NUMTYPE];
 
+/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
+static ktime_t freezer_delta;
+static DEFINE_SPINLOCK(freezer_delta_lock);
+
 #ifdef CONFIG_RTC_CLASS
 /* rtc timer and device for setting alarm wakeups at suspend */
 static struct rtc_timer                rtctimer;
 static struct rtc_device       *rtcdev;
-#endif
+static DEFINE_SPINLOCK(rtcdev_lock);
 
-/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
-static ktime_t freezer_delta;
-static DEFINE_SPINLOCK(freezer_delta_lock);
+/**
+ * has_wakealarm - check rtc device has wakealarm ability
+ * @dev: current device
+ * @name_ptr: name to be returned
+ *
+ * This helper function checks to see if the rtc device can wake
+ * from suspend.
+ */
+static int has_wakealarm(struct device *dev, void *name_ptr)
+{
+       struct rtc_device *candidate = to_rtc_device(dev);
+
+       if (!candidate->ops->set_alarm)
+               return 0;
+       if (!device_may_wakeup(candidate->dev.parent))
+               return 0;
+
+       *(const char **)name_ptr = dev_name(dev);
+       return 1;
+}
+
+/**
+ * alarmtimer_get_rtcdev - Return selected rtcdevice
+ *
+ * This function returns the rtc device to use for wakealarms.
+ * If one has not already been chosen, it checks to see if a
+ * functional rtc device is available.
+ */
+static struct rtc_device *alarmtimer_get_rtcdev(void)
+{
+       struct device *dev;
+       char *str;
+       unsigned long flags;
+       struct rtc_device *ret;
+
+       spin_lock_irqsave(&rtcdev_lock, flags);
+       if (!rtcdev) {
+               /* Find an rtc device and init the rtc_timer */
+               dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
+               /* If we have a device then str is valid. See has_wakealarm() */
+               if (dev) {
+                       rtcdev = rtc_class_open(str);
+                       /*
+                        * Drop the reference we got in class_find_device,
+                        * rtc_open takes its own.
+                        */
+                       put_device(dev);
+                       rtc_timer_init(&rtctimer, NULL, NULL);
+               }
+       }
+       ret = rtcdev;
+       spin_unlock_irqrestore(&rtcdev_lock, flags);
+
+       return ret;
+}
+#else
+#define alarmtimer_get_rtcdev() (0)
+#define rtcdev (0)
+#endif
 
 
 /**
@@ -166,6 +226,7 @@ static int alarmtimer_suspend(struct device *dev)
        struct rtc_time tm;
        ktime_t min, now;
        unsigned long flags;
+       struct rtc_device *rtc;
        int i;
 
        spin_lock_irqsave(&freezer_delta_lock, flags);
@@ -173,8 +234,9 @@ static int alarmtimer_suspend(struct device *dev)
        freezer_delta = ktime_set(0, 0);
        spin_unlock_irqrestore(&freezer_delta_lock, flags);
 
+       rtc = rtcdev;
        /* If we have no rtcdev, just return */
-       if (!rtcdev)
+       if (!rtc)
                return 0;
 
        /* Find the soonest timer to expire*/
@@ -199,12 +261,12 @@ static int alarmtimer_suspend(struct device *dev)
        WARN_ON(min.tv64 < NSEC_PER_SEC);
 
        /* Setup an rtc timer to fire that far in the future */
-       rtc_timer_cancel(rtcdev, &rtctimer);
-       rtc_read_time(rtcdev, &tm);
+       rtc_timer_cancel(rtc, &rtctimer);
+       rtc_read_time(rtc, &tm);
        now = rtc_tm_to_ktime(tm);
        now = ktime_add(now, min);
 
-       rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0));
+       rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
 
        return 0;
 }
@@ -322,6 +384,9 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
        clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
 
+       if (!alarmtimer_get_rtcdev())
+               return -ENOTSUPP;
+
        return hrtimer_get_res(baseid, tp);
 }
 
@@ -336,6 +401,9 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
 {
        struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
 
+       if (!alarmtimer_get_rtcdev())
+               return -ENOTSUPP;
+
        *tp = ktime_to_timespec(base->gettime());
        return 0;
 }
@@ -351,6 +419,9 @@ static int alarm_timer_create(struct k_itimer *new_timer)
        enum  alarmtimer_type type;
        struct alarm_base *base;
 
+       if (!alarmtimer_get_rtcdev())
+               return -ENOTSUPP;
+
        if (!capable(CAP_WAKE_ALARM))
                return -EPERM;
 
@@ -385,6 +456,9 @@ static void alarm_timer_get(struct k_itimer *timr,
  */
 static int alarm_timer_del(struct k_itimer *timr)
 {
+       if (!rtcdev)
+               return -ENOTSUPP;
+
        alarm_cancel(&timr->it.alarmtimer);
        return 0;
 }
@@ -402,6 +476,9 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
                                struct itimerspec *new_setting,
                                struct itimerspec *old_setting)
 {
+       if (!rtcdev)
+               return -ENOTSUPP;
+
        /* Save old values */
        old_setting->it_interval =
                        ktime_to_timespec(timr->it.alarmtimer.period);
@@ -541,6 +618,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        int ret = 0;
        struct restart_block *restart;
 
+       if (!alarmtimer_get_rtcdev())
+               return -ENOTSUPP;
+
        if (!capable(CAP_WAKE_ALARM))
                return -EPERM;
 
@@ -638,65 +718,3 @@ static int __init alarmtimer_init(void)
 }
 device_initcall(alarmtimer_init);
 
-#ifdef CONFIG_RTC_CLASS
-/**
- * has_wakealarm - check rtc device has wakealarm ability
- * @dev: current device
- * @name_ptr: name to be returned
- *
- * This helper function checks to see if the rtc device can wake
- * from suspend.
- */
-static int __init has_wakealarm(struct device *dev, void *name_ptr)
-{
-       struct rtc_device *candidate = to_rtc_device(dev);
-
-       if (!candidate->ops->set_alarm)
-               return 0;
-       if (!device_may_wakeup(candidate->dev.parent))
-               return 0;
-
-       *(const char **)name_ptr = dev_name(dev);
-       return 1;
-}
-
-/**
- * alarmtimer_init_late - Late initializing of alarmtimer code
- *
- * This function locates a rtc device to use for wakealarms.
- * Run as late_initcall to make sure rtc devices have been
- * registered.
- */
-static int __init alarmtimer_init_late(void)
-{
-       struct device *dev;
-       char *str;
-
-       /* Find an rtc device and init the rtc_timer */
-       dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
-       /* If we have a device then str is valid. See has_wakealarm() */
-       if (dev) {
-               rtcdev = rtc_class_open(str);
-               /*
-                * Drop the reference we got in class_find_device,
-                * rtc_open takes its own.
-                */
-               put_device(dev);
-       }
-       if (!rtcdev) {
-               printk(KERN_WARNING "No RTC device found, ALARM timers will"
-                       " not wake from suspend");
-       }
-       rtc_timer_init(&rtctimer, NULL, NULL);
-
-       return 0;
-}
-#else
-static int __init alarmtimer_init_late(void)
-{
-       printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers"
-               " will not wake from suspend");
-       return 0;
-}
-#endif
-late_initcall(alarmtimer_init_late);
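With the late_initcall probe gone, alarmtimer_get_rtcdev() looks the RTC up on first use under rtcdev_lock and caches the result, retrying on later calls only while nothing has been found yet. The same lazily populated, lock-protected cache in a small pthread-based sketch (purely illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *cached_dev;          /* stands in for the cached rtc_device */

static const char *probe_device(void)
{
        return getenv("FAKE_RTC");      /* may legitimately find nothing */
}

static const char *get_device(void)
{
        const char *ret;

        pthread_mutex_lock(&dev_lock);
        if (!cached_dev)                /* probe again only until one is found */
                cached_dev = probe_device();
        ret = cached_dev;
        pthread_mutex_unlock(&dev_lock);
        return ret;                     /* NULL means "not supported yet" */
}

int main(void)
{
        const char *dev = get_device();

        printf("device: %s\n", dev ? dev : "(none)");
        return 0;
}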
index cf7d027..ddffc74 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/limits.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
index eac0ba5..740c4f5 100644 (file)
@@ -391,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        struct task_struct *tsk;
        struct anon_vma *av;
 
-       read_lock(&tasklist_lock);
        av = page_lock_anon_vma(page);
        if (av == NULL) /* Not actually mapped anymore */
-               goto out;
+               return;
+
+       read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
 
@@ -408,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
-       page_unlock_anon_vma(av);
-out:
        read_unlock(&tasklist_lock);
+       page_unlock_anon_vma(av);
 }
 
 /*
@@ -424,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        struct prio_tree_iter iter;
        struct address_space *mapping = page->mapping;
 
-       /*
-        * A note on the locking order between the two locks.
-        * We don't rely on this particular order.
-        * If you have some other code that needs a different order
-        * feel free to switch them around. Or add a reverse link
-        * from mm_struct to task_struct, then this could be all
-        * done without taking tasklist_lock and looping over all tasks.
-        */
-
-       read_lock(&tasklist_lock);
        mutex_lock(&mapping->i_mmap_mutex);
+       read_lock(&tasklist_lock);
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -454,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
-       mutex_unlock(&mapping->i_mmap_mutex);
        read_unlock(&tasklist_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 }
 
 /*
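Both collect_procs hunks now take the mapping-side lock (the anon_vma lock or i_mmap_mutex) before tasklist_lock and release the two in the opposite order, matching the ordering spelled out in the updated mm/rmap.c comment below. The rule itself in a tiny pthread sketch: every path that needs both locks acquires them in the same order, so no two such paths can deadlock against each other.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mapping_lock = PTHREAD_MUTEX_INITIALIZER;        /* outer */
static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;       /* inner */

static void walk_tasks_for_mapping(void)
{
        pthread_mutex_lock(&mapping_lock);      /* always outer first... */
        pthread_mutex_lock(&tasklist_lock);     /* ...then inner */

        puts("collecting tasks");

        pthread_mutex_unlock(&tasklist_lock);   /* release in reverse order */
        pthread_mutex_unlock(&mapping_lock);
}

int main(void)
{
        walk_tasks_for_mapping();
        return 0;
}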
index 87d9353..40b7531 100644 (file)
@@ -2798,30 +2798,6 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
-{
-       struct address_space *mapping = inode->i_mapping;
-
-       /*
-        * If the underlying filesystem is not going to provide
-        * a way to truncate a range of blocks (punch a hole) -
-        * we should return failure right now.
-        */
-       if (!inode->i_op->truncate_range)
-               return -ENOSYS;
-
-       mutex_lock(&inode->i_mutex);
-       down_write(&inode->i_alloc_sem);
-       unmap_mapping_range(mapping, offset, (end - offset), 1);
-       truncate_inode_pages_range(mapping, offset, end);
-       unmap_mapping_range(mapping, offset, (end - offset), 1);
-       inode->i_op->truncate_range(inode, offset, end);
-       up_write(&inode->i_alloc_sem);
-       mutex_unlock(&inode->i_mutex);
-
-       return 0;
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
index 02159c7..c46887b 100644 (file)
@@ -498,7 +498,9 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
         * The node we allocated has no zone fallback lists. For avoiding
         * to access not-initialized zonelist, build here.
         */
+       mutex_lock(&zonelists_mutex);
        build_all_zonelists(NULL);
+       mutex_unlock(&zonelists_mutex);
 
        return pgdat;
 }
@@ -521,7 +523,7 @@ int mem_online_node(int nid)
 
        lock_memory_hotplug();
        pgdat = hotadd_new_pgdat(nid, 0);
-       if (pgdat) {
+       if (!pgdat) {
                ret = -ENOMEM;
                goto out;
        }
index 27dfd3b..23295f6 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -38,9 +38,8 @@
  *                           in arch-dependent flush_dcache_mmap_lock,
  *                           within inode_wb_list_lock in __sync_single_inode)
  *
- * (code doesn't rely on that order so it could be switched around)
- * ->tasklist_lock
- *   anon_vma->mutex      (memory_failure, collect_procs_anon)
+ * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
+ *   ->tasklist_lock
  *     pte map lock
  */
 
index d221a1c..fcedf54 100644 (file)
@@ -539,7 +539,7 @@ static void shmem_free_pages(struct list_head *next)
        } while (next);
 }
 
-static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
@@ -562,6 +562,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
        spinlock_t *punch_lock;
        unsigned long upper_limit;
 
+       truncate_inode_pages_range(inode->i_mapping, start, end);
+
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
@@ -738,16 +740,8 @@ done2:
                 * lowered next_index.  Also, though shmem_getpage checks
                 * i_size before adding to cache, no recheck after: so fix the
                 * narrow window there too.
-                *
-                * Recalling truncate_inode_pages_range and unmap_mapping_range
-                * every time for punch_hole (which never got a chance to clear
-                * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
-                * yet hardly ever necessary: try to optimize them out later.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
-               if (punch_hole)
-                       unmap_mapping_range(inode->i_mapping, start,
-                                                       end - start, 1);
        }
 
        spin_lock(&info->lock);
@@ -766,22 +760,23 @@ done2:
                shmem_free_pages(pages_to_free.next);
        }
 }
+EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
-static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
+static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 {
        struct inode *inode = dentry->d_inode;
-       loff_t newsize = attr->ia_size;
        int error;
 
        error = inode_change_ok(inode, attr);
        if (error)
                return error;
 
-       if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
-                                       && newsize != inode->i_size) {
+       if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
+               loff_t oldsize = inode->i_size;
+               loff_t newsize = attr->ia_size;
                struct page *page = NULL;
 
-               if (newsize < inode->i_size) {
+               if (newsize < oldsize) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
@@ -810,12 +805,19 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
                                spin_unlock(&info->lock);
                        }
                }
-
-               /* XXX(truncate): truncate_setsize should be called last */
-               truncate_setsize(inode, newsize);
+               if (newsize != oldsize) {
+                       i_size_write(inode, newsize);
+                       inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+               }
+               if (newsize < oldsize) {
+                       loff_t holebegin = round_up(newsize, PAGE_SIZE);
+                       unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+                       shmem_truncate_range(inode, newsize, (loff_t)-1);
+                       /* unmap again to remove racily COWed private pages */
+                       unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+               }
                if (page)
                        page_cache_release(page);
-               shmem_truncate_range(inode, newsize, (loff_t)-1);
        }
 
        setattr_copy(inode, attr);
@@ -832,7 +834,6 @@ static void shmem_evict_inode(struct inode *inode)
        struct shmem_xattr *xattr, *nxattr;
 
        if (inode->i_mapping->a_ops == &shmem_aops) {
-               truncate_inode_pages(inode->i_mapping, 0);
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
@@ -2706,7 +2707,7 @@ static const struct file_operations shmem_file_operations = {
 };
 
 static const struct inode_operations shmem_inode_operations = {
-       .setattr        = shmem_notify_change,
+       .setattr        = shmem_setattr,
        .truncate_range = shmem_truncate_range,
 #ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
@@ -2739,7 +2740,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
        .removexattr    = shmem_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-       .setattr        = shmem_notify_change,
+       .setattr        = shmem_setattr,
        .check_acl      = generic_check_acl,
 #endif
 };
@@ -2752,7 +2753,7 @@ static const struct inode_operations shmem_special_inode_operations = {
        .removexattr    = shmem_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-       .setattr        = shmem_notify_change,
+       .setattr        = shmem_setattr,
        .check_acl      = generic_check_acl,
 #endif
 };
@@ -2908,6 +2909,12 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
        return 0;
 }
 
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+{
+       truncate_inode_pages_range(inode->i_mapping, start, end);
+}
+EXPORT_SYMBOL_GPL(shmem_truncate_range);
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /**
  * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
@@ -3028,3 +3035,26 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
 }
+
+/**
+ * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:   the page's address_space
+ * @index:     the page index
+ * @gfp:       the page allocator flags to use if allocating
+ *
+ * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
+ * with any new page allocations done using the specified allocation flags.
+ * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * suit tmpfs, since it may have pages in swapcache, and needs to find those
+ * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
+ *
+ * Provide a stub for those callers to start using now, then later
+ * flesh it out to call shmem_getpage() with additional gfp mask, when
+ * shmem_file_splice_read() is added and shmem_readpage() is removed.
+ */
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+                                        pgoff_t index, gfp_t gfp)
+{
+       return read_cache_page_gfp(mapping, index, gfp);
+}
+EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
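As the comment explains, shmem_read_mapping_page_gfp() is the tmpfs-aware counterpart to read_cache_page_gfp(), and for now simply wraps it. A hypothetical caller (the function name and GFP choice below are illustrative, not taken from this commit) might use it like this:

/* Pin page 'index' of a tmpfs-backed object; the allocation flags only
 * matter when the page has to be brought in.  Illustrative sketch only. */
static struct page *get_object_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page;

        page = shmem_read_mapping_page_gfp(mapping, index, GFP_HIGHUSER);
        if (IS_ERR(page))
                return page;            /* e.g. -ENOMEM, propagated as ERR_PTR */

        /* ... use the page, then page_cache_release(page) when done ... */
        return page;
}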
index d537d29..ff8dc1a 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/namei.h>
-#include <linux/shm.h>
+#include <linux/shmem_fs.h>
 #include <linux/blkdev.h>
 #include <linux/random.h>
 #include <linux/writeback.h>
index 3a29a61..e13f22e 100644 (file)
@@ -304,6 +304,11 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
  * @lstart: offset from which to truncate
  *
  * Called under (and serialised by) inode->i_mutex.
+ *
+ * Note: When this function returns, there can be a page in the process of
+ * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
+ * mapping->nrpages can be non-zero when this function returns even after
+ * truncation of the whole mapping.
  */
 void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
 {
@@ -603,3 +608,27 @@ int vmtruncate(struct inode *inode, loff_t offset)
        return 0;
 }
 EXPORT_SYMBOL(vmtruncate);
+
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+       struct address_space *mapping = inode->i_mapping;
+
+       /*
+        * If the underlying filesystem is not going to provide
+        * a way to truncate a range of blocks (punch a hole) -
+        * we should return failure right now.
+        */
+       if (!inode->i_op->truncate_range)
+               return -ENOSYS;
+
+       mutex_lock(&inode->i_mutex);
+       down_write(&inode->i_alloc_sem);
+       unmap_mapping_range(mapping, offset, (end - offset), 1);
+       inode->i_op->truncate_range(inode, offset, end);
+       /* unmap again to remove racily COWed private pages */
+       unmap_mapping_range(mapping, offset, (end - offset), 1);
+       up_write(&inode->i_alloc_sem);
+       mutex_unlock(&inode->i_mutex);
+
+       return 0;
+}
index 8ff834e..4f49535 100644 (file)
@@ -1995,14 +1995,13 @@ restart:
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
        unsigned long nr_soft_reclaimed;
        unsigned long nr_soft_scanned;
-       unsigned long total_scanned = 0;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2017,19 +2016,23 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
                                continue;
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
+                       /*
+                        * This steals pages from memory cgroups over softlimit
+                        * and returns the number of reclaimed pages and
+                        * scanned pages. This works for global memory pressure
+                        * and balancing, not for a memcg's limit.
+                        */
+                       nr_soft_scanned = 0;
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                               sc->order, sc->gfp_mask,
+                                               &nr_soft_scanned);
+                       sc->nr_reclaimed += nr_soft_reclaimed;
+                       sc->nr_scanned += nr_soft_scanned;
+                       /* need some check to avoid more shrink_zone() */

                }
 
-               nr_soft_scanned = 0;
-               nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
-                                                       sc->order, sc->gfp_mask,
-                                                       &nr_soft_scanned);
-               sc->nr_reclaimed += nr_soft_reclaimed;
-               total_scanned += nr_soft_scanned;
-
                shrink_zone(priority, zone, sc);
        }
-
-       return total_scanned;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2094,7 +2097,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token(sc->mem_cgroup);
-               total_scanned += shrink_zones(priority, zonelist, sc);
+               shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
index 3163330..d3a05b9 100644 (file)
@@ -608,11 +608,11 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                goto encrypt;
 
 auth:
-       if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+       if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;
 
-       hci_conn_auth(conn, sec_level, auth_type);
-       return 0;
+       if (!hci_conn_auth(conn, sec_level, auth_type))
+               return 0;
 
 encrypt:
        if (conn->link_mode & HCI_LM_ENCRYPT)
index e64a1c2..56fdd91 100644 (file)
@@ -4002,21 +4002,30 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                        }
                } else if (sk->sk_state == BT_CONNECT2) {
                        struct l2cap_conn_rsp rsp;
-                       __u16 result;
+                       __u16 res, stat;
 
                        if (!status) {
-                               sk->sk_state = BT_CONFIG;
-                               result = L2CAP_CR_SUCCESS;
+                               if (bt_sk(sk)->defer_setup) {
+                                       struct sock *parent = bt_sk(sk)->parent;
+                                       res = L2CAP_CR_PEND;
+                                       stat = L2CAP_CS_AUTHOR_PEND;
+                                       parent->sk_data_ready(parent, 0);
+                               } else {
+                                       sk->sk_state = BT_CONFIG;
+                                       res = L2CAP_CR_SUCCESS;
+                                       stat = L2CAP_CS_NO_INFO;
+                               }
                        } else {
                                sk->sk_state = BT_DISCONN;
                                l2cap_sock_set_timer(sk, HZ / 10);
-                               result = L2CAP_CR_SEC_BLOCK;
+                               res = L2CAP_CR_SEC_BLOCK;
+                               stat = L2CAP_CS_NO_INFO;
                        }
 
                        rsp.scid   = cpu_to_le16(chan->dcid);
                        rsp.dcid   = cpu_to_le16(chan->scid);
-                       rsp.result = cpu_to_le16(result);
-                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+                       rsp.result = cpu_to_le16(res);
+                       rsp.status = cpu_to_le16(stat);
                        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
                                                        sizeof(rsp), &rsp);
                }
index 29b9812..2d85ca7 100644 (file)
@@ -1379,8 +1379,11 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
        if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
                return -EINVAL;
 
-       if (iph->protocol != IPPROTO_IGMP)
+       if (iph->protocol != IPPROTO_IGMP) {
+               if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP)
+                       BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                return 0;
+       }
 
        len = ntohs(iph->tot_len);
        if (skb->len < len || len < ip_hdrlen(skb))
index a8024ea..4a7e16b 100644 (file)
@@ -802,8 +802,6 @@ static int __ip_append_data(struct sock *sk,
        skb = skb_peek_tail(queue);
 
        exthdrlen = !skb ? rt->dst.header_len : 0;
-       length += exthdrlen;
-       transhdrlen += exthdrlen;
        mtu = cork->fragsize;
 
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -883,17 +881,16 @@ alloc_new_skb:
                        else
                                alloclen = fraglen;
 
+                       alloclen += exthdrlen;
+
                        /* The last fragment gets additional space at tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea what fragment will be
                         * the last.
                         */
-                       if (datalen == length + fraggap) {
+                       if (datalen == length + fraggap)
                                alloclen += rt->dst.trailer_len;
-                               /* make sure mtu is not reached */
-                               if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
-                                       datalen -= ALIGN(rt->dst.trailer_len, 8);
-                       }
+
                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
@@ -926,11 +923,11 @@ alloc_new_skb:
                        /*
                         *      Find where to start putting bytes.
                         */
-                       data = skb_put(skb, fraglen);
+                       data = skb_put(skb, fraglen + exthdrlen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
-                       data += fragheaderlen;
+                       data += fragheaderlen + exthdrlen;
 
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
@@ -1064,7 +1061,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
         */
        *rtp = NULL;
        cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
-                        rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+                        rt->dst.dev->mtu : dst_mtu(&rt->dst);
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->tx_flags = ipc->tx_flags;
index 4614bab..2e97e3e 100644 (file)
@@ -17,51 +17,35 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
        struct flowi4 fl4 = {};
-       unsigned long orefdst;
+       __be32 saddr = iph->saddr;
+       __u8 flags = 0;
        unsigned int hh_len;
-       unsigned int type;
 
-       type = inet_addr_type(net, iph->saddr);
-       if (skb->sk && inet_sk(skb->sk)->transparent)
-               type = RTN_LOCAL;
-       if (addr_type == RTN_UNSPEC)
-               addr_type = type;
+       if (!skb->sk && addr_type != RTN_LOCAL) {
+               if (addr_type == RTN_UNSPEC)
+                       addr_type = inet_addr_type(net, saddr);
+               if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
+                       flags |= FLOWI_FLAG_ANYSRC;
+               else
+                       saddr = 0;
+       }
 
        /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
         * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
         */
-       if (addr_type == RTN_LOCAL) {
-               fl4.daddr = iph->daddr;
-               if (type == RTN_LOCAL)
-                       fl4.saddr = iph->saddr;
-               fl4.flowi4_tos = RT_TOS(iph->tos);
-               fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
-               fl4.flowi4_mark = skb->mark;
-               fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
-               rt = ip_route_output_key(net, &fl4);
-               if (IS_ERR(rt))
-                       return -1;
-
-               /* Drop old route. */
-               skb_dst_drop(skb);
-               skb_dst_set(skb, &rt->dst);
-       } else {
-               /* non-local src, find valid iif to satisfy
-                * rp-filter when calling ip_route_input. */
-               fl4.daddr = iph->saddr;
-               rt = ip_route_output_key(net, &fl4);
-               if (IS_ERR(rt))
-                       return -1;
+       fl4.daddr = iph->daddr;
+       fl4.saddr = saddr;
+       fl4.flowi4_tos = RT_TOS(iph->tos);
+       fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+       fl4.flowi4_mark = skb->mark;
+       fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags;
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt))
+               return -1;
 
-               orefdst = skb->_skb_refdst;
-               if (ip_route_input(skb, iph->daddr, iph->saddr,
-                                  RT_TOS(iph->tos), rt->dst.dev) != 0) {
-                       dst_release(&rt->dst);
-                       return -1;
-               }
-               dst_release(&rt->dst);
-               refdst_drop(orefdst);
-       }
+       /* Drop old route. */
+       skb_dst_drop(skb);
+       skb_dst_set(skb, &rt->dst);
 
        if (skb_dst(skb)->error)
                return -1;
index 1ff79e5..51f13f8 100644 (file)
@@ -40,7 +40,6 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        struct iphdr *niph;
        const struct tcphdr *oth;
        struct tcphdr _otcph, *tcph;
-       unsigned int addr_type;
 
        /* IP header checks: fragment. */
        if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
@@ -55,6 +54,9 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        if (oth->rst)
                return;
 
+       if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+               return;
+
        /* Check checksum */
        if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
                return;
@@ -101,19 +103,11 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        nskb->csum_start = (unsigned char *)tcph - nskb->head;
        nskb->csum_offset = offsetof(struct tcphdr, check);
 
-       addr_type = RTN_UNSPEC;
-       if (hook != NF_INET_FORWARD
-#ifdef CONFIG_BRIDGE_NETFILTER
-           || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
-#endif
-          )
-               addr_type = RTN_LOCAL;
-
        /* ip_route_me_harder expects skb->dst to be set */
        skb_dst_set_noref(nskb, skb_dst(oldskb));
 
        nskb->protocol = htons(ETH_P_IP);
-       if (ip_route_me_harder(nskb, addr_type))
+       if (ip_route_me_harder(nskb, RTN_UNSPEC))
                goto free_nskb;
 
        niph->ttl       = ip4_dst_hoplimit(skb_dst(nskb));
index abca870..48cd88e 100644 (file)
@@ -1249,6 +1249,9 @@ csum_copy_err:
 
        if (noblock)
                return -EAGAIN;
+
+       /* starting over for a new packet */
+       msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
 
index 41f8c9c..328985c 100644 (file)
@@ -453,8 +453,11 @@ csum_copy_err:
        }
        unlock_sock_fast(sk, slow);
 
-       if (flags & MSG_DONTWAIT)
+       if (noblock)
                return -EAGAIN;
+
+       /* starting over for a new packet */
+       msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
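Both UDP hunks clear MSG_TRUNC from msg->msg_flags before jumping back to try_again, so a datagram dropped for a bad checksum cannot leave a stale truncation flag on the next, intact datagram handed to userspace. From the receiver's side the flag is consumed roughly like this (minimal sketch; no peer actually sends here, so recvmsg() simply fails with EAGAIN):

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

int main(void)
{
        int s = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in a = { .sin_family = AF_INET,
                                 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        char buf[8];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

        bind(s, (struct sockaddr *)&a, sizeof(a));      /* any free port */

        /* After a successful recvmsg(), MSG_TRUNC in msg_flags must describe
         * this datagram only: set when it was larger than buf, clear otherwise. */
        if (recvmsg(s, &msg, MSG_DONTWAIT) >= 0 && (msg.msg_flags & MSG_TRUNC))
                puts("datagram was truncated");
        return 0;
}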
 
index 339ba64..5daf6cc 100644 (file)
@@ -577,13 +577,13 @@ retry:
        }
        inode = &gss_msg->inode->vfs_inode;
        for (;;) {
-               prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
+               prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
                spin_lock(&inode->i_lock);
                if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
                        break;
                }
                spin_unlock(&inode->i_lock);
-               if (signalled()) {
+               if (fatal_signal_pending(current)) {
                        err = -ERESTARTSYS;
                        goto out_intr;
                }
index b84d739..8c91415 100644 (file)
@@ -1061,7 +1061,7 @@ call_allocate(struct rpc_task *task)
 
        dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
 
-       if (RPC_IS_ASYNC(task) || !signalled()) {
+       if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
                task->tk_action = call_allocate;
                rpc_delay(task, HZ>>4);
                return;
@@ -1175,6 +1175,9 @@ call_bind_status(struct rpc_task *task)
                        status = -EOPNOTSUPP;
                        break;
                }
+               if (task->tk_rebind_retry == 0)
+                       break;
+               task->tk_rebind_retry--;
                rpc_delay(task, 3*HZ);
                goto retry_timeout;
        case -ETIMEDOUT:
index 6b43ee7..a27406b 100644 (file)
@@ -792,6 +792,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        /* Initialize retry counters */
        task->tk_garb_retry = 2;
        task->tk_cred_retry = 2;
+       task->tk_rebind_retry = 2;
 
        task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
        task->tk_owner = current->tgid;
index 8e319a4..8246532 100644 (file)
@@ -469,7 +469,7 @@ static struct key *construct_key_and_link(struct key_type *type,
        } else if (ret == -EINPROGRESS) {
                ret = 0;
        } else {
-               key = ERR_PTR(ret);
+               goto couldnt_alloc_key;
        }
 
        key_put(dest_keyring);
@@ -479,6 +479,7 @@ static struct key *construct_key_and_link(struct key_type *type,
 construction_failed:
        key_negate_and_link(key, key_negative_timeout, NULL, NULL);
        key_put(key);
+couldnt_alloc_key:
        key_put(dest_keyring);
        kleave(" = %d", ret);
        return ERR_PTR(ret);
index 6e24091..bfee60c 100644 (file)
@@ -599,4 +599,4 @@ module_exit(atmel_abdac_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Driver for Atmel Audio Bitstream DAC (ABDAC)");
-MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
index b310702..ac35222 100644 (file)
@@ -1199,4 +1199,4 @@ module_exit(atmel_ac97c_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Driver for Atmel AC97 controller");
-MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
index 2ca6f4f..e3569bd 100644 (file)
@@ -27,7 +27,6 @@
 #include "hpioctl.h"
 
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/slab.h>
index f16bc8a..e083122 100644 (file)
@@ -149,7 +149,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
                        &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
                desc->addr = cpu_to_le32(addr);
                desc->size = cpu_to_le32(period_bytes);
-               desc->ctlreserved = cpu_to_le32(PRD_EOP);
+               desc->ctlreserved = cpu_to_le16(PRD_EOP);
                desc_addr += sizeof(struct cs5535audio_dma_desc);
                addr += period_bytes;
        }
@@ -157,7 +157,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
        lastdesc = &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[periods];
        lastdesc->addr = cpu_to_le32((u32) dma->desc_buf.addr);
        lastdesc->size = 0;
-       lastdesc->ctlreserved = cpu_to_le32(PRD_JMP);
+       lastdesc->ctlreserved = cpu_to_le16(PRD_JMP);
        jmpprd_addr = cpu_to_le32(lastdesc->addr +
                                  (sizeof(struct cs5535audio_dma_desc)*periods));
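ctlreserved is a 16-bit little-endian descriptor field, so the conversion helper has to match its width: cpu_to_le32() byte-swaps into a 32-bit layout first, and storing that result in a 16-bit slot keeps the wrong half on big-endian machines (it only happens to work on little-endian). The width mismatch, shown with the userspace htole16()/htole32() equivalents from <endian.h> (illustrative, not driver code):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t flag = 0x0100;                 /* stand-in for a PRD control bit */
        uint16_t ok = htole16(flag);            /* matches the 16-bit field */
        uint16_t bad = (uint16_t)htole32(flag); /* 32-bit swap, then truncated */

        /* On little-endian hosts the two values match.  On big-endian hosts
         * the 16-bit swap keeps the payload byte (prints 0x0001) while the
         * truncated 32-bit swap loses it completely (prints 0x0000). */
        printf("ok=%#06x bad=%#06x\n", ok, bad);
        return 0;
}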
 
index b05f7be..e3e8531 100644 (file)
@@ -294,7 +294,7 @@ static int hdmi_update_eld(struct hdmi_eld *e,
                snd_printd(KERN_INFO "HDMI: out of range MNL %d\n", mnl);
                goto out_fail;
        } else
-               strlcpy(e->monitor_name, buf + ELD_FIXED_BYTES, mnl);
+               strlcpy(e->monitor_name, buf + ELD_FIXED_BYTES, mnl + 1);
 
        for (i = 0; i < e->sad_count; i++) {
                if (ELD_FIXED_BYTES + mnl + 3 * (i + 1) > size) {
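strlcpy() copies at most size - 1 characters and then NUL-terminates, so passing mnl kept only mnl - 1 bytes of the monitor name; the fix passes mnl + 1. snprintf() uses the same size convention, which makes the off-by-one easy to see in plain userspace C:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char raw[] = "HDMI-MON";          /* monitor name from the ELD */
        size_t mnl = strlen(raw);               /* length without a terminator */
        char name[32];

        snprintf(name, mnl, "%s", raw);         /* size-1 chars: drops the 'N' */
        printf("size = mnl     -> \"%s\"\n", name);

        snprintf(name, mnl + 1, "%s", raw);     /* keeps all mnl characters */
        printf("size = mnl + 1 -> \"%s\"\n", name);
        return 0;
}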
index 694b9da..7bbc5f2 100644 (file)
@@ -3074,6 +3074,7 @@ static const char * const cxt5066_models[CXT5066_MODELS] = {
 };
 
 static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+       SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO),
        SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
        SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
@@ -4389,6 +4390,8 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
          .patch = patch_cxt5066 },
        { .id = 0x14f15069, .name = "CX20585",
          .patch = patch_cxt5066 },
+       { .id = 0x14f1506c, .name = "CX20588",
+         .patch = patch_cxt5066 },
        { .id = 0x14f1506e, .name = "CX20590",
          .patch = patch_cxt5066 },
        { .id = 0x14f15097, .name = "CX20631",
@@ -4417,6 +4420,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
 MODULE_ALIAS("snd-hda-codec-id:14f15067");
 MODULE_ALIAS("snd-hda-codec-id:14f15068");
 MODULE_ALIAS("snd-hda-codec-id:14f15069");
+MODULE_ALIAS("snd-hda-codec-id:14f1506c");
 MODULE_ALIAS("snd-hda-codec-id:14f1506e");
 MODULE_ALIAS("snd-hda-codec-id:14f15097");
 MODULE_ALIAS("snd-hda-codec-id:14f15098");
index 61a774b..d21191d 100644 (file)
@@ -4883,7 +4883,6 @@ static const struct snd_pci_quirk alc880_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0xe309, "ULI", ALC880_3ST_DIG),
        SND_PCI_QUIRK(0x1025, 0xe310, "ULI", ALC880_3ST),
        SND_PCI_QUIRK(0x1039, 0x1234, NULL, ALC880_6ST_DIG),
-       SND_PCI_QUIRK(0x103c, 0x2a09, "HP", ALC880_5ST),
        SND_PCI_QUIRK(0x1043, 0x10b3, "ASUS W1V", ALC880_ASUS_W1V),
        SND_PCI_QUIRK(0x1043, 0x10c2, "ASUS W6A", ALC880_ASUS_DIG),
        SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS Wxx", ALC880_ASUS_DIG),
@@ -12600,6 +12599,7 @@ static const struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = {
  */
 enum {
        PINFIX_FSC_H270,
+       PINFIX_HP_Z200,
 };
 
 static const struct alc_fixup alc262_fixups[] = {
@@ -12612,9 +12612,17 @@ static const struct alc_fixup alc262_fixups[] = {
                        { }
                }
        },
+       [PINFIX_HP_Z200] = {
+               .type = ALC_FIXUP_PINS,
+               .v.pins = (const struct alc_pincfg[]) {
+                       { 0x16, 0x99130120 }, /* internal speaker */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc262_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200", PINFIX_HP_Z200),
        SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", PINFIX_FSC_H270),
        {}
 };
@@ -12731,6 +12739,8 @@ static const struct snd_pci_quirk alc262_cfg_tbl[] = {
                           ALC262_HP_BPC),
        SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series",
                           ALC262_HP_BPC),
+       SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200",
+                          ALC262_AUTO),
        SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series",
                           ALC262_HP_BPC),
        SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL),
@@ -13872,7 +13882,6 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
        SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO),
        SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA),
-       SND_PCI_QUIRK(0x152d, 0x0763, "Diverse (CPR2000)", ALC268_ACER),
        SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1),
        {}
 };
index c952582..f43bb0e 100644 (file)
@@ -745,12 +745,23 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
        struct via_spec *spec = codec->spec;
        hda_nid_t nid = kcontrol->private_value;
        unsigned int pinsel = ucontrol->value.enumerated.item[0];
+       unsigned int parm0, parm1;
        /* Get Independent Mode index of headphone pin widget */
        spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel
                ? 1 : 0;
-       if (spec->codec_type == VT1718S)
+       if (spec->codec_type == VT1718S) {
                snd_hda_codec_write(codec, nid, 0,
                                    AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0);
+               /* Set correct mute switch for MW3 */
+               parm0 = spec->hp_independent_mode ?
+                              AMP_IN_UNMUTE(0) : AMP_IN_MUTE(0);
+               parm1 = spec->hp_independent_mode ?
+                              AMP_IN_MUTE(1) : AMP_IN_UNMUTE(1);
+               snd_hda_codec_write(codec, 0x1b, 0,
+                                   AC_VERB_SET_AMP_GAIN_MUTE, parm0);
+               snd_hda_codec_write(codec, 0x1b, 0,
+                                   AC_VERB_SET_AMP_GAIN_MUTE, parm1);
+       }
        else
                snd_hda_codec_write(codec, nid, 0,
                                    AC_VERB_SET_CONNECT_SEL, pinsel);
@@ -4283,9 +4294,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
        {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
        {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
        {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5)},
-
-       /* Setup default input of Front HP to MW9 */
-       {0x28, AC_VERB_SET_CONNECT_SEL, 0x1},
        /* PW9 PW10 Output enable */
        {0x2d, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
        {0x2e, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
@@ -4294,10 +4302,10 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
        /* Enable Boost Volume backdoor */
        {0x1, 0xf88, 0x8},
        /* MW0/1/2/3/4: un-mute index 0 (AOWx), mute index 1 (MW9) */
-       {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+       {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
        {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
        {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
        {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
        {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
        {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
@@ -4307,8 +4315,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
        /* set MUX1 = 2 (AOW4), MUX2 = 1 (AOW3) */
        {0x34, AC_VERB_SET_CONNECT_SEL, 0x2},
        {0x35, AC_VERB_SET_CONNECT_SEL, 0x1},
-       /* Unmute MW4's index 0 */
-       {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
        { }
 };
 
@@ -4456,6 +4462,19 @@ static int vt1718S_auto_create_multi_out_ctls(struct via_spec *spec,
                        if (err < 0)
                                return err;
                } else if (i == AUTO_SEQ_FRONT) {
+                       /* add control to mixer index 0 */
+                       err = via_add_control(spec, VIA_CTL_WIDGET_VOL,
+                                             "Master Front Playback Volume",
+                                             HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+                                                                 HDA_INPUT));
+                       if (err < 0)
+                               return err;
+                       err = via_add_control(spec, VIA_CTL_WIDGET_MUTE,
+                                             "Master Front Playback Switch",
+                                             HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+                                                                 HDA_INPUT));
+                       if (err < 0)
+                               return err;
                        /* Front */
                        sprintf(name, "%s Playback Volume", chname[i]);
                        err = via_add_control(
index 3f08afc..c8e402f 100644 (file)
@@ -896,11 +896,11 @@ struct hdspm {
        unsigned char max_channels_in;
        unsigned char max_channels_out;
 
-       char *channel_map_in;
-       char *channel_map_out;
+       signed char *channel_map_in;
+       signed char *channel_map_out;
 
-       char *channel_map_in_ss, *channel_map_in_ds, *channel_map_in_qs;
-       char *channel_map_out_ss, *channel_map_out_ds, *channel_map_out_qs;
+       signed char *channel_map_in_ss, *channel_map_in_ds, *channel_map_in_qs;
+       signed char *channel_map_out_ss, *channel_map_out_ds, *channel_map_out_qs;
 
        char **port_names_in;
        char **port_names_out;
index 3c2ee1b..6af23d0 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
index d8f130d..bb699bb 100644 (file)
@@ -11,9 +11,6 @@ menuconfig SND_IMX_SOC
 
 if SND_IMX_SOC
 
-config SND_MXC_SOC_SSI
-       tristate
-
 config SND_MXC_SOC_FIQ
        tristate
 
@@ -24,7 +21,6 @@ config SND_MXC_SOC_WM1133_EV1
        tristate "Audio on the the i.MX31ADS with WM1133-EV1 fitted"
        depends on MACH_MX31ADS_WM1133_EV1 && EXPERIMENTAL
        select SND_SOC_WM8350
-       select SND_MXC_SOC_SSI
        select SND_MXC_SOC_FIQ
        help
          Enable support for audio on the i.MX31ADS with the WM1133-EV1
@@ -34,7 +30,6 @@ config SND_SOC_MX27VIS_AIC32X4
        tristate "SoC audio support for Visstrim M10 boards"
        depends on MACH_IMX27_VISSTRIM_M10
        select SND_SOC_TVL320AIC32X4
-       select SND_MXC_SOC_SSI
        select SND_MXC_SOC_MX2
        help
          Say Y if you want to add support for SoC audio on Visstrim M10
@@ -44,7 +39,6 @@ config SND_SOC_PHYCORE_AC97
        tristate "SoC Audio support for Phytec phyCORE (and phyCARD) boards"
        depends on MACH_PCM043 || MACH_PCA100
        select SND_SOC_WM9712
-       select SND_MXC_SOC_SSI
        select SND_MXC_SOC_FIQ
        help
          Say Y if you want to add support for SoC audio on Phytec phyCORE
@@ -57,7 +51,6 @@ config SND_SOC_EUKREA_TLV320
                || MACH_EUKREA_MBIMXSD35_BASEBOARD \
                || MACH_EUKREA_MBIMXSD51_BASEBOARD
        select SND_SOC_TLV320AIC23
-       select SND_MXC_SOC_SSI
        select SND_MXC_SOC_FIQ
        help
          Enable I2S based access to the TLV320AIC23B codec attached
index b2ed764..43fdc24 100644 (file)
@@ -337,3 +337,5 @@ static void __exit snd_imx_pcm_exit(void)
        platform_driver_unregister(&imx_pcm_driver);
 }
 module_exit(snd_imx_pcm_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-pcm-audio");
index 5b13fec..61fceb0 100644 (file)
@@ -774,4 +774,4 @@ module_exit(imx_ssi_exit);
 MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>");
 MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface");
 MODULE_LICENSE("GPL");
-
+MODULE_ALIAS("platform:imx-ssi");
index 2ce0b2d..fab20a5 100644 (file)
@@ -95,14 +95,14 @@ static int pxa2xx_soc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
        if (!card->dev->coherent_dma_mask)
                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
-       if (dai->driver->playback.channels_min) {
+       if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
                        SNDRV_PCM_STREAM_PLAYBACK);
                if (ret)
                        goto out;
        }
 
-       if (dai->driver->capture.channels_min) {
+       if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
                ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
                        SNDRV_PCM_STREAM_CAPTURE);
                if (ret)
index c005ceb..039b953 100644 (file)
@@ -409,9 +409,6 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
        codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;
 
        switch (control) {
-       case SND_SOC_CUSTOM:
-               break;
-
        case SND_SOC_I2C:
 #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
                codec->hw_write = (hw_write_t)i2c_master_send;
index 337a002..4dd051b 100644 (file)
@@ -1124,6 +1124,6 @@ static void __exit at73c213_exit(void)
 }
 module_exit(at73c213_exit);
 
-MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
 MODULE_DESCRIPTION("Sound driver for AT73C213 with Atmel SSC");
 MODULE_LICENSE("GPL");