Merge branch 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 22 Jul 2012 19:04:44 +0000 (12:04 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 22 Jul 2012 19:04:44 +0000 (12:04 -0700)
Pull debug-for-linus git tree from Ingo Molnar.

Fix up a trivial conflict in arch/x86/kernel/cpu/perf_event_intel.c, where a
printk() was converted to pr_info() differently in the two branches.

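For readers unfamiliar with that conversion, the following is a minimal,
hypothetical sketch of what a printk() -> pr_<level>() change looks like; the
function, message text and variable are invented and this is not the actual
hunk from perf_event_intel.c:

    #include <linux/printk.h>

    static void example_report(int feature)
    {
            /* Before: a bare printk() carrying an explicit KERN_<LEVEL> prefix. */
            printk(KERN_INFO "example: feature %d enabled\n", feature);

            /* After: the equivalent pr_info() helper, which expands to
             * printk(KERN_INFO pr_fmt(...)) and so also picks up any pr_fmt()
             * prefix defined by the file.
             */
            pr_info("example: feature %d enabled\n", feature);
    }
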
* 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Move call to print_modules() out of show_regs()
  x86/mm: Mark free_initrd_mem() as __init
  x86/microcode: Mark microcode_id[] as __initconst
  x86/nmi: Clean up register_nmi_handler() usage
  x86: Save cr2 in NMI in case NMIs take a page fault (for i386)
  x86: Remove cmpxchg from i386 NMI nesting code
  x86: Save cr2 in NMI in case NMIs take a page fault
  x86/debug: Add KERN_<LEVEL> to bare printks, convert printks to pr_<level>

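The two "Save cr2 in NMI" entries above deal with the case where the NMI
handler itself takes a page fault: that fault overwrites CR2 before the
interrupted page-fault path gets to read it. A rough, hedged sketch of the
save/restore idea (handle_nmi_work() is a hypothetical stand-in, not the real
do_nmi() body):

    #include <asm/special_insns.h>            /* read_cr2() / write_cr2() */

    static void handle_nmi_work(void);        /* hypothetical NMI work; may fault */

    static void example_nmi_body(void)
    {
            unsigned long cr2 = read_cr2();   /* CR2 as the interrupted code left it */

            handle_nmi_work();

            if (read_cr2() != cr2)            /* restore CR2 if the NMI path changed it */
                    write_cr2(cr2);
    }
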
1162 files changed:
.mailmap
Documentation/ABI/testing/sysfs-block-rssd
Documentation/ABI/testing/sysfs-bus-iio
Documentation/ABI/testing/sysfs-class-mtd
Documentation/DocBook/media/v4l/controls.xml
Documentation/DocBook/media/v4l/pixfmt.xml
Documentation/DocBook/media/v4l/v4l2.xml
Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
Documentation/DocBook/media/v4l/vidioc-dqevent.xml
Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
Documentation/RCU/checklist.txt
Documentation/RCU/rcubarrier.txt
Documentation/RCU/torture.txt
Documentation/RCU/whatisRCU.txt
Documentation/arm/SPEAr/overview.txt
Documentation/device-mapper/verity.txt
Documentation/devicetree/bindings/input/fsl-mma8450.txt
Documentation/devicetree/bindings/mfd/mc13xxx.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/hwmon/coretemp
Documentation/kdump/kdump.txt
Documentation/kernel-parameters.txt
Documentation/prctl/no_new_privs.txt [new file with mode: 0644]
Documentation/stable_kernel_rules.txt
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/dts/mmp2-brownstone.dts
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/spear1310-evb.dts
arch/arm/boot/dts/spear1310.dtsi
arch/arm/boot/dts/spear1340-evb.dts
arch/arm/boot/dts/spear1340.dtsi
arch/arm/boot/dts/spear13xx.dtsi
arch/arm/boot/dts/spear300-evb.dts
arch/arm/boot/dts/spear300.dtsi
arch/arm/boot/dts/spear310-evb.dts
arch/arm/boot/dts/spear310.dtsi
arch/arm/boot/dts/spear320-evb.dts
arch/arm/boot/dts/spear320.dtsi
arch/arm/boot/dts/spear3xx.dtsi
arch/arm/boot/dts/spear600.dtsi
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/atomic.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/futex.h
arch/arm/include/asm/hardware/sp810.h
arch/arm/include/asm/thread_info.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/kprobes-thumb.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/signal.c
arch/arm/kernel/signal.h
arch/arm/kernel/traps.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mach-dove/include/mach/bridge-regs.h
arch/arm/mach-dove/include/mach/dove.h
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-highbank/Makefile
arch/arm/mach-highbank/core.h
arch/arm/mach-highbank/highbank.c
arch/arm/mach-highbank/smc.S [new file with mode: 0644]
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clk-imx1.c
arch/arm/mach-imx/clk-imx21.c
arch/arm/mach-imx/clk-imx25.c
arch/arm/mach-imx/clk-imx27.c
arch/arm/mach-imx/clk-imx31.c
arch/arm/mach-imx/clk-imx35.c
arch/arm/mach-imx/clk-imx51-imx53.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/clk-pllv2.c
arch/arm/mach-imx/crm-regs-imx5.h
arch/arm/mach-imx/hotplug.c
arch/arm/mach-imx/mach-cpuimx35.c
arch/arm/mach-imx/mach-cpuimx51sd.c
arch/arm/mach-imx/mach-imx27_visstrim_m10.c
arch/arm/mach-imx/mach-mx21ads.c
arch/arm/mach-imx/mm-imx3.c
arch/arm/mach-imx/mm-imx5.c
arch/arm/mach-kirkwood/board-iconnect.c
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/include/mach/bridge-regs.h
arch/arm/mach-kirkwood/include/mach/kirkwood.h
arch/arm/mach-mmp/include/mach/gpio-pxa.h [deleted file]
arch/arm/mach-mmp/irq.c
arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
arch/arm/mach-mxs/mach-apx4devkit.c
arch/arm/mach-omap2/board-flash.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/clock44xx_data.c
arch/arm/mach-omap2/clockdomain.h
arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
arch/arm/mach-omap2/clockdomains44xx_data.c
arch/arm/mach-omap2/cm.h
arch/arm/mach-omap2/cminst44xx.c
arch/arm/mach-omap2/dsp.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/irq.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/mux.h
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_l3_smx.c
arch/arm/mach-omap2/omap_phy_internal.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/prm2xxx_3xxx.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-omap2/usb-musb.c
arch/arm/mach-omap2/usb-tusb6010.c
arch/arm/mach-orion5x/include/mach/bridge-regs.h
arch/arm/mach-orion5x/include/mach/io.h [new file with mode: 0644]
arch/arm/mach-orion5x/include/mach/orion5x.h
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-s3c24xx/clock-s3c2440.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-kzm9d.c
arch/arm/mach-shmobile/board-kzm9g.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/mach-shmobile/intc-r8a7779.c
arch/arm/mach-shmobile/platsmp.c
arch/arm/mach-shmobile/setup-sh7372.c
arch/arm/mach-spear13xx/include/mach/debug-macro.S
arch/arm/mach-spear13xx/include/mach/dma.h
arch/arm/mach-spear13xx/include/mach/generic.h
arch/arm/mach-spear13xx/include/mach/gpio.h
arch/arm/mach-spear13xx/include/mach/irqs.h
arch/arm/mach-spear13xx/include/mach/spear.h
arch/arm/mach-spear13xx/include/mach/timex.h
arch/arm/mach-spear13xx/include/mach/uncompress.h
arch/arm/mach-spear13xx/spear1310.c
arch/arm/mach-spear13xx/spear1340.c
arch/arm/mach-spear13xx/spear13xx.c
arch/arm/mach-spear3xx/include/mach/debug-macro.S
arch/arm/mach-spear3xx/include/mach/generic.h
arch/arm/mach-spear3xx/include/mach/gpio.h
arch/arm/mach-spear3xx/include/mach/irqs.h
arch/arm/mach-spear3xx/include/mach/misc_regs.h
arch/arm/mach-spear3xx/include/mach/spear.h
arch/arm/mach-spear3xx/include/mach/timex.h
arch/arm/mach-spear3xx/include/mach/uncompress.h
arch/arm/mach-spear3xx/spear300.c
arch/arm/mach-spear3xx/spear310.c
arch/arm/mach-spear3xx/spear320.c
arch/arm/mach-spear3xx/spear3xx.c
arch/arm/mach-spear6xx/include/mach/gpio.h
arch/arm/mach-spear6xx/include/mach/misc_regs.h
arch/arm/mach-spear6xx/spear6xx.c
arch/arm/mach-tegra/reset.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/timer.c
arch/arm/mach-versatile/core.c
arch/arm/mach-versatile/include/mach/hardware.h
arch/arm/mach-versatile/include/mach/io.h [new file with mode: 0644]
arch/arm/mach-versatile/pci.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/arm/net/bpf_jit_32.c
arch/arm/net/bpf_jit_32.h
arch/arm/plat-mxc/epit.c
arch/arm/plat-mxc/include/mach/common.h
arch/arm/plat-mxc/include/mach/mx2_cam.h
arch/arm/plat-mxc/time.c
arch/arm/plat-omap/clock.c
arch/arm/plat-omap/include/plat/cpu.h
arch/arm/plat-omap/include/plat/mmc.h
arch/arm/plat-orion/common.c
arch/arm/plat-pxa/ssp.c
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/devs.c
arch/arm/plat-samsung/include/plat/map-s3c.h
arch/arm/plat-samsung/include/plat/watchdog-reset.h
arch/arm/plat-samsung/s5p-clock.c
arch/arm/plat-spear/include/plat/debug-macro.S
arch/arm/plat-spear/include/plat/pl080.h
arch/arm/plat-spear/include/plat/shirq.h
arch/arm/plat-spear/include/plat/timex.h
arch/arm/plat-spear/include/plat/uncompress.h
arch/arm/plat-spear/pl080.c
arch/arm/plat-spear/restart.c
arch/arm/plat-spear/shirq.c
arch/h8300/include/asm/pgtable.h
arch/h8300/include/asm/uaccess.h
arch/h8300/kernel/setup.c
arch/h8300/kernel/signal.c
arch/h8300/kernel/time.c
arch/h8300/mm/init.c
arch/hexagon/kernel/smp.c
arch/ia64/kernel/smpboot.c
arch/m32r/boot/compressed/Makefile
arch/m32r/boot/compressed/misc.c
arch/m32r/include/asm/ptrace.h
arch/m32r/include/asm/smp.h
arch/m32r/kernel/ptrace.c
arch/m32r/kernel/signal.c
arch/m68k/platform/coldfire/clk.c
arch/mips/Kconfig
arch/mips/bcm47xx/Kconfig
arch/mips/bcm63xx/dev-pcmcia.c
arch/mips/cavium-octeon/Kconfig
arch/mips/cavium-octeon/smp.c
arch/mips/include/asm/bitops.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/gic.h
arch/mips/include/asm/inst.h
arch/mips/include/asm/io.h
arch/mips/include/asm/irq.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
arch/mips/include/asm/mips-boards/maltaint.h
arch/mips/include/asm/mipsmtregs.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/thread_info.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp.c
arch/mips/kernel/smtc.c
arch/mips/kernel/sync-r4k.c
arch/mips/kernel/traps.c
arch/mips/kernel/vmlinux.lds.S
arch/mips/mm/Makefile
arch/mips/mm/c-r4k.c
arch/mips/mm/page-funcs.S [new file with mode: 0644]
arch/mips/mm/page.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-pci.c
arch/mips/mti-malta/malta-setup.c
arch/mips/netlogic/xlp/setup.c
arch/mips/oprofile/common.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/fixup-fuloong2e.c
arch/mips/pci/fixup-lemote2f.c
arch/mips/pci/fixup-malta.c
arch/mips/pci/fixup-mpc30x.c
arch/mips/pci/fixup-sb1250.c
arch/mips/pci/ops-tx4927.c
arch/mips/pci/pci-ip27.c
arch/mips/pci/pci-lantiq.c
arch/mips/pci/pci-xlr.c
arch/mips/pmc-sierra/yosemite/smp.c
arch/mips/powertv/asic/asic-calliope.c
arch/mips/powertv/asic/asic-cronus.c
arch/mips/powertv/asic/asic-gaia.c
arch/mips/powertv/asic/asic-zeus.c
arch/mips/txx9/generic/pci.c
arch/mn10300/include/asm/ptrace.h
arch/mn10300/include/asm/thread_info.h
arch/mn10300/include/asm/timex.h
arch/mn10300/kernel/cevt-mn10300.c
arch/mn10300/kernel/internal.h
arch/mn10300/kernel/irq.c
arch/mn10300/kernel/signal.c
arch/mn10300/kernel/smp.c
arch/mn10300/kernel/traps.c
arch/mn10300/mm/dma-alloc.c
arch/mn10300/unit-asb2303/include/unit/timex.h
arch/mn10300/unit-asb2303/smc91111.c
arch/mn10300/unit-asb2305/include/unit/timex.h
arch/mn10300/unit-asb2305/unit-init.c
arch/mn10300/unit-asb2364/include/unit/timex.h
arch/parisc/kernel/smp.c
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/smp.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/mm/numa.c
arch/powerpc/net/bpf_jit_64.S
arch/powerpc/platforms/cell/pervasive.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/nvram.c
arch/powerpc/platforms/pseries/processor_idle.c
arch/powerpc/xmon/xmon.c
arch/s390/kernel/smp.c
arch/sh/boards/mach-kfr2r09/setup.c
arch/sh/drivers/pci/pcie-sh7786.c
arch/sh/include/asm/io_noioport.h
arch/sh/kernel/cpu/sh3/serial-sh7720.c
arch/sh/kernel/cpu/sh4a/clock-sh7343.c
arch/sh/kernel/cpu/sh4a/clock-sh7366.c
arch/sh/kernel/cpu/sh4a/clock-sh7722.c
arch/sh/kernel/cpu/sh4a/clock-sh7723.c
arch/sh/kernel/cpu/sh4a/clock-sh7724.c
arch/sh/kernel/cpu/sh4a/clock-sh7734.c
arch/sh/kernel/cpu/sh4a/clock-sh7757.c
arch/sh/kernel/cpu/sh4a/clock-sh7785.c
arch/sh/kernel/cpu/sh4a/clock-sh7786.c
arch/sh/kernel/cpu/sh4a/clock-shx3.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/vio.c
arch/tile/kernel/backtrace.c
arch/tile/kernel/smpboot.c
arch/um/drivers/mconsole_kern.c
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/alternative.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/uprobes.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mkcapflags.pl
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event_intel_uncore.h [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/cpu/perf_event_p6.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/microcode_core.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/process_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/uprobes.c
arch/x86/kernel/vsyscall_64.c
arch/x86/kernel/x8664_ksyms_64.c
arch/x86/kvm/mmu.c
arch/x86/kvm/pmu.c
arch/x86/kvm/trace.h
arch/x86/lib/csum-wrappers_64.c
arch/x86/lib/msr-reg-export.c
arch/x86/lib/msr-reg.S
arch/x86/oprofile/op_model_amd.c
arch/x86/platform/mrst/early_printk_mrst.c
arch/x86/vdso/vdso32-setup.c
arch/x86/xen/enlighten.c
arch/x86/xen/smp.c
arch/xtensa/Makefile
arch/xtensa/kernel/process.c
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/mm/init.c
block/blk-cgroup.c
block/blk-core.c
block/blk-timeout.c
block/cfq-iosched.c
block/scsi_ioctl.c
drivers/acpi/acpi_pad.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/apei-internal.h
drivers/acpi/apei/ghes.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/sysfs.c
drivers/acpi/video.c
drivers/ata/pata_arasan_cf.c
drivers/base/dd.c
drivers/base/power/main.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_req.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/rbd.c
drivers/block/umem.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkfront.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btusb.c
drivers/char/hw_random/atmel-rng.c
drivers/clk/clk.c
drivers/clk/mxs/clk-imx23.c
drivers/clk/mxs/clk-imx28.c
drivers/clk/spear/clk-aux-synth.c
drivers/clk/spear/clk-frac-synth.c
drivers/clk/spear/clk-gpt-synth.c
drivers/clk/spear/clk-vco-pll.c
drivers/clk/spear/clk.c
drivers/clk/spear/clk.h
drivers/clk/spear/spear1310_clock.c
drivers/clk/spear/spear1340_clock.c
drivers/clk/spear/spear3xx_clock.c
drivers/clk/spear/spear6xx_clock.c
drivers/dma/dw_dmac.c
drivers/dma/imx-sdma.c
drivers/dma/pl330.c
drivers/edac/edac_mc.c
drivers/edac/i7core_edac.c
drivers/edac/mpc85xx_edac.c
drivers/edac/sb_edac.c
drivers/extcon/extcon-max8997.c
drivers/extcon/extcon_class.c
drivers/extcon/extcon_gpio.c
drivers/gpio/Kconfig
drivers/gpio/devres.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-sta2x11.c
drivers/gpio/gpio-tps65910.c
drivers/gpio/gpio-wm8994.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/gma500/cdv_device.c
drivers/gpu/drm/gma500/opregion.c
drivers/gpu/drm/gma500/opregion.h
drivers/gpu/drm/gma500/psb_device.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_prime.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_reg.h
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/via/via_map.c
drivers/hid/Kconfig
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/Kconfig
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/applesmc.c
drivers/hwmon/coretemp.c
drivers/hwmon/emc2103.c
drivers/hwmon/it87.c
drivers/hwmon/jc42.c
drivers/hwmon/lineage-pem.c
drivers/hwmon/ltc4261.c
drivers/hwmon/max16065.c
drivers/hwspinlock/hwspinlock_core.c
drivers/iio/Kconfig
drivers/iio/industrialio-core.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/input/joystick/as5011.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/mcs_touchkey.c
drivers/input/keyboard/mpr121_touchkey.c
drivers/input/keyboard/qt1070.c
drivers/input/keyboard/tca6416-keypad.c
drivers/input/keyboard/tca8418_keypad.c
drivers/input/keyboard/tnetv107x-keypad.c
drivers/input/misc/ad714x.c
drivers/input/misc/dm355evm_keys.c
drivers/input/mouse/bcm5974.c
drivers/input/tablet/wacom_sys.c
drivers/input/touchscreen/ad7879.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/bu21013_ts.c
drivers/input/touchscreen/cy8ctmg110_ts.c
drivers/input/touchscreen/intel-mid-touch.c
drivers/input/touchscreen/pixcir_i2c_ts.c
drivers/input/touchscreen/tnetv107x-ts.c
drivers/input/touchscreen/tsc2005.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/dmar.c
drivers/iommu/tegra-smmu.c
drivers/isdn/mISDN/stack.c
drivers/leds/ledtrig-heartbeat.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/persistent-data/dm-space-map-checker.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/saa7146_fops.c
drivers/media/dvb/dvb-core/dvbdev.c
drivers/media/dvb/frontends/cx24110.c
drivers/media/dvb/frontends/cxd2820r_c.c
drivers/media/dvb/frontends/lg2160.c
drivers/media/dvb/siano/smsusb.c
drivers/media/radio/radio-maxiradio.c
drivers/media/radio/radio-sf16fmr2.c
drivers/media/radio/si470x/radio-si470x-usb.c
drivers/media/rc/winbond-cir.c
drivers/media/video/bt8xx/bttv-cards.c
drivers/media/video/bt8xx/bttv-driver.c
drivers/media/video/bt8xx/bttv.h
drivers/media/video/bt8xx/bttvp.h
drivers/media/video/bw-qcam.c
drivers/media/video/cx18/cx18-driver.c
drivers/media/video/cx18/cx18-driver.h
drivers/media/video/cx18/cx18-firmware.c
drivers/media/video/cx18/cx18-mailbox.c
drivers/media/video/cx231xx/cx231xx-audio.c
drivers/media/video/cx231xx/cx231xx-vbi.c
drivers/media/video/cx23885/cx23885-cards.c
drivers/media/video/cx23885/cx23885-dvb.c
drivers/media/video/cx23885/cx23885-video.c
drivers/media/video/cx23885/cx23885.h
drivers/media/video/cx25821/cx25821-core.c
drivers/media/video/cx25821/cx25821.h
drivers/media/video/cx25840/cx25840-core.c
drivers/media/video/cx88/cx88-blackbird.c
drivers/media/video/em28xx/em28xx-cards.c
drivers/media/video/em28xx/em28xx-input.c
drivers/media/video/gspca/gspca.c
drivers/media/video/gspca/ov534.c
drivers/media/video/gspca/ov534_9.c
drivers/media/video/gspca/pac7311.c
drivers/media/video/gspca/sn9c20x.c
drivers/media/video/gspca/sonixj.c
drivers/media/video/ivtv/ivtv-driver.c
drivers/media/video/ivtv/ivtv-driver.h
drivers/media/video/mem2mem_testdev.c
drivers/media/video/mx2_camera.c
drivers/media/video/omap3isp/isppreview.c
drivers/media/video/pms.c
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-lite.c
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-fimc/fimc-mdevice.h
drivers/media/video/s5p-mfc/regs-mfc.h
drivers/media/video/s5p-mfc/s5p_mfc_dec.c
drivers/media/video/s5p-mfc/s5p_mfc_enc.c
drivers/media/video/s5p-mfc/s5p_mfc_opr.h
drivers/media/video/s5p-mfc/s5p_mfc_shm.h
drivers/media/video/smiapp/Kconfig
drivers/media/video/smiapp/smiapp-core.c
drivers/media/video/tuner-core.c
drivers/media/video/v4l2-dev.c
drivers/media/video/v4l2-ioctl.c
drivers/media/video/vino.c
drivers/media/video/vivi.c
drivers/mfd/Kconfig
drivers/mfd/ab5500-core.h [deleted file]
drivers/mfd/mc13xxx-spi.c
drivers/mfd/omap-usb-host.c
drivers/mfd/palmas.c
drivers/mfd/stmpe-i2c.c
drivers/mfd/stmpe-spi.c
drivers/misc/mei/interrupt.c
drivers/misc/mei/main.c
drivers/misc/mei/wd.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/mmc/card/block.c
drivers/mmc/core/cd-gpio.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/host/atmel-mci-regs.h
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci-spear.c
drivers/mmc/host/sdhci.c
drivers/mtd/mtdoops.c
drivers/mtd/nand/cafe_nand.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nandsim.c
drivers/mtd/ubi/debug.c
drivers/net/bonding/bond_debugfs.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/caif/caif_hsi.c
drivers/net/can/c_can/c_can.c
drivers/net/can/flexcan.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/phy/mdio-mux.c
drivers/net/phy/micrel.c
drivers/net/usb/ipheth.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/wireless/airo.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/key.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/dma.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlwifi/iwl-debugfs.c
drivers/net/wireless/iwlwifi/iwl-mac80211.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/ti/wl1251/acx.c
drivers/net/wireless/ti/wl1251/event.c
drivers/net/wireless/ti/wl1251/spi.c
drivers/net/wireless/ti/wlcore/Kconfig
drivers/net/xen-netfront.c
drivers/of/platform.c
drivers/oprofile/oprofile_perf.c
drivers/pci/pci-driver.c
drivers/pci/pci.c
drivers/pci/quirks.c
drivers/pinctrl/pinctrl-imx.c
drivers/pinctrl/pinctrl-imx6q.c
drivers/pinctrl/pinctrl-mxs.c
drivers/pinctrl/pinctrl-nomadik.c
drivers/pinctrl/spear/pinctrl-spear.c
drivers/pinctrl/spear/pinctrl-spear.h
drivers/pinctrl/spear/pinctrl-spear1310.c
drivers/pinctrl/spear/pinctrl-spear1340.c
drivers/pinctrl/spear/pinctrl-spear300.c
drivers/pinctrl/spear/pinctrl-spear310.c
drivers/pinctrl/spear/pinctrl-spear320.c
drivers/pinctrl/spear/pinctrl-spear3xx.c
drivers/pinctrl/spear/pinctrl-spear3xx.h
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_ips.c
drivers/platform/x86/sony-laptop.c
drivers/regulator/ab8500.c
drivers/regulator/core.c
drivers/regulator/db8500-prcmu.c
drivers/regulator/palmas-regulator.c
drivers/regulator/s5m8767.c
drivers/regulator/tps65023-regulator.c
drivers/regulator/tps6524x-regulator.c
drivers/remoteproc/Kconfig
drivers/remoteproc/omap_remoteproc.c
drivers/remoteproc/remoteproc_core.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/rtc-ab8500.c
drivers/rtc/rtc-mxc.c
drivers/rtc/rtc-spear.c
drivers/rtc/rtc-twl.c
drivers/scsi/aic94xx/aic94xx_task.c
drivers/scsi/bnx2i/bnx2i.h
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_wait_scan.c
drivers/scsi/sd.c
drivers/spi/spi-omap2-mcspi.c
drivers/staging/comedi/drivers.c
drivers/staging/gdm72xx/netlink_k.c
drivers/staging/iio/Documentation/device.txt
drivers/staging/iio/adc/Kconfig
drivers/staging/iio/adc/ad7606_core.c
drivers/staging/media/lirc/lirc_serial.c
drivers/staging/omapdrm/omap_fbdev.c
drivers/staging/rtl8712/usb_intf.c
drivers/target/target_core_cdb.c
drivers/target/target_core_pr.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_sess.c
drivers/tty/hvc/hvc_opal.c
drivers/tty/serial/8250/8250.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/serial_txx9.c
drivers/usb/Makefile
drivers/usb/class/cdc-wdm.c
drivers/usb/core/hub.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/fsl_qe_udc.h
drivers/usb/gadget/lpc32xx_udc.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_host.c
drivers/usb/otg/twl6030-usb.c
drivers/usb/phy/Kconfig
drivers/usb/serial/cp210x.c
drivers/usb/serial/metro-usb.c
drivers/usb/serial/option.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/unusual_devs.h
drivers/vhost/vhost.c
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/venc.c
drivers/virtio/virtio_balloon.c
drivers/watchdog/hpwdt.c
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/sp805_wdt.c
drivers/watchdog/watchdog_dev.c
fs/btrfs/backref.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.h
fs/btrfs/super.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/buffer.c
fs/ceph/addr.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/readdir.c
fs/cifs/transport.c
fs/ecryptfs/kthread.c
fs/ecryptfs/miscdev.c
fs/eventpoll.c
fs/exec.c
fs/exofs/ore.c
fs/exofs/ore_raid.c
fs/ext4/ioctl.c
fs/fat/inode.c
fs/fifo.c
fs/hfsplus/ioctl.c
fs/hfsplus/wrapper.c
fs/locks.c
fs/nfs/client.c
fs/nfs/direct.c
fs/nfs/idmap.c
fs/nfs/inode.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/pnfs.c
fs/nfs/super.c
fs/nilfs2/gcinode.c
fs/nilfs2/segment.c
fs/ocfs2/dlmglue.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/quota_global.c
fs/open.c
fs/pstore/inode.c
fs/pstore/platform.c
fs/pstore/ram.c
fs/pstore/ram_core.c
fs/ramfs/file-nommu.c
fs/splice.c
fs/ubifs/debug.c
fs/ubifs/find.c
fs/ubifs/sb.c
fs/udf/super.c
fs/xfs/xfs_alloc.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_sync.c
fs/xfs/xfs_trace.h
include/asm-generic/bug.h
include/asm-generic/dma-contiguous.h
include/asm-generic/pgtable.h
include/drm/drm_pciids.h
include/linux/aio.h
include/linux/blkdev.h
include/linux/bootmem.h
include/linux/capability.h
include/linux/ceph/messenger.h
include/linux/compiler-gcc.h
include/linux/device.h
include/linux/eventpoll.h
include/linux/ftrace_event.h
include/linux/gpio.h
include/linux/hrtimer.h
include/linux/init_task.h
include/linux/input.h
include/linux/intel-iommu.h
include/linux/irq.h
include/linux/jump_label.h
include/linux/kernel.h
include/linux/key.h
include/linux/kmsg_dump.h
include/linux/kvm_host.h
include/linux/memblock.h
include/linux/mm_types.h
include/linux/mmc/sdhci-spear.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/nfs_fs_sb.h
include/linux/pata_arasan_cf_data.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/prctl.h
include/linux/pstore_ram.h
include/linux/pxa2xx_ssp.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rpmsg.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/smp.h
include/linux/spi/pxa2xx_spi.h
include/linux/splice.h
include/linux/tick.h
include/linux/tracepoint.h
include/linux/vga_switcheroo.h
include/linux/videodev2.h
include/net/bluetooth/hci.h
include/net/ip_vs.h
include/net/mac80211.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/phonet/gprs.h
include/net/sctp/structs.h
include/net/sctp/tsnmap.h
include/scsi/libsas.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_device.h
include/sound/tea575x-tuner.h
include/trace/events/rcu.h
include/trace/ftrace.h
kernel/cgroup.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/events/core.c
kernel/events/uprobes.c
kernel/exit.c
kernel/fork.c
kernel/hrtimer.c
kernel/pid_namespace.c
kernel/power/hibernate.c
kernel/power/user.c
kernel/printk.c
kernel/rcupdate.c
kernel/rcutiny.c
kernel/rcutiny_plugin.h
kernel/rcutorture.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/rcutree_trace.c
kernel/relay.c
kernel/sched/core.c
kernel/sched/idle_task.c
kernel/sched/sched.h
kernel/smp.c
kernel/smpboot.h
kernel/sys.c
kernel/time/ntp.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/time/timer_list.c
kernel/timer.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c
kernel/trace/trace_output.c
lib/dma-debug.c
lib/fault-inject.c
lib/list_debug.c
mm/bootmem.c
mm/compaction.c
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/nobootmem.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_cgroup.c
mm/pagewalk.c
mm/percpu-vm.c
mm/shmem.c
mm/sparse.c
mm/vmscan.c
net/8021q/vlan.c
net/9p/protocol.c
net/9p/trans_virtio.c
net/ax25/af_ax25.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/routing.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/Kconfig
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/bridge/br_if.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/caif/caif_dev.c
net/caif/caif_socket.c
net/can/raw.c
net/ceph/ceph_common.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/core/dev.c
net/core/net_namespace.c
net/core/netprio_cgroup.c
net/core/skbuff.c
net/ieee802154/dgram.c
net/ipv4/cipso_ipv4.c
net/ipv6/ip6_fib.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/iucv/af_iucv.c
net/l2tp/l2tp_eth.c
net/mac80211/cfg.c
net/mac80211/mlme.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/sta_info.h
net/mac802154/tx.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nfnetlink.c
net/netfilter/xt_set.c
net/nfc/llcp/sock.c
net/nfc/nci/ntf.c
net/nfc/rawsock.c
net/phonet/af_phonet.c
net/phonet/datagram.c
net/phonet/pep-gprs.c
net/phonet/pep.c
net/phonet/pn_dev.c
net/phonet/pn_netlink.c
net/phonet/socket.c
net/phonet/sysctl.c
net/rxrpc/ar-peer.c
net/sched/sch_netem.c
net/sched/sch_sfb.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/output.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/transport.c
net/sctp/tsnmap.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
net/wireless/reg.c
net/wireless/util.c
scripts/get_maintainer.pl
scripts/gfp-translate [changed mode: 0644->0755]
security/security.c
security/selinux/hooks.c
security/selinux/include/classmap.h
sound/i2c/other/tea575x-tuner.c
sound/pci/es1968.c
sound/pci/fm801.c
sound/pci/hda/Kconfig
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_auto_parser.h
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/tlv320aic3x.h
sound/soc/codecs/wm2200.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8996.c
sound/soc/pxa/pxa-ssp.c
sound/soc/tegra/tegra_wm8903.c
sound/usb/6fire/firmware.c
sound/usb/endpoint.c
sound/usb/mixer_maps.c
sound/usb/pcm.c
sound/usb/quirks-table.h
tools/hv/hv_kvp_daemon.c
tools/lib/traceevent/Makefile
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/parse-filter.c
tools/perf/Documentation/perf-bench.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-top.txt
tools/perf/Makefile
tools/perf/bench/mem-memcpy.c
tools/perf/bench/mem-memset.c
tools/perf/builtin-bench.c
tools/perf/builtin-evlist.c
tools/perf/builtin-kmem.c
tools/perf/builtin-lock.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/builtin-top.c
tools/perf/config/feature-tests.mak
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/gtk/browser.c
tools/perf/ui/gtk/gtk.h
tools/perf/ui/gtk/setup.c
tools/perf/ui/gtk/util.c [new file with mode: 0644]
tools/perf/ui/tui/setup.c
tools/perf/ui/tui/util.c [new file with mode: 0644]
tools/perf/ui/util.c
tools/perf/ui/util.h
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/hist.h
tools/perf/util/include/linux/bitops.h
tools/perf/util/include/linux/kernel.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/parse-events-test.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/string.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/top.c
tools/perf/util/trace-event-parse.c
tools/perf/util/trace-event-read.c
tools/perf/util/trace-event-scripting.c
tools/perf/util/trace-event.h
tools/perf/util/util.h
virt/kvm/assigned-dev.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

index 2909c33..658003a 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -111,6 +111,7 @@ Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
index 679ce35..beef30c 100644 (file)
--- a/Documentation/ABI/testing/sysfs-block-rssd
+++ b/Documentation/ABI/testing/sysfs-block-rssd
@@ -1,26 +1,5 @@
-What:           /sys/block/rssd*/registers
-Date:           March 2012
-KernelVersion:  3.3
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps below driver information and
-                hardware registers.
-                    - S ACTive
-                    - Command Issue
-                    - Completed
-                    - PORT IRQ STAT
-                    - HOST IRQ STAT
-                    - Allocated
-                    - Commands in Q
-
 What:           /sys/block/rssd*/status
 Date:           April 2012
 KernelVersion:  3.4
 Contact:        Asai Thambi S P <asamymuthupa@micron.com>
 Description:    This is a read-only file. Indicates the status of the device.
-
-What:           /sys/block/rssd*/flags
-Date:           May 2012
-KernelVersion:  3.5
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps the flags in port and driver
-                data structure
index 5bc8a47..cfedf63 100644 (file)
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -219,6 +219,7 @@ What:               /sys/bus/iio/devices/iio:deviceX/in_voltageY_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_voltage_scale
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_scale
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_accel_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_accel_peak_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_anglvel_scale
@@ -273,6 +274,7 @@ What:               /sys/bus/iio/devices/iio:deviceX/in_accel_scale_available
 What:          /sys/.../iio:deviceX/in_voltageX_scale_available
 What:          /sys/.../iio:deviceX/in_voltage-voltage_scale_available
 What:          /sys/.../iio:deviceX/out_voltageX_scale_available
+What:          /sys/.../iio:deviceX/out_altvoltageX_scale_available
 What:          /sys/.../iio:deviceX/in_capacitance_scale_available
 KernelVersion: 2.635
 Contact:       linux-iio@vger.kernel.org
@@ -298,14 +300,19 @@ Description:
                gives the 3dB frequency of the filter in Hz.
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_raw
 KernelVersion: 2.6.37
 Contact:       linux-iio@vger.kernel.org
 Description:
                Raw (unscaled, no bias etc.) output voltage for
                channel Y.  The number must always be specified and
                unique if the output corresponds to a single channel.
+               While DAC like devices typically use out_voltage,
+               a continuous frequency generating device, such as
+               a DDS or PLL should use out_altvoltage.
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY&Z_raw
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY&Z_raw
 KernelVersion: 2.6.37
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -316,6 +323,8 @@ Description:
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown_mode
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown_mode
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown_mode
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown_mode
 KernelVersion: 2.6.38
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -330,6 +339,8 @@ Description:
 
 What:          /sys/.../iio:deviceX/out_votlageY_powerdown_mode_available
 What:          /sys/.../iio:deviceX/out_voltage_powerdown_mode_available
+What:          /sys/.../iio:deviceX/out_altvotlageY_powerdown_mode_available
+What:          /sys/.../iio:deviceX/out_altvoltage_powerdown_mode_available
 KernelVersion: 2.6.38
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -338,6 +349,8 @@ Description:
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown
 KernelVersion: 2.6.38
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -346,6 +359,24 @@ Description:
                normal operation. Y may be suppressed if all outputs are
                controlled together.
 
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_frequency
+KernelVersion: 3.4.0
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Output frequency for channel Y in Hz. The number must always be
+               specified and unique if the output corresponds to a single
+               channel.
+
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_phase
+KernelVersion: 3.4.0
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Phase in radians of one frequency/clock output Y
+               (out_altvoltageY) relative to another frequency/clock output
+               (out_altvoltageZ) of the device X. The number must always be
+               specified and unique if the output corresponds to a single
+               channel.
+
 What:          /sys/bus/iio/devices/iio:deviceX/events
 KernelVersion: 2.6.35
 Contact:       linux-iio@vger.kernel.org
index db1ad7e..938ef71 100644 (file)
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -142,13 +142,14 @@ KernelVersion:    3.4
 Contact:       linux-mtd@lists.infradead.org
 Description:
                This allows the user to examine and adjust the criteria by which
-               mtd returns -EUCLEAN from mtd_read().  If the maximum number of
-               bit errors that were corrected on any single region comprising
-               an ecc step (as reported by the driver) equals or exceeds this
-               value, -EUCLEAN is returned.  Otherwise, absent an error, 0 is
-               returned.  Higher layers (e.g., UBI) use this return code as an
-               indication that an erase block may be degrading and should be
-               scrutinized as a candidate for being marked as bad.
+               mtd returns -EUCLEAN from mtd_read() and mtd_read_oob().  If the
+               maximum number of bit errors that were corrected on any single
+               region comprising an ecc step (as reported by the driver) equals
+               or exceeds this value, -EUCLEAN is returned.  Otherwise, absent
+               an error, 0 is returned.  Higher layers (e.g., UBI) use this
+               return code as an indication that an erase block may be
+               degrading and should be scrutinized as a candidate for being
+               marked as bad.
 
                The initial value may be specified by the flash device driver.
                If not, then the default value is ecc_strength.
@@ -167,7 +168,7 @@ Description:
                block degradation, but high enough to avoid the consequences of
                a persistent return value of -EUCLEAN on devices where sticky
                bitflips occur.  Note that if bitflip_threshold exceeds
-               ecc_strength, -EUCLEAN is never returned by mtd_read().
+               ecc_strength, -EUCLEAN is never returned by the read operations.
                Conversely, if bitflip_threshold is zero, -EUCLEAN is always
                returned, absent a hard error.
 
index 676bc46..cda0dfb 100644 (file)
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -3988,7 +3988,7 @@ interface and may change in the future.</para>
            from RGB to Y'CbCr color space.
            </entry>
          </row>
-         <row id = "v4l2-jpeg-chroma-subsampling">
+         <row>
            <entrytbl spanname="descr" cols="2">
              <tbody valign="top">
                <row>
index f5ac15e..e58934c 100644 (file)
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -986,13 +986,13 @@ http://www.thedirks.org/winnov/</ulink></para></entry>
          <row id="V4L2-PIX-FMT-Y4">
            <entry><constant>V4L2_PIX_FMT_Y4</constant></entry>
            <entry>'Y04 '</entry>
-           <entry>Old 4-bit greyscale format. Only the least significant 4 bits of each byte are used,
+           <entry>Old 4-bit greyscale format. Only the most significant 4 bits of each byte are used,
 the other bits are set to 0.</entry>
          </row>
          <row id="V4L2-PIX-FMT-Y6">
            <entry><constant>V4L2_PIX_FMT_Y6</constant></entry>
            <entry>'Y06 '</entry>
-           <entry>Old 6-bit greyscale format. Only the least significant 6 bits of each byte are used,
+           <entry>Old 6-bit greyscale format. Only the most significant 6 bits of each byte are used,
 the other bits are set to 0.</entry>
          </row>
        </tbody>
index 015c561..008c2d7 100644 (file)
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -560,6 +560,7 @@ and discussions on the V4L mailing list.</revremark>
     &sub-g-tuner;
     &sub-log-status;
     &sub-overlay;
+    &sub-prepare-buf;
     &sub-qbuf;
     &sub-querybuf;
     &sub-querycap;
@@ -567,7 +568,6 @@ and discussions on the V4L mailing list.</revremark>
     &sub-query-dv-preset;
     &sub-query-dv-timings;
     &sub-querystd;
-    &sub-prepare-buf;
     &sub-reqbufs;
     &sub-s-hw-freq-seek;
     &sub-streamon;
index 765549f..a2474ec 100644 (file)
--- a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
@@ -108,10 +108,9 @@ information.</para>
 /></entry>
          </row>
          <row>
-           <entry>__u32</entry>
+           <entry>struct&nbsp;v4l2_format</entry>
            <entry><structfield>format</structfield></entry>
-           <entry>Filled in by the application, preserved by the driver.
-           See <xref linkend="v4l2-format" />.</entry>
+           <entry>Filled in by the application, preserved by the driver.</entry>
          </row>
          <row>
            <entry>__u32</entry>
index e8714aa..98a856f 100644 (file)
--- a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
@@ -89,7 +89,7 @@
          <row>
            <entry></entry>
            <entry>&v4l2-event-frame-sync;</entry>
-            <entry><structfield>frame</structfield></entry>
+            <entry><structfield>frame_sync</structfield></entry>
            <entry>Event data for event V4L2_EVENT_FRAME_SYNC.</entry>
          </row>
          <row>
index e3d5afc..0a4b90f 100644 (file)
--- a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
@@ -284,13 +284,6 @@ These controls are described in <xref
            processing controls. These controls are described in <xref
            linkend="image-process-controls" />.</entry>
          </row>
-         <row>
-           <entry><constant>V4L2_CTRL_CLASS_JPEG</constant></entry>
-           <entry>0x9d0000</entry>
-           <entry>The class containing JPEG compression controls.
-These controls are described in <xref
-               linkend="jpeg-controls" />.</entry>
-         </row>
        </tbody>
       </tgroup>
     </table>
index 5c8d749..fc103d7 100644 (file)
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -162,9 +162,9 @@ over a rather long period of time, but improvements are always welcome!
                when publicizing a pointer to a structure that can
                be traversed by an RCU read-side critical section.
 
-5.     If call_rcu(), or a related primitive such as call_rcu_bh() or
-       call_rcu_sched(), is used, the callback function must be
-       written to be called from softirq context.  In particular,
+5.     If call_rcu(), or a related primitive such as call_rcu_bh(),
+       call_rcu_sched(), or call_srcu() is used, the callback function
+       must be written to be called from softirq context.  In particular,
        it cannot block.
 
 6.     Since synchronize_rcu() can block, it cannot be called from
@@ -202,11 +202,12 @@ over a rather long period of time, but improvements are always welcome!
        updater uses call_rcu_sched() or synchronize_sched(), then
        the corresponding readers must disable preemption, possibly
        by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
-       If the updater uses synchronize_srcu(), the the corresponding
-       readers must use srcu_read_lock() and srcu_read_unlock(),
-       and with the same srcu_struct.  The rules for the expedited
-       primitives are the same as for their non-expedited counterparts.
-       Mixing things up will result in confusion and broken kernels.
+       If the updater uses synchronize_srcu() or call_srcu(),
+       the the corresponding readers must use srcu_read_lock() and
+       srcu_read_unlock(), and with the same srcu_struct.  The rules for
+       the expedited primitives are the same as for their non-expedited
+       counterparts.  Mixing things up will result in confusion and
+       broken kernels.
 
        One exception to this rule: rcu_read_lock() and rcu_read_unlock()
        may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
@@ -333,14 +334,14 @@ over a rather long period of time, but improvements are always welcome!
        victim CPU from ever going offline.)
 
 14.    SRCU (srcu_read_lock(), srcu_read_unlock(), srcu_dereference(),
-       synchronize_srcu(), and synchronize_srcu_expedited()) may only
-       be invoked from process context.  Unlike other forms of RCU, it
-       -is- permissible to block in an SRCU read-side critical section
-       (demarked by srcu_read_lock() and srcu_read_unlock()), hence the
-       "SRCU": "sleepable RCU".  Please note that if you don't need
-       to sleep in read-side critical sections, you should be using
-       RCU rather than SRCU, because RCU is almost always faster and
-       easier to use than is SRCU.
+       synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu())
+       may only be invoked from process context.  Unlike other forms of
+       RCU, it -is- permissible to block in an SRCU read-side critical
+       section (demarked by srcu_read_lock() and srcu_read_unlock()),
+       hence the "SRCU": "sleepable RCU".  Please note that if you
+       don't need to sleep in read-side critical sections, you should be
+       using RCU rather than SRCU, because RCU is almost always faster
+       and easier to use than is SRCU.
 
        If you need to enter your read-side critical section in a
        hardirq or exception handler, and then exit that same read-side
@@ -353,8 +354,8 @@ over a rather long period of time, but improvements are always welcome!
        cleanup_srcu_struct().  These are passed a "struct srcu_struct"
        that defines the scope of a given SRCU domain.  Once initialized,
        the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock()
-       synchronize_srcu(), and synchronize_srcu_expedited().  A given
-       synchronize_srcu() waits only for SRCU read-side critical
+       synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu().
+       A given synchronize_srcu() waits only for SRCU read-side critical
        sections governed by srcu_read_lock() and srcu_read_unlock()
        calls that have been passed the same srcu_struct.  This property
        is what makes sleeping read-side critical sections tolerable --
@@ -374,7 +375,7 @@ over a rather long period of time, but improvements are always welcome!
        requiring SRCU's read-side deadlock immunity or low read-side
        realtime latency.
 
-       Note that, rcu_assign_pointer() relates to SRCU just as they do
+       Note that, rcu_assign_pointer() relates to SRCU just as it does
        to other forms of RCU.
 
 15.    The whole point of call_rcu(), synchronize_rcu(), and friends
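As a side note to the SRCU rules updated in the checklist.txt hunk above, here
is a minimal, hedged sketch of a matched srcu_read_lock()/srcu_read_unlock()
reader; the srcu_struct, the protected structure and the field names are
invented for illustration, and example_srcu is assumed to have been set up
with init_srcu_struct() beforehand:

    #include <linux/srcu.h>

    struct example_obj {
            int data;
    };

    static struct srcu_struct example_srcu;           /* hypothetical SRCU domain    */
    static struct example_obj __rcu *example_ptr;     /* hypothetical shared pointer */

    static int example_reader(void)
    {
            int idx, val;

            idx = srcu_read_lock(&example_srcu);      /* sleeping is allowed in here */
            val = srcu_dereference(example_ptr, &example_srcu)->data;
            srcu_read_unlock(&example_srcu, idx);

            return val;
    }
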
index e439a0e..38428c1 100644 (file)
--- a/Documentation/RCU/rcubarrier.txt
+++ b/Documentation/RCU/rcubarrier.txt
@@ -79,8 +79,6 @@ complete. Pseudo-code using rcu_barrier() is as follows:
    2. Execute rcu_barrier().
    3. Allow the module to be unloaded.
 
-Quick Quiz #1: Why is there no srcu_barrier()?
-
 The rcutorture module makes use of rcu_barrier in its exit function
 as follows:
 
@@ -162,7 +160,7 @@ for any pre-existing callbacks to complete.
 Then lines 55-62 print status and do operation-specific cleanup, and
 then return, permitting the module-unload operation to be completed.
 
-Quick Quiz #2: Is there any other situation where rcu_barrier() might
+Quick Quiz #1: Is there any other situation where rcu_barrier() might
        be required?
 
 Your module might have additional complications. For example, if your
@@ -242,7 +240,7 @@ reaches zero, as follows:
  4 complete(&rcu_barrier_completion);
  5 }
 
-Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
        immediately (thus incrementing rcu_barrier_cpu_count to the
        value one), but the other CPU's rcu_barrier_func() invocations
        are delayed for a full grace period? Couldn't this result in
@@ -259,12 +257,7 @@ so that your module may be safely unloaded.
 
 Answers to Quick Quizzes
 
-Quick Quiz #1: Why is there no srcu_barrier()?
-
-Answer: Since there is no call_srcu(), there can be no outstanding SRCU
-       callbacks. Therefore, there is no need to wait for them.
-
-Quick Quiz #2: Is there any other situation where rcu_barrier() might
+Quick Quiz #1: Is there any other situation where rcu_barrier() might
        be required?
 
 Answer: Interestingly enough, rcu_barrier() was not originally
@@ -278,7 +271,7 @@ Answer: Interestingly enough, rcu_barrier() was not originally
        implementing rcutorture, and found that rcu_barrier() solves
        this problem as well.
 
-Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
        immediately (thus incrementing rcu_barrier_cpu_count to the
        value one), but the other CPU's rcu_barrier_func() invocations
        are delayed for a full grace period? Couldn't this result in
index 4ddf391..7dce8a1 100644 (file)
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -174,11 +174,20 @@ torture_type      The type of RCU to test, with string values as follows:
                        and synchronize_rcu_bh_expedited().
 
                "srcu": srcu_read_lock(), srcu_read_unlock() and
+                       call_srcu().
+
+               "srcu_sync": srcu_read_lock(), srcu_read_unlock() and
                        synchronize_srcu().
 
                "srcu_expedited": srcu_read_lock(), srcu_read_unlock() and
                        synchronize_srcu_expedited().
 
+               "srcu_raw": srcu_read_lock_raw(), srcu_read_unlock_raw(),
+                       and call_srcu().
+
+               "srcu_raw_sync": srcu_read_lock_raw(), srcu_read_unlock_raw(),
+                       and synchronize_srcu().
+
                "sched": preempt_disable(), preempt_enable(), and
                        call_rcu_sched().
 
index 6bbe8dc..69ee188 100644 (file)
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -833,9 +833,9 @@ sched:      Critical sections       Grace period            Barrier
 
 SRCU:  Critical sections       Grace period            Barrier
 
-       srcu_read_lock          synchronize_srcu        N/A
-       srcu_read_unlock        synchronize_srcu_expedited
-       srcu_read_lock_raw
+       srcu_read_lock          synchronize_srcu        srcu_barrier
+       srcu_read_unlock        call_srcu
+       srcu_read_lock_raw      synchronize_srcu_expedited
        srcu_read_unlock_raw
        srcu_dereference
 
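The whatisRCU.txt hunk above adds call_srcu() and srcu_barrier() to the SRCU
API table; here is a minimal, hedged sketch of how the update side pairs them,
where every name except the SRCU primitives themselves is invented:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/srcu.h>

    struct example_obj {
            struct rcu_head rcu;
            int data;
    };

    static struct srcu_struct example_srcu;   /* set up elsewhere via init_srcu_struct() */

    static void example_free_cb(struct rcu_head *head)
    {
            kfree(container_of(head, struct example_obj, rcu));
    }

    static void example_retire(struct example_obj *obj)
    {
            /* Free obj once all current SRCU readers of example_srcu are done. */
            call_srcu(&example_srcu, &obj->rcu, example_free_cb);
    }

    static void example_teardown(void)
    {
            /* Wait for outstanding call_srcu() callbacks before tearing down,
             * mirroring the rcu_barrier() pattern described in rcubarrier.txt.
             */
            srcu_barrier(&example_srcu);
            cleanup_srcu_struct(&example_srcu);
    }
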
index 57aae77..65610bf 100644 (file)
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -60,4 +60,4 @@ Introduction
   Document Author
   ---------------
 
-  Viresh Kumar <viresh.kumar@st.com>, (c) 2010-2012 ST Microelectronics
+  Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics
index 32e4879..9884681 100644 (file)
--- a/Documentation/device-mapper/verity.txt
+++ b/Documentation/device-mapper/verity.txt
@@ -7,39 +7,39 @@ This target is read-only.
 
 Construction Parameters
 =======================
-    <version> <dev> <hash_dev> <hash_start>
+    <version> <dev> <hash_dev>
     <data_block_size> <hash_block_size>
     <num_data_blocks> <hash_start_block>
     <algorithm> <digest> <salt>
 
 <version>
-    This is the version number of the on-disk format.
+    This is the type of the on-disk hash format.
 
     0 is the original format used in the Chromium OS.
-       The salt is appended when hashing, digests are stored continuously and
-       the rest of the block is padded with zeros.
+      The salt is appended when hashing, digests are stored continuously and
+      the rest of the block is padded with zeros.
 
     1 is the current format that should be used for new devices.
-       The salt is prepended when hashing and each digest is
-       padded with zeros to the power of two.
+      The salt is prepended when hashing and each digest is
+      padded with zeros to the power of two.
 
 <dev>
-    This is the device containing the data the integrity of which needs to be
+    This is the device containing data, the integrity of which needs to be
     checked.  It may be specified as a path, like /dev/sdaX, or a device number,
     <major>:<minor>.
 
 <hash_dev>
-    This is the device that that supplies the hash tree data.  It may be
+    This is the device that supplies the hash tree data.  It may be
     specified similarly to the device path and may be the same device.  If the
-    same device is used, the hash_start should be outside of the dm-verity
-    configured device size.
+    same device is used, the hash_start should be outside the configured
+    dm-verity device.
 
 <data_block_size>
-    The block size on a data device.  Each block corresponds to one digest on
-    the hash device.
+    The block size on a data device in bytes.
+    Each block corresponds to one digest on the hash device.
 
 <hash_block_size>
-    The size of a hash block.
+    The size of a hash block in bytes.
 
 <num_data_blocks>
     The number of data blocks on the data device.  Additional blocks are
@@ -65,7 +65,7 @@ Construction Parameters
 Theory of operation
 ===================
 
-dm-verity is meant to be setup as part of a verified boot path.  This
+dm-verity is meant to be set up as part of a verified boot path.  This
 may be anything ranging from a boot using tboot or trustedgrub to just
 booting from a known-good device (like a USB drive or CD).
 
@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
 has been authenticated in some way (cryptographic signatures, etc).
 After instantiation, all hashes will be verified on-demand during
 disk access.  If they cannot be verified up to the root node of the
-tree, the root hash, then the I/O will fail.  This should identify
+tree, the root hash, then the I/O will fail.  This should detect
 tampering with any data on the device and the hash data.
 
 Cryptographic hashes are used to assert the integrity of the device on a
-per-block basis.  This allows for a lightweight hash computation on first read
-into the page cache.  Block hashes are stored linearly-aligned to the nearest
-block the size of a page.
+per-block basis. This allows for a lightweight hash computation on first read
+into the page cache. Block hashes are stored linearly, aligned to the nearest
+block size.
 
 Hash Tree
 ---------
 
 Each node in the tree is a cryptographic hash.  If it is a leaf node, the hash
-is of some block data on disk.  If it is an intermediary node, then the hash is
-of a number of child nodes.
+of some data block on disk is calculated. If it is an intermediary node,
+the hash of a number of child nodes is calculated.
 
 Each entry in the tree is a collection of neighboring nodes that fit in one
 block.  The number is determined based on block_size and the size of the
@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096
 On-disk format
 ==============
 
-Below is the recommended on-disk format. The verity kernel code does not
-read the on-disk header. It only reads the hash blocks which directly
-follow the header. It is expected that a user-space tool will verify the
-integrity of the verity_header and then call dmsetup with the correct
-parameters. Alternatively, the header can be omitted and the dmsetup
-parameters can be passed via the kernel command-line in a rooted chain
-of trust where the command-line is verified.
+The verity kernel code does not read the verity metadata on-disk header.
+It only reads the hash blocks which directly follow the header.
+It is expected that a user-space tool will verify the integrity of the
+verity header.
 
-The on-disk format is especially useful in cases where the hash blocks
-are on a separate partition. The magic number allows easy identification
-of the partition contents. Alternatively, the hash blocks can be stored
-in the same partition as the data to be verified. In such a configuration
-the filesystem on the partition would be sized a little smaller than
-the full-partition, leaving room for the hash blocks.
-
-struct superblock {
-       uint8_t signature[8]
-               "verity\0\0";
-
-       uint8_t version;
-               1 - current format
-
-       uint8_t data_block_bits;
-               log2(data block size)
-
-       uint8_t hash_block_bits;
-               log2(hash block size)
-
-       uint8_t pad1[1];
-               zero padding
-
-       uint16_t salt_size;
-               big-endian salt size
-
-       uint8_t pad2[2];
-               zero padding
-
-       uint32_t data_blocks_hi;
-               big-endian high 32 bits of the 64-bit number of data blocks
-
-       uint32_t data_blocks_lo;
-               big-endian low 32 bits of the 64-bit number of data blocks
-
-       uint8_t algorithm[16];
-               cryptographic algorithm
-
-       uint8_t salt[384];
-               salt (the salt size is specified above)
-
-       uint8_t pad3[88];
-               zero padding to 512-byte boundary
-}
+Alternatively, the header can be omitted and the dmsetup parameters can
+be passed via the kernel command-line in a rooted chain of trust where
+the command-line is verified.
 
 Directly following the header (and with sector number padded to the next hash
 block boundary) are the hash blocks which are stored a depth at a time
 (starting from the root), sorted in order of increasing index.
 
+The full specification of kernel parameters and on-disk metadata format
+is available at the cryptsetup project's wiki page
+  http://code.google.com/p/cryptsetup/wiki/DMVerity
+
 Status
 ======
 V (for Valid) is returned if every check performed so far was valid.
@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.
 
 Example
 =======
-
-Setup a device:
-  dmsetup create vroot --table \
-    "0 2097152 "\
-    "verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
+Set up a device:
+  # dmsetup create vroot --readonly --table \
+    "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
     "4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
     "1234000000000000000000000000000000000000000000000000000000000000"
 
 A command line tool veritysetup is available to compute or verify
-the hash tree or activate the kernel driver.  This is available from
-the LVM2 upstream repository and may be supplied as a package called
-device-mapper-verity-tools:
-    git://sources.redhat.com/git/lvm2
-    http://sourceware.org/git/?p=lvm2.git
-    http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
-
-veritysetup -a vroot /dev/sda1 /dev/sda2 \
-       4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+the hash tree or activate the kernel device. This is available from
+the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
+(as a libcryptsetup extension).
+
+Create hash on the device:
+  # veritysetup format /dev/sda1 /dev/sda2
+  ...
+  Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+
+Activate the device:
+  # veritysetup create vroot /dev/sda1 /dev/sda2 \
+    4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
index a00c94c..0b96e57 100644 (file)
@@ -2,6 +2,7 @@
 
 Required properties:
 - compatible : "fsl,mma8450".
+- reg: the I2C address of MMA8450
 
 Example:
 
index 19f6af4..baf0798 100644 (file)
@@ -46,8 +46,8 @@ Examples:
 
 ecspi@70010000 { /* ECSPI1 */
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */
+                  <&gpio4 25 0>; /* GPIO4_25 */
        status = "okay";
 
        pmic: mc13892@0 {
index c7e404b..fea541e 100644 (file)
@@ -29,6 +29,6 @@ esdhc@70008000 {
        compatible = "fsl,imx51-esdhc";
        reg = <0x70008000 0x4000>;
        interrupts = <2>;
-       cd-gpios = <&gpio0 6 0>; /* GPIO1_6 */
-       wp-gpios = <&gpio0 5 0>; /* GPIO1_5 */
+       cd-gpios = <&gpio1 6 0>; /* GPIO1_6 */
+       wp-gpios = <&gpio1 5 0>; /* GPIO1_5 */
 };
index 7ab9e1a..4616fc2 100644 (file)
@@ -19,6 +19,6 @@ ethernet@83fec000 {
        reg = <0x83fec000 0x4000>;
        interrupts = <87>;
        phy-mode = "mii";
-       phy-reset-gpios = <&gpio1 14 0>; /* GPIO2_14 */
+       phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
        local-mac-address = [00 04 9F 01 1B B9];
 };
index 82b43f9..a4119f6 100644 (file)
@@ -1626,3 +1626,5 @@ MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11               1587
 MX6Q_PAD_SD2_DAT3__GPIO_1_12                   1588
 MX6Q_PAD_SD2_DAT3__SJC_DONE                    1589
 MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3              1590
+MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID          1591
+MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID              1592
index 9841057..4256a6d 100644 (file)
@@ -17,6 +17,6 @@ ecspi@70010000 {
        reg = <0x70010000 0x4000>;
        interrupts = <36>;
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio3 24 0>, /* GPIO3_24 */
+                  <&gpio3 25 0>; /* GPIO3_25 */
 };
index 6eab917..db4d3af 100644 (file)
@@ -3,6 +3,7 @@ Device tree binding vendor prefix registry.  Keep list in alphabetical order.
 This isn't an exhaustive list, but you should add new prefixes to it before
 using them to avoid name-space collisions.
 
+ad     Avionic Design GmbH
 adi    Analog Devices, Inc.
 amcc   Applied Micro Circuits Corporation (APM, formally AMCC)
 apm    Applied Micro Circuits Corporation (APM)
index 84d46c0..c86b50c 100644 (file)
@@ -6,7 +6,9 @@ Supported chips:
     Prefix: 'coretemp'
     CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
                               0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
-                              0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield)
+                              0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
+                              0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
+                              0x36 (Cedar Trail Atom)
     Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
                Volume 3A: System Programming Guide
                http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
@@ -52,6 +54,17 @@ Some information comes from ark.intel.com
 
 Process                Processor                                       TjMax(C)
 
+22nm           Core i5/i7 Processors
+               i7 3920XM, 3820QM, 3720QM, 3667U, 3520M         105
+               i5 3427U, 3360M/3320M                           105
+               i7 3770/3770K                                   105
+               i5 3570/3570K, 3550, 3470/3450                  105
+               i7 3770S                                        103
+               i5 3570S/3550S, 3475S/3470S/3450S               103
+               i7 3770T                                        94
+               i5 3570T                                        94
+               i5 3470T                                        91
+
 32nm           Core i3/i5/i7 Processors
                i7 660UM/640/620, 640LM/620, 620M, 610E         105
                i5 540UM/520/430, 540M/520/450/430              105
@@ -65,6 +78,11 @@ Process              Processor                                       TjMax(C)
                U3400                                           105
                P4505/P4500                                     90
 
+32nm           Atom Processors
+               Z2460                                           90
+               D2700/2550/2500                                 100
+               N2850/2800/2650/2600                            100
+
 45nm           Xeon Processors 5400 Quad-Core
                X5492, X5482, X5472, X5470, X5460, X5450        85
                E5472, E5462, E5450/40/30/20/10/05              85
@@ -85,6 +103,8 @@ Process              Processor                                       TjMax(C)
                N475/470/455/450                                100
                N280/270                                        90
                330/230                                         125
+               E680/660/640/620                                90
+               E680T/660T/640T/620T                            110
 
 45nm           Core2 Processors
                Solo ULV SU3500/3300                            100
index 506c739..13f1aa0 100644 (file)
@@ -86,7 +86,7 @@ There is also a gitweb interface available at
 http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git
 
 More information about kexec-tools can be found at
-http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html
+http://horms.net/projects/kexec/
 
 3) Unpack the tarball with the tar command, as follows:
 
index a92c5eb..12783fa 100644 (file)
@@ -2367,6 +2367,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Set maximum number of finished RCU callbacks to process
                        in one batch.
 
+       rcutree.fanout_leaf=    [KNL,BOOT]
+                       Increase the number of CPUs assigned to each
+                       leaf rcu_node structure.  Useful for very large
+                       systems.
+
        rcutree.qhimark=        [KNL,BOOT]
                        Set threshold of queued
                        RCU callbacks over which batch limiting is disabled.
diff --git a/Documentation/prctl/no_new_privs.txt b/Documentation/prctl/no_new_privs.txt
new file mode 100644 (file)
index 0000000..f7be84f
--- /dev/null
@@ -0,0 +1,57 @@
+The execve system call can grant a newly-started program privileges that
+its parent did not have.  The most obvious examples are setuid/setgid
+programs and file capabilities.  To prevent the parent program from
+gaining these privileges as well, the kernel and user code must be
+careful to prevent the parent from doing anything that could subvert the
+child.  For example:
+
+ - The dynamic loader handles LD_* environment variables differently if
+   a program is setuid.
+
+ - chroot is disallowed to unprivileged processes, since it would allow
+   /etc/passwd to be replaced from the point of view of a process that
+   inherited chroot.
+
+ - The exec code has special handling for ptrace.
+
+These are all ad-hoc fixes.  The no_new_privs bit (since Linux 3.5) is a
+new, generic mechanism to make it safe for a process to modify its
+execution environment in a manner that persists across execve.  Any task
+can set no_new_privs.  Once the bit is set, it is inherited across fork,
+clone, and execve and cannot be unset.  With no_new_privs set, execve
+promises not to grant the privilege to do anything that could not have
+been done without the execve call.  For example, the setuid and setgid
+bits will no longer change the uid or gid; file capabilities will not
+add to the permitted set, and LSMs will not relax constraints after
+execve.
+
+To set no_new_privs, use prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0).
+
+Be careful, though: LSMs might also not tighten constraints on exec
+in no_new_privs mode.  (This means that setting up a general-purpose
+service launcher to set no_new_privs before execing daemons may
+interfere with LSM-based sandboxing.)
+
+Note that no_new_privs does not prevent privilege changes that do not
+involve execve.  An appropriately privileged task can still call
+setuid(2) and receive SCM_RIGHTS datagrams.
+
+There are two main use cases for no_new_privs so far:
+
+ - Filters installed for the seccomp mode 2 sandbox persist across
+   execve and can change the behavior of newly-executed programs.
+   Unprivileged users are therefore only allowed to install such filters
+   if no_new_privs is set.
+
+ - By itself, no_new_privs can be used to reduce the attack surface
+   available to an unprivileged user.  If everything running with a
+   given uid has no_new_privs set, then that uid will be unable to
+   escalate its privileges by directly attacking setuid, setgid, and
+   fcap-using binaries; it will need to compromise something without the
+   no_new_privs bit set first.
+
+In the future, other potentially dangerous kernel features could become
+available to unprivileged tasks if no_new_privs is set.  In principle,
+several options to unshare(2) and clone(2) would be safe when
+no_new_privs is set, and no_new_privs + chroot is considerably less
+dangerous than chroot by itself.
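
A minimal user-space sketch of the prctl() call described above (this
assumes a libc whose <sys/prctl.h> exposes PR_SET_NO_NEW_PRIVS; on older
toolchains the constant comes from <linux/prctl.h>):

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            /* Set no_new_privs for this task and everything it later
             * forks, clones, or execs; the bit can never be cleared. */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == -1) {
                    perror("prctl(PR_SET_NO_NEW_PRIVS)");
                    return 1;
            }
            /* It is now safe, even without privileges, to install a
             * seccomp mode 2 filter before execing an untrusted program. */
            return 0;
    }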
index f0ab5cf..4a7b54b 100644 (file)
@@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the
    marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
    security issue, or some "oh, that's not good" issue.  In short, something
    critical.
+ - Serious issues as reported by a user of a distribution kernel may also
+   be considered if they fix a notable performance or interactivity issue.
+   As these fixes are not as obvious and have a higher risk of a subtle
+   regression, they should only be submitted by a distribution kernel
+   maintainer and include an addendum linking to a bugzilla entry if it
+   exists, and additional information on the user-visible impact.
  - New device IDs and quirks are also accepted.
  - No "theoretical race condition" issues, unless an explanation of how the
    race can be exploited is also provided.
index 9301266..2c99483 100644 (file)
@@ -1930,6 +1930,23 @@ The "pte_enc" field provides a value that can OR'ed into the hash
 PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
 into the hash PTE second double word).
 
+4.75 KVM_IRQFD
+
+Capability: KVM_CAP_IRQFD
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_irqfd (in)
+Returns: 0 on success, -1 on error
+
+Allows setting an eventfd to directly trigger a guest interrupt.
+kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
+kvm_irqfd.gsi specifies the irqchip pin toggled by this event.  When
+an event is triggered on the eventfd, an interrupt is injected into
+the guest using the specified gsi pin.  The irqfd is removed using
+the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
+and kvm_irqfd.gsi.
+
+
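
For illustration, a minimal user-space sketch of assigning an irqfd might
look like the following; vmfd is assumed to come from a prior
KVM_CREATE_VM, and error handling is trimmed:

    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Route an eventfd to guest GSI 'gsi' on an existing VM fd. */
    static int assign_irqfd(int vmfd, unsigned int gsi)
    {
            struct kvm_irqfd irqfd = { 0 };
            int efd = eventfd(0, 0);

            irqfd.fd  = efd;
            irqfd.gsi = gsi;
            if (ioctl(vmfd, KVM_IRQFD, &irqfd) < 0)
                    return -1;

            /* Signalling efd now injects an interrupt on 'gsi'.  To remove
             * the route later, set irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN and
             * issue the same ioctl again with the same fd and gsi. */
            return efd;
    }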
 5. The kvm_run structure
 ------------------------
 
index 3e30a3a..fe643e7 100644 (file)
@@ -579,7 +579,7 @@ F:  drivers/net/appletalk/
 F:     net/appletalk/
 
 ARASAN COMPACT FLASH PATA CONTROLLER
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 F:     include/linux/pata_arasan_cf_data.h
@@ -3433,13 +3433,14 @@ S:      Supported
 F:     drivers/idle/i7300_idle.c
 
 IEEE 802.15.4 SUBSYSTEM
+M:     Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 M:     Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-M:     Sergey Lapin <slapin@ossfans.org>
 L:     linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://apps.sourceforge.net/trac/linux-zigbee
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
 S:     Maintained
 F:     net/ieee802154/
+F:     net/mac802154/
 F:     drivers/ieee802154/
 
 IIO SUBSYSTEM AND DRIVERS
@@ -4654,8 +4655,8 @@ L:        netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
+T:     git git://1984.lsi.us.es/nf
+T:     git git://1984.lsi.us.es/nf-next
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
@@ -4857,6 +4858,7 @@ M:        Kevin Hilman <khilman@ti.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/*omap*/*pm*
+F:     drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
 M:     Rajendra Nayak <rnayak@ti.com>
@@ -5296,7 +5298,7 @@ S:        Maintained
 F:     drivers/pinctrl/
 
 PIN CONTROLLER - ST SPEAR
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -5563,7 +5565,7 @@ F:        Documentation/networking/LICENSE.qla3xxx
 F:     drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:     Anirban Chakraborty <anirban.chakraborty@qlogic.com>
+M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Sony Chacko <sony.chacko@qlogic.com>
 M:     linux-driver@qlogic.com
 L:     netdev@vger.kernel.org
@@ -5571,7 +5573,6 @@ S:        Supported
 F:     drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:     Anirban Chakraborty <anirban.chakraborty@qlogic.com>
 M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Ron Mercer <ron.mercer@qlogic.com>
 M:     linux-driver@qlogic.com
@@ -5873,7 +5874,7 @@ S:        Maintained
 F:     drivers/tty/serial
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 S:     Maintained
 F:     include/linux/dw_dmac.h
 F:     drivers/dma/dw_dmac_regs.h
@@ -5909,7 +5910,7 @@ M:        Ingo Molnar <mingo@redhat.com>
 M:     Peter Zijlstra <peterz@infradead.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:     Maintained
-F:     kernel/sched*
+F:     kernel/sched/
 F:     include/linux/sched.h
 
 SCORE ARCHITECTURE
@@ -6021,7 +6022,7 @@ S:        Maintained
 F:     drivers/mmc/host/sdhci-s3c.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-mmc@vger.kernel.org
 S:     Maintained
@@ -6377,7 +6378,7 @@ S:        Maintained
 F:     include/linux/compiler.h
 
 SPEAR PLATFORM SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6386,7 +6387,7 @@ S:        Maintained
 F:     arch/arm/plat-spear/
 
 SPEAR13XX MACHINE SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6395,7 +6396,7 @@ S:        Maintained
 F:     arch/arm/mach-spear13xx/
 
 SPEAR3XX MACHINE SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6406,7 +6407,7 @@ F:        arch/arm/mach-spear3xx/
 SPEAR6XX MACHINE SUPPORT
 M:     Rajeev Kumar <rajeev-dlh.kumar@st.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -6414,7 +6415,7 @@ S:        Maintained
 F:     arch/arm/mach-spear6xx/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -7421,7 +7422,7 @@ F:        include/linux/vlynq.h
 
 VME SUBSYSTEM
 M:     Martyn Welch <martyn.welch@ge.com>
-M:     Manohar Vanga <manohar.vanga@cern.ch>
+M:     Manohar Vanga <manohar.vanga@gmail.com>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     devel@driverdev.osuosl.org
 S:     Maintained
index b771af5..4bb09e1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -561,6 +561,8 @@ else
 KBUILD_CFLAGS  += -O2
 endif
 
+include $(srctree)/arch/$(SRCARCH)/Makefile
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -571,8 +573,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-reorder-blocks,) \
                  $(call cc-option,-fno-partial-inlining)
 endif
 
-include $(srctree)/arch/$(SRCARCH)/Makefile
-
 ifneq ($(CONFIG_FRAME_WARN),0)
 KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif
index 84449dd..a91009c 100644 (file)
@@ -293,6 +293,7 @@ config ARCH_VERSATILE
        select ICST
        select GENERIC_CLOCKEVENTS
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select NEED_MACH_IO_H if PCI
        select PLAT_VERSATILE
        select PLAT_VERSATILE_CLCD
        select PLAT_VERSATILE_FPGA_IRQ
@@ -588,6 +589,7 @@ config ARCH_ORION5X
        select PCI
        select ARCH_REQUIRE_GPIOLIB
        select GENERIC_CLOCKEVENTS
+       select NEED_MACH_IO_H
        select PLAT_ORION
        help
          Support for the following Marvell Orion 5x series SoCs:
index 153a4b2..c9b4f27 100644 (file)
@@ -11,7 +11,7 @@
 /include/ "mmp2.dtsi"
 
 / {
-       model = "Marvell MMP2 Aspenite Development Board";
+       model = "Marvell MMP2 Brownstone Development Board";
        compatible = "mrvl,mmp2-brownstone", "mrvl,mmp2";
 
        chosen {
@@ -19,7 +19,7 @@
        };
 
        memory {
-               reg = <0x00000000 0x04000000>;
+               reg = <0x00000000 0x08000000>;
        };
 
        soc {
index f2ab4ea..581cb08 100644 (file)
@@ -44,6 +44,8 @@
                        compatible = "ti,omap2-intc";
                        interrupt-controller;
                        #interrupt-cells = <1>;
+                       ti,intc-size = <96>;
+                       reg = <0x480FE000 0x1000>;
                };
 
                uart1: serial@4806a000 {
index 8314e41..dd4358b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1310 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 9e61da4..419ea74 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1310 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 0d8472e..c9a54e0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1340 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index a26fc47..d71fe2a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1340 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 1f8e1e1..f7b84ac 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr13xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -43,8 +43,8 @@
 
        pmu {
                compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 8 0x04
-                             0 9 0x04>;
+               interrupts = <0 6 0x04
+                             0 7 0x04>;
        };
 
        L2: l2-cache {
                gmac0: eth@e2000000 {
                        compatible = "st,spear600-gmac";
                        reg = <0xe2000000 0x8000>;
-                       interrupts = <0 23 0x4
-                                     0 24 0x4>;
+                       interrupts = <0 33 0x4
+                                     0 34 0x4>;
                        interrupt-names = "macirq", "eth_wake_irq";
                        status = "disabled";
                };
                        kbd@e0300000 {
                                compatible = "st,spear300-kbd";
                                reg = <0xe0300000 0x1000>;
+                               interrupts = <0 52 0x4>;
                                status = "disabled";
                        };
 
                        serial@e0000000 {
                                compatible = "arm,pl011", "arm,primecell";
                                reg = <0xe0000000 0x1000>;
-                               interrupts = <0 36 0x4>;
+                               interrupts = <0 35 0x4>;
                                status = "disabled";
                        };
 
index fc82b1a..d71b8d5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 01c5e35..ed3627c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index dc5e2d4..b00544e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index e47081c..62fc4fb 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 6308fa3..e4e912f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -15,8 +15,8 @@
 /include/ "spear320.dtsi"
 
 / {
-       model = "ST SPEAr300 Evaluation Board";
-       compatible = "st,spear300-evb", "st,spear300";
+       model = "ST SPEAr320 Evaluation Board";
+       compatible = "st,spear320-evb", "st,spear320";
        #address-cells = <1>;
        #size-cells = <1>;
 
@@ -26,7 +26,7 @@
 
        ahb {
                pinmux@b3000000 {
-                       st,pinmux-mode = <3>;
+                       st,pinmux-mode = <4>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&state_default>;
 
index 5372ca3..1f49d69 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 9107255..3a8bb57 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr3xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 089f0a4..a3c36e4 100644 (file)
                        timer@f0000000 {
                                compatible = "st,spear-timer";
                                reg = <0xf0000000 0x400>;
+                               interrupt-parent = <&vic0>;
                                interrupts = <16>;
                        };
                };
index 9854ff4..11828e6 100644 (file)
@@ -176,7 +176,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
 CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_WDM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_LIBUSUAL=y
index 68374ba..c79f61f 100644 (file)
@@ -243,7 +243,7 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
 {
        u64 result;
 
index 3d22204..6ddbe44 100644 (file)
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_USE_DOMAINS
-#define set_domain(x)                                  \
-       do {                                            \
-       __asm__ __volatile__(                           \
-       "mcr    p15, 0, %0, c3, c0      @ set domain"   \
-         : : "r" (x));                                 \
-       isb();                                          \
-       } while (0)
+static inline void set_domain(unsigned val)
+{
+       asm volatile(
+       "mcr    p15, 0, %0, c3, c0      @ set domain"
+         : : "r" (val));
+       isb();
+}
 
 #define modify_domain(dom,type)                                        \
        do {                                                    \
@@ -78,8 +78,8 @@
        } while (0)
 
 #else
-#define set_domain(x)          do { } while (0)
-#define modify_domain(dom,type)        do { } while (0)
+static inline void set_domain(unsigned val) { }
+static inline void modify_domain(unsigned dom, unsigned type)  { }
 #endif
 
 /*
index 7be5469..e42cf59 100644 (file)
@@ -19,6 +19,7 @@
        "       .long   1b, 4f, 2b, 4f\n"                       \
        "       .popsection\n"                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
+       "       .align  2\n"                                    \
        "4:     mov     %0, " err_reg "\n"                      \
        "       b       3b\n"                                   \
        "       .popsection"
index e0d1c0c..6b9b077 100644 (file)
@@ -4,7 +4,7 @@
  * ARM PrimeXsys System Controller SP810 header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index b79f8e9..af7b0bd 100644 (file)
@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_SYSCALL_TRACE      8
 #define TIF_SYSCALL_AUDIT      9
-#define TIF_SYSCALL_RESTARTSYS 10
 #define TIF_POLLING_NRFLAG     16
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
-#define _TIF_SYSCALL_RESTARTSYS        (1 << TIF_SYSCALL_RESTARTSYS)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                          _TIF_SYSCALL_RESTARTSYS)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
 
 /*
  * Change these and you break ASM code in entry-common.S
index 437f0c4..0d1851c 100644 (file)
@@ -495,6 +495,7 @@ ENDPROC(__und_usr)
  * The out of line fixup for the ldrt above.
  */
        .pushsection .fixup, "ax"
+       .align  2
 4:     mov     pc, r9
        .popsection
        .pushsection __ex_table,"a"
index ba32b39..38c1a3b 100644 (file)
@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
        TEST_BF_R ("mov pc, r",0,2f,"")
        TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
        TEST_BB(   "sub pc, pc, #1b-2b+8")
-#if __LINUX_ARM_ARCH__ >= 6
-       TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+       TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
 #endif
        TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
        TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
index 8f96ec7..6123daf 100644 (file)
@@ -660,7 +660,7 @@ static const union decode_item t32_table_1111_100x[] = {
        /* LDRSB (literal)      1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
        /* LDRH (literal)       1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
        /* LDRSH (literal)      1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
-       DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
+       DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
                                                 REGS(PC, NOSPPCX, 0, 0, 0)),
 
        /* STRB (immediate)     1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
index 186c8cb..a02eada 100644 (file)
@@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
-               return -EPERM;
+               return -EOPNOTSUPP;
        }
 
        /*
index 5700a7a..14e3826 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/regset.h>
 #include <linux/audit.h>
 #include <linux/tracehook.h>
-#include <linux/unistd.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
                audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
                                    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
-       if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
-               scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
        if (!test_thread_flag(TIF_SYSCALL_TRACE))
                return scno;
 
index fd2392a..536c5d6 100644 (file)
@@ -27,6 +27,7 @@
  */
 #define SWI_SYS_SIGRETURN      (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
 #define SWI_SYS_RT_SIGRETURN   (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RESTART                (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
 
 /*
  * With EABI, the syscall number has to be loaded into r7.
@@ -46,6 +47,18 @@ const unsigned long sigreturn_codes[7] = {
        MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
+/*
+ * Either we support OABI only, or we have EABI with the OABI
+ * compat layer enabled.  In the latter case we don't know if
+ * user space is EABI or not, and if not we must not clobber r7.
+ * Always using the OABI syscall solves that issue and works for
+ * all those cases.
+ */
+const unsigned long syscall_restart_code[2] = {
+       SWI_SYS_RESTART,        /* swi  __NR_restart_syscall */
+       0xe49df004,             /* ldr  pc, [sp], #4 */
+};
+
 /*
  * atomically swap in the new signal mask, and wait for a signal.
  */
@@ -592,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
-               case -ERESTART_RESTARTBLOCK:
                        regs->ARM_r0 = regs->ARM_ORIG_r0;
                        regs->ARM_pc = restart_addr;
                        break;
+               case -ERESTART_RESTARTBLOCK:
+                       regs->ARM_r0 = -EINTR;
+                       break;
                }
        }
 
@@ -611,14 +626,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
                 * debugger has chosen to restart at a different PC.
                 */
                if (regs->ARM_pc == restart_addr) {
-                       if (retval == -ERESTARTNOHAND ||
-                           retval == -ERESTART_RESTARTBLOCK
+                       if (retval == -ERESTARTNOHAND
                            || (retval == -ERESTARTSYS
                                && !(ka.sa.sa_flags & SA_RESTART))) {
                                regs->ARM_r0 = -EINTR;
                                regs->ARM_pc = continue_addr;
                        }
-                       clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
                }
 
                handle_signal(signr, &ka, &info, regs);
@@ -632,8 +645,29 @@ static void do_signal(struct pt_regs *regs, int syscall)
                 * ignore the restart.
                 */
                if (retval == -ERESTART_RESTARTBLOCK
-                   && regs->ARM_pc == restart_addr)
-                       set_thread_flag(TIF_SYSCALL_RESTARTSYS);
+                   && regs->ARM_pc == continue_addr) {
+                       if (thumb_mode(regs)) {
+                               regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
+                               regs->ARM_pc -= 2;
+                       } else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+                               regs->ARM_r7 = __NR_restart_syscall;
+                               regs->ARM_pc -= 4;
+#else
+                               u32 __user *usp;
+
+                               regs->ARM_sp -= 4;
+                               usp = (u32 __user *)regs->ARM_sp;
+
+                               if (put_user(regs->ARM_pc, usp) == 0) {
+                                       regs->ARM_pc = KERN_RESTART_CODE;
+                               } else {
+                                       regs->ARM_sp += 4;
+                                       force_sigsegv(0, current);
+                               }
+#endif
+                       }
+               }
        }
 
        restore_saved_sigmask();
index 5ff067b..6fcfe83 100644 (file)
@@ -8,5 +8,7 @@
  * published by the Free Software Foundation.
  */
 #define KERN_SIGRETURN_CODE    (CONFIG_VECTORS_BASE + 0x00000500)
+#define KERN_RESTART_CODE      (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
 
 extern const unsigned long sigreturn_codes[7];
+extern const unsigned long syscall_restart_code[2];
index 4928d89..3647170 100644 (file)
@@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
         */
        memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
               sigreturn_codes, sizeof(sigreturn_codes));
+       memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+              syscall_restart_code, sizeof(syscall_restart_code));
 
        flush_icache_range(vectors, vectors + PAGE_SIZE);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
index 43a31fb..36ff15b 100644 (file)
@@ -183,7 +183,9 @@ SECTIONS
        }
 #endif
 
+#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
index 226949d..f953bb5 100644 (file)
@@ -50,5 +50,6 @@
 #define POWER_MANAGEMENT       (BRIDGE_VIRT_BASE | 0x011c)
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE         (BRIDGE_PHYS_BASE | 0x0300)
 
 #endif
index ad1165d..d52b0ef 100644 (file)
@@ -78,6 +78,7 @@
 
 /* North-South Bridge */
 #define BRIDGE_VIRT_BASE       (DOVE_SB_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE       (DOVE_SB_REGS_PHYS_BASE | 0x20000)
 
 /* Cryptographic Engine */
 #define DOVE_CRYPT_PHYS_BASE   (DOVE_SB_REGS_PHYS_BASE | 0x30000)
index 573be57..6f6d13f 100644 (file)
@@ -212,7 +212,7 @@ config MACH_SMDKV310
        select EXYNOS_DEV_SYSMMU
        select EXYNOS4_DEV_AHCI
        select SAMSUNG_DEV_KEYPAD
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select SAMSUNG_DEV_PWM
        select EXYNOS4_DEV_USB_OHCI
        select EXYNOS4_SETUP_FIMD0
@@ -264,7 +264,7 @@ config MACH_UNIVERSAL_C210
        select S5P_DEV_ONENAND
        select S5P_DEV_TV
        select EXYNOS_DEV_SYSMMU
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select EXYNOS_DEV_DRM
        select EXYNOS4_SETUP_FIMD0
        select EXYNOS4_SETUP_I2C1
@@ -303,7 +303,7 @@ config MACH_NURI
        select S5P_DEV_MFC
        select S5P_DEV_USB_EHCI
        select S5P_SETUP_MIPIPHY
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select EXYNOS_DEV_DRM
        select EXYNOS4_SETUP_FIMC
        select EXYNOS4_SETUP_FIMD0
@@ -341,7 +341,7 @@ config MACH_ORIGEN
        select SAMSUNG_DEV_PWM
        select EXYNOS_DEV_DRM
        select EXYNOS_DEV_SYSMMU
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select EXYNOS4_DEV_USB_OHCI
        select EXYNOS4_SETUP_FIMD0
        select EXYNOS4_SETUP_SDHCI
index e9fafcf..373c3c0 100644 (file)
@@ -119,7 +119,9 @@ static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
                                                struct exynos_pm_domain *pd)
 {
        if (pdev->dev.bus) {
-               if (pm_genpd_add_device(&pd->pd, &pdev->dev))
+               if (!pm_genpd_add_device(&pd->pd, &pdev->dev))
+                       pm_genpd_dev_need_restore(&pdev->dev, true);
+               else
                        pr_info("%s: error in adding %s device to %s power"
                                "domain\n", __func__, dev_name(&pdev->dev),
                                pd->name);
@@ -151,9 +153,12 @@ static __init int exynos4_pm_init_power_domain(void)
        if (of_have_populated_dt())
                return exynos_pm_dt_parse_domains();
 
-       for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
-               pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
-                               exynos4_pm_domains[idx]->is_off);
+       for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++) {
+               struct exynos_pm_domain *pd = exynos4_pm_domains[idx];
+               int on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
+
+               pm_genpd_init(&pd->pd, NULL, !on);
+       }
 
 #ifdef CONFIG_S5P_DEV_FIMD0
        exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
index f8437dd..ded4652 100644 (file)
@@ -1,4 +1,8 @@
-obj-y                                  := clock.o highbank.o system.o
+obj-y                                  := clock.o highbank.o system.o smc.o
+
+plus_sec := $(call as-instr,.arch_extension sec,+sec)
+AFLAGS_smc.o                           :=-Wa,-march=armv7-a$(plus_sec)
+
 obj-$(CONFIG_DEBUG_HIGHBANK_UART)      += lluart.o
 obj-$(CONFIG_SMP)                      += platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)              += hotplug.o
index d8e2d0b..141ed51 100644 (file)
@@ -8,3 +8,4 @@ extern void highbank_lluart_map_io(void);
 static inline void highbank_lluart_map_io(void) {}
 #endif
 
+extern void highbank_smc1(int fn, int arg);
index 410a112..8777612 100644 (file)
@@ -85,10 +85,24 @@ const static struct of_device_id irq_match[] = {
        {}
 };
 
+#ifdef CONFIG_CACHE_L2X0
+static void highbank_l2x0_disable(void)
+{
+       /* Disable PL310 L2 Cache controller */
+       highbank_smc1(0x102, 0x0);
+}
+#endif
+
 static void __init highbank_init_irq(void)
 {
        of_irq_init(irq_match);
+
+#ifdef CONFIG_CACHE_L2X0
+       /* Enable PL310 L2 Cache controller */
+       highbank_smc1(0x102, 0x1);
        l2x0_of_init(0, ~0UL);
+       outer_cache.disable = highbank_l2x0_disable;
+#endif
 }
 
 static void __init highbank_timer_init(void)
diff --git a/arch/arm/mach-highbank/smc.S b/arch/arm/mach-highbank/smc.S
new file mode 100644 (file)
index 0000000..407d17b
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copied from omap44xx-smc.S Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * This is a common routine to manage the secure monitor API
+ * used to modify the PL310 secure registers.
+ * 'r0' contains the value to be modified and 'r12' contains
+ * the monitor API number.
+ * Function signature : void highbank_smc1(u32 fn, u32 arg)
+ */
+
+ENTRY(highbank_smc1)
+       stmfd   sp!, {r4-r11, lr}
+       mov     r12, r0
+       mov     r0, r1
+       dsb
+       smc     #0
+       ldmfd   sp!, {r4-r11, pc}
+ENDPROC(highbank_smc1)
index 0021f72..eff4db5 100644 (file)
@@ -477,6 +477,7 @@ config MACH_MX31_3DS
        select IMX_HAVE_PLATFORM_IMX2_WDT
        select IMX_HAVE_PLATFORM_IMX_I2C
        select IMX_HAVE_PLATFORM_IMX_KEYPAD
+       select IMX_HAVE_PLATFORM_IMX_SSI
        select IMX_HAVE_PLATFORM_IMX_UART
        select IMX_HAVE_PLATFORM_IPU_CORE
        select IMX_HAVE_PLATFORM_MXC_EHCI
index 0f0beb5..516ddee 100644 (file)
@@ -108,8 +108,7 @@ int __init mx1_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
        clk_register_clkdev(clk[clko], "clko", NULL);
 
-       mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
-                       MX1_TIM1_INT);
+       mxc_timer_init(MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), MX1_TIM1_INT);
 
        return 0;
 }
index 4e4f384..ea13e61 100644 (file)
@@ -180,7 +180,7 @@ int __init mx21_clocks_init(unsigned long lref, unsigned long href)
        clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
        clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
 
-       mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
-                       MX21_INT_GPT1);
+       mxc_timer_init(MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), MX21_INT_GPT1);
+
        return 0;
 }
index d9833bb..fdd8cc8 100644 (file)
@@ -243,6 +243,6 @@ int __init mx25_clocks_init(void)
        clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
        clk_register_clkdev(clk[iim_ipg], "iim", NULL);
 
-       mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+       mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
        return 0;
 }
index 50a7ebd..295cbd7 100644 (file)
@@ -263,8 +263,7 @@ int __init mx27_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0");
        clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1");
 
-       mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
-                       MX27_INT_GPT1);
+       mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
 
        clk_prepare_enable(clk[emi_ahb_gate]);
 
index a854b9c..c9a06d8 100644 (file)
@@ -175,8 +175,7 @@ int __init mx31_clocks_init(unsigned long fref)
        mx31_revision();
        clk_disable_unprepare(clk[iim_gate]);
 
-       mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
-                       MX31_INT_GPT);
+       mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), MX31_INT_GPT);
 
        return 0;
 }
index a9e60bf..c6422fb 100644 (file)
@@ -201,7 +201,6 @@ int __init mx35_clocks_init()
                        pr_err("i.MX35 clk %d: register failed with %ld\n",
                                i, PTR_ERR(clk[i]));
 
-
        clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
        clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
        clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
@@ -264,14 +263,20 @@ int __init mx35_clocks_init()
        clk_prepare_enable(clk[iim_gate]);
        clk_prepare_enable(clk[emi_gate]);
 
+       /*
+        * SCC is needed to boot via mmc after a watchdog reset. The clock code
+        * before conversion to common clk also enabled UART1 (which isn't
+        * handled here and not needed for mmc) and IIM (which is enabled
+        * unconditionally above).
+        */
+       clk_prepare_enable(clk[scc_gate]);
+
        imx_print_silicon_rev("i.MX35", mx35_revision());
 
 #ifdef CONFIG_MXC_USE_EPIT
-       epit_timer_init(&epit1_clk,
-                       MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
+       epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
 #else
-       mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR),
-                       MX35_INT_GPT);
+       mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
 #endif
 
        return 0;
index fcd94f3..a2200c7 100644 (file)
@@ -104,12 +104,12 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
                                periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
        clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
                                main_bus_sel, ARRAY_SIZE(main_bus_sel));
-       clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1,
+       clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
                                per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
        clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
        clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
        clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
-       clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0,
+       clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
                                per_root_sel, ARRAY_SIZE(per_root_sel));
        clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
        clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
@@ -172,7 +172,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
        clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
        clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
        clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
-       clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18);
+       clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per_root", MXC_CCM_CCGR2, 18);
        clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
        clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
        clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
@@ -366,8 +366,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
        clk_set_rate(clk[esdhc_b_podf], 166250000);
 
        /* System timer */
-       mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
-               MX51_INT_GPT);
+       mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT);
 
        clk_prepare_enable(clk[iim_gate]);
        imx_print_silicon_rev("i.MX51", mx51_revision());
@@ -452,8 +451,7 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
        clk_set_rate(clk[esdhc_b_podf], 200000000);
 
        /* System timer */
-       mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
-               MX53_INT_GPT);
+       mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT);
 
        clk_prepare_enable(clk[iim_gate]);
        imx_print_silicon_rev("i.MX53", mx53_revision());
index cab02d0..e1a17ac 100644 (file)
@@ -122,10 +122,6 @@ static const char *cko1_sels[]     = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5
                                    "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
                                    "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };
 
-static const char * const clks_init_on[] __initconst = {
-       "mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3",
-};
-
 enum mx6q_clks {
        dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
        pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
@@ -156,16 +152,20 @@ enum mx6q_clks {
        ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
        usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
        pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
-       ssi2_ipg, ssi3_ipg, clk_max
+       ssi2_ipg, ssi3_ipg, rom,
+       clk_max
 };
 
 static struct clk *clk[clk_max];
 
+static enum mx6q_clks const clks_init_on[] __initconst = {
+       mmdc_ch0_axi, rom,
+};
+
 int __init mx6q_clocks_init(void)
 {
        struct device_node *np;
        void __iomem *base;
-       struct clk *c;
        int i, irq;
 
        clk[dummy] = imx_clk_fixed("dummy", 0);
@@ -365,6 +365,7 @@ int __init mx6q_clocks_init(void)
        clk[gpmi_bch]     = imx_clk_gate2("gpmi_bch",      "usdhc4",            base + 0x78, 26);
        clk[gpmi_io]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
        clk[gpmi_apb]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
+       clk[rom]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
        clk[sata]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
        clk[sdma]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
        clk[spba]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
@@ -424,21 +425,14 @@ int __init mx6q_clocks_init(void)
        clk_register_clkdev(clk[ahb], "ahb", NULL);
        clk_register_clkdev(clk[cko1], "cko1", NULL);
 
-       for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) {
-               c = clk_get_sys(clks_init_on[i], NULL);
-               if (IS_ERR(c)) {
-                       pr_err("%s: failed to get clk %s", __func__,
-                              clks_init_on[i]);
-                       return PTR_ERR(c);
-               }
-               clk_prepare_enable(c);
-       }
+       for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+               clk_prepare_enable(clk[clks_init_on[i]]);
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
        base = of_iomap(np, 0);
        WARN_ON(!base);
        irq = irq_of_parse_and_map(np, 0);
-       mxc_timer_init(NULL, base, irq);
+       mxc_timer_init(base, irq);
 
        return 0;
 }
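
The clks_init_on rework above keeps the always-on clocks (mmdc_ch0_axi and the new rom gate) enabled by indexing clk[] with enum values instead of round-tripping through clk_get_sys() by name, which drops the string table and the error path.  Ordinary consumers are unaffected and still resolve clocks through clkdev; a hedged sketch of that side (the "rom" con_id below is hypothetical):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    /* Illustrative only: drivers look clocks up by name; they never index
     * the clk[] array directly -- that shortcut is init-time only. */
    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk = clk_get(&pdev->dev, "rom");

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            return clk_prepare_enable(clk);
    }
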
index 4685919..0440379 100644 (file)
@@ -74,30 +74,15 @@ struct clk_pllv2 {
        void __iomem    *base;
 };
 
-static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
-               unsigned long parent_rate)
+static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
+               u32 dp_ctl, u32 dp_op, u32 dp_mfd, u32 dp_mfn)
 {
        long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
-       unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
-       void __iomem *pllbase;
+       unsigned long dbl;
        s64 temp;
-       struct clk_pllv2 *pll = to_clk_pllv2(hw);
-
-       pllbase = pll->base;
 
-       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
        dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
 
-       if (pll_hfsm == 0) {
-               dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
-               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
-               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
-       } else {
-               dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
-               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
-               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
-       }
        pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
        mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
        mfi = (mfi <= 5) ? 5 : mfi;
@@ -123,18 +108,30 @@ static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
        return temp;
 }
 
-static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
+       u32 dp_op, dp_mfd, dp_mfn, dp_ctl;
+       void __iomem *pllbase;
        struct clk_pllv2 *pll = to_clk_pllv2(hw);
+
+       pllbase = pll->base;
+
+       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+       dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
+       dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
+       dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
+
+       return __clk_pllv2_recalc_rate(parent_rate, dp_ctl, dp_op, dp_mfd, dp_mfn);
+}
+
+static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
+               u32 *dp_op, u32 *dp_mfd, u32 *dp_mfn)
+{
        u32 reg;
-       void __iomem *pllbase;
        long mfi, pdf, mfn, mfd = 999999;
        s64 temp64;
        unsigned long quad_parent_rate;
-       unsigned long pll_hfsm, dp_ctl;
-
-       pllbase = pll->base;
 
        quad_parent_rate = 4 * parent_rate;
        pdf = mfi = -1;
@@ -144,25 +141,41 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
                return -EINVAL;
        pdf--;
 
-       temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
-       do_div(temp64, quad_parent_rate/1000000);
+       temp64 = rate * (pdf + 1) - quad_parent_rate * mfi;
+       do_div(temp64, quad_parent_rate / 1000000);
        mfn = (long)temp64;
 
+       reg = mfi << 4 | pdf;
+
+       *dp_op = reg;
+       *dp_mfd = mfd;
+       *dp_mfn = mfn;
+
+       return 0;
+}
+
+static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_pllv2 *pll = to_clk_pllv2(hw);
+       void __iomem *pllbase;
+       u32 dp_ctl, dp_op, dp_mfd, dp_mfn;
+       int ret;
+
+       pllbase = pll->base;
+
+
+       ret = __clk_pllv2_set_rate(rate, parent_rate, &dp_op, &dp_mfd, &dp_mfn);
+       if (ret)
+               return ret;
+
        dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
        /* use dpdck0_2 */
        __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
-       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
-       if (pll_hfsm == 0) {
-               reg = mfi << 4 | pdf;
-               __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
-               __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
-               __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
-       } else {
-               reg = mfi << 4 | pdf;
-               __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
-               __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
-               __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
-       }
+
+       __raw_writel(dp_op, pllbase + MXC_PLL_DP_OP);
+       __raw_writel(dp_mfd, pllbase + MXC_PLL_DP_MFD);
+       __raw_writel(dp_mfn, pllbase + MXC_PLL_DP_MFN);
 
        return 0;
 }
@@ -170,7 +183,11 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
 static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long *prate)
 {
-       return rate;
+       u32 dp_op, dp_mfd, dp_mfn;
+
+       __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
+       return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
+                       dp_op, dp_mfd, dp_mfn);
 }
 
 static int clk_pllv2_prepare(struct clk_hw *hw)
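
The clk-pllv2 rework splits the MFI/MFN/PDF arithmetic out of the register access so clk_pllv2_round_rate() can report the rate that would actually be programmed instead of echoing the request.  A sketch of the same composition with the helper's return value propagated (illustrative only, would sit alongside the helpers in clk-pllv2.c; the hunk above discards that return value):

    /* Illustrative only: compute the register values for the request, then
     * report the rate those values would really produce. */
    static long example_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
                                         unsigned long *prate)
    {
            u32 dp_op, dp_mfd, dp_mfn;

            if (__clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn))
                    return -EINVAL;

            return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
                                           dp_op, dp_mfd, dp_mfn);
    }
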
index 5e11ba7..5e3f1f0 100644 (file)
@@ -23,7 +23,7 @@
 #define MX53_DPLL1_BASE                MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR)
 #define MX53_DPLL2_BASE                MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR)
 #define MX53_DPLL3_BASE                MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
-#define MX53_DPLL4_BASE                MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
+#define MX53_DPLL4_BASE                MX53_IO_ADDRESS(MX53_PLL4_BASE_ADDR)
 
 /* PLL Register Offsets */
 #define MXC_PLL_DP_CTL                 0x00
index 89493ab..20ed2d5 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/errno.h>
 #include <asm/cacheflush.h>
+#include <asm/cp15.h>
 #include <mach/common.h>
 
 int platform_cpu_kill(unsigned int cpu)
@@ -19,6 +20,44 @@ int platform_cpu_kill(unsigned int cpu)
        return 1;
 }
 
+static inline void cpu_enter_lowpower(void)
+{
+       unsigned int v;
+
+       flush_cache_all();
+       asm volatile(
+               "mcr    p15, 0, %1, c7, c5, 0\n"
+       "       mcr     p15, 0, %1, c7, c10, 4\n"
+       /*
+        * Turn off coherency
+        */
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
+       "       bic     %0, %0, %3\n"
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+       "       mrc     p15, 0, %0, c1, c0, 0\n"
+       "       bic     %0, %0, %2\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+         : "=&r" (v)
+         : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+         : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+       unsigned int v;
+
+       asm volatile(
+               "mrc    p15, 0, %0, c1, c0, 0\n"
+       "       orr     %0, %0, %1\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
+       "       orr     %0, %0, %2\n"
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+         : "=&r" (v)
+         : "Ir" (CR_C), "Ir" (0x40)
+         : "cc");
+}
+
 /*
  * platform-specific code to shutdown a CPU
  *
@@ -26,9 +65,10 @@ int platform_cpu_kill(unsigned int cpu)
  */
 void platform_cpu_die(unsigned int cpu)
 {
-       flush_cache_all();
+       cpu_enter_lowpower();
        imx_enable_cpu(cpu, false);
        cpu_do_idle();
+       cpu_leave_lowpower();
 
        /* We should never return from idle */
        panic("cpu %d unexpectedly exit from shutdown\n", cpu);
index c515f8e..6450303 100644 (file)
@@ -70,7 +70,6 @@ static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
                I2C_BOARD_INFO("pcf8563", 0x51),
        }, {
                I2C_BOARD_INFO("tsc2007", 0x48),
-               .type           = "tsc2007",
                .platform_data  = &tsc2007_info,
                .irq            = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO),
        },
index ac50f16..1e09de5 100644 (file)
@@ -142,7 +142,6 @@ static struct i2c_board_info eukrea_cpuimx51sd_i2c_devices[] = {
                I2C_BOARD_INFO("pcf8563", 0x51),
        }, {
                I2C_BOARD_INFO("tsc2007", 0x49),
-               .type           = "tsc2007",
                .platform_data  = &tsc2007_info,
        },
 };
index dff82eb..ba09552 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <asm/system.h>
+#include <asm/system_info.h>
 #include <mach/common.h>
 #include <mach/iomux-mx27.h>
 
@@ -116,6 +116,8 @@ static const int visstrim_m10_pins[] __initconst = {
        PB23_PF_USB_PWR,
        PB24_PF_USB_OC,
        /* CSI */
+       TVP5150_RSTN | GPIO_GPIO | GPIO_OUT,
+       TVP5150_PWDN | GPIO_GPIO | GPIO_OUT,
        PB10_PF_CSI_D0,
        PB11_PF_CSI_D1,
        PB12_PF_CSI_D2,
@@ -147,6 +149,24 @@ static struct gpio visstrim_m10_version_gpios[] = {
        { MOTHERBOARD_BIT2, GPIOF_IN, "mother-version-2" },
 };
 
+static const struct gpio visstrim_m10_gpios[] __initconst = {
+       {
+               .gpio = TVP5150_RSTN,
+               .flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
+               .label = "tvp5150_rstn",
+       },
+       {
+               .gpio = TVP5150_PWDN,
+               .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+               .label = "tvp5150_pwdn",
+       },
+       {
+               .gpio = OTG_PHY_CS_GPIO,
+               .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+               .label = "usbotg_cs",
+       },
+};
+
 /* Camera */
 static int visstrim_camera_power(struct device *dev, int on)
 {
@@ -190,13 +210,6 @@ static void __init visstrim_camera_init(void)
        struct platform_device *pdev;
        int dma;
 
-       /* Initialize tvp5150 gpios */
-       mxc_gpio_mode(TVP5150_RSTN | GPIO_GPIO | GPIO_OUT);
-       mxc_gpio_mode(TVP5150_PWDN | GPIO_GPIO | GPIO_OUT);
-       gpio_set_value(TVP5150_RSTN, 1);
-       gpio_set_value(TVP5150_PWDN, 0);
-       ndelay(1);
-
        gpio_set_value(TVP5150_PWDN, 1);
        ndelay(1);
        gpio_set_value(TVP5150_RSTN, 0);
@@ -377,10 +390,6 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = {
 /* USB OTG */
 static int otg_phy_init(struct platform_device *pdev)
 {
-       gpio_set_value(OTG_PHY_CS_GPIO, 0);
-
-       mdelay(10);
-
        return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
 }
 
@@ -435,6 +444,11 @@ static void __init visstrim_m10_board_init(void)
        if (ret)
                pr_err("Failed to setup pins (%d)\n", ret);
 
+       ret = gpio_request_array(visstrim_m10_gpios,
+                               ARRAY_SIZE(visstrim_m10_gpios));
+       if (ret)
+               pr_err("Failed to request gpios (%d)\n", ret);
+
        imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata);
        imx27_add_imx_uart0(&uart_pdata);
 
index d14bbe9..3e7401f 100644 (file)
@@ -32,7 +32,7 @@
  * Memory-mapped I/O on MX21ADS base board
  */
 #define MX21ADS_MMIO_BASE_ADDR   0xf5000000
-#define MX21ADS_MMIO_SIZE        SZ_16M
+#define MX21ADS_MMIO_SIZE        0xc00000
 
 #define MX21ADS_REG_ADDR(offset)    (void __force __iomem *) \
                (MX21ADS_MMIO_BASE_ADDR + (offset))
index 967ed5b..a8983b9 100644 (file)
@@ -86,6 +86,7 @@ static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
 
 void __init imx3_init_l2x0(void)
 {
+#ifdef CONFIG_CACHE_L2X0
        void __iomem *l2x0_base;
        void __iomem *clkctl_base;
 
@@ -115,6 +116,7 @@ void __init imx3_init_l2x0(void)
        }
 
        l2x0_init(l2x0_base, 0x00030024, 0x00000000);
+#endif
 }
 
 #ifdef CONFIG_SOC_IMX31
@@ -179,6 +181,8 @@ void __init imx31_soc_init(void)
        mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0);
        mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0);
 
+       pinctrl_provide_dummies();
+
        if (to_version == 1) {
                strncpy(imx31_sdma_pdata.fw_name, "sdma-imx31-to1.bin",
                        strlen(imx31_sdma_pdata.fw_name));
index feeee17..1d00305 100644 (file)
@@ -202,6 +202,8 @@ void __init imx51_soc_init(void)
        mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH);
        mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH);
 
+       pinctrl_provide_dummies();
+
        /* i.mx51 has the i.mx35 type sdma */
        imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata);
 
index 2222c57..b0d3cc4 100644 (file)
@@ -20,9 +20,6 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/gpio.h>
 #include <linux/leds.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
index 25fb3fd..f261cd2 100644 (file)
@@ -159,6 +159,7 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
        gate_fn->gate.flags = clk_gate_flags;
        gate_fn->gate.lock = lock;
        gate_fn->gate.hw.init = &init;
+       gate_fn->fn = fn;
 
        /* ops is the gate ops, but with our disable function */
        if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
@@ -193,9 +194,11 @@ static struct clk __init *kirkwood_register_gate_fn(const char *name,
                                    bit_idx, 0, &gating_lock, fn);
 }
 
+static struct clk *ge0, *ge1;
+
 void __init kirkwood_clk_init(void)
 {
-       struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio;
+       struct clk *runit, *sata0, *sata1, *usb0, *sdio;
        struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
 
        tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
@@ -257,6 +260,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
        orion_ge00_init(eth_data,
                        GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
                        IRQ_KIRKWOOD_GE00_ERR);
+       /* The interface forgets the MAC address assigned by u-boot if
+        * the clock is turned off, so claim the clk now. */
+       clk_prepare_enable(ge0);
 }
 
 
@@ -268,6 +274,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
        orion_ge01_init(eth_data,
                        GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
                        IRQ_KIRKWOOD_GE01_ERR);
+       clk_prepare_enable(ge1);
 }
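
Two fixes above for Kirkwood: the function-attached gate never stored its callback (the one-line "gate_fn->fn = fn"), and ge0/ge1 are now prepared as soon as the Ethernet interfaces register so the MAC address written by u-boot survives clock gating.  A self-contained sketch of how such a gate's disable path uses the stored callback (struct and function names below are illustrative, not the file's own code):

    #include <linux/clk-provider.h>
    #include <linux/kernel.h>

    struct example_gate_fn {
            struct clk_gate gate;
            void (*fn)(void);       /* board-specific power-down hook */
    };

    static void example_gate_fn_disable(struct clk_hw *hw)
    {
            struct example_gate_fn *gate_fn =
                    container_of(hw, struct example_gate_fn, gate.hw);

            if (gate_fn->fn)        /* only useful once ->fn is stored */
                    gate_fn->fn();

            clk_gate_ops.disable(hw);       /* then gate the clock as usual */
    }
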
 
 
index 3eee37a..a115142 100644 (file)
@@ -38,6 +38,7 @@
 #define IRQ_MASK_HIGH_OFF      0x0014
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE                (BRIDGE_PHYS_BASE | 0x0300)
 
 #define L2_CONFIG_REG          (BRIDGE_VIRT_BASE | 0x0128)
 #define L2_WRITETHROUGH                0x00000010
index fede3d5..c5b6851 100644 (file)
@@ -80,6 +80,7 @@
 #define  UART1_VIRT_BASE       (DEV_BUS_VIRT_BASE | 0x2100)
 
 #define BRIDGE_VIRT_BASE       (KIRKWOOD_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE       (KIRKWOOD_REGS_PHYS_BASE | 0x20000)
 
 #define CRYPTO_PHYS_BASE       (KIRKWOOD_REGS_PHYS_BASE | 0x30000)
 
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
deleted file mode 100644 (file)
index 0e135a5..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_GPIO_PXA_H
-#define __ASM_MACH_GPIO_PXA_H
-
-#include <mach/addr-map.h>
-#include <mach/cputype.h>
-#include <mach/irqs.h>
-
-#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
-
-#define BANK_OFF(n)    (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x)    (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
-
-#define gpio_to_bank(gpio)     ((gpio) >> 5)
-
-/* NOTE: these macros are defined here to make optimization of
- * gpio_{get,set}_value() to work when 'gpio' is a constant.
- * Usage of these macros otherwise is no longer recommended,
- * use generic GPIO API whenever possible.
- */
-#define GPIO_bit(gpio) (1 << ((gpio) & 0x1f))
-
-#define GPLR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x00)
-#define GPDR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x0c)
-#define GPSR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x18)
-#define GPCR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x24)
-
-#include <plat/gpio-pxa.h>
-
-#endif /* __ASM_MACH_GPIO_PXA_H */
index fcfe0e3..e60c7d9 100644 (file)
@@ -241,6 +241,7 @@ void __init mmp2_init_icu(void)
        icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
        icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
        icu_data[1].nr_irqs = 2;
+       icu_data[1].cascade_irq = 4;
        icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
        icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
                                                   icu_data[1].virq_base, 0,
@@ -249,6 +250,7 @@ void __init mmp2_init_icu(void)
        icu_data[2].reg_status = mmp_icu_base + 0x154;
        icu_data[2].reg_mask = mmp_icu_base + 0x16c;
        icu_data[2].nr_irqs = 2;
+       icu_data[2].cascade_irq = 5;
        icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
        icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
                                                   icu_data[2].virq_base, 0,
@@ -257,6 +259,7 @@ void __init mmp2_init_icu(void)
        icu_data[3].reg_status = mmp_icu_base + 0x180;
        icu_data[3].reg_mask = mmp_icu_base + 0x17c;
        icu_data[3].nr_irqs = 3;
+       icu_data[3].cascade_irq = 9;
        icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
        icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
                                                   icu_data[3].virq_base, 0,
@@ -265,6 +268,7 @@ void __init mmp2_init_icu(void)
        icu_data[4].reg_status = mmp_icu_base + 0x158;
        icu_data[4].reg_mask = mmp_icu_base + 0x170;
        icu_data[4].nr_irqs = 5;
+       icu_data[4].cascade_irq = 17;
        icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
        icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
                                                   icu_data[4].virq_base, 0,
@@ -273,6 +277,7 @@ void __init mmp2_init_icu(void)
        icu_data[5].reg_status = mmp_icu_base + 0x15c;
        icu_data[5].reg_mask = mmp_icu_base + 0x174;
        icu_data[5].nr_irqs = 15;
+       icu_data[5].cascade_irq = 35;
        icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
        icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
                                                   icu_data[5].virq_base, 0,
@@ -281,6 +286,7 @@ void __init mmp2_init_icu(void)
        icu_data[6].reg_status = mmp_icu_base + 0x160;
        icu_data[6].reg_mask = mmp_icu_base + 0x178;
        icu_data[6].nr_irqs = 2;
+       icu_data[6].cascade_irq = 51;
        icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
        icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
                                                   icu_data[6].virq_base, 0,
@@ -289,6 +295,7 @@ void __init mmp2_init_icu(void)
        icu_data[7].reg_status = mmp_icu_base + 0x188;
        icu_data[7].reg_mask = mmp_icu_base + 0x184;
        icu_data[7].nr_irqs = 2;
+       icu_data[7].cascade_irq = 55;
        icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
        icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
                                                   icu_data[7].virq_base, 0,
index c64dbb9..eb187e0 100644 (file)
@@ -31,5 +31,6 @@
 #define IRQ_MASK_HIGH_OFF      0x0014
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE                (BRIDGE_PHYS_BASE | 0x0300)
 
 #endif
index 3674497..e807c4c 100644 (file)
@@ -42,6 +42,7 @@
 #define MV78XX0_CORE0_REGS_PHYS_BASE   0xf1020000
 #define MV78XX0_CORE1_REGS_PHYS_BASE   0xf1024000
 #define MV78XX0_CORE_REGS_VIRT_BASE    0xfe400000
+#define MV78XX0_CORE_REGS_PHYS_BASE    0xfe400000
 #define MV78XX0_CORE_REGS_SIZE         SZ_16K
 
 #define MV78XX0_PCIE_IO_PHYS_BASE(i)   (0xf0800000 + ((i) << 20))
@@ -59,6 +60,7 @@
  * Core-specific peripheral registers.
  */
 #define BRIDGE_VIRT_BASE       (MV78XX0_CORE_REGS_VIRT_BASE)
+#define BRIDGE_PHYS_BASE       (MV78XX0_CORE_REGS_PHYS_BASE)
 
 /*
  * Register Map
index 5e90b9d..f5f0617 100644 (file)
@@ -205,6 +205,16 @@ static int apx4devkit_phy_fixup(struct phy_device *phy)
        return 0;
 }
 
+static void __init apx4devkit_fec_phy_clk_enable(void)
+{
+       struct clk *clk;
+
+       /* Enable fec phy clock */
+       clk = clk_get_sys("enet_out", NULL);
+       if (!IS_ERR(clk))
+               clk_prepare_enable(clk);
+}
+
 static void __init apx4devkit_init(void)
 {
        mx28_soc_init();
@@ -225,6 +235,7 @@ static void __init apx4devkit_init(void)
        phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
                        apx4devkit_phy_fixup);
 
+       apx4devkit_fec_phy_clk_enable();
        mx28_add_fec(0, &mx28_fec_pdata);
 
        mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata);
index 70a81f9..53c39d2 100644 (file)
@@ -97,11 +97,6 @@ __init board_onenand_init(struct mtd_partition *onenand_parts,
 
        gpmc_onenand_init(&board_onenand_data);
 }
-#else
-void
-__init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
-{
-}
 #endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
 
 #if defined(CONFIG_MTD_NAND_OMAP2) || \
index 8ca14e8..2c5d0ed 100644 (file)
@@ -83,11 +83,9 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data tusb_data = {
-#if defined(CONFIG_USB_MUSB_OTG)
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        .mode           = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_PERIPHERAL)
-       .mode           = MUSB_PERIPHERAL,
-#else /* defined(CONFIG_USB_MUSB_HOST) */
+#else
        .mode           = MUSB_HOST,
 #endif
        .set_power      = tusb_set_power,
index 79c6909..580fd17 100644 (file)
@@ -81,13 +81,13 @@ static u8 omap3_beagle_version;
 static struct {
        int mmc1_gpio_wp;
        int usb_pwr_level;
-       int reset_gpio;
+       int dvi_pd_gpio;
        int usr_button_gpio;
        int mmc_caps;
 } beagle_config = {
        .mmc1_gpio_wp = -EINVAL,
        .usb_pwr_level = GPIOF_OUT_INIT_LOW,
-       .reset_gpio = 129,
+       .dvi_pd_gpio = -EINVAL,
        .usr_button_gpio = 4,
        .mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
 };
@@ -126,21 +126,21 @@ static void __init omap3_beagle_init_rev(void)
                printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
                omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX;
                beagle_config.mmc1_gpio_wp = 29;
-               beagle_config.reset_gpio = 170;
+               beagle_config.dvi_pd_gpio = 170;
                beagle_config.usr_button_gpio = 7;
                break;
        case 6:
                printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n");
                omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3;
                beagle_config.mmc1_gpio_wp = 23;
-               beagle_config.reset_gpio = 170;
+               beagle_config.dvi_pd_gpio = 170;
                beagle_config.usr_button_gpio = 7;
                break;
        case 5:
                printk(KERN_INFO "OMAP3 Beagle Rev: C4\n");
                omap3_beagle_version = OMAP3BEAGLE_BOARD_C4;
                beagle_config.mmc1_gpio_wp = 23;
-               beagle_config.reset_gpio = 170;
+               beagle_config.dvi_pd_gpio = 170;
                beagle_config.usr_button_gpio = 7;
                break;
        case 0:
@@ -274,11 +274,9 @@ static int beagle_twl_gpio_setup(struct device *dev,
                if (r)
                        pr_err("%s: unable to configure nDVI_PWR_EN\n",
                                __func__);
-               r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH,
-                                    "DVI_LDO_EN");
-               if (r)
-                       pr_err("%s: unable to configure DVI_LDO_EN\n",
-                               __func__);
+
+               beagle_config.dvi_pd_gpio = gpio + 2;
+
        } else {
                /*
                 * REVISIT: need ehci-omap hooks for external VBUS
@@ -287,7 +285,7 @@ static int beagle_twl_gpio_setup(struct device *dev,
                if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
                        pr_err("%s: unable to configure EHCI_nOC\n", __func__);
        }
-       dvi_panel.power_down_gpio = beagle_config.reset_gpio;
+       dvi_panel.power_down_gpio = beagle_config.dvi_pd_gpio;
 
        gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level,
                        "nEN_USB_PWR");
@@ -499,7 +497,7 @@ static void __init omap3_beagle_init(void)
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
        omap3_beagle_init_rev();
 
-       if (beagle_config.mmc1_gpio_wp != -EINVAL)
+       if (gpio_is_valid(beagle_config.mmc1_gpio_wp))
                omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT);
        mmc[0].caps = beagle_config.mmc_caps;
        omap_hsmmc_init(mmc);
@@ -510,15 +508,13 @@ static void __init omap3_beagle_init(void)
 
        platform_add_devices(omap3_beagle_devices,
                        ARRAY_SIZE(omap3_beagle_devices));
+       if (gpio_is_valid(beagle_config.dvi_pd_gpio))
+               omap_mux_init_gpio(beagle_config.dvi_pd_gpio, OMAP_PIN_OUTPUT);
        omap_display_init(&beagle_dss_data);
        omap_serial_init();
        omap_sdrc_init(mt46h32m32lf6_sdrc_params,
                                  mt46h32m32lf6_sdrc_params);
 
-       omap_mux_init_gpio(170, OMAP_PIN_INPUT);
-       /* REVISIT leave DVI powered down until it's needed ... */
-       gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
-
        usb_musb_init(NULL);
        usbhs_init(&usbhs_bdata);
        omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
index 8fa2fc3..779734d 100644 (file)
@@ -494,8 +494,8 @@ static void __init overo_init(void)
 
        regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-       omap_hsmmc_init(mmc);
        overo_i2c_init();
+       omap_hsmmc_init(mmc);
        omap_display_init(&overo_dss_data);
        omap_serial_init();
        omap_sdrc_init(mt46h32m32lf6_sdrc_params,
index ff53dec..df2534d 100644 (file)
@@ -144,7 +144,6 @@ static struct lis3lv02d_platform_data rx51_lis3lv02d_data = {
        .release_resources = lis302_release,
        .st_min_limits = {-32, 3, 3},
        .st_max_limits = {-3, 32, 32},
-       .irq2 = OMAP_GPIO_IRQ(LIS302_IRQ2_GPIO),
 };
 #endif
 
@@ -1030,7 +1029,6 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = {
        {
                I2C_BOARD_INFO("lis3lv02d", 0x1d),
                .platform_data = &rx51_lis3lv02d_data,
-               .irq = OMAP_GPIO_IRQ(LIS302_IRQ1_GPIO),
        },
 #endif
 };
@@ -1056,6 +1054,10 @@ static int __init rx51_i2c_init(void)
        omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
        omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
                              ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
+#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
+       rx51_lis3lv02d_data.irq2 = gpio_to_irq(LIS302_IRQ2_GPIO);
+       rx51_peripherals_i2c_board_info_3[0].irq = gpio_to_irq(LIS302_IRQ1_GPIO);
+#endif
        omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3,
                              ARRAY_SIZE(rx51_peripherals_i2c_board_info_3));
        return 0;
index 4e1a3b0..1efdec2 100644 (file)
@@ -3514,7 +3514,7 @@ int __init omap3xxx_clk_init(void)
        struct omap_clk *c;
        u32 cpu_clkflg = 0;
 
-       if (cpu_is_omap3517()) {
+       if (soc_is_am35xx()) {
                cpu_mask = RATE_IN_34XX;
                cpu_clkflg = CK_AM35XX;
        } else if (cpu_is_omap3630()) {
index 2172f66..ba6f9a0 100644 (file)
@@ -84,6 +84,7 @@ static struct clk slimbus_clk = {
 
 static struct clk sys_32k_ck = {
        .name           = "sys_32k_ck",
+       .clkdm_name     = "prm_clkdm",
        .rate           = 32768,
        .ops            = &clkops_null,
 };
@@ -512,6 +513,7 @@ static struct clk ddrphy_ck = {
        .name           = "ddrphy_ck",
        .parent         = &dpll_core_m2_ck,
        .ops            = &clkops_null,
+       .clkdm_name     = "l3_emif_clkdm",
        .fixed_div      = 2,
        .recalc         = &omap_fixed_divisor_recalc,
 };
@@ -769,6 +771,7 @@ static const struct clksel dpll_mpu_m2_div[] = {
 static struct clk dpll_mpu_m2_ck = {
        .name           = "dpll_mpu_m2_ck",
        .parent         = &dpll_mpu_ck,
+       .clkdm_name     = "cm_clkdm",
        .clksel         = dpll_mpu_m2_div,
        .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_MPU,
        .clksel_mask    = OMAP4430_DPLL_CLKOUT_DIV_MASK,
@@ -1149,6 +1152,7 @@ static const struct clksel l3_div_div[] = {
 static struct clk l3_div_ck = {
        .name           = "l3_div_ck",
        .parent         = &div_core_ck,
+       .clkdm_name     = "cm_clkdm",
        .clksel         = l3_div_div,
        .clksel_reg     = OMAP4430_CM_CLKSEL_CORE,
        .clksel_mask    = OMAP4430_CLKSEL_L3_MASK,
@@ -2824,6 +2828,7 @@ static const struct clksel trace_clk_div_div[] = {
 static struct clk trace_clk_div_ck = {
        .name           = "trace_clk_div_ck",
        .parent         = &pmd_trace_clk_mux_ck,
+       .clkdm_name     = "emu_sys_clkdm",
        .clksel         = trace_clk_div_div,
        .clksel_reg     = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
        .clksel_mask    = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
@@ -3412,9 +3417,12 @@ int __init omap4xxx_clk_init(void)
        if (cpu_is_omap443x()) {
                cpu_mask = RATE_IN_4430;
                cpu_clkflg = CK_443X;
-       } else if (cpu_is_omap446x()) {
+       } else if (cpu_is_omap446x() || cpu_is_omap447x()) {
                cpu_mask = RATE_IN_4460 | RATE_IN_4430;
                cpu_clkflg = CK_446X | CK_443X;
+
+               if (cpu_is_omap447x())
+                       pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
        } else {
                return 0;
        }
index f7b5860..6227e95 100644 (file)
  *
  * CLKDM_NO_AUTODEPS: Prevent "autodeps" from being added/removed from this
  *     clockdomain.  (Currently, this applies to OMAP3 clockdomains only.)
+ * CLKDM_ACTIVE_WITH_MPU: The PRCM guarantees that this clockdomain is
+ *     active whenever the MPU is active.  True for interconnects and
+ *     the WKUP clockdomains.
  */
 #define CLKDM_CAN_FORCE_SLEEP                  (1 << 0)
 #define CLKDM_CAN_FORCE_WAKEUP                 (1 << 1)
 #define CLKDM_CAN_ENABLE_AUTO                  (1 << 2)
 #define CLKDM_CAN_DISABLE_AUTO                 (1 << 3)
 #define CLKDM_NO_AUTODEPS                      (1 << 4)
+#define CLKDM_ACTIVE_WITH_MPU                  (1 << 5)
 
 #define CLKDM_CAN_HWSUP                (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
 #define CLKDM_CAN_SWSUP                (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
index 839145e..4972219 100644 (file)
@@ -88,4 +88,5 @@ struct clockdomain wkup_common_clkdm = {
        .name           = "wkup_clkdm",
        .pwrdm          = { .name = "wkup_pwrdm" },
        .dep_bit        = OMAP_EN_WKUP_SHIFT,
+       .flags          = CLKDM_ACTIVE_WITH_MPU,
 };
index c534258..7f2133a 100644 (file)
@@ -381,7 +381,7 @@ static struct clockdomain l4_wkup_44xx_clkdm = {
        .cm_inst          = OMAP4430_PRM_WKUP_CM_INST,
        .clkdm_offs       = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
        .dep_bit          = OMAP4430_L4WKUP_STATDEP_SHIFT,
-       .flags            = CLKDM_CAN_HWSUP,
+       .flags            = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
 };
 
 static struct clockdomain emu_sys_44xx_clkdm = {
index a7bc096..f24e3f7 100644 (file)
  */
 #define MAX_MODULE_READY_TIME          2000
 
+/*
+ * MAX_MODULE_DISABLE_TIME: max duration in microseconds to wait for
+ * the PRCM to request that a module enter the inactive state in the
+ * case of OMAP2 & 3.  In the case of OMAP4 this is the max duration
+ * in microseconds for the module to reach the inactive state from
+ * a functional state.
+ * XXX FSUSB on OMAP4430 takes ~4ms to idle after reset during
+ * kernel init.
+ */
+#define MAX_MODULE_DISABLE_TIME                5000
+
 #endif
index 8c86d29..1a39945 100644 (file)
@@ -313,9 +313,9 @@ int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_off
 
        omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) ==
                           CLKCTRL_IDLEST_DISABLED),
-                         MAX_MODULE_READY_TIME, i);
+                         MAX_MODULE_DISABLE_TIME, i);
 
-       return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+       return (i < MAX_MODULE_DISABLE_TIME) ? 0 : -EBUSY;
 }
 
 /**
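
The cminst change above gives module disable its own budget, MAX_MODULE_DISABLE_TIME (5000 us), instead of reusing MAX_MODULE_READY_TIME, since blocks such as the OMAP4430 FSUSB need roughly 4 ms to reach the disabled IDLEST state.  omap_test_timeout() amounts to a once-per-microsecond poll of this shape (register layout and mask below are assumptions for illustration):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    /* Illustrative only: poll an IDLEST-style field until it reads
     * "disabled" or the microsecond budget is exhausted. */
    static int example_wait_module_disabled(void __iomem *clkctrl, int max_us)
    {
            int i;

            for (i = 0; i < max_us; i++) {
                    if ((readl(clkctrl) & (0x3 << 16)) == (0x3 << 16))
                            return 0;       /* IDLEST == disabled (assumed encoding) */
                    udelay(1);
            }

            return -EBUSY;
    }
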
index 845309f..88ffa1e 100644 (file)
@@ -20,6 +20,9 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
+
+#include <asm/memblock.h>
+
 #include "cm2xxx_3xxx.h"
 #include "prm2xxx_3xxx.h"
 #ifdef CONFIG_BRIDGE_DVFS
index 0389b32..00486a8 100644 (file)
@@ -246,6 +246,17 @@ void __init omap3xxx_check_features(void)
 
        omap_features |= OMAP3_HAS_SDRC;
 
+       /*
+        * am35x fixups:
+        * - The am35x Chip ID register has bits 12, 7:5, and 3:2 marked as
+        *   reserved and therefore return 0 when read.  Unfortunately,
+        *   OMAP3_CHECK_FEATURE() will interpret some of those zeroes to
+        *   mean that a feature is present even though it isn't, so clear
+        *   the incorrectly set feature bits.
+        */
+       if (soc_is_am35xx())
+               omap_features &= ~(OMAP3_HAS_IVA | OMAP3_HAS_ISP);
+
        /*
         * TODO: Get additional info (where applicable)
         *       e.g. Size of L2 cache.
index fdc4303..6038a8c 100644 (file)
@@ -149,6 +149,7 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
        ct->chip.irq_ack = omap_mask_ack_irq;
        ct->chip.irq_mask = irq_gc_mask_disable_reg;
        ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+       ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
 
        ct->regs.enable = INTC_MIR_CLEAR0;
        ct->regs.disable = INTC_MIR_SET0;
index 80e55c5..9fe6829 100644 (file)
@@ -41,6 +41,7 @@
 #include "control.h"
 #include "mux.h"
 #include "prm.h"
+#include "common.h"
 
 #define OMAP_MUX_BASE_OFFSET           0x30    /* Offset from CTRL_BASE */
 #define OMAP_MUX_BASE_SZ               0x5ca
@@ -217,8 +218,7 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
        return -ENODEV;
 }
 
-static int __init
-omap_mux_get_by_name(const char *muxname,
+int __init omap_mux_get_by_name(const char *muxname,
                        struct omap_mux_partition **found_partition,
                        struct omap_mux **found_mux)
 {
index 69fe060..471e62a 100644 (file)
@@ -59,6 +59,7 @@
 #define OMAP_PIN_OFF_WAKEUPENABLE      OMAP_WAKEUP_EN
 
 #define OMAP_MODE_GPIO(x)      (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4)
+#define OMAP_MODE_UART(x)      (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE0)
 
 /* Flags for omapX_mux_init */
 #define OMAP_PACKAGE_MASK              0xffff
@@ -225,8 +226,18 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads);
  */
 void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state);
 
+int omap_mux_get_by_name(const char *muxname,
+               struct omap_mux_partition **found_partition,
+               struct omap_mux **found_mux);
 #else
 
+static inline int omap_mux_get_by_name(const char *muxname,
+               struct omap_mux_partition **found_partition,
+               struct omap_mux **found_mux)
+{
+       return 0;
+}
+
 static inline int omap_mux_init_gpio(int gpio, int val)
 {
        return 0;
index bf86f7e..2d710f5 100644 (file)
@@ -530,7 +530,7 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
        if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
                _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
        if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
-               _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
+               _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART, v);
 
        /* XXX test pwrdm_get_wken for this hwmod's subsystem */
 
@@ -1124,15 +1124,18 @@ static struct omap_hwmod_addr_space * __init _find_mpu_rt_addr_space(struct omap
  * _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
  * @oh: struct omap_hwmod *
  *
- * If module is marked as SWSUP_SIDLE, force the module out of slave
- * idle; otherwise, configure it for smart-idle.  If module is marked
- * as SWSUP_MSUSPEND, force the module out of master standby;
- * otherwise, configure it for smart-standby.  No return value.
+ * Ensure that the OCP_SYSCONFIG register for the IP block represented
+ * by @oh is set to indicate to the PRCM that the IP block is active.
+ * Usually this means placing the module into smart-idle mode and
+ * smart-standby, but if there is a bug in the automatic idle handling
+ * for the IP block, it may need to be placed into the force-idle or
+ * no-idle variants of these modes.  No return value.
  */
 static void _enable_sysc(struct omap_hwmod *oh)
 {
        u8 idlemode, sf;
        u32 v;
+       bool clkdm_act;
 
        if (!oh->class->sysc)
                return;
@@ -1141,8 +1144,16 @@ static void _enable_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+               clkdm_act = ((oh->clkdm &&
+                             oh->clkdm->flags & CLKDM_ACTIVE_WITH_MPU) ||
+                            (oh->_clk && oh->_clk->clkdm &&
+                             oh->_clk->clkdm->flags & CLKDM_ACTIVE_WITH_MPU));
+               if (clkdm_act && !(oh->class->sysc->idlemodes &
+                                  (SIDLE_SMART | SIDLE_SMART_WKUP)))
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               else
+                       idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
+                               HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
@@ -1208,8 +1219,13 @@ static void _idle_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+               /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
+               if (oh->flags & HWMOD_SWSUP_SIDLE ||
+                   !(oh->class->sysc->idlemodes &
+                     (SIDLE_SMART | SIDLE_SMART_WKUP)))
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               else
+                       idlemode = HWMOD_IDLEMODE_SMART;
                _set_slave_idlemode(oh, idlemode, &v);
        }
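
The _enable_sysc()/_idle_sysc() changes above reduce to a small decision: a block whose clockdomain is always active with the MPU and which cannot do smart-idle is put into force-idle, otherwise the usual software-supervised (no-idle) or smart-idle selection applies.  The enable-time choice restated as a standalone helper (sketch only; the HWMOD_IDLEMODE_* values come from plat/omap_hwmod.h):

    #include <linux/types.h>
    #include <plat/omap_hwmod.h>

    /* Illustrative only: slave-idle mode chosen when enabling a module. */
    static u8 example_pick_enable_sidle(bool clkdm_active_with_mpu,
                                        bool has_smart_idle, bool swsup_sidle)
    {
            if (clkdm_active_with_mpu && !has_smart_idle)
                    return HWMOD_IDLEMODE_FORCE;

            return swsup_sidle ? HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
    }
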
 
index 950454a..b7bcba5 100644 (file)
@@ -393,8 +393,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = {
        .rev_offs       = 0x0000,
        .sysc_offs      = 0x0004,
        .sysc_flags     = SYSC_HAS_SIDLEMODE,
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          SIDLE_SMART_WKUP),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO),
        .sysc_fields    = &omap_hwmod_sysc_type1,
 };
 
@@ -854,6 +853,11 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
        .name           = "dss_hdmi",
        .class          = &omap44xx_hdmi_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
+       /*
+        * HDMI audio requires the use of no-idle mode.  Hence, the
+        * idle mode is set by software (HWMOD_SWSUP_SIDLE).
+        */
+       .flags          = HWMOD_SWSUP_SIDLE,
        .mpu_irqs       = omap44xx_dss_hdmi_irqs,
        .sdma_reqs      = omap44xx_dss_hdmi_sdma_reqs,
        .main_clk       = "dss_48mhz_clk",
@@ -1924,7 +1928,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp1_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
@@ -1959,7 +1963,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp2_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
@@ -1994,7 +1998,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp3_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
@@ -2029,7 +2033,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp4_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
@@ -3860,7 +3864,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
 };
 
 /* usb_host_fs -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_usb_host_fs__l3_main_2 = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_usb_host_fs__l3_main_2 = {
        .master         = &omap44xx_usb_host_fs_hwmod,
        .slave          = &omap44xx_l3_main_2_hwmod,
        .clk            = "l3_div_ck",
@@ -3918,7 +3922,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
 };
 
 /* aess -> l4_abe */
-static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
        .master         = &omap44xx_aess_hwmod,
        .slave          = &omap44xx_l4_abe_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -4009,7 +4013,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
 };
 
 /* l4_abe -> aess */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
        .master         = &omap44xx_l4_abe_hwmod,
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -4027,7 +4031,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
 };
 
 /* l4_abe -> aess (dma) */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
        .master         = &omap44xx_l4_abe_hwmod,
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -5853,7 +5857,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_host_fs_addrs[] = {
 };
 
 /* l4_cfg -> usb_host_fs */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_fs = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
        .master         = &omap44xx_l4_cfg_hwmod,
        .slave          = &omap44xx_usb_host_fs_hwmod,
        .clk            = "l4_div_ck",
@@ -6010,13 +6014,13 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_iva__l3_main_2,
        &omap44xx_l3_main_1__l3_main_2,
        &omap44xx_l4_cfg__l3_main_2,
-       &omap44xx_usb_host_fs__l3_main_2,
+       /* &omap44xx_usb_host_fs__l3_main_2, */
        &omap44xx_usb_host_hs__l3_main_2,
        &omap44xx_usb_otg_hs__l3_main_2,
        &omap44xx_l3_main_1__l3_main_3,
        &omap44xx_l3_main_2__l3_main_3,
        &omap44xx_l4_cfg__l3_main_3,
-       &omap44xx_aess__l4_abe,
+       /* &omap44xx_aess__l4_abe, */
        &omap44xx_dsp__l4_abe,
        &omap44xx_l3_main_1__l4_abe,
        &omap44xx_mpu__l4_abe,
@@ -6025,8 +6029,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_cfg__l4_wkup,
        &omap44xx_mpu__mpu_private,
        &omap44xx_l4_cfg__ocp_wp_noc,
-       &omap44xx_l4_abe__aess,
-       &omap44xx_l4_abe__aess_dma,
+       /* &omap44xx_l4_abe__aess, */
+       /* &omap44xx_l4_abe__aess_dma, */
        &omap44xx_l3_main_2__c2c,
        &omap44xx_l4_wkup__counter_32k,
        &omap44xx_l4_cfg__ctrl_module_core,
@@ -6132,7 +6136,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_per__uart2,
        &omap44xx_l4_per__uart3,
        &omap44xx_l4_per__uart4,
-       &omap44xx_l4_cfg__usb_host_fs,
+       /* &omap44xx_l4_cfg__usb_host_fs, */
        &omap44xx_l4_cfg__usb_host_hs,
        &omap44xx_l4_cfg__usb_otg_hs,
        &omap44xx_l4_cfg__usb_tll_hs,
index a05a62f..acc2164 100644 (file)
@@ -155,10 +155,11 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
        u8 multi = error & L3_ERROR_LOG_MULTI;
        u32 address = omap3_l3_decode_addr(error_addr);
 
-       WARN(true, "%s seen by %s %s at address %x\n",
+       pr_err("%s seen by %s %s at address %x\n",
                        omap3_l3_code_string(code),
                        omap3_l3_initiator_string(initid),
                        multi ? "Multiple Errors" : "", address);
+       WARN_ON(1);
 
        return IRQ_HANDLED;
 }
index 4c90477..d52651a 100644 (file)
@@ -239,21 +239,15 @@ void am35x_set_mode(u8 musb_mode)
 
        devconf2 &= ~CONF2_OTGMODE;
        switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
        case MUSB_HOST:         /* Force VBUS valid, ID = 0 */
                devconf2 |= CONF2_FORCE_HOST;
                break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        case MUSB_PERIPHERAL:   /* Force VBUS valid, ID = 1 */
                devconf2 |= CONF2_FORCE_DEVICE;
                break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
        case MUSB_OTG:          /* Don't override the VBUS/ID comparators */
                devconf2 |= CONF2_NO_OVERRIDE;
                break;
-#endif
        default:
                pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
        }
index a34023d..3a595e8 100644 (file)
@@ -724,6 +724,7 @@ int __init omap3_pm_init(void)
        ret = request_irq(omap_prcm_event_to_irq("io"),
                _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
                omap3_pm_init);
+       enable_irq(omap_prcm_event_to_irq("io"));
 
        if (ret) {
                pr_err("pm: Failed to request pm_io irq\n");
index 9ce7654..21cb740 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 
 #include "common.h"
 #include <plat/cpu.h>
@@ -303,8 +304,15 @@ void omap3xxx_prm_restore_irqen(u32 *saved_mask)
 
 static int __init omap3xxx_prcm_init(void)
 {
-       if (cpu_is_omap34xx())
-               return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
-       return 0;
+       int ret = 0;
+
+       if (cpu_is_omap34xx()) {
+               ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
+               if (!ret)
+                       irq_set_status_flags(omap_prcm_event_to_irq("io"),
+                                            IRQ_NOAUTOEN);
+       }
+
+       return ret;
 }
 subsys_initcall(omap3xxx_prcm_init);
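
The pm34xx.c and prm2xxx_3xxx.c hunks above form a request-now, enable-later pair: the "io" PRCM interrupt is flagged IRQ_NOAUTOEN when the chain handler is registered, so it stays masked until omap3_pm_init() has requested it and calls enable_irq().  A condensed sketch of that pattern (names and flags below are placeholders):

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    /* Illustrative only: IRQ_NOAUTOEN keeps the line masked across
     * request_irq(); it only starts firing after enable_irq(). */
    static int example_setup_deferred_irq(int irq, irq_handler_t handler,
                                          void *dev_id)
    {
            int ret;

            irq_set_status_flags(irq, IRQ_NOAUTOEN);
            ret = request_irq(irq, handler, IRQF_SHARED | IRQF_NO_SUSPEND,
                              "example_io", dev_id);
            if (ret)
                    return ret;

            /* ... later, once the wakeup path can be serviced ... */
            enable_irq(irq);
            return 0;
    }
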
index 292d4aa..c1b93c7 100644 (file)
@@ -57,6 +57,7 @@ struct omap_uart_state {
 
        struct list_head node;
        struct omap_hwmod *oh;
+       struct omap_device_pad default_omap_uart_pads[2];
 };
 
 static LIST_HEAD(uart_list);
@@ -126,11 +127,70 @@ static void omap_uart_set_smartidle(struct platform_device *pdev) {}
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_OMAP_MUX
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata)
+
+#define OMAP_UART_DEFAULT_PAD_NAME_LEN 28
+static char rx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN],
+               tx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN] __initdata;
+
+static void  __init
+omap_serial_fill_uart_tx_rx_pads(struct omap_board_data *bdata,
+                               struct omap_uart_state *uart)
+{
+       uart->default_omap_uart_pads[0].name = rx_pad_name;
+       uart->default_omap_uart_pads[0].flags = OMAP_DEVICE_PAD_REMUX |
+                                                       OMAP_DEVICE_PAD_WAKEUP;
+       uart->default_omap_uart_pads[0].enable = OMAP_PIN_INPUT |
+                                                       OMAP_MUX_MODE0;
+       uart->default_omap_uart_pads[0].idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0;
+       uart->default_omap_uart_pads[1].name = tx_pad_name;
+       uart->default_omap_uart_pads[1].enable = OMAP_PIN_OUTPUT |
+                                                       OMAP_MUX_MODE0;
+       bdata->pads = uart->default_omap_uart_pads;
+       bdata->pads_cnt = ARRAY_SIZE(uart->default_omap_uart_pads);
+}
+
+static void  __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+                                               struct omap_uart_state *uart)
 {
+       struct omap_mux_partition *tx_partition = NULL, *rx_partition = NULL;
+       struct omap_mux *rx_mux = NULL, *tx_mux = NULL;
+       char *rx_fmt, *tx_fmt;
+       int uart_nr = bdata->id + 1;
+
+       if (bdata->id != 2) {
+               rx_fmt = "uart%d_rx.uart%d_rx";
+               tx_fmt = "uart%d_tx.uart%d_tx";
+       } else {
+               rx_fmt = "uart%d_rx_irrx.uart%d_rx_irrx";
+               tx_fmt = "uart%d_tx_irtx.uart%d_tx_irtx";
+       }
+
+       snprintf(rx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, rx_fmt,
+                       uart_nr, uart_nr);
+       snprintf(tx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, tx_fmt,
+                       uart_nr, uart_nr);
+
+       if (omap_mux_get_by_name(rx_pad_name, &rx_partition, &rx_mux) >= 0 &&
+                       omap_mux_get_by_name
+                               (tx_pad_name, &tx_partition, &tx_mux) >= 0) {
+               u16 tx_mode, rx_mode;
+
+               tx_mode = omap_mux_read(tx_partition, tx_mux->reg_offset);
+               rx_mode = omap_mux_read(rx_partition, rx_mux->reg_offset);
+
+               /*
+                * Check if the UART is used in its default tx/rx mode, i.e. in
+                * mux mode0; if so, configure the rx pin for wakeup capability
+                */
+               if (OMAP_MODE_UART(rx_mode) && OMAP_MODE_UART(tx_mode))
+                       omap_serial_fill_uart_tx_rx_pads(bdata, uart);
+       }
 }
 #else
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
+static void __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+               struct omap_uart_state *uart)
+{
+}
 #endif
 
 static char *cmdline_find_option(char *str)
@@ -287,8 +347,7 @@ void __init omap_serial_board_init(struct omap_uart_port_info *info)
                bdata.pads = NULL;
                bdata.pads_cnt = 0;
 
-               if (cpu_is_omap44xx() || cpu_is_omap34xx())
-                       omap_serial_fill_default_pads(&bdata);
+               omap_serial_check_wakeup(&bdata, uart);
 
                if (!info)
                        omap_serial_init_port(&bdata, NULL);
index 119d5a9..43a9790 100644 (file)
@@ -32,6 +32,7 @@
 #include "twl-common.h"
 #include "pm.h"
 #include "voltage.h"
+#include "mux.h"
 
 static struct i2c_board_info __initdata pmic_i2c_board_info = {
        .addr           = 0x48,
@@ -77,6 +78,7 @@ void __init omap4_pmic_init(const char *pmic_type,
                    struct twl6040_platform_data *twl6040_data, int twl6040_irq)
 {
        /* PMIC part*/
+       omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
        strncpy(omap4_i2c1_board_info[0].type, pmic_type,
                sizeof(omap4_i2c1_board_info[0].type));
        omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
index b19d1b4..c4a5768 100644 (file)
@@ -41,12 +41,10 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_MUSB_OTG
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        .mode           = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+#else
        .mode           = MUSB_HOST,
-#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
-       .mode           = MUSB_PERIPHERAL,
 #endif
        /* .clock is set dynamically */
        .config         = &musb_config,
index db84a46..805bea6 100644 (file)
@@ -300,7 +300,7 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
                printk(error, 3, status);
                return status;
        }
-       tusb_resources[2].start = irq + IH_GPIO_BASE;
+       tusb_resources[2].start = gpio_to_irq(irq);
 
        /* set up memory timings ... can speed them up later */
        if (!ps_refclk) {
index 96484bc..11a3c1e 100644 (file)
@@ -35,5 +35,5 @@
 #define MAIN_IRQ_MASK          (ORION5X_BRIDGE_VIRT_BASE | 0x204)
 
 #define TIMER_VIRT_BASE                (ORION5X_BRIDGE_VIRT_BASE | 0x300)
-
+#define TIMER_PHYS_BASE                (ORION5X_BRIDGE_PHYS_BASE | 0x300)
 #endif
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
new file mode 100644 (file)
index 0000000..1aa5d0a
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-orion5x/include/mach/io.h
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_IO_H
+#define __ASM_ARCH_IO_H
+
+#include <mach/orion5x.h>
+#include <asm/sizes.h>
+
+#define IO_SPACE_LIMIT         SZ_2M
+static inline void __iomem *__io(unsigned long addr)
+{
+       return (void __iomem *)(addr + ORION5X_PCIE_IO_VIRT_BASE);
+}
+
+#define __io(a)                         __io(a)
+#endif
index 2745f5d..683e085 100644 (file)
@@ -82,6 +82,7 @@
 #define  UART1_VIRT_BASE               (ORION5X_DEV_BUS_VIRT_BASE | 0x2100)
 
 #define ORION5X_BRIDGE_VIRT_BASE       (ORION5X_REGS_VIRT_BASE | 0x20000)
+#define ORION5X_BRIDGE_PHYS_BASE       (ORION5X_REGS_PHYS_BASE | 0x20000)
 
 #define ORION5X_PCI_VIRT_BASE          (ORION5X_REGS_VIRT_BASE | 0x30000)
 
index d09da6a..d3de84b 100644 (file)
@@ -127,7 +127,11 @@ static unsigned long hx4700_pin_config[] __initdata = {
        GPIO19_SSP2_SCLK,
        GPIO86_SSP2_RXD,
        GPIO87_SSP2_TXD,
-       GPIO88_GPIO,
+       GPIO88_GPIO | MFP_LPM_DRIVE_HIGH,       /* TSC2046_CS */
+
+       /* BQ24022 Regulator */
+       GPIO72_GPIO | MFP_LPM_KEEP_OUTPUT,      /* BQ24022_nCHARGE_EN */
+       GPIO96_GPIO | MFP_LPM_KEEP_OUTPUT,      /* BQ24022_ISET2 */
 
        /* HX4700 specific input GPIOs */
        GPIO12_GPIO | WAKEUP_ON_EDGE_RISE,      /* ASIC3_IRQ */
@@ -135,6 +139,10 @@ static unsigned long hx4700_pin_config[] __initdata = {
        GPIO14_GPIO,    /* nWLAN_IRQ */
 
        /* HX4700 specific output GPIOs */
+       GPIO61_GPIO | MFP_LPM_DRIVE_HIGH,       /* W3220_nRESET */
+       GPIO71_GPIO | MFP_LPM_DRIVE_HIGH,       /* ASIC3_nRESET */
+       GPIO81_GPIO | MFP_LPM_DRIVE_HIGH,       /* CPU_GP_nRESET */
+       GPIO116_GPIO | MFP_LPM_DRIVE_HIGH,      /* CPU_HW_nRESET */
        GPIO102_GPIO | MFP_LPM_DRIVE_LOW,       /* SYNAPTICS_POWER_ON */
 
        GPIO10_GPIO,    /* GSM_IRQ */
@@ -872,14 +880,19 @@ static struct gpio global_gpios[] = {
        { GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
        { GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
        { GPIO32_HX4700_RS232_ON,         GPIOF_OUT_INIT_HIGH, "RS232_ON" },
+       { GPIO61_HX4700_W3220_nRESET,     GPIOF_OUT_INIT_HIGH, "W3220_nRESET" },
        { GPIO71_HX4700_ASIC3_nRESET,     GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
+       { GPIO81_HX4700_CPU_GP_nRESET,    GPIOF_OUT_INIT_HIGH, "CPU_GP_nRESET" },
        { GPIO82_HX4700_EUART_RESET,      GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
+       { GPIO116_HX4700_CPU_HW_nRESET,   GPIOF_OUT_INIT_HIGH, "CPU_HW_nRESET" },
 };
 
 static void __init hx4700_init(void)
 {
        int ret;
 
+       PCFR = PCFR_GPR_EN | PCFR_OPDE;
+
        pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
        gpio_set_wake(GPIO12_HX4700_ASIC3_IRQ, 1);
        ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
index 414364e..cb2883d 100644 (file)
@@ -106,7 +106,7 @@ static struct clk s3c2440_clk_cam_upll = {
 static struct clk s3c2440_clk_ac97 = {
        .name           = "ac97",
        .enable         = s3c2410_clkcon_enable,
-       .ctrlbit        = S3C2440_CLKCON_CAMERA,
+       .ctrlbit        = S3C2440_CLKCON_AC97,
 };
 
 static unsigned long  s3c2440_fclk_n_getrate(struct clk *clk)
index 9e37026..9bd1355 100644 (file)
@@ -779,6 +779,7 @@ DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva")
        .init_irq       = r8a7740_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = eva_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
        .dt_compat      = eva_boards_compat_dt,
 MACHINE_END
index 7bc5e7d..6a33cf3 100644 (file)
@@ -80,6 +80,7 @@ DT_MACHINE_START(KZM9D_DT, "kzm9d")
        .init_irq       = emev2_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = kzm9d_add_standard_devices,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
        .dt_compat      = kzm9d_boards_compat_dt,
 MACHINE_END
index d8e33b6..c0ae815 100644 (file)
@@ -455,6 +455,7 @@ DT_MACHINE_START(KZM9G_DT, "kzm9g")
        .init_irq       = sh73a0_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = kzm_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
        .dt_compat      = kzm9g_boards_compat_dt,
 MACHINE_END
index b577f7c..150122a 100644 (file)
@@ -1512,6 +1512,9 @@ static void __init mackerel_init(void)
        gpio_request(GPIO_FN_SDHID0_1, NULL);
        gpio_request(GPIO_FN_SDHID0_0, NULL);
 
+       /* SDHI0 PORT172 card-detect IRQ26 */
+       gpio_request(GPIO_FN_IRQ26_172, NULL);
+
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
        /* enable SDHI1 */
        gpio_request(GPIO_FN_SDHICMD1, NULL);
index 472d1f5..3946c4b 100644 (file)
@@ -475,9 +475,9 @@ static struct clk *late_main_clks[] = {
 
 enum { MSTP001,
        MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
-       MSTP219,
+       MSTP219, MSTP218,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-       MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
+       MSTP331, MSTP329, MSTP325, MSTP323,
        MSTP314, MSTP313, MSTP312, MSTP311,
        MSTP303, MSTP302, MSTP301, MSTP300,
        MSTP411, MSTP410, MSTP403,
@@ -497,6 +497,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
        [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
        [MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
+       [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */
        [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
        [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
        [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -508,7 +509,6 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
        [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
        [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
-       [MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */
        [MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
        [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
        [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
@@ -552,6 +552,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
        CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
+       CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */
        CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
        CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
        CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
@@ -563,7 +564,6 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
        CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
        CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
-       CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */
        CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
        CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
        CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
index 550b23d..f04fad4 100644 (file)
@@ -35,6 +35,9 @@
 #define INT2SMSKCR3 0xfe7822ac
 #define INT2SMSKCR4 0xfe7822b0
 
+#define INT2NTSR0 0xfe700060
+#define INT2NTSR1 0xfe700064
+
 static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
 {
        return 0; /* always allow wakeup */
@@ -49,6 +52,10 @@ void __init r8a7779_init_irq(void)
        gic_init(0, 29, gic_dist_base, gic_cpu_base);
        gic_arch_extn.irq_set_wake = r8a7779_set_wake;
 
+       /* route all interrupts to ARM */
+       __raw_writel(0xffffffff, INT2NTSR0);
+       __raw_writel(0x3fffffff, INT2NTSR1);
+
        /* unmask all known interrupts in INTCS2 */
        __raw_writel(0xfffffff0, INT2SMSKCR0);
        __raw_writel(0xfff7ffff, INT2SMSKCR1);
index bacdd66..fde0d23 100644 (file)
 #include <mach/common.h>
 #include <mach/emev2.h>
 
+#ifdef CONFIG_ARCH_SH73A0
 #define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2() || \
                        of_machine_is_compatible("renesas,sh73a0"))
+#else
+#define is_sh73a0() (0)
+#endif
+
 #define is_r8a7779() machine_is_marzen()
+
+#ifdef CONFIG_ARCH_EMEV2
 #define is_emev2() of_machine_is_compatible("renesas,emev2")
+#else
+#define is_emev2() (0)
+#endif
 
 static unsigned int __init shmobile_smp_get_core_count(void)
 {
index 6a4bd58..fafce9c 100644 (file)
@@ -484,7 +484,7 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
        },
 };
 
-#define SH7372_CHCLR 0x220
+#define SH7372_CHCLR (0x220 - 0x20)
 
 static const struct sh_dmae_channel sh7372_dmae_channels[] = {
        {
index ea15646..9e3ae6b 100644 (file)
@@ -4,7 +4,7 @@
  * Debugging macro include header spear13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 383ab04..d50bdb6 100644 (file)
@@ -4,7 +4,7 @@
  * DMA information for SPEAr13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 6d8c45b..dac57fd 100644 (file)
@@ -4,7 +4,7 @@
  * spear13xx machine family generic header file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index cd6f4f8..85f1763 100644 (file)
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f542a24..271a62b 100644 (file)
@@ -4,7 +4,7 @@
  * IRQ helper macros for spear13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 30c57ef..65f27de 100644 (file)
@@ -4,7 +4,7 @@
  * spear13xx Machine family specific definition
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 31af3e8..3a58b82 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family specific timex definitions
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index c784089..70fe72f 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index fefd15b..732d29b 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1310 machine source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index ee38cbc..81e4ed7 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1340 machine source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 50b349a..cf936b1 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr13XX machines common source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 590519f..0a6381f 100644 (file)
@@ -4,7 +4,7 @@
  * Debugging macro include header spear3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 4a95b94..ce19113 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family generic header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 451b208..2ac74c6 100644 (file)
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 51bd62a..803de76 100644 (file)
@@ -4,7 +4,7 @@
  * IRQ helper macros for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 18e2ac5..6309bf6 100644 (file)
@@ -4,7 +4,7 @@
  * Miscellaneous registers definitions for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 51eb953..8cca951 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3xx Machine family specific definition
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a38cc9d..9f5d08b 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family specific timex definitions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 53ba8bb..b909b01 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f74a05b..0f882ec 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr300 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 84dfb09..bbcf457 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr310 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a88fa84..88d483b 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr320 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f22419e..66db5f1 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machines common source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -87,7 +87,7 @@ void __init spear3xx_map_io(void)
 
 static void __init spear3xx_timer_init(void)
 {
-       char pclk_name[] = "pll3_48m_clk";
+       char pclk_name[] = "pll3_clk";
        struct clk *gpt_clk, *pclk;
 
        spear3xx_clk_init();
index 3a789db..d42cefc 100644 (file)
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr6xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 179e457..c34acc2 100644 (file)
@@ -4,7 +4,7 @@
  * Miscellaneous registers definitions for SPEAr6xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 2e2e359..9af67d0 100644 (file)
@@ -423,7 +423,7 @@ void __init spear6xx_map_io(void)
 
 static void __init spear6xx_timer_init(void)
 {
-       char pclk_name[] = "pll3_48m_clk";
+       char pclk_name[] = "pll3_clk";
        struct clk *gpt_clk, *pclk;
 
        spear6xx_clk_init();
index 4d6a2ee..5beb7eb 100644 (file)
@@ -33,7 +33,7 @@
 
 static bool is_enabled;
 
-static void tegra_cpu_reset_handler_enable(void)
+static void __init tegra_cpu_reset_handler_enable(void)
 {
        void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE);
        void __iomem *evp_cpu_reset =
index 9c74ac5..4fd93f5 100644 (file)
@@ -580,43 +580,12 @@ static void ux500_uart0_reset(void)
        udelay(1);
 }
 
-/* This needs to be referenced by callbacks */
-struct pinctrl *u0_p;
-struct pinctrl_state *u0_def;
-struct pinctrl_state *u0_sleep;
-
-static void ux500_uart0_init(void)
-{
-       int ret;
-
-       if (IS_ERR(u0_p) || IS_ERR(u0_def))
-               return;
-
-       ret = pinctrl_select_state(u0_p, u0_def);
-       if (ret)
-               pr_err("could not set UART0 defstate\n");
-}
-
-static void ux500_uart0_exit(void)
-{
-       int ret;
-
-       if (IS_ERR(u0_p) || IS_ERR(u0_sleep))
-               return;
-
-       ret = pinctrl_select_state(u0_p, u0_sleep);
-       if (ret)
-               pr_err("could not set UART0 idlestate\n");
-}
-
 static struct amba_pl011_data uart0_plat = {
 #ifdef CONFIG_STE_DMA40
        .dma_filter = stedma40_filter,
        .dma_rx_param = &uart0_dma_cfg_rx,
        .dma_tx_param = &uart0_dma_cfg_tx,
 #endif
-       .init = ux500_uart0_init,
-       .exit = ux500_uart0_exit,
        .reset = ux500_uart0_reset,
 };
 
@@ -638,28 +607,7 @@ static struct amba_pl011_data uart2_plat = {
 
 static void __init mop500_uart_init(struct device *parent)
 {
-       struct amba_device *uart0_device;
-
-       uart0_device = db8500_add_uart0(parent, &uart0_plat);
-       if (uart0_device) {
-               u0_p = pinctrl_get(&uart0_device->dev);
-               if (IS_ERR(u0_p))
-                       dev_err(&uart0_device->dev,
-                               "could not get UART0 pinctrl\n");
-               else {
-                       u0_def = pinctrl_lookup_state(u0_p,
-                                                     PINCTRL_STATE_DEFAULT);
-                       if (IS_ERR(u0_def)) {
-                               dev_err(&uart0_device->dev,
-                                       "could not get UART0 defstate\n");
-                       }
-                       u0_sleep = pinctrl_lookup_state(u0_p,
-                                                       PINCTRL_STATE_SLEEP);
-                       if (IS_ERR(u0_sleep))
-                               dev_err(&uart0_device->dev,
-                                       "could not get UART0 idlestate\n");
-               }
-       }
+       db8500_add_uart0(parent, &uart0_plat);
        db8500_add_uart1(parent, &uart1_plat);
        db8500_add_uart2(parent, &uart2_plat);
 }
@@ -677,11 +625,6 @@ static struct platform_device *snowball_platform_devs[] __initdata = {
        &ab8500_device,
 };
 
-static struct platform_device *snowball_of_platform_devs[] __initdata = {
-       &snowball_led_dev,
-       &snowball_key_dev,
-};
-
 static void __init mop500_init_machine(void)
 {
        struct device *parent = NULL;
@@ -821,6 +764,11 @@ MACHINE_END
 
 #ifdef CONFIG_MACH_UX500_DT
 
+static struct platform_device *snowball_of_platform_devs[] __initdata = {
+       &snowball_led_dev,
+       &snowball_key_dev,
+};
+
 struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        /* Requires DMA and call-back bindings. */
        OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat),
@@ -838,6 +786,8 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
        OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
        OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
+       /* Requires device name bindings. */
+       OF_DEV_AUXDATA("stericsson,nmk_pinctrl", 0, "pinctrl-db8500", NULL),
        {},
 };
 
index 741e71f..66e7f00 100644 (file)
@@ -63,8 +63,10 @@ static void __init ux500_timer_init(void)
 
        /* TODO: Once MTU has been DT:ed place code above into else. */
        if (of_have_populated_dt()) {
+#ifdef CONFIG_OF
                np = of_find_matching_node(NULL, prcmu_timer_of_match);
                if (!np)
+#endif
                        goto dt_fail;
 
                tmp_base = of_iomap(np, 0);
index cf4687e..cd8ea35 100644 (file)
@@ -169,26 +169,13 @@ static struct map_desc versatile_io_desc[] __initdata = {
                .pfn            = __phys_to_pfn(VERSATILE_PCI_CFG_BASE),
                .length         = VERSATILE_PCI_CFG_BASE_SIZE,
                .type           = MT_DEVICE
-       },
-#if 0
-       {
-               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE0,
-               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
-               .length         = SZ_16M,
-               .type           = MT_DEVICE
        }, {
-               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE1,
-               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE1),
-               .length         = SZ_16M,
-               .type           = MT_DEVICE
-       }, {
-               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE2,
-               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE2),
-               .length         = SZ_16M,
+               .virtual        =  (unsigned long)VERSATILE_PCI_VIRT_MEM_BASE0,
+               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
+               .length         = IO_SPACE_LIMIT,
                .type           = MT_DEVICE
        },
 #endif
-#endif
 };
 
 void __init versatile_map_io(void)
index 4d4973d..408e58d 100644 (file)
@@ -29,8 +29,9 @@
  */
 #define VERSATILE_PCI_VIRT_BASE                (void __iomem *)0xe8000000ul
 #define VERSATILE_PCI_CFG_VIRT_BASE    (void __iomem *)0xe9000000ul
+#define VERSATILE_PCI_VIRT_MEM_BASE0   (void __iomem *)PCIO_BASE
 
-/* macro to get at IO space when running virtually */
+/* macro to get at MMIO space when running virtually */
 #define IO_ADDRESS(x)          (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
 
 #define __io_address(n)                ((void __iomem __force *)IO_ADDRESS(n))
diff --git a/arch/arm/mach-versatile/include/mach/io.h b/arch/arm/mach-versatile/include/mach/io.h
new file mode 100644 (file)
index 0000000..0406513
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *  arch/arm/mach-versatile/include/mach/io.h
+ *
+ *  Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define PCIO_BASE      0xeb000000ul
+
+#define __io(a)                ((a) + PCIO_BASE)
+
+#endif
index 15c6a00..e95bf84 100644 (file)
@@ -169,11 +169,18 @@ static struct pci_ops pci_versatile_ops = {
        .write  = versatile_write_config,
 };
 
+static struct resource io_port = {
+       .name   = "PCI",
+       .start  = 0,
+       .end    = IO_SPACE_LIMIT,
+       .flags  = IORESOURCE_IO,
+};
+
 static struct resource io_mem = {
        .name   = "PCI I/O space",
        .start  = VERSATILE_PCI_MEM_BASE0,
        .end    = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
-       .flags  = IORESOURCE_IO,
+       .flags  = IORESOURCE_MEM,
 };
 
 static struct resource non_mem = {
@@ -200,6 +207,12 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
                       "memory region (%d)\n", ret);
                goto out;
        }
+       ret = request_resource(&ioport_resource, &io_port);
+       if (ret) {
+               printk(KERN_ERR "PCI: unable to allocate I/O "
+                      "port region (%d)\n", ret);
+               goto out;
+       }
        ret = request_resource(&iomem_resource, &non_mem);
        if (ret) {
                printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
@@ -218,7 +231,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
         * the mem resource for this bus
         * the prefetch mem resource for this bus
         */
-       pci_add_resource_offset(&sys->resources, &io_mem, sys->io_offset);
+       pci_add_resource_offset(&sys->resources, &io_port, sys->io_offset);
        pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset);
        pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset);
 
@@ -249,6 +262,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
 
        if (nr == 0) {
                sys->mem_offset = 0;
+               sys->io_offset = 0;
                ret = pci_versatile_setup_resources(sys);
                if (ret < 0) {
                        printk("pci_versatile_setup: resources... oops?\n");
@@ -325,7 +339,6 @@ void __init pci_versatile_preinit(void)
 static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq;
-       int devslot = PCI_SLOT(dev->devfn);
 
        /* slot,  pin,  irq
         *  24     1     27
index d766e42..655878b 100644 (file)
@@ -1067,7 +1067,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
                return NULL;
 
        while (count) {
-               int j, order = __ffs(count);
+               int j, order = __fls(count);
 
                pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
                while (!pages[i] && order)
@@ -1091,7 +1091,7 @@ error:
        while (--i)
                if (pages[i])
                        __free_pages(pages[i], 0);
-       if (array_size < PAGE_SIZE)
+       if (array_size <= PAGE_SIZE)
                kfree(pages);
        else
                vfree(pages);
@@ -1106,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
        for (i = 0; i < count; i++)
                if (pages[i])
                        __free_pages(pages[i], 0);
-       if (array_size < PAGE_SIZE)
+       if (array_size <= PAGE_SIZE)
                kfree(pages);
        else
                vfree(pages);
index c471436..2e8a1ef 100644 (file)
@@ -64,7 +64,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #ifdef CONFIG_ZONE_DMA
 extern phys_addr_t arm_dma_limit;
 #else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;
index e5dad60..cf4528d 100644 (file)
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        }
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+       struct vm_struct *vm;
+
+       vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+       vm->addr = (void *)addr;
+       vm->size = SECTION_SIZE;
+       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->caller = pmd_empty_section_gap;
+       vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+       struct vm_struct *vm;
+       unsigned long addr, next = 0;
+       pmd_t *pmd;
+
+       /* we're still single threaded hence no lock needed here */
+       for (vm = vmlist; vm; vm = vm->next) {
+               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+                       continue;
+               addr = (unsigned long)vm->addr;
+               if (addr < next)
+                       continue;
+
+               /*
+                * Check if this vm starts on an odd section boundary.
+                * If so and the first section entry for this PMD is free
+                * then we block the corresponding virtual address.
+                */
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr);
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr & PMD_MASK);
+               }
+
+               /*
+                * Then check if this vm ends on an odd section boundary.
+                * If so and the second section entry for this PMD is empty
+                * then we block the corresponding virtual address.
+                */
+               addr += vm->size;
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr) + 1;
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr);
+               }
+
+               /* no need to look at any vm entry until we hit the next PMD */
+               next = (addr + PMD_SIZE - 1) & PMD_MASK;
+       }
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
        (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         */
        if (mdesc->map_io)
                mdesc->map_io();
+       fill_pmd_gaps();
 
        /*
         * Finally flush the caches and tlb to ensure that we're in a
index 6213584..c641fb6 100644 (file)
@@ -762,6 +762,11 @@ b_epilogue:
                        update_on_xread(ctx);
                        emit(ARM_MOV_R(r_A, r_X), ctx);
                        break;
+               case BPF_S_ANC_ALU_XOR_X:
+                       /* A ^= X */
+                       update_on_xread(ctx);
+                       emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
+                       break;
                case BPF_S_ANC_PROTOCOL:
                        /* A = ntohs(skb->protocol) */
                        ctx->seen |= SEEN_SKB;
index 99ae5e3..7fa2f7d 100644 (file)
@@ -68,6 +68,8 @@
 #define ARM_INST_CMP_R         0x01500000
 #define ARM_INST_CMP_I         0x03500000
 
+#define ARM_INST_EOR_R         0x00200000
+
 #define ARM_INST_LDRB_I                0x05d00000
 #define ARM_INST_LDRB_R                0x07d00000
 #define ARM_INST_LDRH_I                0x01d000b0
 #define ARM_CMP_R(rn, rm)      _AL3_R(ARM_INST_CMP, 0, rn, rm)
 #define ARM_CMP_I(rn, imm)     _AL3_I(ARM_INST_CMP, 0, rn, imm)
 
+#define ARM_EOR_R(rd, rn, rm)  _AL3_R(ARM_INST_EOR, rd, rn, rm)
+
 #define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
                                 | (off))
 #define ARM_LDRB_I(rt, rn, off)        (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
index 9129c9e..88726f4 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/irq.h>
 #include <linux/clockchips.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 
 #include <mach/hardware.h>
 #include <asm/mach/time.h>
@@ -201,8 +202,16 @@ static int __init epit_clockevent_init(struct clk *timer_clk)
        return 0;
 }
 
-void __init epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init epit_timer_init(void __iomem *base, int irq)
 {
+       struct clk *timer_clk;
+
+       timer_clk = clk_get_sys("imx-epit.0", NULL);
+       if (IS_ERR(timer_clk)) {
+               pr_err("i.MX epit: unable to get clk\n");
+               return;
+       }
+
        clk_prepare_enable(timer_clk);
 
        timer_base = base;
index cf663d8..e429ca1 100644 (file)
@@ -54,8 +54,8 @@ extern void imx50_soc_init(void);
 extern void imx51_soc_init(void);
 extern void imx53_soc_init(void);
 extern void imx51_init_late(void);
-extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
-extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
+extern void epit_timer_init(void __iomem *base, int irq);
+extern void mxc_timer_init(void __iomem *, int);
 extern int mx1_clocks_init(unsigned long fref);
 extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
 extern int mx25_clocks_init(void);
index 7ded6f1..3c080a3 100644 (file)
@@ -23,6 +23,7 @@
 #ifndef __MACH_MX2_CAM_H_
 #define __MACH_MX2_CAM_H_
 
+#define MX2_CAMERA_SWAP16              (1 << 0)
 #define MX2_CAMERA_EXT_VSYNC           (1 << 1)
 #define MX2_CAMERA_CCIR                        (1 << 2)
 #define MX2_CAMERA_CCIR_INTERLACE      (1 << 3)
@@ -30,6 +31,7 @@
 #define MX2_CAMERA_GATED_CLOCK         (1 << 5)
 #define MX2_CAMERA_INV_DATA            (1 << 6)
 #define MX2_CAMERA_PCLK_SAMPLE_RISING  (1 << 7)
+#define MX2_CAMERA_PACK_DIR_MSB                (1 << 8)
 
 /**
  * struct mx2_camera_platform_data - optional platform data for mx2_camera
index 99f958c..00e8e65 100644 (file)
@@ -58,6 +58,7 @@
 /* MX31, MX35, MX25, MX5 */
 #define V2_TCTL_WAITEN         (1 << 3) /* Wait enable mode */
 #define V2_TCTL_CLK_IPG                (1 << 6)
+#define V2_TCTL_CLK_PER                (2 << 6)
 #define V2_TCTL_FRR            (1 << 9)
 #define V2_IR                  0x0c
 #define V2_TSTAT               0x08
@@ -280,23 +281,22 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
        return 0;
 }
 
-void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init mxc_timer_init(void __iomem *base, int irq)
 {
        uint32_t tctl_val;
+       struct clk *timer_clk;
        struct clk *timer_ipg_clk;
 
-       if (!timer_clk) {
-               timer_clk = clk_get_sys("imx-gpt.0", "per");
-               if (IS_ERR(timer_clk)) {
-                       pr_err("i.MX timer: unable to get clk\n");
-                       return;
-               }
-
-               timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
-               if (!IS_ERR(timer_ipg_clk))
-                       clk_prepare_enable(timer_ipg_clk);
+       timer_clk = clk_get_sys("imx-gpt.0", "per");
+       if (IS_ERR(timer_clk)) {
+               pr_err("i.MX timer: unable to get clk\n");
+               return;
        }
 
+       timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
+       if (!IS_ERR(timer_ipg_clk))
+               clk_prepare_enable(timer_ipg_clk);
+
        clk_prepare_enable(timer_clk);
 
        timer_base = base;
@@ -309,7 +309,7 @@ void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
        __raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
 
        if (timer_is_v2())
-               tctl_val = V2_TCTL_CLK_IPG | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
+               tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
        else
                tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
 
index 62ec5c4..706b7e2 100644 (file)
@@ -461,6 +461,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
        struct clk *c;
        struct clk *pa;
 
+       mutex_lock(&clocks_mutex);
        seq_printf(s, "%-30s %-30s %-10s %s\n",
                "clock-name", "parent-name", "rate", "use-count");
 
@@ -469,6 +470,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
                seq_printf(s, "%-30s %-30s %-10lu %d\n",
                        c->name, pa ? pa->name : "none", c->rate, c->usecount);
        }
+       mutex_unlock(&clocks_mutex);
 
        return 0;
 }
index 297245d..de6c0a0 100644 (file)
@@ -252,8 +252,6 @@ IS_AM_SUBCLASS(335x, 0x335)
  * cpu_is_omap2423():  True for OMAP2423
  * cpu_is_omap2430():  True for OMAP2430
  * cpu_is_omap3430():  True for OMAP3430
- * cpu_is_omap3505():  True for OMAP3505
- * cpu_is_omap3517():  True for OMAP3517
  */
 #define GET_OMAP_TYPE  ((omap_rev() >> 16) & 0xffff)
 
@@ -277,8 +275,6 @@ IS_OMAP_TYPE(2422, 0x2422)
 IS_OMAP_TYPE(2423, 0x2423)
 IS_OMAP_TYPE(2430, 0x2430)
 IS_OMAP_TYPE(3430, 0x3430)
-IS_OMAP_TYPE(3505, 0x3517)
-IS_OMAP_TYPE(3517, 0x3517)
 
 #define cpu_is_omap310()               0
 #define cpu_is_omap730()               0
@@ -293,12 +289,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define cpu_is_omap2422()              0
 #define cpu_is_omap2423()              0
 #define cpu_is_omap2430()              0
-#define cpu_is_omap3503()              0
-#define cpu_is_omap3515()              0
-#define cpu_is_omap3525()              0
-#define cpu_is_omap3530()              0
-#define cpu_is_omap3505()              0
-#define cpu_is_omap3517()              0
 #define cpu_is_omap3430()              0
 #define cpu_is_omap3630()              0
 
@@ -350,12 +340,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 
 #if defined(CONFIG_ARCH_OMAP3)
 # undef cpu_is_omap3430
-# undef cpu_is_omap3503
-# undef cpu_is_omap3515
-# undef cpu_is_omap3525
-# undef cpu_is_omap3530
-# undef cpu_is_omap3505
-# undef cpu_is_omap3517
 # undef cpu_is_ti81xx
 # undef cpu_is_ti816x
 # undef cpu_is_ti814x
@@ -363,19 +347,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 # undef cpu_is_am33xx
 # undef cpu_is_am335x
 # define cpu_is_omap3430()             is_omap3430()
-# define cpu_is_omap3503()             (cpu_is_omap3430() &&           \
-                                               (!omap3_has_iva()) &&   \
-                                               (!omap3_has_sgx()))
-# define cpu_is_omap3515()             (cpu_is_omap3430() &&           \
-                                               (!omap3_has_iva()) &&   \
-                                               (omap3_has_sgx()))
-# define cpu_is_omap3525()             (cpu_is_omap3430() &&           \
-                                               (!omap3_has_sgx()) &&   \
-                                               (omap3_has_iva()))
-# define cpu_is_omap3530()             (cpu_is_omap3430())
-# define cpu_is_omap3517()             is_omap3517()
-# define cpu_is_omap3505()             (cpu_is_omap3517() &&           \
-                                               !omap3_has_sgx())
 # undef cpu_is_omap3630
 # define cpu_is_omap3630()             is_omap363x()
 # define cpu_is_ti81xx()               is_ti81xx()
@@ -424,10 +395,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define OMAP3630_REV_ES1_1     (OMAP363X_CLASS | (0x1 << 8))
 #define OMAP3630_REV_ES1_2     (OMAP363X_CLASS | (0x2 << 8))
 
-#define OMAP3517_CLASS         0x35170034
-#define OMAP3517_REV_ES1_0     OMAP3517_CLASS
-#define OMAP3517_REV_ES1_1     (OMAP3517_CLASS | (0x1 << 8))
-
 #define TI816X_CLASS           0x81600034
 #define TI8168_REV_ES1_0       TI816X_CLASS
 #define TI8168_REV_ES1_1       (TI816X_CLASS | (0x1 << 8))
index a7754a8..5493bd9 100644 (file)
@@ -172,8 +172,7 @@ struct omap_mmc_platform_data {
 extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
                                        int is_closed);
 
-#if    defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
-       defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
 void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
                                int nr_controllers);
 void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
@@ -185,7 +184,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
 static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
 {
 }
-
 #endif
 
 extern int omap_msdi_reset(struct omap_hwmod *oh);
index 61fd837..c179378 100644 (file)
@@ -582,7 +582,7 @@ void __init orion_spi_1_init(unsigned long mapbase)
  * Watchdog
  ****************************************************************************/
 static struct resource orion_wdt_resource =
-               DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28);
+               DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x28);
 
 static struct platform_device orion_wdt_device = {
        .name           = "orion_wdt",
index 58b7980..584c9bf 100644 (file)
@@ -193,6 +193,7 @@ static const struct platform_device_id ssp_id_table[] = {
        { "pxa25x-nssp",        PXA25x_NSSP },
        { "pxa27x-ssp",         PXA27x_SSP },
        { "pxa168-ssp",         PXA168_SSP },
+       { "pxa910-ssp",         PXA910_SSP },
        { },
 };
 
index 33ecd0c..b1e05cc 100644 (file)
@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
                return -EINVAL;
        }
 
-       if (client->is_ts && adc->ts_pend)
-               return -EAGAIN;
-
        spin_lock_irqsave(&adc->lock, flags);
 
+       if (client->is_ts && adc->ts_pend) {
+               spin_unlock_irqrestore(&adc->lock, flags);
+               return -EAGAIN;
+       }
+
        client->channel = channel;
        client->nr_samples = nr_samples;
 
index 1d214cb..6303974 100644 (file)
@@ -126,7 +126,8 @@ struct platform_device s3c_device_adc = {
 #ifdef CONFIG_CPU_S3C2440
 static struct resource s3c_camif_resource[] = {
        [0] = DEFINE_RES_MEM(S3C2440_PA_CAMIF, S3C2440_SZ_CAMIF),
-       [1] = DEFINE_RES_IRQ(IRQ_CAM),
+       [1] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_C),
+       [2] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_P),
 };
 
 struct platform_device s3c_device_camif = {
index 7d04875..c0c70a8 100644 (file)
@@ -22,7 +22,7 @@
 #define S3C24XX_VA_WATCHDOG    S3C_VA_WATCHDOG
 
 #define S3C2412_VA_SSMC                S3C_ADDR_CPU(0x00000000)
-#define S3C2412_VA_EBI         S3C_ADDR_CPU(0x00010000)
+#define S3C2412_VA_EBI         S3C_ADDR_CPU(0x00100000)
 
 #define S3C2410_PA_UART                (0x50000000)
 #define S3C24XX_PA_UART                S3C2410_PA_UART
index f19aff1..bc4db9b 100644 (file)
@@ -25,7 +25,7 @@ static inline void arch_wdt_reset(void)
 
        __raw_writel(0, S3C2410_WTCON);   /* disable watchdog, to be safe  */
 
-       if (s3c2410_wdtclk)
+       if (!IS_ERR(s3c2410_wdtclk))
                clk_enable(s3c2410_wdtclk);
 
        /* put initial values into count and data */
index 031a618..48a1599 100644 (file)
@@ -37,6 +37,7 @@ struct clk clk_ext_xtal_mux = {
 struct clk clk_xusbxti = {
        .name           = "xusbxti",
        .id             = -1,
+       .rate           = 24000000,
 };
 
 struct clk s5p_clk_27m = {
index ab3de72..75b05ad 100644 (file)
@@ -4,7 +4,7 @@
  * Debugging macro include header for spear platform
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index e14a3e4..2bc6b54 100644 (file)
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 03ed8b5..88a7fbd 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform shared irq layer header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 914d09d..ef95e5b 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform specific timex definitions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 6dd455b..2ce6cb1 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a56a067..12cf27f 100644 (file)
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index ea0a613..4f99011 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform specific restart functions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 961fb72..853e891 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform shared irq layer source file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a09230a..62ef176 100644 (file)
@@ -70,4 +70,7 @@ extern int is_in_rom(unsigned long);
 #define        VMALLOC_END     0xffffffff
 
 #define arch_enter_lazy_cpu_mode()    do {} while (0)
+
+#include <asm-generic/pgtable.h>
+
 #endif /* _H8300_PGTABLE_H */
index 356068c..8725d1a 100644 (file)
@@ -100,7 +100,6 @@ extern int __put_user_bad(void);
        break;                                                  \
     default:                                                   \
        __gu_err = __get_user_bad();                            \
-       __gu_val = 0;                                           \
        break;                                                  \
     }                                                          \
     (x) = __gu_val;                                            \
@@ -159,4 +158,6 @@ clear_user(void *to, unsigned long n)
        return 0;
 }
 
+#define __clear_user   clear_user
+
 #endif /* _H8300_UACCESS_H */
index 68d6510..d0b1607 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/setup.h>
 #include <asm/irq.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
 #if defined(__H8300H__)
 #define CPU "H8/300H"
@@ -54,7 +55,6 @@ unsigned long memory_end;
 
 char __initdata command_line[COMMAND_LINE_SIZE];
 
-extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
 extern int _ramstart, _ramend;
 extern char _target_name[];
 extern void h8300_gpio_init(void);
@@ -119,9 +119,9 @@ void __init setup_arch(char **cmdline_p)
            memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 
 #endif
 
-       init_mm.start_code = (unsigned long) &_stext;
-       init_mm.end_code = (unsigned long) &_etext;
-       init_mm.end_data = (unsigned long) &_edata;
+       init_mm.start_code = (unsigned long) _stext;
+       init_mm.end_code = (unsigned long) _etext;
+       init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) 0; 
 
 #if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
@@ -134,15 +134,12 @@ void __init setup_arch(char **cmdline_p)
        printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
 
 #ifdef DEBUG
-       printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
-               "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
-               (int) &_sdata, (int) &_edata,
-               (int) &_sbss, (int) &_ebss);
-       printk(KERN_DEBUG "KERNEL -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x "
-               "STACK=0x%06x-0x%06x\n",
-              (int) &_ebss, (int) memory_start,
-               (int) memory_start, (int) memory_end,
-               (int) memory_end, (int) &_ramend);
+       printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p "
+               "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start,
+               __bss_stop);
+       printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx "
+               "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start,
+               memory_end, memory_end, &_ramend);
 #endif
 
 #ifdef CONFIG_DEFAULT_CMDLINE
index fca1037..5adaada 100644 (file)
@@ -447,7 +447,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-statis void do_signal(struct pt_regs *regs)
+static void do_signal(struct pt_regs *regs)
 {
        siginfo_t info;
        int signr;
index 32263a1..e0f7419 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/profile.h>
 
 #include <asm/io.h>
+#include <asm/irq_regs.h>
 #include <asm/timer.h>
 
 #define        TICK_SIZE (tick_nsec / 1000)
index 973369c..981e250 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
 #undef DEBUG
 
@@ -123,7 +124,6 @@ void __init mem_init(void)
        int codek = 0, datak = 0, initk = 0;
        /* DAVIDM look at setup memory map generically with reserved area */
        unsigned long tmp;
-       extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
        extern unsigned long  _ramend, _ramstart;
        unsigned long len = &_ramend - &_ramstart;
        unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
@@ -142,9 +142,9 @@ void __init mem_init(void)
        /* this will put all memory onto the freelists */
        totalram_pages = free_all_bootmem();
 
-       codek = (&_etext - &_stext) >> 10;
-       datak = (&_ebss - &_sdata) >> 10;
-       initk = (&__init_begin - &__init_end) >> 10;
+       codek = (_etext - _stext) >> 10;
+       datak = (__bss_stop - _sdata) >> 10;
+       initk = (__init_begin - __init_end) >> 10;
 
        tmp = nr_free_pages() << PAGE_SHIFT;
        printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
@@ -178,22 +178,21 @@ free_initmem(void)
 {
 #ifdef CONFIG_RAMKERNEL
        unsigned long addr;
-       extern char __init_begin, __init_end;
 /*
  *     the following code should be cool even if these sections
  *     are not page aligned.
  */
-       addr = PAGE_ALIGN((unsigned long)(&__init_begin));
+       addr = PAGE_ALIGN((unsigned long)(__init_begin));
        /* next to check that the page we free is not a partial page */
-       for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
+       for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
-                       (addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
-                       (int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
+                       (addr - PAGE_ALIGN((long) __init_begin)) >> 10,
+                       (int)(PAGE_ALIGN((unsigned long)__init_begin)),
                        (int)(addr - PAGE_SIZE));
 #endif
 }
index f726462..149fbef 100644 (file)
@@ -180,9 +180,7 @@ void __cpuinit start_secondary(void)
 
        notify_cpu_starting(cpu);
 
-       ipi_call_lock();
        set_cpu_online(cpu, true);
-       ipi_call_unlock();
 
        local_irq_enable();
 
index 1113b8a..963d2db 100644 (file)
@@ -382,7 +382,6 @@ smp_callin (void)
        set_numa_node(cpu_to_node_map[cpuid]);
        set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
 
-       ipi_call_lock_irq();
        spin_lock(&vector_lock);
        /* Setup the per cpu irq handling data structures */
        __setup_vector_irq(cpuid);
@@ -390,7 +389,6 @@ smp_callin (void)
        set_cpu_online(cpuid, true);
        per_cpu(cpu_state, cpuid) = CPU_ONLINE;
        spin_unlock(&vector_lock);
-       ipi_call_unlock_irq();
 
        smp_setup_percpu_timer();
 
index 177716b..01729c2 100644 (file)
@@ -43,9 +43,9 @@ endif
 
 OBJCOPYFLAGS += -R .empty_zero_page
 
-suffix_$(CONFIG_KERNEL_GZIP)   = gz
-suffix_$(CONFIG_KERNEL_BZIP2)  = bz2
-suffix_$(CONFIG_KERNEL_LZMA)   = lzma
+suffix-$(CONFIG_KERNEL_GZIP)   = gz
+suffix-$(CONFIG_KERNEL_BZIP2)  = bz2
+suffix-$(CONFIG_KERNEL_LZMA)   = lzma
 
 $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
        $(call if_changed,ld)
index 370d608..28a0952 100644 (file)
@@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
 static unsigned long free_mem_end_ptr;
 
 #ifdef CONFIG_KERNEL_BZIP2
-static void *memset(void *s, int c, size_t n)
+void *memset(void *s, int c, size_t n)
 {
        char *ss = s;
 
@@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
 #endif
 
 #ifdef CONFIG_KERNEL_GZIP
+void *memcpy(void *dest, const void *src, size_t n)
+{
+       char *d = dest;
+       const char *s = src;
+       while (n--)
+               *d++ = *s++;
+
+       return dest;
+}
+
 #define BOOT_HEAP_SIZE             0x10000
 #include "../../../../lib/decompress_inflate.c"
 #endif
index 5275275..4313aa6 100644 (file)
@@ -113,9 +113,6 @@ struct pt_regs {
 
 #define PTRACE_OLDSETOPTIONS   21
 
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD  0x00000001
-
 #ifdef __KERNEL__
 
 #include <asm/m32r.h>          /* M32R_PSW_BSM, M32R_PSW_BPM */
index cf7829a..c689b82 100644 (file)
@@ -79,11 +79,6 @@ static __inline__ int cpu_number_map(int cpu)
        return cpu;
 }
 
-static __inline__ unsigned int num_booting_cpus(void)
-{
-       return cpumask_weight(&cpu_callout_map);
-}
-
 extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
 
index 4c03361..51f5e9a 100644 (file)
@@ -591,17 +591,16 @@ void user_enable_single_step(struct task_struct *child)
 
        if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
            != sizeof(insn))
-               return -EIO;
+               return;
 
        compute_next_pc(insn, pc, &next_pc, child);
        if (next_pc & 0x80000000)
-               return -EIO;
+               return;
 
        if (embed_debug_trap(child, next_pc))
-               return -EIO;
+               return;
 
        invalidate_cache();
-       return 0;
 }
 
 void user_disable_single_step(struct task_struct *child)
index f3fb2c0..d0f60b9 100644 (file)
@@ -286,7 +286,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                        case -ERESTARTNOINTR:
                                regs->r0 = regs->orig_r0;
                                if (prev_insn(regs) < 0)
-                                       return -EFAULT;
+                                       return;
                }
        }
 
index 9f1260c..44da406 100644 (file)
@@ -42,4 +42,11 @@ unsigned long clk_get_rate(struct clk *clk)
        return MCF_CLK;
 }
 EXPORT_SYMBOL(clk_get_rate);
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+       return NULL;
+}
+EXPORT_SYMBOL(devm_clk_get);
+
 /***************************************************************************/
index 09ab87e..b3e10fd 100644 (file)
@@ -288,6 +288,7 @@ config MIPS_MALTA
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
        select SYS_HAS_CPU_MIPS64_R1
+       select SYS_HAS_CPU_MIPS64_R2
        select SYS_HAS_CPU_NEVADA
        select SYS_HAS_CPU_RM7000
        select SYS_HAS_EARLY_PRINTK
@@ -1423,6 +1424,7 @@ config CPU_SB1
 config CPU_CAVIUM_OCTEON
        bool "Cavium Octeon processor"
        depends on SYS_HAS_CPU_CAVIUM_OCTEON
+       select ARCH_SPARSEMEM_ENABLE
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_SMP
index 6210b8d..b311be4 100644 (file)
@@ -21,6 +21,7 @@ config BCM47XX_BCMA
        select BCMA
        select BCMA_HOST_SOC
        select BCMA_DRIVER_MIPS
+       select BCMA_HOST_PCI if PCI
        select BCMA_DRIVER_PCI_HOSTMODE if PCI
        default y
        help
index de4d917..a551bab 100644 (file)
@@ -79,11 +79,11 @@ static int __init config_pcmcia_cs(unsigned int cs,
        return ret;
 }
 
-static const __initdata struct {
+static const struct {
        unsigned int    cs;
        unsigned int    base;
        unsigned int    size;
-} pcmcia_cs[3] = {
+} pcmcia_cs[3] __initconst = {
        {
                .cs     = MPI_CS_PCMCIA_COMMON,
                .base   = BCM_PCMCIA_COMMON_BASE_PA,
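
Several hunks in this merge make the same substitution: data that is both const and init-only has to be tagged __initconst rather than __initdata, otherwise the compiler is asked to put a read-only object into the writable .init.data section and reports a section type conflict. A minimal sketch of the rule as it is usually applied (the declarations below are illustrative, not from this tree):

    #include <linux/init.h>

    /* const initialisation-time data: read-only, discarded after boot */
    static const int example_freqs[] __initconst = { 33, 20, 25, 30 };

    /* writable initialisation-time data keeps __initdata */
    static int example_scratch[4] __initdata;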
index f9e275a..2f4f6d5 100644 (file)
@@ -82,10 +82,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
        help
          Lock the kernel's implementation of memcpy() into L2.
 
-config ARCH_SPARSEMEM_ENABLE
-       def_bool y
-       select SPARSEMEM_STATIC
-
 config IOMMU_HELPER
        bool
 
index 4b93048..ee1fb9f 100644 (file)
@@ -185,7 +185,6 @@ static void __cpuinit octeon_init_secondary(void)
        octeon_init_cvmcount();
 
        octeon_irq_setup_secondary();
-       raw_local_irq_enable();
 }
 
 /**
@@ -233,6 +232,7 @@ static void octeon_smp_finish(void)
 
        /* to generate the first CPU timer interrupt */
        write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+       local_irq_enable();
 }
 
 /**
index 2e1ad4c..82ad35c 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
-#include <asm/bug.h>
 #include <asm/byteorder.h>             /* sigh ... */
 #include <asm/cpu-features.h>
 #include <asm/sgidefs.h>
index 285a41f..eee10dc 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef __ASM_CMPXCHG_H
 #define __ASM_CMPXCHG_H
 
+#include <linux/bug.h>
 #include <linux/irqflags.h>
 #include <asm/war.h>
 
index f9fa2a4..95e40c1 100644 (file)
@@ -94,6 +94,7 @@
 #define PRID_IMP_24KE          0x9600
 #define PRID_IMP_74K           0x9700
 #define PRID_IMP_1004K         0x9900
+#define PRID_IMP_M14KC         0x9c00
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -260,12 +261,12 @@ enum cpu_type_enum {
         */
        CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
        CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
-       CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC,
+       CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_M14KC,
 
        /*
         * MIPS64 class processors
         */
-       CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
+       CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
        CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
        CPU_XLR, CPU_XLP,
 
@@ -288,7 +289,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_ISA_M64R2     0x00000100
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
-       MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 )
+       MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
        MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
 
index 86548da..991b659 100644 (file)
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE    0x0100
 #define GIC_VPE_EIC_SS(intr) \
-       (GIC_EIC_SHADOW_SET_BASE + (4 * intr))
+       (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
 
 #define GIC_VPE_EIC_VEC_BASE           0x0800
 #define GIC_VPE_EIC_VEC(intr) \
@@ -330,6 +330,17 @@ struct gic_intr_map {
 #define GIC_FLAG_TRANSPARENT   0x02
 };
 
+/*
+ * This is only used in EIC mode. This helps to figure out which
+ * shared interrupts we need to process when we get a vector interrupt.
+ */
+#define GIC_MAX_SHARED_INTR  0x5
+struct gic_shared_intr_map {
+       unsigned int num_shared_intr;
+       unsigned int intr_list[GIC_MAX_SHARED_INTR];
+       unsigned int local_intr_mask;
+};
+
 extern void gic_init(unsigned long gic_base_addr,
        unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
        unsigned int intrmap_size, unsigned int irqbase);
@@ -338,5 +349,7 @@ extern unsigned int gic_get_int(void);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
+extern void gic_bind_eic_interrupt(int irq, int set);
+extern unsigned int gic_get_timer_pending(void);
 
 #endif /* _ASM_GICREGS_H */
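
The new gic_shared_intr_map records, per EIC vector, which shared GIC interrupts may be hiding behind that vector. As a rough standalone model of how a handler could walk such a table (the dispatch function and sample data are invented for illustration; the real decode lives in the GIC interrupt code):

    #include <stdio.h>

    #define GIC_MAX_SHARED_INTR 0x5

    struct gic_shared_intr_map {
            unsigned int num_shared_intr;
            unsigned int intr_list[GIC_MAX_SHARED_INTR];
            unsigned int local_intr_mask;
    };

    /* hypothetical per-vector dispatch loop */
    static void handle_eic_vector(const struct gic_shared_intr_map *map)
    {
            unsigned int i;

            for (i = 0; i < map->num_shared_intr; i++)
                    printf("would dispatch shared GIC interrupt %u\n",
                           map->intr_list[i]);
    }

    int main(void)
    {
            struct gic_shared_intr_map vec3 = {
                    .num_shared_intr = 2,
                    .intr_list = { 4, 9 },
                    .local_intr_mask = 0,
            };

            handle_eic_vector(&vec3);
            return 0;
    }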
index 7ebfc39..ab84064 100644 (file)
@@ -251,7 +251,7 @@ struct f_format {   /* FPU register format */
        unsigned int func : 6;
 };
 
-struct ma_format {     /* FPU multipy and add format (MIPS IV) */
+struct ma_format {     /* FPU multiply and add format (MIPS IV) */
        unsigned int opcode : 6;
        unsigned int fr : 5;
        unsigned int ft : 5;
@@ -324,7 +324,7 @@ struct f_format {   /* FPU register format */
        unsigned int opcode : 6;
 };
 
-struct ma_format {     /* FPU multipy and add format (MIPS IV) */
+struct ma_format {     /* FPU multiply and add format (MIPS IV) */
        unsigned int fmt : 2;
        unsigned int func : 4;
        unsigned int fd : 5;
index a58f229..29d9c23 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
+#include <asm/bug.h>
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
index fb698dc..78dbb8a 100644 (file)
@@ -136,6 +136,7 @@ extern void free_irqno(unsigned int irq);
  * IE7.  Since R2 their number has to be read from the c0_intctl register.
  */
 #define CP0_LEGACY_COMPARE_IRQ 7
+#define CP0_LEGACY_PERFCNT_IRQ 7
 
 extern int cp0_compare_irq;
 extern int cp0_compare_irq_shift;
index 94d4faa..fdcd78c 100644 (file)
@@ -99,7 +99,7 @@
 #define CKCTL_6368_USBH_CLK_EN         (1 << 15)
 #define CKCTL_6368_DISABLE_GLESS_EN    (1 << 16)
 #define CKCTL_6368_NAND_CLK_EN         (1 << 17)
-#define CKCTL_6368_IPSEC_CLK_EN                (1 << 17)
+#define CKCTL_6368_IPSEC_CLK_EN                (1 << 18)
 
 #define CKCTL_6368_ALL_SAFE_EN         (CKCTL_6368_SWPKT_USB_EN |      \
                                        CKCTL_6368_SWPKT_SAR_EN |       \
index d11aa02..5447d9f 100644 (file)
 #define GIC_CPU_INT4           4 /* .                  */
 #define GIC_CPU_INT5           5 /* Core Interrupt 5   */
 
+/* MALTA GIC local interrupts */
+#define GIC_INT_TMR             (GIC_CPU_INT5)
+#define GIC_INT_PERFCTR         (GIC_CPU_INT5)
+
+/* GIC constants */
+/* Add 2 to convert non-eic hw int # to eic vector # */
+#define GIC_CPU_TO_VEC_OFFSET   (2)
+/* If we map an intr to pin X, GIC will actually generate vector X+1 */
+#define GIC_PIN_TO_VEC_OFFSET   (1)
+
 #define GIC_EXT_INTR(x)                x
 
 /* External Interrupts used for IPI */
index c9420aa..e71ff4c 100644 (file)
@@ -48,7 +48,7 @@
 #define CP0_VPECONF0           $1, 2
 #define CP0_VPECONF1           $1, 3
 #define CP0_YQMASK             $1, 4
-#define CP0_VPESCHEDULE        $1, 5
+#define CP0_VPESCHEDULE                $1, 5
 #define CP0_VPESCHEFBK         $1, 6
 #define CP0_TCSTATUS           $2, 1
 #define CP0_TCBIND             $2, 2
index 5d33621..4f8ddba 100644 (file)
@@ -22,7 +22,7 @@ struct task_struct;
  * switch_to(n) should switch tasks to task nr n, first
  * checking that n isn't the current task, in which case it does nothing.
  */
-extern asmlinkage void *resume(void *last, void *next, void *next_ti);
+extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);
 
 extern unsigned int ll_bit;
 extern struct task_struct *ll_task;
@@ -66,11 +66,13 @@ do {                                                                        \
 
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
+       u32 __usedfpu;                                                  \
        __mips_mt_fpaff_switch_to(prev);                                \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        __clear_software_ll_bit();                                      \
-       (last) = resume(prev, next, task_thread_info(next));            \
+       __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);  \
+       (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
 } while (0)
 
 #define finish_arch_switch(prev)                                       \
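
The switch_to()/resume() change above moves the TIF_USEDFPU test-and-clear out of the assembly in resume() and into C, handing the result down as a fourth argument so the assembler only has to branch on a register (beqz a3 in the later hunks). A standalone model of that pattern; the flag word, helper and resume stub here are stand-ins, not the MIPS implementation:

    #include <stdatomic.h>
    #include <stdio.h>

    #define TIF_USEDFPU (1u << 4)           /* illustrative bit position */

    static _Atomic unsigned int prev_flags = TIF_USEDFPU;

    /* atomically clear the flag and report whether it had been set */
    static int test_and_clear_usedfpu(void)
    {
            unsigned int old = atomic_fetch_and(&prev_flags, ~TIF_USEDFPU);
            return (old & TIF_USEDFPU) != 0;
    }

    /* stand-in for the assembly resume(): just branches on the hint */
    static void resume_stub(int usedfpu)
    {
            if (usedfpu)
                    printf("save FPU context of previous task\n");
            else
                    printf("skip FPU save\n");
    }

    int main(void)
    {
            resume_stub(test_and_clear_usedfpu()); /* first switch: flag set */
            resume_stub(test_and_clear_usedfpu()); /* second switch: clear */
            return 0;
    }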
index e2eca7d..ca97e0e 100644 (file)
@@ -60,6 +60,8 @@ struct thread_info {
 register struct thread_info *__current_thread_info __asm__("$28");
 #define current_thread_info()  __current_thread_info
 
+#endif /* !__ASSEMBLY__ */
+
 /* thread information allocation */
 #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
 #define THREAD_SIZE_ORDER (1)
@@ -85,8 +87,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 
 #define STACK_WARN     (THREAD_SIZE / 8)
 
-#endif /* !__ASSEMBLY__ */
-
 #define PREEMPT_ACTIVE         0x10000000
 
 /*
index 6ae7ce4..f4630e1 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) xxxx  the Anonymous
  * Copyright (C) 1994 - 2006 Ralf Baechle
  * Copyright (C) 2003, 2004  Maciej W. Rozycki
- * Copyright (C) 2001, 2004  MIPS Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012  MIPS Technologies, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -199,6 +199,7 @@ void __init check_wait(void)
                cpu_wait = rm7k_wait_irqoff;
                break;
 
+       case CPU_M14KC:
        case CPU_24K:
        case CPU_34K:
        case CPU_1004K:
@@ -810,6 +811,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                c->cputype = CPU_5KC;
                __cpu_name[cpu] = "MIPS 5Kc";
                break;
+       case PRID_IMP_5KE:
+               c->cputype = CPU_5KE;
+               __cpu_name[cpu] = "MIPS 5KE";
+               break;
        case PRID_IMP_20KC:
                c->cputype = CPU_20KC;
                __cpu_name[cpu] = "MIPS 20Kc";
@@ -831,6 +836,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                c->cputype = CPU_74K;
                __cpu_name[cpu] = "MIPS 74Kc";
                break;
+       case PRID_IMP_M14KC:
+               c->cputype = CPU_M14KC;
+               __cpu_name[cpu] = "MIPS M14Kc";
+               break;
        case PRID_IMP_1004K:
                c->cputype = CPU_1004K;
                __cpu_name[cpu] = "MIPS 1004Kc";
index 57ba13e..3fc1691 100644 (file)
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
+ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
  * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
  */
 #include <linux/interrupt.h>
@@ -34,6 +34,12 @@ EXPORT_SYMBOL(memmove);
 
 EXPORT_SYMBOL(kernel_thread);
 
+/*
+ * Functions that operate on entire pages.  Mostly used by memory management.
+ */
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
 /*
  * Userspace access stuff.
  */
index ce89c80..0441f54 100644 (file)
@@ -31,7 +31,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, int usedfpu)
  */
        .align  7
        LEAF(resume)
index f29099b..eb5e394 100644 (file)
@@ -162,11 +162,6 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters)
        return counters >> vpe_shift();
 }
 
-static unsigned int counters_per_cpu_to_total(unsigned int counters)
-{
-       return counters << vpe_shift();
-}
-
 #else /* !CONFIG_MIPS_MT_SMP */
 #define vpe_id()       0
 
index 2938983..9c51be5 100644 (file)
@@ -43,7 +43,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, int usedfpu)
  */
 LEAF(resume)
        mfc0    t1, CP0_STATUS
@@ -51,18 +51,9 @@ LEAF(resume)
        cpu_save_nonscratch a0
        sw      ra, THREAD_REG31(a0)
 
-       /*
-        * check if we need to save FPU registers
-        */
-       lw      t3, TASK_THREAD_INFO(a0)
-       lw      t0, TI_FLAGS(t3)
-       li      t1, _TIF_USEDFPU
-       and     t2, t0, t1
-       beqz    t2, 1f
-       nor     t1, zero, t1
+       beqz    a3, 1f
 
-       and     t0, t0, t1
-       sw      t0, TI_FLAGS(t3)
+       PTR_L   t3, TASK_THREAD_INFO(a0)
 
        /*
         * clear saved user stack CU1 bit
index 9414f93..42d2a39 100644 (file)
@@ -41,7 +41,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, int usedfpu)
  */
        .align  5
        LEAF(resume)
        /*
         * check if we need to save FPU registers
         */
-       PTR_L   t3, TASK_THREAD_INFO(a0)
-       LONG_L  t0, TI_FLAGS(t3)
-       li      t1, _TIF_USEDFPU
-       and     t2, t0, t1
-       beqz    t2, 1f
-       nor     t1, zero, t1
 
-       and     t0, t0, t1
-       LONG_S  t0, TI_FLAGS(t3)
+       beqz    a3, 1f
 
+       PTR_L   t3, TASK_THREAD_INFO(a0)
        /*
         * clear saved user stack CU1 bit
         */
index 3046e29..8e393b8 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
-#include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/reboot.h>
@@ -197,13 +196,6 @@ static void bmips_init_secondary(void)
 
        write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
 #endif
-
-       /* make sure there won't be a timer interrupt for a little while */
-       write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
-
-       irq_enable_hazard();
-       set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
-       irq_enable_hazard();
 }
 
 /*
@@ -212,6 +204,13 @@ static void bmips_init_secondary(void)
 static void bmips_smp_finish(void)
 {
        pr_info("SMP: CPU%d is running\n", smp_processor_id());
+
+       /* make sure there won't be a timer interrupt for a little while */
+       write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+
+       irq_enable_hazard();
+       set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
+       irq_enable_hazard();
 }
 
 /*
index 48650c8..1268392 100644 (file)
@@ -122,13 +122,21 @@ asmlinkage __cpuinit void start_secondary(void)
 
        notify_cpu_starting(cpu);
 
-       mp_ops->smp_finish();
+       set_cpu_online(cpu, true);
+
        set_cpu_sibling_map(cpu);
 
        cpu_set(cpu, cpu_callin_map);
 
        synchronise_count_slave();
 
+       /*
+        * irq will be enabled in ->smp_finish(), enabling it too early
+        * is dangerous.
+        */
+       WARN_ON_ONCE(!irqs_disabled());
+       mp_ops->smp_finish();
+
        cpu_idle();
 }
 
@@ -196,8 +204,6 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);
 
-       set_cpu_online(cpu, true);
-
        return 0;
 }
 
index f5dd38f..15b5f3c 100644 (file)
@@ -322,7 +322,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
 
 /*
  * Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
+ * Make sure all CPUs are in a sensible state before we boot any of the
  * secondaries.
  *
  * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
@@ -340,12 +340,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
        /*
         * TCContext gets an offset from the base of the IPIQ array
         * to be used in low-level code to detect the presence of
-        * an active IPI queue
+        * an active IPI queue.
         */
        write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
        /* Bind tc to vpe */
        write_tc_c0_tcbind(vpe);
-       /* In general, all TCs should have the same cpu_data indications */
+       /* In general, all TCs should have the same cpu_data indications. */
        memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
        /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
        if (cpu_data[0].cputype == CPU_34K ||
@@ -358,8 +358,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 }
 
 /*
- * Tweak to get Count registes in as close a sync as possible.
- * Value seems good for 34K-class cores.
+ * Tweak to get Count registes in as close a sync as possible.  The
+ * value seems good for 34K-class cores.
  */
 
 #define CP0_SKEW 8
@@ -615,7 +615,6 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 
 void smtc_init_secondary(void)
 {
-       local_irq_enable();
 }
 
 void smtc_smp_finish(void)
@@ -631,6 +630,8 @@ void smtc_smp_finish(void)
        if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
                write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
 
+       local_irq_enable();
+
        printk("TC %d going on-line as CPU %d\n",
                cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 }
index 99f913c..842d55e 100644 (file)
@@ -111,7 +111,6 @@ void __cpuinit synchronise_count_master(void)
 void __cpuinit synchronise_count_slave(void)
 {
        int i;
-       unsigned long flags;
        unsigned int initcount;
        int ncpus;
 
@@ -123,8 +122,6 @@ void __cpuinit synchronise_count_slave(void)
        return;
 #endif
 
-       local_irq_save(flags);
-
        /*
         * Not every cpu is online at the time this gets called,
         * so we first wait for the master to say everyone is ready
@@ -154,7 +151,5 @@ void __cpuinit synchronise_count_slave(void)
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);
-
-       local_irq_restore(flags);
 }
 #undef NR_LOOPS
index 2d0c2a2..c3c2935 100644 (file)
@@ -132,6 +132,9 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;
 
+       if (!task)
+               task = current;
+
        if (raw_show_trace || !__kernel_text_address(pc)) {
                show_raw_backtrace(sp);
                return;
@@ -1249,6 +1252,7 @@ static inline void parity_protection_init(void)
                break;
 
        case CPU_5KC:
+       case CPU_5KE:
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
                /* Set the PE bit (bit 31) in the c0_errctl register. */
@@ -1498,6 +1502,7 @@ extern void flush_tlb_handlers(void);
  * Timer interrupt
  */
 int cp0_compare_irq;
+EXPORT_SYMBOL_GPL(cp0_compare_irq);
 int cp0_compare_irq_shift;
 
 /*
@@ -1597,7 +1602,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
                        cp0_perfcount_irq = -1;
        } else {
                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
-               cp0_compare_irq_shift = cp0_compare_irq;
+               cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
                cp0_perfcount_irq = -1;
        }
 
index 924da5e..df243a6 100644 (file)
@@ -1,5 +1,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #undef mips
@@ -72,7 +73,7 @@ SECTIONS
        .data : {       /* Data */
                . = . + DATAOFFSET;             /* for CONFIG_MAPPED_KERNEL */
 
-               INIT_TASK_DATA(PAGE_SIZE)
+               INIT_TASK_DATA(THREAD_SIZE)
                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
                READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
index 4aa2028..fd6203f 100644 (file)
@@ -3,8 +3,8 @@
 #
 
 obj-y                          += cache.o dma-default.o extable.o fault.o \
-                                  gup.o init.o mmap.o page.o tlbex.o \
-                                  tlbex-fault.o uasm.o
+                                  gup.o init.o mmap.o page.o page-funcs.o \
+                                  tlbex.o tlbex-fault.o uasm.o
 
 obj-$(CONFIG_32BIT)            += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
index 5109be9..f092c26 100644 (file)
@@ -977,7 +977,7 @@ static void __cpuinit probe_pcache(void)
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = lsize;
-               c->icache.sets = 64 << ((config1 >> 22) & 7);
+               c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
 
                icache_size = c->icache.sets *
@@ -997,7 +997,7 @@ static void __cpuinit probe_pcache(void)
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz= lsize;
-               c->dcache.sets = 64 << ((config1 >> 13) & 7);
+               c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);
 
                dcache_size = c->dcache.sets *
@@ -1051,6 +1051,7 @@ static void __cpuinit probe_pcache(void)
        case CPU_R14000:
                break;
 
+       case CPU_M14KC:
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
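
The probe_pcache() change above replaces 64 << IS with 32 << ((IS + 1) & 7) when decoding the sets-per-way fields of Config1. In the documented MIPS32/64 encoding a field value of 7 means 32 sets, which the old formula turned into 8192; the new formula wraps correctly and agrees with the old one for all other values. A small standalone check of both expressions over the legal field values, assuming that encoding:

    #include <stdio.h>

    int main(void)
    {
            unsigned int is;

            for (is = 0; is <= 7; is++) {
                    unsigned int old = 64 << is;
                    unsigned int new = 32 << ((is + 1) & 7);

                    printf("IS=%u  old=%5u sets  new=%5u sets\n", is, old, new);
            }
            /* Only IS=7 differs: old prints 8192, new prints the correct 32. */
            return 0;
    }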
diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S
new file mode 100644 (file)
index 0000000..48a6b38
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Micro-assembler generated clear_page/copy_page functions.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.
+ * Copyright (C) 2012  Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#define cpu_clear_page_function_name   clear_page_cpu
+#define cpu_copy_page_function_name    copy_page_cpu
+#else
+#define cpu_clear_page_function_name   clear_page
+#define cpu_copy_page_function_name    copy_page
+#endif
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache:            0x058 bytes
+ * R4600 v1.7:                         0x05c bytes
+ * R4600 v2.0:                         0x060 bytes
+ * With prefetching, 16 word strides   0x120 bytes
+ */
+EXPORT(__clear_page_start)
+LEAF(cpu_clear_page_function_name)
+1:     j       1b              /* Dummy, will be replaced. */
+       .space 288
+END(cpu_clear_page_function_name)
+EXPORT(__clear_page_end)
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache:            0x11c bytes
+ * R4600 v1.7:                         0x080 bytes
+ * R4600 v2.0:                         0x07c bytes
+ * With prefetching, 16 word strides   0x540 bytes
+ */
+EXPORT(__copy_page_start)
+LEAF(cpu_copy_page_function_name)
+1:     j       1b              /* Dummy, will be replaced. */
+       .space 1344
+END(cpu_copy_page_function_name)
+EXPORT(__copy_page_end)
index cc0b626..98f530e 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2007  Maciej W. Rozycki
  * Copyright (C) 2008  Thiemo Seufer
+ * Copyright (C) 2012  MIPS Technologies, Inc.
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -71,45 +72,6 @@ static struct uasm_reloc __cpuinitdata relocs[5];
 #define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache:            0x058 bytes
- * R4600 v1.7:                         0x05c bytes
- * R4600 v2.0:                         0x060 bytes
- * With prefetching, 16 word strides   0x120 bytes
- */
-
-static u32 clear_page_array[0x120 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
-#else
-void clear_page(void *page) __attribute__((alias("clear_page_array")));
-#endif
-
-EXPORT_SYMBOL(clear_page);
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache:            0x11c bytes
- * R4600 v1.7:                         0x080 bytes
- * R4600 v2.0:                         0x07c bytes
- * With prefetching, 16 word strides   0x540 bytes
- */
-static u32 copy_page_array[0x540 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void
-copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
-#else
-void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
-#endif
-
-EXPORT_SYMBOL(copy_page);
-
-
 static int pref_bias_clear_store __cpuinitdata;
 static int pref_bias_copy_load __cpuinitdata;
 static int pref_bias_copy_store __cpuinitdata;
@@ -282,10 +244,15 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
                }
 }
 
+extern u32 __clear_page_start;
+extern u32 __clear_page_end;
+extern u32 __copy_page_start;
+extern u32 __copy_page_end;
+
 void __cpuinit build_clear_page(void)
 {
        int off;
-       u32 *buf = (u32 *)&clear_page_array;
+       u32 *buf = &__clear_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
@@ -356,17 +323,17 @@ void __cpuinit build_clear_page(void)
        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);
 
-       BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
+       BUG_ON(buf > &__clear_page_end);
 
        uasm_resolve_relocs(relocs, labels);
 
        pr_debug("Synthesized clear page handler (%u instructions).\n",
-                (u32)(buf - clear_page_array));
+                (u32)(buf - &__clear_page_start));
 
        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
-       for (i = 0; i < (buf - clear_page_array); i++)
-               pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
+       for (i = 0; i < (buf - &__clear_page_start); i++)
+               pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
        pr_debug("\t.set pop\n");
 }
 
@@ -427,7 +394,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
 void __cpuinit build_copy_page(void)
 {
        int off;
-       u32 *buf = (u32 *)&copy_page_array;
+       u32 *buf = &__copy_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
@@ -595,21 +562,23 @@ void __cpuinit build_copy_page(void)
        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);
 
-       BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
+       BUG_ON(buf > &__copy_page_end);
 
        uasm_resolve_relocs(relocs, labels);
 
        pr_debug("Synthesized copy page handler (%u instructions).\n",
-                (u32)(buf - copy_page_array));
+                (u32)(buf - &__copy_page_start));
 
        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
-       for (i = 0; i < (buf - copy_page_array); i++)
-               pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
+       for (i = 0; i < (buf - &__copy_page_start); i++)
+               pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
        pr_debug("\t.set pop\n");
 }
 
 #ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+extern void clear_page_cpu(void *page);
+extern void copy_page_cpu(void *to, void *from);
 
 /*
  * Pad descriptors to cacheline, since each is exclusively owned by a
index 0bc485b..03eb0ef 100644 (file)
@@ -9,6 +9,7 @@
  * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2008, 2009 Cavium Networks, Inc.
+ * Copyright (C) 2011  MIPS Technologies, Inc.
  *
  * ... and the days got worse and worse and now you see
  * I've gone completly out of my mind.
@@ -494,6 +495,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case CPU_R14000:
        case CPU_4KC:
        case CPU_4KEC:
+       case CPU_M14KC:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_4KSC:
index bf80921..284dea5 100644 (file)
@@ -241,8 +241,9 @@ void __init mips_pcibios_init(void)
                return;
        }
 
-       if (controller->io_resource->start < 0x00001000UL)      /* FIXME */
-               controller->io_resource->start = 0x00001000UL;
+       /* Change start address to avoid conflicts with ACPI and SMB devices */
+       if (controller->io_resource->start < 0x00002000UL)
+               controller->io_resource->start = 0x00002000UL;
 
        iomem_resource.end &= 0xfffffffffULL;                   /* 64 GB */
        ioport_resource.end = controller->io_resource->end;
@@ -253,7 +254,7 @@ void __init mips_pcibios_init(void)
 }
 
 /* Enable PCI 2.1 compatibility in PIIX4 */
-static void __init quirk_dlcsetup(struct pci_dev *dev)
+static void __devinit quirk_dlcsetup(struct pci_dev *dev)
 {
        u8 odlc, ndlc;
        (void) pci_read_config_byte(dev, 0x82, &odlc);
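
This hunk and the later Loongson, Malta, SiByte and TXx9 hunks all retag PCI quirk handlers from __init to __devinit: a fixup registered with DECLARE_PCI_FIXUP_*() can be invoked again when a matching device appears after boot, by which time .init.text has already been freed. A minimal sketch of the convention (the quirk itself is hypothetical; the annotation and macro are the real ones):

    #include <linux/init.h>
    #include <linux/pci.h>

    /* May run for a device discovered after init memory is released,
     * so it must be __devinit rather than __init. */
    static void __devinit example_bridge_fixup(struct pci_dev *dev)
    {
            pci_write_config_byte(dev, 0x40, 0xff); /* illustrative register poke */
    }
    DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, example_bridge_fixup);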
index b7f37d4..2e28f65 100644 (file)
@@ -111,7 +111,7 @@ static void __init pci_clock_check(void)
        unsigned int __iomem *jmpr_p =
                (unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
        int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07;
-       static const int pciclocks[] __initdata = {
+       static const int pciclocks[] __initconst = {
                33, 20, 25, 30, 12, 16, 37, 10
        };
        int pciclock = pciclocks[jmpr];
index acb677a..b3df7c2 100644 (file)
@@ -82,8 +82,10 @@ void __init prom_free_prom_memory(void)
 
 void xlp_mmu_init(void)
 {
+       /* enable extended TLB and Large Fixed TLB */
        write_c0_config6(read_c0_config6() | 0x24);
-       current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
+       /* set page mask of Fixed TLB in config7 */
        write_c0_config7(PM_DEFAULT_MASK >>
                (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
 }
@@ -100,6 +102,10 @@ void __init prom_init(void)
        nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
 #ifdef CONFIG_SMP
        nlm_wakeup_secondary_cpus(0xffffffff);
+
+       /* update TLB size after waking up threads */
+       current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
        register_smp_ops(&nlm_smp_ops);
 #endif
 }
index d1f2d4c..b6e3782 100644 (file)
@@ -78,6 +78,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
        switch (current_cpu_type()) {
        case CPU_5KC:
+       case CPU_M14KC:
        case CPU_20KC:
        case CPU_24K:
        case CPU_25KF:
index baba3bc..4d80a85 100644 (file)
@@ -322,6 +322,10 @@ static int __init mipsxx_init(void)
 
        op_model_mipsxx_ops.num_counters = counters;
        switch (current_cpu_type()) {
+       case CPU_M14KC:
+               op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
+               break;
+
        case CPU_20KC:
                op_model_mipsxx_ops.cpu_type = "mips/20K";
                break;
index d5d4c01..0857ab8 100644 (file)
@@ -48,7 +48,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
-static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_nec_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
 
@@ -60,7 +60,7 @@ static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
        pci_write_config_dword(pdev, 0xe4, 1 << 5);
 }
 
-static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char c;
 
@@ -135,7 +135,7 @@ static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
        printk(KERN_INFO"via686b fix: ISA bridge done\n");
 }
 
-static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func1_fixup(struct pci_dev *pdev)
 {
        printk(KERN_INFO"via686b fix: IDE\n");
 
@@ -168,19 +168,19 @@ static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
        printk(KERN_INFO"via686b fix: IDE done\n");
 }
 
-static void __init loongson2e_686b_func2_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func2_fixup(struct pci_dev *pdev)
 {
        /* irq routing */
        pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10);
 }
 
-static void __init loongson2e_686b_func3_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func3_fixup(struct pci_dev *pdev)
 {
        /* irq routing */
        pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11);
 }
 
-static void __init loongson2e_686b_func5_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func5_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
        unsigned char c;
index 4b9768d..a7b917d 100644 (file)
@@ -96,21 +96,21 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 }
 
 /* CS5536 SPEC. fixup */
-static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_isa_fixup(struct pci_dev *pdev)
 {
        /* the uart1 and uart2 interrupt in PIC is enabled as default */
        pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
        pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
 }
 
-static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ide_fixup(struct pci_dev *pdev)
 {
        /* setting the mutex pin as IDE function */
        pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
                               CS5536_IDE_FLASH_SIGNATURE);
 }
 
-static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_acc_fixup(struct pci_dev *pdev)
 {
        /* enable the AUDIO interrupt in PIC  */
        pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
@@ -118,14 +118,14 @@ static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
        pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
 }
 
-static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
 {
        /* enable the OHCI interrupt in PIC */
        /* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
        pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
 }
 
-static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
 {
        u32 hi, lo;
 
@@ -137,7 +137,7 @@ static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
        pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
 }
 
-static void __init loongson_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson_nec_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
 
index 0f48498..70073c9 100644 (file)
@@ -49,10 +49,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
-static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
-       static int piixirqmap[16] __initdata = {  /* PIIX PIRQC[A:D] irq mappings */
+       static int piixirqmap[16] __devinitdata = {  /* PIIX PIRQC[A:D] irq mappings */
                0,  0,  0,  3,
                4,  5,  6,  7,
                0,  9, 10, 11,
@@ -83,7 +83,7 @@ static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
         malta_piix_func0_fixup);
 
-static void __init malta_piix_func1_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func1_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
 
index e08f49c..8e4f828 100644 (file)
 
 #include <asm/vr41xx/mpc30x.h>
 
-static const int internal_func_irqs[] __initdata = {
+static const int internal_func_irqs[] __initconst = {
        VRC4173_CASCADE_IRQ,
        VRC4173_AC97_IRQ,
        VRC4173_USB_IRQ,
 };
 
-static const int irq_tab_mpc30x[] __initdata = {
+static const int irq_tab_mpc30x[] __initconst = {
  [12] = VRC4173_PCMCIA1_IRQ,
  [13] = VRC4173_PCMCIA2_IRQ,
  [29] = MQ200_IRQ,
index f0bb914..d02900a 100644 (file)
@@ -15,7 +15,7 @@
  * Set the BCM1250, etc. PCI host bridge's TRDY timeout
  * to the finite max.
  */
-static void __init quirk_sb1250_pci(struct pci_dev *dev)
+static void __devinit quirk_sb1250_pci(struct pci_dev *dev)
 {
        pci_write_config_byte(dev, 0x40, 0xff);
 }
@@ -25,7 +25,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
 /*
  * The BCM1250, etc. PCI/HT bridge reports as a host bridge.
  */
-static void __init quirk_sb1250_ht(struct pci_dev *dev)
+static void __devinit quirk_sb1250_ht(struct pci_dev *dev)
 {
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
 }
@@ -35,7 +35,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT,
 /*
  * Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
  */
-static void __init quirk_sp1011(struct pci_dev *dev)
+static void __devinit quirk_sp1011(struct pci_dev *dev)
 {
        pci_write_config_byte(dev, 0x64, 0xff);
 }
index a1e7e6d..bc13e29 100644 (file)
@@ -495,7 +495,7 @@ irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id)
 }
 
 #ifdef CONFIG_TOSHIBA_FPCIB0
-static void __init tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
 {
        struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus);
 
index 0fbe4c0..fdc2444 100644 (file)
@@ -212,7 +212,7 @@ static inline void pci_enable_swapping(struct pci_dev *dev)
        bridge->b_widget.w_tflush;      /* Flush */
 }
 
-static void __init pci_fixup_ioc3(struct pci_dev *d)
+static void __devinit pci_fixup_ioc3(struct pci_dev *d)
 {
        pci_disable_swapping(d);
 }
index ea45353..075d87a 100644 (file)
@@ -129,7 +129,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
 
        /* setup reset gpio used by pci */
        reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
-       if (reset_gpio > 0)
+       if (gpio_is_valid(reset_gpio))
                devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
 
        /* enable auto-switching between PCI and EBU */
@@ -192,7 +192,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
        ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
 
        /* toggle reset pin */
-       if (reset_gpio > 0) {
+       if (gpio_is_valid(reset_gpio)) {
                __gpio_set_value(reset_gpio, 0);
                wmb();
                mdelay(1);
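
The lantiq PCI change swaps the ad-hoc reset_gpio > 0 test for gpio_is_valid(): of_get_named_gpio() returns a negative errno on failure, and GPIO number 0 is a perfectly usable line, so the old comparison silently skipped the reset toggle on boards wired to GPIO 0. A standalone illustration of the difference, with gpio_is_valid() modelled on its gpiolib definition:

    #include <stdio.h>

    #define ARCH_NR_GPIOS 256

    /* modelled on the kernel's gpiolib helper */
    static int gpio_is_valid(int number)
    {
            return number >= 0 && number < ARCH_NR_GPIOS;
    }

    int main(void)
    {
            int samples[] = { -2 /* -ENOENT */, 0, 5 };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("gpio %3d:  old test %-3s  gpio_is_valid %-3s\n",
                           samples[i],
                           samples[i] > 0 ? "yes" : "no",
                           gpio_is_valid(samples[i]) ? "yes" : "no");
            return 0;
    }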
index 1644805..172af1c 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/irq.h>
 #include <linux/irqdesc.h>
 #include <linux/console.h>
+#include <linux/pci_regs.h>
 
 #include <asm/io.h>
 
@@ -156,35 +157,55 @@ struct pci_controller nlm_pci_controller = {
        .io_offset      = 0x00000000UL,
 };
 
+/*
+ * The top level PCIe links on the XLS PCIe controller appear as
+ * bridges. Given a device, this function finds which link it is
+ * on.
+ */
+static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
+{
+       struct pci_bus *bus, *p;
+
+       /* Find the bridge on bus 0 */
+       bus = dev->bus;
+       for (p = bus->parent; p && p->number != 0; p = p->parent)
+               bus = p;
+
+       return p ? bus->self : NULL;
+}
+
 static int get_irq_vector(const struct pci_dev *dev)
 {
+       struct pci_dev *lnk;
+
        if (!nlm_chip_is_xls())
-               return  PIC_PCIX_IRQ;   /* for XLR just one IRQ*/
+               return  PIC_PCIX_IRQ;   /* for XLR just one IRQ */
 
        /*
         * For XLS PCIe, there is an IRQ per Link, find out which
         * link the device is on to assign interrupts
-       */
-       if (dev->bus->self == NULL)
+        */
+       lnk = xls_get_pcie_link(dev);
+       if (lnk == NULL)
                return 0;
 
-       switch  (dev->bus->self->devfn) {
-       case 0x0:
+       switch  (PCI_SLOT(lnk->devfn)) {
+       case 0:
                return PIC_PCIE_LINK0_IRQ;
-       case 0x8:
+       case 1:
                return PIC_PCIE_LINK1_IRQ;
-       case 0x10:
+       case 2:
                if (nlm_chip_is_xls_b())
                        return PIC_PCIE_XLSB0_LINK2_IRQ;
                else
                        return PIC_PCIE_LINK2_IRQ;
-       case 0x18:
+       case 3:
                if (nlm_chip_is_xls_b())
                        return PIC_PCIE_XLSB0_LINK3_IRQ;
                else
                        return PIC_PCIE_LINK3_IRQ;
        }
-       WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
+       WARN(1, "Unexpected devfn %d\n", lnk->devfn);
        return 0;
 }
 
@@ -202,7 +223,27 @@ void arch_teardown_msi_irq(unsigned int irq)
 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
        struct msi_msg msg;
+       struct pci_dev *lnk;
        int irq, ret;
+       u16 val;
+
+       /* MSI not supported on XLR */
+       if (!nlm_chip_is_xls())
+               return 1;
+
+       /*
+        * Enable MSI on the XLS PCIe controller bridge which was disabled
+        * at enumeration, the bridge MSI capability is at 0x50
+        */
+       lnk = xls_get_pcie_link(dev);
+       if (lnk == NULL)
+               return 1;
+
+       pci_read_config_word(lnk, 0x50 + PCI_MSI_FLAGS, &val);
+       if ((val & PCI_MSI_FLAGS_ENABLE) == 0) {
+               val |= PCI_MSI_FLAGS_ENABLE;
+               pci_write_config_word(lnk, 0x50 + PCI_MSI_FLAGS, val);
+       }
 
        irq = get_irq_vector(dev);
        if (irq <= 0)
@@ -327,7 +368,7 @@ static int __init pcibios_init(void)
                }
        } else {
                /* XLR PCI controller ACK */
-               irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack);
+               irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack);
        }
 
        return 0;
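
The get_irq_vector() rework stops matching the raw devfn of the link bridge and uses PCI_SLOT() instead, after first walking up to the bridge that sits on bus 0. devfn packs slot and function into one byte, so the old literal values 0x0, 0x8, 0x10 and 0x18 were really slots 0 to 3 with function 0, and the match failed for devices sitting behind a further bridge. A standalone decode of those values using the standard macros, copied from their linux/pci.h definitions:

    #include <stdio.h>

    /* as defined in include/linux/pci.h */
    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn) ((devfn) & 0x07)

    int main(void)
    {
            unsigned int devfns[] = { 0x00, 0x08, 0x10, 0x18 };
            unsigned int i;

            for (i = 0; i < sizeof(devfns) / sizeof(devfns[0]); i++)
                    printf("devfn 0x%02x -> slot %u, function %u\n",
                           devfns[i], PCI_SLOT(devfns[i]), PCI_FUNC(devfns[i]));
            return 0;
    }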
index b71fae2..5edab2b 100644 (file)
@@ -115,11 +115,11 @@ static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
  */
 static void __cpuinit yos_init_secondary(void)
 {
-       set_c0_status(ST0_CO | ST0_IE | ST0_IM);
 }
 
 static void __cpuinit yos_smp_finish(void)
 {
+       set_c0_status(ST0_CO | ST0_IM | ST0_IE);
 }
 
 /* Hook for after all CPUs are online */
index 0a170e0..7773f3d 100644 (file)
@@ -28,7 +28,7 @@
 
 #define CALLIOPE_ADDR(x)       (CALLIOPE_IO_BASE + (x))
 
-const struct register_map calliope_register_map __initdata = {
+const struct register_map calliope_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)},
        .eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)},
        .eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)},
index bbc0c12..da076db 100644 (file)
@@ -28,7 +28,7 @@
 
 #define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))
 
-const struct register_map cronus_register_map __initdata = {
+const struct register_map cronus_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
        .eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
        .eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},
index 91dda68..47683b3 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/init.h>
 #include <asm/mach-powertv/asic.h>
 
-const struct register_map gaia_register_map __initdata = {
+const struct register_map gaia_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000},
        .eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038},
        .eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C},
index 4a05bb0..6ff4b10 100644 (file)
@@ -28,7 +28,7 @@
 
 #define ZEUS_ADDR(x)   (ZEUS_IO_BASE + (x))
 
-const struct register_map zeus_register_map __initdata = {
+const struct register_map zeus_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)},
        .eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)},
        .eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)},
index 682efb0..64eb71b 100644 (file)
@@ -269,7 +269,7 @@ txx9_i8259_irq_setup(int irq)
        return err;
 }
 
-static void __init quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit quirk_slc90e66_bridge(struct pci_dev *dev)
 {
        int irq;        /* PCI/ISA Bridge interrupt */
        u8 reg_64;
index 55b79ef..44251b9 100644 (file)
@@ -81,9 +81,6 @@ struct pt_regs {
 #define PTRACE_GETFPREGS          14
 #define PTRACE_SETFPREGS          15
 
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD     0x00000001
-
 #ifdef __KERNEL__
 
 #define user_mode(regs)                        (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
index 08251d6..ac519bb 100644 (file)
@@ -123,7 +123,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_info(struct thread_info *ti);
 #endif
 #define get_thread_info(ti)    get_task_struct((ti)->task)
 #define put_thread_info(ti)    put_task_struct((ti)->task)
index bd4e90d..f8e6642 100644 (file)
@@ -11,7 +11,6 @@
 #ifndef _ASM_TIMEX_H
 #define _ASM_TIMEX_H
 
-#include <asm/hardirq.h>
 #include <unit/timex.h>
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -30,16 +29,6 @@ static inline cycles_t get_cycles(void)
 extern int init_clockevents(void);
 extern int init_clocksource(void);
 
-static inline void setup_jiffies_interrupt(int irq,
-                                          struct irqaction *action)
-{
-       u16 tmp;
-       setup_irq(irq, action);
-       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
-       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
-       tmp = GxICR(irq);
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMEX_H */
index 69cae02..ccce35e 100644 (file)
@@ -70,6 +70,16 @@ static void event_handler(struct clock_event_device *dev)
 {
 }
 
+static inline void setup_jiffies_interrupt(int irq,
+                                          struct irqaction *action)
+{
+       u16 tmp;
+       setup_irq(irq, action);
+       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+       tmp = GxICR(irq);
+}
+
 int __init init_clockevents(void)
 {
        struct clock_event_device *cd;
index a5ac755..2df4401 100644 (file)
@@ -9,6 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#include <linux/irqreturn.h>
+
 struct clocksource;
 struct clock_event_device;
 
index 2381df8..35932a8 100644 (file)
@@ -170,9 +170,9 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
        case SC1TXIRQ:
 #ifdef CONFIG_MN10300_TTYSM1_TIMER12
        case TM12IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER9
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
        case TM9IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER3
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
        case TM3IRQ:
 #endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
 #endif /* CONFIG_MN10300_TTYSM1 */
index 6ab0bee..4d584ae 100644 (file)
@@ -459,10 +459,11 @@ static int handle_signal(int sig,
        else
                ret = setup_frame(sig, ka, oldset, regs);
        if (ret)
-               return;
+               return ret;
 
        signal_delivered(sig, info, ka, regs,
-                                test_thread_flag(TIF_SINGLESTEP));
+                        test_thread_flag(TIF_SINGLESTEP));
+       return 0;
 }
 
 /*
index 090d35d..e62c223 100644 (file)
@@ -876,9 +876,7 @@ static void __init smp_online(void)
 
        notify_cpu_starting(cpu);
 
-       ipi_call_lock();
        set_cpu_online(cpu, true);
-       ipi_call_unlock();
 
        local_irq_enable();
 }
index 94a9c6d..b900e5a 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/kdebug.h>
 #include <linux/bug.h>
 #include <linux/irq.h>
+#include <linux/export.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
index 159acb0..e244ebe 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/export.h>
 #include <asm/io.h>
 
 static unsigned long pci_sram_allocated = 0xbc000000;
index cc18fe7..c37f983 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index 43c2464..5367769 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/timex.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
index 758af30..4cefc22 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index e1becd6..bc4adfa 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
index ddb7ed0..42f32db 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index a47828d..6266730 100644 (file)
@@ -300,9 +300,7 @@ smp_cpu_init(int cpunum)
 
        notify_cpu_starting(cpunum);
 
-       ipi_call_lock();
        set_cpu_online(cpunum, true);
-       ipi_call_unlock();
 
        /* Initialise the idle task for this CPU */
        atomic_inc(&init_mm.mm_count);
index 32b394f..0554ab0 100644 (file)
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable()    asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable()   asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable()    asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable()   asm volatile("wrteei 0" : : : "memory")
 #else
 #define __hard_irq_enable()    __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
 #define __hard_irq_disable()   __mtmsrd(local_paca->kernel_msr, 1)
@@ -103,6 +103,11 @@ static inline void hard_irq_disable(void)
 /* include/linux/interrupt.h needs hard_irq_disable to be a macro */
 #define hard_irq_disable       hard_irq_disable
 
+static inline bool lazy_irq_pending(void)
+{
+       return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having
@@ -120,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
        return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x)  mtmsr(x)
index ed1718f..5971c85 100644 (file)
@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
        mtmsrd  r10,1             /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PREEMPT
        clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
-       li      r0,_TIF_NEED_RESCHED    /* bits to check */
        ld      r3,_MSR(r1)
        ld      r4,TI_FLAGS(r9)
-       /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
-       rlwimi  r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
-       and.    r0,r4,r0        /* check NEED_RESCHED and maybe SIGPENDING */
-       bne     do_work
-
-#else /* !CONFIG_PREEMPT */
-       ld      r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r3,r3,MSR_PR
-       beq     restore         /* if not, just restore regs and return */
+       beq     resume_kernel
 
        /* Check current_thread_info()->flags */
+       andi.   r0,r4,_TIF_USER_WORK_MASK
+       beq     restore
+
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       beq     1f
+       bl      .restore_interrupts
+       bl      .schedule
+       b       .ret_from_except_lite
+
+1:     bl      .save_nvgprs
+       bl      .restore_interrupts
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_notify_resume
+       b       .ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+       /* Check if we need to preempt */
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       beq+    restore
+       /* Check that preempt_count() == 0 and interrupts are enabled */
+       lwz     r8,TI_PREEMPT(r9)
+       cmpwi   cr1,r8,0
+       ld      r0,SOFTE(r1)
+       cmpdi   r0,0
+       crandc  eq,cr1*4+eq,eq
+       bne     restore
+
+       /*
+        * Here we are preempting the current task. We want to make
+        * sure we are soft-disabled first
+        */
+       SOFT_DISABLE_INTS(r3,r4)
+1:     bl      .preempt_schedule_irq
+
+       /* Re-test flags and eventually loop */
        clrrdi  r9,r1,THREAD_SHIFT
        ld      r4,TI_FLAGS(r9)
-       andi.   r0,r4,_TIF_USER_WORK_MASK
-       bne     do_work
-#endif /* !CONFIG_PREEMPT */
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       bne     1b
+#endif /* CONFIG_PREEMPT */
 
        .globl  fast_exc_return_irq
 fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:     b       .ret_from_except /* What else to do here ? */
  
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
-       andi.   r0,r3,MSR_PR    /* Returning to user mode? */
-       bne     user_work
-       /* Check that preempt_count() == 0 and interrupts are enabled */
-       lwz     r8,TI_PREEMPT(r9)
-       cmpwi   cr1,r8,0
-       ld      r0,SOFTE(r1)
-       cmpdi   r0,0
-       crandc  eq,cr1*4+eq,eq
-       bne     restore
-
-       /*
-        * Here we are preempting the current task. We want to make
-        * sure we are soft-disabled first
-        */
-       SOFT_DISABLE_INTS(r3,r4)
-1:     bl      .preempt_schedule_irq
-
-       /* Re-test flags and eventually loop */
-       clrrdi  r9,r1,THREAD_SHIFT
-       ld      r4,TI_FLAGS(r9)
-       andi.   r0,r4,_TIF_NEED_RESCHED
-       bne     1b
-       b       restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
-       andi.   r0,r4,_TIF_NEED_RESCHED
-       beq     1f
-       bl      .restore_interrupts
-       bl      .schedule
-       b       .ret_from_except_lite
-
-1:     bl      .save_nvgprs
-       bl      .restore_interrupts
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .do_notify_resume
-       b       .ret_from_except
-
 unrecov_restore:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
index 7835a5e..1f017bb 100644 (file)
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
        else {
                /*
                 * We should already be hard disabled here. We had bugs
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * NOTE: This is called with interrupts hard disabled but not marked
  * as such in paca->irq_happened, so we need to resync this.
  */
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
 {
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -286,6 +286,52 @@ void restore_interrupts(void)
                __hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to enter an idle low-power
+ * state that has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will tell you whether to enter power save or simply return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+       /*
+        * First we need to hard disable to ensure no interrupt
+        * occurs before we effectively enter the low power state
+        */
+       hard_irq_disable();
+
+       /*
+        * If anything happened while we were soft-disabled,
+        * we return now and do not enter the low power state.
+        */
+       if (lazy_irq_pending())
+               return false;
+
+       /* Tell lockdep we are about to re-enable */
+       trace_hardirqs_on();
+
+       /*
+        * Mark interrupts as soft-enabled and clear the
+        * PACA_IRQ_HARD_DIS from the pending mask since we
+        * are about to hard enable as well as a side effect
+        * of entering the low power state.
+        */
+       local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+       local_paca->soft_enabled = 1;
+
+       /* Tell the caller to enter the low power state */
+       return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
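The comment above spells out the contract of prep_irq_for_idle(); below is a minimal sketch of a platform power_save hook using it. enter_low_power() is a made-up placeholder for whatever actually enters the low-power state and re-enables interrupts as a side effect (the real conversions of cbe_power_save() and check_and_cede_processor() appear later in this diff).

static void example_power_save(void)
{
	/* An interrupt is already pending: do not enter the low power state. */
	if (!prep_irq_for_idle())
		return;

	/*
	 * Safe to go idle: lockdep has been told interrupts are about to
	 * come back on and the lazy irq state has been sanitized.
	 */
	enter_low_power();	/* hypothetical; re-enables interrupts */
}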
index 1b488e5..0794a30 100644 (file)
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
 
 extern char opal_secondary_entry;
 
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
 {
        long rc;
 
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
        prom_debug("prom_opal_hold_cpus: end...\n");
 }
 
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
 {
        struct opal_secondary_data *data = &RELOC(opal_secondary_data);
        struct opal_takeover_args *args = &data->args;
index e4cb343..e1417c4 100644 (file)
@@ -571,7 +571,6 @@ void __devinit start_secondary(void *unused)
        if (system_state == SYSTEM_RUNNING)
                vdso_data->processorCount++;
 #endif
-       ipi_call_lock();
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
        /* Update sibling maps */
@@ -601,7 +600,6 @@ void __devinit start_secondary(void *unused)
                of_node_put(np);
        }
        of_node_put(l2_cache);
-       ipi_call_unlock();
 
        local_irq_enable();
 
index c6af1d6..3abe1b8 100644 (file)
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
        return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+       struct kvm *kvm = vcpu->kvm;
        void *va;
        unsigned long nb;
+       unsigned long gpa;
 
-       vpap->update_pending = 0;
-       va = NULL;
-       if (vpap->next_gpa) {
-               va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-               if (nb < vpap->len) {
-                       /*
-                        * If it's now too short, it must be that userspace
-                        * has changed the mappings underlying guest memory,
-                        * so unregister the region.
-                        */
+       /*
+        * We need to pin the page pointed to by vpap->next_gpa,
+        * but we can't call kvmppc_pin_guest_page under the lock
+        * as it does get_user_pages() and down_read().  So we
+        * have to drop the lock, pin the page, then get the lock
+        * again and check that a new area didn't get registered
+        * in the meantime.
+        */
+       for (;;) {
+               gpa = vpap->next_gpa;
+               spin_unlock(&vcpu->arch.vpa_update_lock);
+               va = NULL;
+               nb = 0;
+               if (gpa)
+                       va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+               spin_lock(&vcpu->arch.vpa_update_lock);
+               if (gpa == vpap->next_gpa)
+                       break;
+               /* sigh... unpin that one and try again */
+               if (va)
                        kvmppc_unpin_guest_page(kvm, va);
-                       va = NULL;
-               }
+       }
+
+       vpap->update_pending = 0;
+       if (va && nb < vpap->len) {
+               /*
+                * If it's now too short, it must be that userspace
+                * has changed the mappings underlying guest memory,
+                * so unregister the region.
+                */
+               kvmppc_unpin_guest_page(kvm, va);
+               va = NULL;
        }
        if (vpap->pinned_addr)
                kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
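The loop above is an instance of a common pattern: snapshot the state under the lock, drop the lock for the sleeping operation, re-take it, and retry if the snapshot went stale. A stripped-down sketch with invented names (struct area, pin_page(), unpin_page()) just to isolate the idiom:

/* Called and returns with state->lock held. */
static void *repin_area(struct area *state)
{
	unsigned long gpa;
	void *page;

	for (;;) {
		gpa = state->next_gpa;			/* snapshot under the lock */
		spin_unlock(&state->lock);
		page = gpa ? pin_page(gpa) : NULL;	/* may sleep */
		spin_lock(&state->lock);
		if (gpa == state->next_gpa)
			return page;			/* nothing raced with us */
		if (page)
			unpin_page(page);		/* stale pin: drop it and retry */
	}
}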
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = vcpu->kvm;
-
        spin_lock(&vcpu->arch.vpa_update_lock);
        if (vcpu->arch.vpa.update_pending) {
-               kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+               kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
                init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
        }
        if (vcpu->arch.dtl.update_pending) {
-               kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+               kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
                vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
                vcpu->arch.dtl_index = 0;
        }
        if (vcpu->arch.slb_shadow.update_pending)
-               kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+               kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
        spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
        struct kvm_vcpu *vcpu, *vcpu0, *vnext;
        long ret;
        u64 now;
-       int ptid, i;
+       int ptid, i, need_vpa_update;
 
        /* don't start if any threads have a signal pending */
-       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+       need_vpa_update = 0;
+       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                if (signal_pending(vcpu->arch.run_task))
                        return 0;
+               need_vpa_update |= vcpu->arch.vpa.update_pending |
+                       vcpu->arch.slb_shadow.update_pending |
+                       vcpu->arch.dtl.update_pending;
+       }
+
+       /*
+        * Initialize *vc, in particular vc->vcore_state, so we can
+        * drop the vcore lock if necessary.
+        */
+       vc->n_woken = 0;
+       vc->nap_count = 0;
+       vc->entry_exit_count = 0;
+       vc->vcore_state = VCORE_RUNNING;
+       vc->in_guest = 0;
+       vc->napping_threads = 0;
+
+       /*
+        * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+        * which can't be called with any spinlocks held.
+        */
+       if (need_vpa_update) {
+               spin_unlock(&vc->lock);
+               list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+                       kvmppc_update_vpas(vcpu);
+               spin_lock(&vc->lock);
+       }
 
        /*
         * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
                if (vcpu->arch.ceded)
                        vcpu->arch.ptid = ptid++;
 
-       vc->n_woken = 0;
-       vc->nap_count = 0;
-       vc->entry_exit_count = 0;
-       vc->vcore_state = VCORE_RUNNING;
        vc->stolen_tb += mftb() - vc->preempt_tb;
-       vc->in_guest = 0;
        vc->pcpu = smp_processor_id();
-       vc->napping_threads = 0;
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                kvmppc_start_thread(vcpu);
-               if (vcpu->arch.vpa.update_pending ||
-                   vcpu->arch.slb_shadow.update_pending ||
-                   vcpu->arch.dtl.update_pending)
-                       kvmppc_update_vpas(vcpu);
                kvmppc_create_dtl_entry(vcpu, vc);
        }
        /* Grab any remaining hw threads so they can't go into the kernel */
index a84aafc..a1044f4 100644 (file)
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        lwz     r3,VCORE_NAPPING_THREADS(r5)
        lwz     r4,VCPU_PTID(r9)
        li      r0,1
-       sldi    r0,r0,r4
+       sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
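The one-character change above is not cosmetic: sldi shifts by an immediate, while sld shifts by the amount held in a register, which is what is needed when the shift count is the vcpu thread id in r4. A purely illustrative two-line comparison:

	sldi	r0,r0,4		/* shift left by the constant 4 */
	sld	r0,r0,r4	/* shift left by the value held in r4 */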
index 3ff9013..ee02b30 100644 (file)
@@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
        case H_PUT_TCE:
                return kvmppc_h_pr_put_tce(vcpu);
        case H_CEDE:
+               vcpu->arch.shared->msr |= MSR_EE;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                vcpu->stat.halt_wakeup++;
index b6edbb3..1e95556 100644 (file)
@@ -635,11 +635,11 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-       const u32 *dm, *usm;
+       const u32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
-       struct assoc_arrays aa;
+       struct assoc_arrays aa = { .arrays = NULL };
 
        n = of_get_drconf_memory(memory, &dm);
        if (!n)
index 55ba385..7d3a3b5 100644 (file)
@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
        mr      r4, r_addr;                                     \
        li      r6, SIZE;                                       \
        bl      skb_copy_bits;                                  \
+       nop;                                                    \
        /* R3 = 0 on success */                                 \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
        ld      r0, 16(r1);                                     \
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
        mr      r4, r_addr;                                     \
        li      r5, SIZE;                                       \
        bl      bpf_internal_load_pointer_neg_helper;           \
+       nop;                                                    \
        /* R3 != 0 on success */                                \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
        ld      r0, 16(r1);                                     \
index efdacc8..d17e98b 100644 (file)
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
        unsigned long ctrl, thread_switch_control;
 
-       /*
-        * We need to hard disable interrupts, the local_irq_enable() done by
-        * our caller upon return will hard re-enable.
-        */
-       hard_irq_disable();
+       /* Ensure our interrupt state is properly tracked */
+       if (!prep_irq_for_idle())
+               return;
 
        ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
         */
        ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
        mtspr(SPRN_CTRLT, ctrl);
+
+       /* Re-enable interrupts in MSR */
+       __hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
index 0915b1a..2d311c0 100644 (file)
@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
                tcep++;
        }
 
-       if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+       if (tbl->it_type & TCE_PCI_SWINV_CREATE)
                tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
        return 0;
 }
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
        while (npages--)
                *(tcep++) = 0;
 
-       if (tbl->it_type == TCE_PCI_SWINV_FREE)
+       if (tbl->it_type & TCE_PCI_SWINV_FREE)
                tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
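it_type is a bit mask (a table can carry both SWINV flags at once), so an equality test silently stops matching as soon as more than one flag is set. A tiny sketch with invented flag names and values:

/* Names and values are illustrative only; the bit-mask nature is the point. */
#define SWINV_CREATE	0x1
#define SWINV_FREE	0x2

static int needs_invalidate_on_create(int it_type)
{
	/* Wrong: false when it_type == (SWINV_CREATE | SWINV_FREE). */
	/* return it_type == SWINV_CREATE; */

	/* Right: test the individual bit. */
	return (it_type & SWINV_CREATE) != 0;
}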
 
index 36f957f..8733a86 100644 (file)
@@ -68,9 +68,7 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-               enum kmsg_dump_reason reason,
-               const char *old_msgs, unsigned long old_len,
-               const char *new_msgs, unsigned long new_len);
+                         enum kmsg_dump_reason reason);
 
 static struct kmsg_dumper nvram_kmsg_dumper = {
        .dump = oops_to_nvram
@@ -503,28 +501,6 @@ int __init pSeries_nvram_init(void)
        return 0;
 }
 
-/*
- * Try to capture the last capture_len bytes of the printk buffer.  Return
- * the amount actually captured.
- */
-static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
-                               const char *new_msgs, size_t new_len,
-                               char *captured, size_t capture_len)
-{
-       if (new_len >= capture_len) {
-               memcpy(captured, new_msgs + (new_len - capture_len),
-                                                               capture_len);
-               return capture_len;
-       } else {
-               /* Grab the end of old_msgs. */
-               size_t old_tail_len = min(old_len, capture_len - new_len);
-               memcpy(captured, old_msgs + (old_len - old_tail_len),
-                                                               old_tail_len);
-               memcpy(captured + old_tail_len, new_msgs, new_len);
-               return old_tail_len + new_len;
-       }
-}
-
 /*
  * Are we using the ibm,rtas-log for oops/panic reports?  And if so,
  * would logging this oops/panic overwrite an RTAS event that rtas_errd
@@ -541,27 +517,6 @@ static int clobbering_unread_rtas_event(void)
                                                NVRAM_RTAS_READ_TIMEOUT);
 }
 
-/* Squeeze out each line's <n> severity prefix. */
-static size_t elide_severities(char *buf, size_t len)
-{
-       char *in, *out, *buf_end = buf + len;
-       /* Assume a <n> at the very beginning marks the start of a line. */
-       int newline = 1;
-
-       in = out = buf;
-       while (in < buf_end) {
-               if (newline && in+3 <= buf_end &&
-                               *in == '<' && isdigit(in[1]) && in[2] == '>') {
-                       in += 3;
-                       newline = 0;
-               } else {
-                       newline = (*in == '\n');
-                       *out++ = *in++;
-               }
-       }
-       return out - buf;
-}
-
 /* Derived from logfs_compress() */
 static int nvram_compress(const void *in, void *out, size_t inlen,
                                                        size_t outlen)
@@ -619,9 +574,7 @@ static int zip_oops(size_t text_len)
  * partition.  If that's too much, go back and capture uncompressed text.
  */
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-               enum kmsg_dump_reason reason,
-               const char *old_msgs, unsigned long old_len,
-               const char *new_msgs, unsigned long new_len)
+                         enum kmsg_dump_reason reason)
 {
        static unsigned int oops_count = 0;
        static bool panicking = false;
@@ -660,14 +613,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
                return;
 
        if (big_oops_buf) {
-               text_len = capture_last_msgs(old_msgs, old_len,
-                       new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
-               text_len = elide_severities(big_oops_buf, text_len);
+               kmsg_dump_get_buffer(dumper, false,
+                                    big_oops_buf, big_oops_buf_sz, &text_len);
                rc = zip_oops(text_len);
        }
        if (rc != 0) {
-               text_len = capture_last_msgs(old_msgs, old_len,
-                               new_msgs, new_len, oops_data, oops_data_sz);
+               kmsg_dump_rewind(dumper);
+               kmsg_dump_get_buffer(dumper, true,
+                                    oops_data, oops_data_sz, &text_len);
                err_type = ERR_TYPE_KERNEL_PANIC;
                *oops_len = (u16) text_len;
        }
index 41a34bc..c71be66 100644 (file)
@@ -99,15 +99,18 @@ out:
 static void check_and_cede_processor(void)
 {
        /*
-        * Interrupts are soft-disabled at this point,
-        * but not hard disabled. So an interrupt might have
-        * occurred before entering NAP, and would be potentially
-        * lost (edge events, decrementer events, etc...) unless
-        * we first hard disable then check.
+        * Ensure our interrupt state is properly tracked;
+        * this also checks that no interrupt occurred while we
+        * were soft-disabled.
         */
-       hard_irq_disable();
-       if (get_paca()->irq_happened == 0)
+       if (prep_irq_for_idle()) {
                cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+               /* Ensure that H_CEDE returns with IRQs on */
+               if (WARN_ON(!(mfmsr() & MSR_EE)))
+                       __hard_irq_enable();
+#endif
+       }
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
index 0f3ab06..eab3492 100644 (file)
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
                /* print cpus waiting or in xmon */
                printf("cpus stopped:");
                count = 0;
-               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+               for_each_possible_cpu(cpu) {
                        if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                                if (count == 0)
                                        printf(" %x", cpu);
index 15cca26..8dca9c2 100644 (file)
@@ -717,9 +717,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
        init_cpu_vtimer();
        pfault_init();
        notify_cpu_starting(smp_processor_id());
-       ipi_call_lock();
        set_cpu_online(smp_processor_id(), true);
-       ipi_call_unlock();
        local_irq_enable();
        /* cpu_idle will call schedule for us */
        cpu_idle();
index 158c917..43a179c 100644 (file)
@@ -201,8 +201,8 @@ static struct resource kfr2r09_usb0_gadget_resources[] = {
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
-               .start  = evtirq(0xa20),
-               .end    = evtirq(0xa20),
+               .start  = evt2irq(0xa20),
+               .end    = evt2irq(0xa20),
                .flags  = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
        },
 };
index c045142..9e702f2 100644 (file)
@@ -239,7 +239,7 @@ static int __init pcie_clk_init(struct sh7786_pcie_port *port)
        clk->enable_reg = (void __iomem *)(chan->reg_base + SH4A_PCIEPHYCTLR);
        clk->enable_bit = BITS_CKE;
 
-       ret = sh_clk_mstp32_register(clk, 1);
+       ret = sh_clk_mstp_register(clk, 1);
        if (unlikely(ret < 0))
                goto err_phy;
 
index e136d28..4d48f14 100644 (file)
@@ -19,9 +19,20 @@ static inline u32 inl(unsigned long addr)
        return -1;
 }
 
-#define outb(x, y)     BUG()
-#define outw(x, y)     BUG()
-#define outl(x, y)     BUG()
+static inline void outb(unsigned char x, unsigned long port)
+{
+       BUG();
+}
+
+static inline void outw(unsigned short x, unsigned long port)
+{
+       BUG();
+}
+
+static inline void outl(unsigned int x, unsigned long port)
+{
+       BUG();
+}
 
 #define inb_p(addr)    inb(addr)
 #define inw_p(addr)    inw(addr)
index 8832c52..c4a0336 100644 (file)
@@ -2,7 +2,7 @@
 #include <linux/serial_core.h>
 #include <linux/io.h>
 #include <cpu/serial.h>
-#include <asm/gpio.h>
+#include <cpu/gpio.h>
 
 static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
 {
index ea01a72..53638e2 100644 (file)
@@ -283,7 +283,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 7ac07b4..22e485d 100644 (file)
@@ -276,7 +276,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 8e1f970..c4cb740 100644 (file)
@@ -261,7 +261,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+               ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
        return ret;
 }
index 35f75cf..37c41c7 100644 (file)
@@ -311,7 +311,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+               ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
        return ret;
 }
index 2a87901..c87e78f 100644 (file)
@@ -375,7 +375,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+               ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
        return ret;
 }
index 1697642..deb683a 100644 (file)
@@ -260,7 +260,7 @@ int __init arch_clk_init(void)
                        &div4_table);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 04ab5ae..e84a432 100644 (file)
@@ -148,7 +148,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index ab1c58f..1c83788 100644 (file)
@@ -175,7 +175,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 4917094..8bba6f1 100644 (file)
@@ -194,7 +194,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 0f11b39..a9422da 100644 (file)
@@ -149,7 +149,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index f591598..781bcb1 100644 (file)
@@ -103,8 +103,6 @@ void __cpuinit smp_callin(void)
        if (cheetah_pcache_forced_on)
                cheetah_enable_pcache();
 
-       local_irq_enable();
-
        callin_flag = 1;
        __asm__ __volatile__("membar #Sync\n\t"
                             "flush  %%g6" : : : "memory");
@@ -124,9 +122,8 @@ void __cpuinit smp_callin(void)
        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                rmb();
 
-       ipi_call_lock_irq();
        set_cpu_online(cpuid, true);
-       ipi_call_unlock_irq();
+       local_irq_enable();
 
        /* idle thread is expected to have preempt disabled */
        preempt_disable();
@@ -1308,9 +1305,7 @@ int __cpu_disable(void)
        mdelay(1);
        local_irq_disable();
 
-       ipi_call_lock();
        set_cpu_online(cpu, false);
-       ipi_call_unlock();
 
        cpu_map_rebuild();
 
index 5cffdc5..3e244f3 100644 (file)
@@ -443,7 +443,7 @@ static int __init vio_init(void)
        root_vdev = vio_create_one(hp, root, NULL);
        err = -ENODEV;
        if (!root_vdev) {
-               printk(KERN_ERR "VIO: Coult not create root device.\n");
+               printk(KERN_ERR "VIO: Could not create root device.\n");
                goto out_release;
        }
 
index 9092ce8..f8b74ca 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <asm/byteorder.h>
 #include <asm/backtrace.h>
 #include <asm/tile-desc.h>
 #include <arch/abi.h>
@@ -336,8 +337,12 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
                                bytes_to_prefetch / sizeof(tile_bundle_bits);
                }
 
-               /* Decode the next bundle. */
-               bundle.bits = prefetched_bundles[next_bundle++];
+               /*
+                * Decode the next bundle.
+                * TILE always stores instruction bundles in little-endian
+                * mode, even when the chip is running in big-endian mode.
+                */
+               bundle.bits = le64_to_cpu(prefetched_bundles[next_bundle++]);
                bundle.num_insns =
                        parse_insn_tile(bundle.bits, pc, bundle.insns);
                num_info_ops = bt_get_info_ops(&bundle, info_operands);
index 84873fb..e686c5a 100644 (file)
@@ -198,17 +198,7 @@ void __cpuinit online_secondary(void)
 
        notify_cpu_starting(smp_processor_id());
 
-       /*
-        * We need to hold call_lock, so there is no inconsistency
-        * between the time smp_call_function() determines number of
-        * IPI recipients, and the time when the determination is made
-        * for which cpus receive the IPI. Holding this
-        * lock helps us to not include this cpu in a currently in progress
-        * smp_call_function().
-        */
-       ipi_call_lock();
        set_cpu_online(smp_processor_id(), 1);
-       ipi_call_unlock();
        __get_cpu_var(cpu_state) = CPU_ONLINE;
 
        /* Set up tile-specific state for this cpu. */
index 88e466b..43b39d6 100644 (file)
@@ -705,7 +705,6 @@ static void stack_proc(void *arg)
        struct task_struct *from = current, *to = arg;
 
        to->thread.saved_task = from;
-       rcu_switch_from(from);
        switch_to(from, to, from);
 }
 
index daeca56..673ac9b 100644 (file)
@@ -38,7 +38,7 @@
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
        int err = 0;
-       bool ia32 = is_ia32_task();
+       bool ia32 = test_thread_flag(TIF_IA32);
 
        if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
                return -EFAULT;
index 49331be..7078068 100644 (file)
@@ -75,23 +75,54 @@ static inline int alternatives_text_reserved(void *start, void *end)
 }
 #endif /* CONFIG_SMP */
 
+#define OLDINSTR(oldinstr)     "661:\n\t" oldinstr "\n662:\n"
+
+#define b_replacement(number)  "663"#number
+#define e_replacement(number)  "664"#number
+
+#define alt_slen "662b-661b"
+#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
+
+#define ALTINSTR_ENTRY(feature, number)                                              \
+       " .long 661b - .\n"                             /* label           */ \
+       " .long " b_replacement(number)"f - .\n"        /* new instruction */ \
+       " .word " __stringify(feature) "\n"             /* feature bit     */ \
+       " .byte " alt_slen "\n"                         /* source len      */ \
+       " .byte " alt_rlen(number) "\n"                 /* replacement len */
+
+#define DISCARD_ENTRY(number)                          /* rlen <= slen */    \
+       " .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
+
+#define ALTINSTR_REPLACEMENT(newinstr, feature, number)        /* replacement */     \
+       b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
+
 /* alternative assembly primitive: */
 #define ALTERNATIVE(oldinstr, newinstr, feature)                       \
-                                                                       \
-      "661:\n\t" oldinstr "\n662:\n"                                   \
-      ".section .altinstructions,\"a\"\n"                              \
-      "         .long 661b - .\n"                      /* label           */   \
-      "         .long 663f - .\n"                      /* new instruction */   \
-      "         .word " __stringify(feature) "\n"      /* feature bit     */   \
-      "         .byte 662b-661b\n"                     /* sourcelen       */   \
-      "         .byte 664f-663f\n"                     /* replacementlen  */   \
-      ".previous\n"                                                    \
-      ".section .discard,\"aw\",@progbits\n"                           \
-      "         .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */   \
-      ".previous\n"                                                    \
-      ".section .altinstr_replacement, \"ax\"\n"                       \
-      "663:\n\t" newinstr "\n664:\n"           /* replacement     */   \
-      ".previous"
+       OLDINSTR(oldinstr)                                              \
+       ".section .altinstructions,\"a\"\n"                             \
+       ALTINSTR_ENTRY(feature, 1)                                      \
+       ".previous\n"                                                   \
+       ".section .discard,\"aw\",@progbits\n"                          \
+       DISCARD_ENTRY(1)                                                \
+       ".previous\n"                                                   \
+       ".section .altinstr_replacement, \"ax\"\n"                      \
+       ALTINSTR_REPLACEMENT(newinstr, feature, 1)                      \
+       ".previous"
+
+#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+       OLDINSTR(oldinstr)                                              \
+       ".section .altinstructions,\"a\"\n"                             \
+       ALTINSTR_ENTRY(feature1, 1)                                     \
+       ALTINSTR_ENTRY(feature2, 2)                                     \
+       ".previous\n"                                                   \
+       ".section .discard,\"aw\",@progbits\n"                          \
+       DISCARD_ENTRY(1)                                                \
+       DISCARD_ENTRY(2)                                                \
+       ".previous\n"                                                   \
+       ".section .altinstr_replacement, \"ax\"\n"                      \
+       ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)                    \
+       ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)                    \
+       ".previous"
 
 /*
  * This must be included *after* the definition of ALTERNATIVE due to
@@ -139,6 +170,19 @@ static inline int alternatives_text_reserved(void *start, void *end)
        asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
                : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
 
+/*
+ * Like alternative_call, but there are two features and respective functions.
+ * If CPU has feature2, function2 is used.
+ * Otherwise, if CPU has feature1, function1 is used.
+ * Otherwise, old function is used.
+ */
+#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2,   \
+                          output, input...)                                  \
+       asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
+               "call %P[new2]", feature2)                                    \
+               : output : [old] "i" (oldfunc), [new1] "i" (newfunc1),        \
+               [new2] "i" (newfunc2), ## input)
+
 /*
  * use this macro(s) if you need more than one output parameter
  * in alternative_io
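A usage sketch for alternative_call_2(): the function names below are invented, and the constraints and clobbers simply mirror the copy_user_generic() conversion that appears later in this diff, so they assume the three routines share the copy_user_* register contract. The call site is patched to erms_memop() when the CPU has ERMS, else to rep_memop() when it has REP_GOOD, and otherwise keeps calling old_memop().

static inline unsigned do_memop(void *to, const void *from, unsigned len)
{
	unsigned ret;

	alternative_call_2(old_memop,
			   rep_memop,  X86_FEATURE_REP_GOOD,
			   erms_memop, X86_FEATURE_ERMS,
			   ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				       "=d" (len)),
			   "1" (to), "2" (from), "3" (len)
			   : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}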
index 340ee49..f91e80f 100644 (file)
 #define X86_FEATURE_XSAVEOPT   (7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN                (7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS                (7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS                (7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM     (7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE  (7*32+ 8) /* AMD HW-PState */
 
 /* Virtualization flags: Linux defined, word 8 */
index db7c1f2..2da88c0 100644 (file)
@@ -313,8 +313,8 @@ struct kvm_pmu {
        u64 counter_bitmask[2];
        u64 global_ctrl_mask;
        u8 version;
-       struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
-       struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+       struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+       struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
        struct irq_work irq_work;
        u64 reprogram_pmi;
 };
index 084ef95..813ed10 100644 (file)
@@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 
 extern unsigned long long native_read_tsc(void);
 
-extern int native_rdmsr_safe_regs(u32 regs[8]);
-extern int native_wrmsr_safe_regs(u32 regs[8]);
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
 
 static __always_inline unsigned long long __native_read_tsc(void)
 {
@@ -187,43 +187,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
        return err;
 }
 
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-       u32 gprs[8] = { 0 };
-       int err;
-
-       gprs[1] = msr;
-       gprs[7] = 0x9c5a203a;
-
-       err = native_rdmsr_safe_regs(gprs);
-
-       *p = gprs[0] | ((u64)gprs[2] << 32);
-
-       return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-       u32 gprs[8] = { 0 };
-
-       gprs[0] = (u32)val;
-       gprs[1] = msr;
-       gprs[2] = val >> 32;
-       gprs[7] = 0x9c5a203a;
-
-       return native_wrmsr_safe_regs(gprs);
-}
-
-static inline int rdmsr_safe_regs(u32 regs[8])
-{
-       return native_rdmsr_safe_regs(regs);
-}
-
-static inline int wrmsr_safe_regs(u32 regs[8])
-{
-       return native_wrmsr_safe_regs(regs);
-}
-
 #define rdtscl(low)                                            \
        ((low) = (u32)__native_read_tsc())
 
@@ -237,6 +200,8 @@ do {                                                        \
        (high) = (u32)(_l >> 32);                       \
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+
 #define rdtscp(low, high, aux)                                 \
 do {                                                            \
        unsigned long long _val = native_read_tscp(&(aux));     \
@@ -248,8 +213,7 @@ do {                                                            \
 
 #endif /* !CONFIG_PARAVIRT */
 
-
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),                \
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),            \
                                             (u32)((val) >> 32))
 
 #define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
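The new rdpmcl() wrapper reads a performance counter by index much as rdmsrl() reads an MSR by address; it expands to native_read_pmc() here and to paravirt_read_pmc() in the paravirt variant below. A minimal sketch of a caller (the helper name is invented; the real user is x86_perf_event_update() further down):

static inline u64 read_pmc_counter(int idx)
{
	u64 val;

	rdpmcl(idx, val);
	return val;
}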
index 6cbbabf..0b47ddb 100644 (file)
@@ -128,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
 
-static inline int paravirt_rdmsr_regs(u32 *regs)
-{
-       return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
-}
-
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
 }
 
-static inline int paravirt_wrmsr_regs(u32 *regs)
-{
-       return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
-}
-
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr, val1, val2)                 \
 do {                                           \
@@ -176,9 +166,6 @@ do {                                                \
        _err;                                   \
 })
 
-#define rdmsr_safe_regs(regs)  paravirt_rdmsr_regs(regs)
-#define wrmsr_safe_regs(regs)  paravirt_wrmsr_regs(regs)
-
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
        int err;
@@ -186,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
        *p = paravirt_read_msr(msr, &err);
        return err;
 }
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-       u32 gprs[8] = { 0 };
-       int err;
-
-       gprs[1] = msr;
-       gprs[7] = 0x9c5a203a;
-
-       err = paravirt_rdmsr_regs(gprs);
-
-       *p = gprs[0] | ((u64)gprs[2] << 32);
-
-       return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-       u32 gprs[8] = { 0 };
-
-       gprs[0] = (u32)val;
-       gprs[1] = msr;
-       gprs[2] = val >> 32;
-       gprs[7] = 0x9c5a203a;
-
-       return paravirt_wrmsr_regs(gprs);
-}
 
 static inline u64 paravirt_read_tsc(void)
 {
@@ -252,6 +213,8 @@ do {                                                \
        high = _l >> 32;                        \
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+
 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
 {
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
index 8e8b9a4..8613cbb 100644 (file)
@@ -153,9 +153,7 @@ struct pv_cpu_ops {
        /* MSR, PMC and TSR operations.
           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
-       int (*rdmsr_regs)(u32 *regs);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
-       int (*wrmsr_regs)(u32 *regs);
 
        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
index 588f52e..c78f14a 100644 (file)
@@ -5,11 +5,10 @@
  * Performance event hw details:
  */
 
-#define X86_PMC_MAX_GENERIC                                   32
-#define X86_PMC_MAX_FIXED                                      3
+#define INTEL_PMC_MAX_GENERIC                                 32
+#define INTEL_PMC_MAX_FIXED                                    3
+#define INTEL_PMC_IDX_FIXED                                   32
 
-#define X86_PMC_IDX_GENERIC                                    0
-#define X86_PMC_IDX_FIXED                                     32
 #define X86_PMC_IDX_MAX                                               64
 
 #define MSR_ARCH_PERFMON_PERFCTR0                            0xc1
@@ -48,8 +47,7 @@
        (X86_RAW_EVENT_MASK          |  \
         AMD64_EVENTSEL_EVENT)
 #define AMD64_NUM_COUNTERS                             4
-#define AMD64_NUM_COUNTERS_F15H                                6
-#define AMD64_NUM_COUNTERS_MAX                         AMD64_NUM_COUNTERS_F15H
+#define AMD64_NUM_COUNTERS_CORE                                6
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL          0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK                (0x00 << 8)
@@ -121,16 +119,16 @@ struct x86_pmu_capability {
 
 /* Instr_Retired.Any: */
 #define MSR_ARCH_PERFMON_FIXED_CTR0    0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
+#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS       (INTEL_PMC_IDX_FIXED + 0)
 
 /* CPU_CLK_Unhalted.Core: */
 #define MSR_ARCH_PERFMON_FIXED_CTR1    0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES   (X86_PMC_IDX_FIXED + 1)
+#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)
 
 /* CPU_CLK_Unhalted.Ref: */
 #define MSR_ARCH_PERFMON_FIXED_CTR2    0x30b
-#define X86_PMC_IDX_FIXED_REF_CYCLES   (X86_PMC_IDX_FIXED + 2)
-#define X86_PMC_MSK_FIXED_REF_CYCLES   (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
+#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
+#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
 
 /*
  * We model BTS tracing as another fixed-mode PMC.
@@ -139,7 +137,7 @@ struct x86_pmu_capability {
  * values are used by actual fixed events and higher values are used
  * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
  */
-#define X86_PMC_IDX_FIXED_BTS                          (X86_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_FIXED_BTS                                (INTEL_PMC_IDX_FIXED + 16)
 
 /*
  * IBS cpuid feature detection
@@ -234,6 +232,7 @@ struct perf_guest_switch_msr {
 
 extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
 extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
+extern void perf_check_microcode(void);
 #else
 static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
 {
@@ -247,6 +246,7 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 }
 
 static inline void perf_events_lapic_init(void)        { }
+static inline void perf_check_microcode(void) { }
 #endif
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
index f824cfb..4cc9f2b 100644 (file)
@@ -47,16 +47,26 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
  * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
  * operations.
  *
- * Without THP if the mmap_sem is hold for reading, the
- * pmd can only transition from null to not null while pmd_read_atomic runs.
- * So there's no need of literally reading it atomically.
+ * Without THP, if the mmap_sem is held for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic runs. So
+ * we can always return atomic pmd values with this function.
  *
+ * With THP, if the mmap_sem is held for reading, the pmd can become
- * THP or null or point to a pte (and in turn become "stable") at any
- * time under pmd_read_atomic, so it's mandatory to read it atomically
- * with cmpxchg8b.
+ * trans_huge or none or point to a pte (and in turn become "stable")
+ * at any time under pmd_read_atomic. We could read it really
+ * atomically here with an atomic64_read for the THP enabled case (and
+ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
+ * only return an atomic pmdval if the low part of the pmdval is later
+ * found stable (i.e. pointing to a pte). And we're returning a none
+ * pmdval if the low part of the pmd is none. In some cases the high
+ * and low part of the pmdval returned may not be consistent if THP is
+ * enabled (the low part may point to a previously mapped hugepage,
+ * while the high part may point to a more recently mapped hugepage),
+ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
+ * of the pmd to be read atomically to decide if the pmd is unstable
+ * or not, with the only exception of when the low part of the pmd is
+ * zero in which case we return a none pmd.
  */
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 {
        pmdval_t ret;
@@ -74,12 +84,6 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 
        return (pmd_t) { ret };
 }
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-{
-       return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
index f483945..2ffa95d 100644 (file)
@@ -169,11 +169,6 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)   per_cpu(x86_cpu_to_apicid, cpu)
 
-/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-static inline int num_booting_cpus(void)
-{
-       return cpumask_weight(cpu_callout_mask);
-}
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)     wbinvd()
 static inline int wbinvd_on_all_cpus(void)
index 8e796fb..d8def8b 100644 (file)
@@ -17,6 +17,8 @@
 
 /* Handles exceptions in both to and from, but doesn't do access_ok */
 __must_check unsigned long
+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
+__must_check unsigned long
 copy_user_generic_string(void *to, const void *from, unsigned len);
 __must_check unsigned long
 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
@@ -26,9 +28,16 @@ copy_user_generic(void *to, const void *from, unsigned len)
 {
        unsigned ret;
 
-       alternative_call(copy_user_generic_unrolled,
+       /*
+        * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
+        * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
+        * Otherwise, use copy_user_generic_unrolled.
+        */
+       alternative_call_2(copy_user_generic_unrolled,
                         copy_user_generic_string,
                         X86_FEATURE_REP_GOOD,
+                        copy_user_enhanced_fast_string,
+                        X86_FEATURE_ERMS,
                         ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                                     "=d" (len)),
                         "1" (to), "2" (from), "3" (len)
index 1e9bed1..f3971bb 100644 (file)
@@ -48,7 +48,7 @@ struct arch_uprobe_task {
 #endif
 };
 
-extern int  arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
+extern int  arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
 extern int  arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern int  arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
index 8afb693..b2297e5 100644 (file)
@@ -422,12 +422,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
                return 0;
        }
 
-       if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+       if (intsrc->source_irq == 0) {
                if (acpi_skip_timer_override) {
-                       printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+                       printk(PREFIX "BIOS IRQ0 override ignored.\n");
                        return 0;
                }
-               if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+
+               if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
+                       && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
                        intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
                        printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
                }
@@ -1334,17 +1336,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 }
 
 /*
- * Force ignoring BIOS IRQ0 pin2 override
+ * Force ignoring BIOS IRQ0 override
  */
 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 {
-       /*
-        * The ati_ixp4x0_rev() early PCI quirk should have set
-        * the acpi_skip_timer_override flag already:
-        */
        if (!acpi_skip_timer_override) {
-               WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
-               pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
+               pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
                        d->ident);
                acpi_skip_timer_override = 1;
        }
@@ -1438,7 +1435,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
         * is enabled.  This input is incorrectly designated the
         * ISA IRQ 0 via an interrupt source override even though
         * it is wired to the output of the master 8259A and INTIN0
-        * is not connected at all.  Force ignoring BIOS IRQ0 pin2
+        * is not connected at all.  Force ignoring BIOS IRQ0
         * override in that case.
         */
        {
@@ -1473,6 +1470,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
                     },
         },
+       {
+        .callback = dmi_ignore_irq0_timer_override,
+        .ident = "FUJITSU SIEMENS",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+                    },
+        },
        {}
 };
 
index 1729d72..931280f 100644 (file)
@@ -669,7 +669,7 @@ static int __kprobes stop_machine_text_poke(void *data)
        struct text_poke_param *p;
        int i;
 
-       if (atomic_dec_and_test(&stop_machine_first)) {
+       if (atomic_xchg(&stop_machine_first, 0)) {
                for (i = 0; i < tpp->nparams; i++) {
                        p = &tpp->params[i];
                        text_poke(p->addr, p->opcode, p->len);
index 6ab6aa2..bac4c38 100644 (file)
@@ -32,7 +32,9 @@ obj-$(CONFIG_PERF_EVENTS)             += perf_event.o
 
 ifdef CONFIG_PERF_EVENTS
 obj-$(CONFIG_CPU_SUP_AMD)              += perf_event_amd.o
-obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_p6.o perf_event_p4.o
+obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_uncore.o
 endif
 
 obj-$(CONFIG_X86_MCE)                  += mcheck/
index 146bb62..9d92e19 100644 (file)
 
 #include "cpu.h"
 
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       u32 gprs[8] = { 0 };
+       int err;
+
+       WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+       gprs[1] = msr;
+       gprs[7] = 0x9c5a203a;
+
+       err = rdmsr_safe_regs(gprs);
+
+       *p = gprs[0] | ((u64)gprs[2] << 32);
+
+       return err;
+}
+
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       u32 gprs[8] = { 0 };
+
+       WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+       gprs[0] = (u32)val;
+       gprs[1] = msr;
+       gprs[2] = val >> 32;
+       gprs[7] = 0x9c5a203a;
+
+       return wrmsr_safe_regs(gprs);
+}
+
 #ifdef CONFIG_X86_32
 /*
  *     B step AMD K6 before B 9730xxxx have hardware bugs that can cause
@@ -586,9 +619,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {
                u64 val;
 
-               if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+               if (!rdmsrl_safe(0xc0011005, &val)) {
                        val |= 1ULL << 54;
-                       wrmsrl_amd_safe(0xc0011005, val);
+                       wrmsrl_safe(0xc0011005, val);
                        rdmsrl(0xc0011005, val);
                        if (val & (1ULL << 54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
@@ -679,7 +712,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
                if (err == 0) {
                        mask |= (1 << 10);
-                       checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+                       wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
                }
        }
 
index 6b9333b..5bbc082 100644 (file)
@@ -947,7 +947,7 @@ static void __cpuinit __print_cpu_msr(void)
                index_max = msr_range_array[i].max;
 
                for (index = index_min; index < index_max; index++) {
-                       if (rdmsrl_amd_safe(index, &val))
+                       if (rdmsrl_safe(index, &val))
                                continue;
                        printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
                }
index dfea390..c7b3fe2 100644 (file)
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
 #
 # Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
 #
@@ -11,22 +11,35 @@ open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
 print OUT "#include <asm/cpufeature.h>\n\n";
 print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
 
+%features = ();
+$err = 0;
+
 while (defined($line = <IN>)) {
        if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
                $macro = $1;
-               $feature = $2;
+               $feature = "\L$2";
                $tail = $3;
                if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
-                       $feature = $1;
+                       $feature = "\L$1";
                }
 
-               if ($feature ne '') {
-                       printf OUT "\t%-32s = \"%s\",\n",
-                               "[$macro]", "\L$feature";
+               next if ($feature eq '');
+
+               if ($features{$feature}++) {
+                       print STDERR "$in: duplicate feature name: $feature\n";
+                       $err++;
                }
+               printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
        }
 }
 print OUT "};\n";
 
 close(IN);
 close(OUT);
+
+if ($err) {
+       unlink($out);
+       exit(1);
+}
+
+exit(0);
index bdda2e6..35ffda5 100644 (file)
@@ -258,11 +258,11 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
 
                /* Compute the maximum size with which we can make a range: */
                if (range_startk)
-                       max_align = ffs(range_startk) - 1;
+                       max_align = __ffs(range_startk);
                else
-                       max_align = 32;
+                       max_align = BITS_PER_LONG - 1;
 
-               align = fls(range_sizek) - 1;
+               align = __fls(range_sizek);
                if (align > max_align)
                        align = max_align;
 
index 75772ae..e9fe907 100644 (file)
@@ -361,11 +361,7 @@ static void __init print_mtrr_state(void)
        }
        pr_debug("MTRR variable ranges %sabled:\n",
                 mtrr_state.enabled & 2 ? "en" : "dis");
-       if (size_or_mask & 0xffffffffUL)
-               high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
-       else
-               high_width = ffs(size_or_mask>>32) + 32 - 1;
-       high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
+       high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
 
        for (i = 0; i < num_var_ranges; ++i) {
                if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
index c4706cf..29557aa 100644 (file)
 
 #include "perf_event.h"
 
-#if 0
-#undef wrmsrl
-#define wrmsrl(msr, val)                                       \
-do {                                                           \
-       trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
-                       (unsigned long)(val));                  \
-       native_write_msr((msr), (u32)((u64)(val)),              \
-                       (u32)((u64)(val) >> 32));               \
-} while (0)
-#endif
-
 struct x86_pmu x86_pmu __read_mostly;
 
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
@@ -74,7 +63,7 @@ u64 x86_perf_event_update(struct perf_event *event)
        int idx = hwc->idx;
        s64 delta;
 
-       if (idx == X86_PMC_IDX_FIXED_BTS)
+       if (idx == INTEL_PMC_IDX_FIXED_BTS)
                return 0;
 
        /*
@@ -86,7 +75,7 @@ u64 x86_perf_event_update(struct perf_event *event)
         */
 again:
        prev_raw_count = local64_read(&hwc->prev_count);
-       rdmsrl(hwc->event_base, new_raw_count);
+       rdpmcl(hwc->event_base_rdpmc, new_raw_count);
 
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
@@ -189,7 +178,7 @@ static void release_pmc_hardware(void) {}
 
 static bool check_hw_exists(void)
 {
-       u64 val, val_new = 0;
+       u64 val, val_new = ~0;
        int i, reg, ret = 0;
 
        /*
@@ -222,8 +211,9 @@ static bool check_hw_exists(void)
         * that don't trap on the MSR access and always return 0s.
         */
        val = 0xabcdUL;
-       ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
-       ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
+       reg = x86_pmu_event_addr(0);
+       ret = wrmsrl_safe(reg, val);
+       ret |= rdmsrl_safe(reg, &val_new);
        if (ret || val != val_new)
                goto msr_fail;
 
@@ -240,6 +230,7 @@ bios_fail:
 
 msr_fail:
        printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
+       printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
 
        return false;
 }
@@ -388,7 +379,7 @@ int x86_pmu_hw_config(struct perf_event *event)
                int precise = 0;
 
                /* Support for constant skid */
-               if (x86_pmu.pebs_active) {
+               if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
                        precise++;
 
                        /* Support for IP fixup */
@@ -637,8 +628,8 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
        c = sched->constraints[sched->state.event];
 
        /* Prefer fixed purpose counters */
-       if (x86_pmu.num_counters_fixed) {
-               idx = X86_PMC_IDX_FIXED;
+       if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
+               idx = INTEL_PMC_IDX_FIXED;
                for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
                        if (!__test_and_set_bit(idx, sched->state.used))
                                goto done;
@@ -646,7 +637,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
        }
        /* Grab the first unused counter starting with idx */
        idx = sched->state.counter;
-       for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+       for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
                if (!__test_and_set_bit(idx, sched->state.used))
                        goto done;
        }
@@ -704,8 +695,8 @@ static bool perf_sched_next_event(struct perf_sched *sched)
 /*
  * Assign a counter for each event.
  */
-static int perf_assign_events(struct event_constraint **constraints, int n,
-                             int wmin, int wmax, int *assign)
+int perf_assign_events(struct event_constraint **constraints, int n,
+                       int wmin, int wmax, int *assign)
 {
        struct perf_sched sched;
 
@@ -824,15 +815,17 @@ static inline void x86_assign_hw_event(struct perf_event *event,
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];
 
-       if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
+       if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
                hwc->event_base = 0;
-       } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
+       } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-               hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
+               hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
+               hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
        } else {
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
+               hwc->event_base_rdpmc = hwc->idx;
        }
 }
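The new event_base_rdpmc value is the index the RDPMC instruction expects: general-purpose counters use their plain index, and fixed-function counters are selected by setting bit 30. A minimal sketch of reading counters with that encoding from user space (assuming rdpmc is permitted, see the attr_rdpmc knob elsewhere in this file):

/*
 * Sketch only: RDPMC takes the counter index in ECX and returns the
 * value in EDX:EAX; bit 30 selects the fixed-function counters, which
 * is exactly the encoding stored in event_base_rdpmc above.
 */
#include <stdint.h>

static inline uint64_t rdpmc(uint32_t counter)
{
        uint32_t lo, hi;

        asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
        return ((uint64_t)hi << 32) | lo;
}

static inline uint64_t read_fixed_ctr0(void)   { return rdpmc(0 | (1u << 30)); }
static inline uint64_t read_general_ctr0(void) { return rdpmc(0); }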
 
@@ -930,7 +923,7 @@ int x86_perf_event_set_period(struct perf_event *event)
        s64 period = hwc->sample_period;
        int ret = 0, idx = hwc->idx;
 
-       if (idx == X86_PMC_IDX_FIXED_BTS)
+       if (idx == INTEL_PMC_IDX_FIXED_BTS)
                return 0;
 
        /*
@@ -1316,7 +1309,6 @@ static struct attribute_group x86_pmu_format_group = {
 static int __init init_hw_perf_events(void)
 {
        struct x86_pmu_quirk *quirk;
-       struct event_constraint *c;
        int err;
 
        pr_info("Performance Events: ");
@@ -1347,21 +1339,8 @@ static int __init init_hw_perf_events(void)
        for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
                quirk->func();
 
-       if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
-               WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-                    x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
-               x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
-       }
-       x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-
-       if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
-               WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-                    x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
-               x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
-       }
-
-       x86_pmu.intel_ctrl |=
-               ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+       if (!x86_pmu.intel_ctrl)
+               x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
 
        perf_events_lapic_init();
        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1370,22 +1349,6 @@ static int __init init_hw_perf_events(void)
                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
                                   0, x86_pmu.num_counters, 0);
 
-       if (x86_pmu.event_constraints) {
-               /*
-                * event on fixed counter2 (REF_CYCLES) only works on this
-                * counter, so do not extend mask to generic counters
-                */
-               for_each_event_constraint(c, x86_pmu.event_constraints) {
-                       if (c->cmask != X86_RAW_EVENT_MASK
-                           || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
-                               continue;
-                       }
-
-                       c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-                       c->weight += x86_pmu.num_counters;
-               }
-       }
-
        x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
        x86_pmu_format_group.attrs = x86_pmu.format_attrs;
 
@@ -1620,8 +1583,8 @@ static int x86_pmu_event_idx(struct perf_event *event)
        if (!x86_pmu.attr_rdpmc)
                return 0;
 
-       if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
-               idx -= X86_PMC_IDX_FIXED;
+       if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
+               idx -= INTEL_PMC_IDX_FIXED;
                idx |= 1 << 30;
        }
 
@@ -1649,7 +1612,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
 {
-       unsigned long val = simple_strtoul(buf, NULL, 0);
+       unsigned long val;
+       ssize_t ret;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
 
        if (!!val != !!x86_pmu.attr_rdpmc) {
                x86_pmu.attr_rdpmc = !!val;
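kstrtoul() differs from simple_strtoul() in that parse errors are reported instead of ignored, which is what lets the store handler above bounce bad input back to the writer. A userspace sketch of those stricter semantics (parse_ulong_strict() is a hypothetical stand-in for kstrtoul()):

/*
 * Userspace sketch of the stricter semantics kstrtoul() provides over
 * simple_strtoul(): trailing garbage and overflow are reported as
 * errors instead of being silently ignored.
 */
#include <errno.h>
#include <stdlib.h>

static int parse_ulong_strict(const char *s, unsigned int base, unsigned long *res)
{
        char *end;

        errno = 0;
        *res = strtoul(s, &end, base);
        if (errno == ERANGE)
                return -ERANGE;                 /* overflow */
        if (end == s || (*end != '\0' && *end != '\n'))
                return -EINVAL;                 /* no digits or trailing junk */
        return 0;
}

/* parse_ulong_strict("1\n", 0, &v)   -> 0, v == 1
 * parse_ulong_strict("1junk", 0, &v) -> -EINVAL
 * simple_strtoul("1junk", ...) would have silently returned 1. */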
@@ -1682,13 +1650,20 @@ static void x86_pmu_flush_branch_stack(void)
                x86_pmu.flush_branch_stack();
 }
 
+void perf_check_microcode(void)
+{
+       if (x86_pmu.check_microcode)
+               x86_pmu.check_microcode();
+}
+EXPORT_SYMBOL_GPL(perf_check_microcode);
+
 static struct pmu pmu = {
        .pmu_enable             = x86_pmu_enable,
        .pmu_disable            = x86_pmu_disable,
 
-       .attr_groups    = x86_pmu_attr_groups,
+       .attr_groups            = x86_pmu_attr_groups,
 
-       .event_init     = x86_pmu_event_init,
+       .event_init             = x86_pmu_event_init,
 
        .add                    = x86_pmu_add,
        .del                    = x86_pmu_del,
@@ -1696,11 +1671,11 @@ static struct pmu pmu = {
        .stop                   = x86_pmu_stop,
        .read                   = x86_pmu_read,
 
-       .start_txn      = x86_pmu_start_txn,
-       .cancel_txn     = x86_pmu_cancel_txn,
-       .commit_txn     = x86_pmu_commit_txn,
+       .start_txn              = x86_pmu_start_txn,
+       .cancel_txn             = x86_pmu_cancel_txn,
+       .commit_txn             = x86_pmu_commit_txn,
 
-       .event_idx      = x86_pmu_event_idx,
+       .event_idx              = x86_pmu_event_idx,
        .flush_branch_stack     = x86_pmu_flush_branch_stack,
 };
 
@@ -1863,7 +1838,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
                else
                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;
        } else {
-               if (user_mode(regs))
+               if (!kernel_ip(regs->ip))
                        misc |= PERF_RECORD_MISC_USER;
                else
                        misc |= PERF_RECORD_MISC_KERNEL;
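The switch from user_mode(regs) to kernel_ip(regs->ip) classifies the sample by instruction-pointer range rather than by the saved segment state, so it also works when the regs were reconstructed from a hardware sample rather than a real exception frame. On x86-64 the existing kernel_ip() helper reduces to a sign test; a sketch of that check:

/*
 * Sketch of the 64-bit kernel_ip() test used above: kernel text and
 * data live in the upper half of the address space, so an IP with the
 * top bit set is a kernel address.
 */
static inline int ip_is_kernel(unsigned long ip)
{
        return (long)ip < 0;
}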
index 7241e2f..a15df4b 100644 (file)
 
 #include <linux/perf_event.h>
 
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val)                                               \
+do {                                                                   \
+       unsigned int _msr = (msr);                                      \
+       u64 _val = (val);                                               \
+       trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),         \
+                       (unsigned long long)(_val));                    \
+       native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));       \
+} while (0)
+#endif
+
 /*
  *          |   NHM/WSM    |      SNB     |
  * register -------------------------------
@@ -57,7 +69,7 @@ struct amd_nb {
 };
 
 /* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS                4
+#define MAX_PEBS_EVENTS                8
 
 /*
  * A debug store configuration.
@@ -349,6 +361,8 @@ struct x86_pmu {
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);
+
+       void            (*check_microcode)(void);
        void            (*flush_branch_stack)(void);
 
        /*
@@ -360,12 +374,16 @@ struct x86_pmu {
        /*
         * Intel DebugStore bits
         */
-       int             bts, pebs;
-       int             bts_active, pebs_active;
+       int             bts             :1,
+                       bts_active      :1,
+                       pebs            :1,
+                       pebs_active     :1,
+                       pebs_broken     :1;
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
+       int             max_pebs_events;
 
        /*
         * Intel LBR
@@ -468,6 +486,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 
 void x86_pmu_enable_all(int added);
 
+int perf_assign_events(struct event_constraint **constraints, int n,
+                       int wmin, int wmax, int *assign);
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
 
 void x86_pmu_stop(struct perf_event *event, int flags);
index 11a4eb9..4528ae7 100644 (file)
@@ -366,7 +366,7 @@ static void amd_pmu_cpu_starting(int cpu)
 
        cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
 
-       if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
+       if (boot_cpu_data.x86_max_cores < 2)
                return;
 
        nb_id = amd_get_nb_id(cpu);
@@ -422,35 +422,6 @@ static struct attribute *amd_format_attr[] = {
        NULL,
 };
 
-static __initconst const struct x86_pmu amd_pmu = {
-       .name                   = "AMD",
-       .handle_irq             = x86_pmu_handle_irq,
-       .disable_all            = x86_pmu_disable_all,
-       .enable_all             = x86_pmu_enable_all,
-       .enable                 = x86_pmu_enable_event,
-       .disable                = x86_pmu_disable_event,
-       .hw_config              = amd_pmu_hw_config,
-       .schedule_events        = x86_schedule_events,
-       .eventsel               = MSR_K7_EVNTSEL0,
-       .perfctr                = MSR_K7_PERFCTR0,
-       .event_map              = amd_pmu_event_map,
-       .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
-       .num_counters           = AMD64_NUM_COUNTERS,
-       .cntval_bits            = 48,
-       .cntval_mask            = (1ULL << 48) - 1,
-       .apic                   = 1,
-       /* use highest bit to detect overflow */
-       .max_period             = (1ULL << 47) - 1,
-       .get_event_constraints  = amd_get_event_constraints,
-       .put_event_constraints  = amd_put_event_constraints,
-
-       .format_attrs           = amd_format_attr,
-
-       .cpu_prepare            = amd_pmu_cpu_prepare,
-       .cpu_starting           = amd_pmu_cpu_starting,
-       .cpu_dead               = amd_pmu_cpu_dead,
-};
-
 /* AMD Family 15h */
 
 #define AMD_EVENT_TYPE_MASK    0x000000F0ULL
@@ -597,8 +568,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
        }
 }
 
-static __initconst const struct x86_pmu amd_pmu_f15h = {
-       .name                   = "AMD Family 15h",
+static __initconst const struct x86_pmu amd_pmu = {
+       .name                   = "AMD",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
@@ -606,50 +577,68 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
-       .eventsel               = MSR_F15H_PERF_CTL,
-       .perfctr                = MSR_F15H_PERF_CTR,
+       .eventsel               = MSR_K7_EVNTSEL0,
+       .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
-       .num_counters           = AMD64_NUM_COUNTERS_F15H,
+       .num_counters           = AMD64_NUM_COUNTERS,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
-       .get_event_constraints  = amd_get_event_constraints_f15h,
-       /* northbridge counters not yet implemented: */
-#if 0
+       .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,
 
+       .format_attrs           = amd_format_attr,
+
        .cpu_prepare            = amd_pmu_cpu_prepare,
-       .cpu_dead               = amd_pmu_cpu_dead,
-#endif
        .cpu_starting           = amd_pmu_cpu_starting,
-       .format_attrs           = amd_format_attr,
+       .cpu_dead               = amd_pmu_cpu_dead,
 };
 
+static int setup_event_constraints(void)
+{
+       if (boot_cpu_data.x86 >= 0x15)
+               x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
+       return 0;
+}
+
+static int setup_perfctr_core(void)
+{
+       if (!cpu_has_perfctr_core) {
+               WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h,
+                    KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
+               return -ENODEV;
+       }
+
+       WARN(x86_pmu.get_event_constraints == amd_get_event_constraints,
+            KERN_ERR "hw perf events core counters need constraints handler!");
+
+       /*
+        * If the core performance counter extensions exist, we must use
+        * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
+        * x86_pmu_addr_offset().
+        */
+       x86_pmu.eventsel        = MSR_F15H_PERF_CTL;
+       x86_pmu.perfctr         = MSR_F15H_PERF_CTR;
+       x86_pmu.num_counters    = AMD64_NUM_COUNTERS_CORE;
+
+       printk(KERN_INFO "perf: AMD core performance counters detected\n");
+
+       return 0;
+}
+
 __init int amd_pmu_init(void)
 {
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;
 
-       /*
-        * If core performance counter extensions exists, it must be
-        * family 15h, otherwise fail. See x86_pmu_addr_offset().
-        */
-       switch (boot_cpu_data.x86) {
-       case 0x15:
-               if (!cpu_has_perfctr_core)
-                       return -ENODEV;
-               x86_pmu = amd_pmu_f15h;
-               break;
-       default:
-               if (cpu_has_perfctr_core)
-                       return -ENODEV;
-               x86_pmu = amd_pmu;
-               break;
-       }
+       x86_pmu = amd_pmu;
+
+       setup_event_constraints();
+       setup_perfctr_core();
 
        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
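The MSR bases switched by setup_perfctr_core() follow two different layouts, which is what x86_pmu_addr_offset() has to account for. A sketch of the addressing difference, with MSR numbers as given in the kernel's msr-index.h (treat the constants as assumptions here):

/*
 * Legacy K7-style counters: four consecutive event-select MSRs followed
 * by four counter MSRs.  Fam15h core perfctr extension: select/counter
 * pairs interleaved, so the per-index offset is 2.
 */
#define MSR_K7_EVNTSEL0         0xc0010000
#define MSR_K7_PERFCTR0         0xc0010004
#define MSR_F15H_PERF_CTL       0xc0010200
#define MSR_F15H_PERF_CTR       0xc0010201

static unsigned int legacy_eventsel(int idx) { return MSR_K7_EVNTSEL0 + idx; }
static unsigned int legacy_perfctr(int idx)  { return MSR_K7_PERFCTR0 + idx; }

static unsigned int core_eventsel(int idx)   { return MSR_F15H_PERF_CTL + 2 * idx; }
static unsigned int core_perfctr(int idx)    { return MSR_F15H_PERF_CTR + 2 * idx; }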
index 5073bf1..7a8b9d0 100644 (file)
  */
 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
 {
-  [PERF_COUNT_HW_CPU_CYCLES]           = 0x003c,
-  [PERF_COUNT_HW_INSTRUCTIONS]         = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]     = 0x4f2e,
-  [PERF_COUNT_HW_CACHE_MISSES]         = 0x412e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]  = 0x00c4,
-  [PERF_COUNT_HW_BRANCH_MISSES]                = 0x00c5,
-  [PERF_COUNT_HW_BUS_CYCLES]           = 0x013c,
-  [PERF_COUNT_HW_REF_CPU_CYCLES]       = 0x0300, /* pseudo-encoding */
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
+       [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
+       [PERF_COUNT_HW_REF_CPU_CYCLES]          = 0x0300, /* pseudo-encoding */
 };
 
 static struct event_constraint intel_core_event_constraints[] __read_mostly =
@@ -749,7 +749,7 @@ static void intel_pmu_disable_all(void)
 
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
-       if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+       if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
 
        intel_pmu_pebs_disable_all();
@@ -765,9 +765,9 @@ static void intel_pmu_enable_all(int added)
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
                        x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
-       if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+       if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                struct perf_event *event =
-                       cpuc->events[X86_PMC_IDX_FIXED_BTS];
+                       cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
 
                if (WARN_ON_ONCE(!event))
                        return;
@@ -873,7 +873,7 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-       int idx = hwc->idx - X86_PMC_IDX_FIXED;
+       int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
 
        mask = 0xfULL << (idx * 4);
@@ -888,7 +888,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+       if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
                intel_pmu_drain_bts_buffer();
                return;
@@ -917,7 +917,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 
 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-       int idx = hwc->idx - X86_PMC_IDX_FIXED;
+       int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
 
        /*
@@ -951,7 +951,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+       if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                if (!__this_cpu_read(cpu_hw_events.enabled))
                        return;
 
@@ -1005,11 +1005,11 @@ static void intel_pmu_reset(void)
        pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
-               checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
+               wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
+               wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
-               checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+               wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
 
        if (ds)
                ds->bts_index = ds->bts_buffer_base;
@@ -1714,11 +1714,56 @@ static __init void intel_clovertown_quirk(void)
        x86_pmu.pebs_constraints = NULL;
 }
 
+static int intel_snb_pebs_broken(int cpu)
+{
+       u32 rev = UINT_MAX; /* default to broken for unknown models */
+
+       switch (cpu_data(cpu).x86_model) {
+       case 42: /* SNB */
+               rev = 0x28;
+               break;
+
+       case 45: /* SNB-EP */
+               switch (cpu_data(cpu).x86_mask) {
+               case 6: rev = 0x618; break;
+               case 7: rev = 0x70c; break;
+               }
+       }
+
+       return (cpu_data(cpu).microcode < rev);
+}
+
+static void intel_snb_check_microcode(void)
+{
+       int pebs_broken = 0;
+       int cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               if ((pebs_broken = intel_snb_pebs_broken(cpu)))
+                       break;
+       }
+       put_online_cpus();
+
+       if (pebs_broken == x86_pmu.pebs_broken)
+               return;
+
+       /*
+        * Serialized by the microcode lock.
+        */
+       if (x86_pmu.pebs_broken) {
+               pr_info("PEBS enabled due to microcode update\n");
+               x86_pmu.pebs_broken = 0;
+       } else {
+               pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
+               x86_pmu.pebs_broken = 1;
+       }
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
-       pr_warn("PEBS disabled due to CPU errata\n");
-       x86_pmu.pebs = 0;
-       x86_pmu.pebs_constraints = NULL;
+       x86_pmu.check_microcode = intel_snb_check_microcode;
+       intel_snb_check_microcode();
 }
 
 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
@@ -1767,6 +1812,7 @@ __init int intel_pmu_init(void)
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
+       struct event_constraint *c;
        unsigned int unused;
        int version;
 
@@ -1802,6 +1848,8 @@ __init int intel_pmu_init(void)
        x86_pmu.events_maskl            = ebx.full;
        x86_pmu.events_mask_len         = eax.split.mask_length;
 
+       x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events:
@@ -1953,5 +2001,37 @@ __init int intel_pmu_init(void)
                }
        }
 
+       if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
+               WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+                    x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+               x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
+       }
+       x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+
+       if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+               WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+                    x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+               x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+       }
+
+       x86_pmu.intel_ctrl |=
+               ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+
+       if (x86_pmu.event_constraints) {
+               /*
+                * event on fixed counter2 (REF_CYCLES) only works on this
+                * counter, so do not extend mask to generic counters
+                */
+               for_each_event_constraint(c, x86_pmu.event_constraints) {
+                       if (c->cmask != X86_RAW_EVENT_MASK
+                           || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+                               continue;
+                       }
+
+                       c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+                       c->weight += x86_pmu.num_counters;
+               }
+       }
+
        return 0;
 }
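The intel_ctrl mask assembled above (now built here rather than in the generic code) places the generic counters in the low bits and the fixed counters starting at bit INTEL_PMC_IDX_FIXED (32). A small standalone sketch of the resulting global-enable mask, with hypothetical counter counts:

#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED     32

int main(void)
{
        int num_counters = 4, num_counters_fixed = 3;   /* hypothetical CPUID values */
        uint64_t intel_ctrl;

        intel_ctrl  = (1ULL << num_counters) - 1;
        intel_ctrl |= ((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;

        /* 4 generic + 3 fixed counters -> 0x70000000f */
        printf("global ctrl enable mask: %#llx\n", (unsigned long long)intel_ctrl);
        return 0;
}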
index 35e2192..629ae0b 100644 (file)
@@ -248,7 +248,7 @@ void reserve_ds_buffers(void)
  */
 
 struct event_constraint bts_constraint =
-       EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+       EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
 
 void intel_pmu_enable_bts(u64 config)
 {
@@ -295,7 +295,7 @@ int intel_pmu_drain_bts_buffer(void)
                u64     to;
                u64     flags;
        };
-       struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+       struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
        struct bts_record *at, *top;
        struct perf_output_handle handle;
        struct perf_event_header header;
@@ -620,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
         * Should not happen, we program the threshold at 1 and do not
         * set a reset value.
         */
-       WARN_ON_ONCE(n > 1);
+       WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
        at += n - 1;
 
        __intel_pmu_pebs_event(event, iregs, at);
@@ -651,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
         * Should not happen, we program the threshold at 1 and do not
         * set a reset value.
         */
-       WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+       WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
 
        for ( ; at < top; at++) {
-               for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+               for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
                        event = cpuc->events[bit];
                        if (!test_bit(bit, cpuc->active_mask))
                                continue;
@@ -670,7 +670,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                        break;
                }
 
-               if (!event || bit >= MAX_PEBS_EVENTS)
+               if (!event || bit >= x86_pmu.max_pebs_events)
                        continue;
 
                __intel_pmu_pebs_event(event, iregs, at);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
new file mode 100644 (file)
index 0000000..19faffc
--- /dev/null
@@ -0,0 +1,1850 @@
+#include "perf_event_intel_uncore.h"
+
+static struct intel_uncore_type *empty_uncore[] = { NULL, };
+static struct intel_uncore_type **msr_uncores = empty_uncore;
+static struct intel_uncore_type **pci_uncores = empty_uncore;
+/* pci bus to socket mapping */
+static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
+
+static DEFINE_RAW_SPINLOCK(uncore_box_lock);
+
+/* mask of cpus that collect uncore events */
+static cpumask_t uncore_cpu_mask;
+
+/* constraint for the fixed counter */
+static struct event_constraint constraint_fixed =
+       EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
+static struct event_constraint constraint_empty =
+       EVENT_CONSTRAINT(0, 0, 0);
+
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
+DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
+DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
+DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
+DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
+DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31");
+
+/* Sandy Bridge-EP uncore support */
+static struct intel_uncore_type snbep_uncore_cbox;
+static struct intel_uncore_type snbep_uncore_pcu;
+
+static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       int box_ctl = uncore_pci_box_ctl(box);
+       u32 config;
+
+       pci_read_config_dword(pdev, box_ctl, &config);
+       config |= SNBEP_PMON_BOX_CTL_FRZ;
+       pci_write_config_dword(pdev, box_ctl, config);
+}
+
+static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       int box_ctl = uncore_pci_box_ctl(box);
+       u32 config;
+
+       pci_read_config_dword(pdev, box_ctl, &config);
+       config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+       pci_write_config_dword(pdev, box_ctl, config);
+}
+
+static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+
+       pci_write_config_dword(pdev, hwc->config_base, hwc->config |
+                               SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+
+       pci_write_config_dword(pdev, hwc->config_base, hwc->config);
+}
+
+static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+       u64 count;
+
+       pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
+       pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
+       return count;
+}
+
+static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
+                               SNBEP_PMON_BOX_CTL_INT);
+}
+
+static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+       u64 config;
+       unsigned msr;
+
+       msr = uncore_msr_box_ctl(box);
+       if (msr) {
+               rdmsrl(msr, config);
+               config |= SNBEP_PMON_BOX_CTL_FRZ;
+               wrmsrl(msr, config);
+               return;
+       }
+}
+
+static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+       u64 config;
+       unsigned msr;
+
+       msr = uncore_msr_box_ctl(box);
+       if (msr) {
+               rdmsrl(msr, config);
+               config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+               wrmsrl(msr, config);
+               return;
+       }
+}
+
+static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+       if (reg1->idx != EXTRA_REG_NONE)
+               wrmsrl(reg1->reg, reg1->config);
+
+       wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       wrmsrl(hwc->config_base, hwc->config);
+}
+
+static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u64 count;
+
+       rdmsrl(hwc->event_base, count);
+       return count;
+}
+
+static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+       unsigned msr = uncore_msr_box_ctl(box);
+       if (msr)
+               wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
+}
+
+static struct event_constraint *
+snbep_uncore_get_constraint(struct intel_uncore_box *box,
+                           struct perf_event *event)
+{
+       struct intel_uncore_extra_reg *er;
+       struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+       unsigned long flags;
+       bool ok = false;
+
+       if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc))
+               return NULL;
+
+       er = &box->shared_regs[reg1->idx];
+       raw_spin_lock_irqsave(&er->lock, flags);
+       if (!atomic_read(&er->ref) || er->config1 == reg1->config) {
+               atomic_inc(&er->ref);
+               er->config1 = reg1->config;
+               ok = true;
+       }
+       raw_spin_unlock_irqrestore(&er->lock, flags);
+
+       if (ok) {
+               if (box->phys_id >= 0)
+                       reg1->alloc = 1;
+               return NULL;
+       }
+       return &constraint_empty;
+}
+
+static void snbep_uncore_put_constraint(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct intel_uncore_extra_reg *er;
+       struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+
+       if (box->phys_id < 0 || !reg1->alloc)
+               return;
+
+       er = &box->shared_regs[reg1->idx];
+       atomic_dec(&er->ref);
+       reg1->alloc = 0;
+}
+
+static int snbep_uncore_hw_config(struct intel_uncore_box *box,
+                                 struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+       if (box->pmu->type == &snbep_uncore_cbox) {
+               reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
+                       SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+               reg1->config = event->attr.config1 &
+                       SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
+       } else if (box->pmu->type == &snbep_uncore_pcu) {
+               reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
+               reg1->config = event->attr.config1 &
+                       SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
+       } else {
+               return 0;
+       }
+       reg1->idx = 0;
+       return 0;
+}
+
+static struct attribute *snbep_uncore_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh8.attr,
+       NULL,
+};
+
+static struct attribute *snbep_uncore_ubox_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh5.attr,
+       NULL,
+};
+
+static struct attribute *snbep_uncore_cbox_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_tid_en.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh8.attr,
+       &format_attr_filter_tid.attr,
+       &format_attr_filter_nid.attr,
+       &format_attr_filter_state.attr,
+       &format_attr_filter_opc.attr,
+       NULL,
+};
+
+static struct attribute *snbep_uncore_pcu_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_occ_sel.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh5.attr,
+       &format_attr_occ_invert.attr,
+       &format_attr_occ_edge.attr,
+       &format_attr_filter_brand0.attr,
+       &format_attr_filter_brand1.attr,
+       &format_attr_filter_brand2.attr,
+       &format_attr_filter_brand3.attr,
+       NULL,
+};
+
+static struct uncore_event_desc snbep_uncore_imc_events[] = {
+       INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
+       INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
+       INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
+       { /* end: all zeroes */ },
+};
+
+static struct uncore_event_desc snbep_uncore_qpi_events[] = {
+       INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
+       INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
+       INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x02,umask=0x08"),
+       INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x03,umask=0x04"),
+       { /* end: all zeroes */ },
+};
+
+static struct attribute_group snbep_uncore_format_group = {
+       .name = "format",
+       .attrs = snbep_uncore_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_ubox_format_group = {
+       .name = "format",
+       .attrs = snbep_uncore_ubox_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_cbox_format_group = {
+       .name = "format",
+       .attrs = snbep_uncore_cbox_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_pcu_format_group = {
+       .name = "format",
+       .attrs = snbep_uncore_pcu_formats_attr,
+};
+
+static struct intel_uncore_ops snbep_uncore_msr_ops = {
+       .init_box       = snbep_uncore_msr_init_box,
+       .disable_box    = snbep_uncore_msr_disable_box,
+       .enable_box     = snbep_uncore_msr_enable_box,
+       .disable_event  = snbep_uncore_msr_disable_event,
+       .enable_event   = snbep_uncore_msr_enable_event,
+       .read_counter   = snbep_uncore_msr_read_counter,
+       .get_constraint = snbep_uncore_get_constraint,
+       .put_constraint = snbep_uncore_put_constraint,
+       .hw_config      = snbep_uncore_hw_config,
+};
+
+static struct intel_uncore_ops snbep_uncore_pci_ops = {
+       .init_box       = snbep_uncore_pci_init_box,
+       .disable_box    = snbep_uncore_pci_disable_box,
+       .enable_box     = snbep_uncore_pci_enable_box,
+       .disable_event  = snbep_uncore_pci_disable_event,
+       .enable_event   = snbep_uncore_pci_enable_event,
+       .read_counter   = snbep_uncore_pci_read_counter,
+};
+
+static struct event_constraint snbep_uncore_cbox_constraints[] = {
+       UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
+       UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
+       UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
+       UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
+       EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
+       UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
+       EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
+       UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+       EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
+       UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+       EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snbep_uncore_ubox = {
+       .name           = "ubox",
+       .num_counters   = 2,
+       .num_boxes      = 1,
+       .perf_ctr_bits  = 44,
+       .fixed_ctr_bits = 48,
+       .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
+       .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
+       .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
+       .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
+       .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
+       .ops            = &snbep_uncore_msr_ops,
+       .format_group   = &snbep_uncore_ubox_format_group,
+};
+
+static struct intel_uncore_type snbep_uncore_cbox = {
+       .name                   = "cbox",
+       .num_counters           = 4,
+       .num_boxes              = 8,
+       .perf_ctr_bits          = 44,
+       .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
+       .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
+       .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
+       .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
+       .msr_offset             = SNBEP_CBO_MSR_OFFSET,
+       .num_shared_regs        = 1,
+       .constraints            = snbep_uncore_cbox_constraints,
+       .ops                    = &snbep_uncore_msr_ops,
+       .format_group           = &snbep_uncore_cbox_format_group,
+};
+
+static struct intel_uncore_type snbep_uncore_pcu = {
+       .name                   = "pcu",
+       .num_counters           = 4,
+       .num_boxes              = 1,
+       .perf_ctr_bits          = 48,
+       .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
+       .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
+       .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
+       .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
+       .num_shared_regs        = 1,
+       .ops                    = &snbep_uncore_msr_ops,
+       .format_group           = &snbep_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *snbep_msr_uncores[] = {
+       &snbep_uncore_ubox,
+       &snbep_uncore_cbox,
+       &snbep_uncore_pcu,
+       NULL,
+};
+
+#define SNBEP_UNCORE_PCI_COMMON_INIT()                         \
+       .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
+       .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
+       .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
+       .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
+       .ops            = &snbep_uncore_pci_ops,                \
+       .format_group   = &snbep_uncore_format_group
+
+static struct intel_uncore_type snbep_uncore_ha = {
+       .name           = "ha",
+       .num_counters   = 4,
+       .num_boxes      = 1,
+       .perf_ctr_bits  = 48,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_imc = {
+       .name           = "imc",
+       .num_counters   = 4,
+       .num_boxes      = 4,
+       .perf_ctr_bits  = 48,
+       .fixed_ctr_bits = 48,
+       .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+       .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+       .event_descs    = snbep_uncore_imc_events,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_qpi = {
+       .name           = "qpi",
+       .num_counters   = 4,
+       .num_boxes      = 2,
+       .perf_ctr_bits  = 48,
+       .event_descs    = snbep_uncore_qpi_events,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+
+static struct intel_uncore_type snbep_uncore_r2pcie = {
+       .name           = "r2pcie",
+       .num_counters   = 4,
+       .num_boxes      = 1,
+       .perf_ctr_bits  = 44,
+       .constraints    = snbep_uncore_r2pcie_constraints,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_r3qpi = {
+       .name           = "r3qpi",
+       .num_counters   = 3,
+       .num_boxes      = 2,
+       .perf_ctr_bits  = 44,
+       .constraints    = snbep_uncore_r3qpi_constraints,
+       SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type *snbep_pci_uncores[] = {
+       &snbep_uncore_ha,
+       &snbep_uncore_imc,
+       &snbep_uncore_qpi,
+       &snbep_uncore_r2pcie,
+       &snbep_uncore_r3qpi,
+       NULL,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
+       { /* Home Agent */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
+               .driver_data = (unsigned long)&snbep_uncore_ha,
+       },
+       { /* MC Channel 0 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
+               .driver_data = (unsigned long)&snbep_uncore_imc,
+       },
+       { /* MC Channel 1 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
+               .driver_data = (unsigned long)&snbep_uncore_imc,
+       },
+       { /* MC Channel 2 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
+               .driver_data = (unsigned long)&snbep_uncore_imc,
+       },
+       { /* MC Channel 3 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
+               .driver_data = (unsigned long)&snbep_uncore_imc,
+       },
+       { /* QPI Port 0 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
+               .driver_data = (unsigned long)&snbep_uncore_qpi,
+       },
+       { /* QPI Port 1 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
+               .driver_data = (unsigned long)&snbep_uncore_qpi,
+       },
+       { /* P2PCIe */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
+               .driver_data = (unsigned long)&snbep_uncore_r2pcie,
+       },
+       { /* R3QPI Link 0 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
+               .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+       },
+       { /* R3QPI Link 1 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
+               .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+       },
+       { /* end: all zeroes */ }
+};
+
+static struct pci_driver snbep_uncore_pci_driver = {
+       .name           = "snbep_uncore",
+       .id_table       = snbep_uncore_pci_ids,
+};
+
+/*
+ * build pci bus to socket mapping
+ */
+static void snbep_pci2phy_map_init(void)
+{
+       struct pci_dev *ubox_dev = NULL;
+       int i, bus, nodeid;
+       u32 config;
+
+       while (1) {
+               /* find the UBOX device */
+               ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                       PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
+                                       ubox_dev);
+               if (!ubox_dev)
+                       break;
+               bus = ubox_dev->bus->number;
+               /* get the Node ID of the local register */
+               pci_read_config_dword(ubox_dev, 0x40, &config);
+               nodeid = config;
+               /* get the Node ID mapping */
+               pci_read_config_dword(ubox_dev, 0x54, &config);
+               /*
+                * each 3-bit field in the Node ID mapping register maps
+                * to a particular node.
+                */
+               for (i = 0; i < 8; i++) {
+                       if (nodeid == ((config >> (3 * i)) & 0x7)) {
+                               pcibus_to_physid[bus] = i;
+                               break;
+                       }
+               }
+       };
+       return;
+}
+/* end of Sandy Bridge-EP uncore support */
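A worked example of the mapping-register decode above: with the (hypothetical) identity mapping 0x00fac688, where 3-bit field i holds the value i, a local Node ID of 2 stops the loop at i == 2, so the bus is recorded as physical package 2. A standalone sketch of that decode loop:

/* Sketch of the decode loop only; register offsets and layout as in the
 * code above. */
static int decode_physid(unsigned int nodeid, unsigned int mapping)
{
        int i;

        for (i = 0; i < 8; i++)
                if ((nodeid & 0x7) == ((mapping >> (3 * i)) & 0x7))
                        return i;
        return -1;      /* no match */
}
/* decode_physid(2, 0x00fac688) == 2 */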
+
+
+/* Sandy Bridge uncore support */
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+               wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+       else
+               wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+}
+
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       wrmsrl(event->hw.config_base, 0);
+}
+
+static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       u64 count;
+       rdmsrl(event->hw.event_base, count);
+       return count;
+}
+
+static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+       if (box->pmu->pmu_idx == 0) {
+               wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+                       SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+       }
+}
+
+static struct attribute *snb_uncore_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_cmask5.attr,
+       NULL,
+};
+
+static struct attribute_group snb_uncore_format_group = {
+       .name = "format",
+       .attrs = snb_uncore_formats_attr,
+};
+
+static struct intel_uncore_ops snb_uncore_msr_ops = {
+       .init_box       = snb_uncore_msr_init_box,
+       .disable_event  = snb_uncore_msr_disable_event,
+       .enable_event   = snb_uncore_msr_enable_event,
+       .read_counter   = snb_uncore_msr_read_counter,
+};
+
+static struct event_constraint snb_uncore_cbox_constraints[] = {
+       UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
+       UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
+       EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snb_uncore_cbox = {
+       .name           = "cbox",
+       .num_counters   = 2,
+       .num_boxes      = 4,
+       .perf_ctr_bits  = 44,
+       .fixed_ctr_bits = 48,
+       .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
+       .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
+       .fixed_ctr      = SNB_UNC_FIXED_CTR,
+       .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
+       .single_fixed   = 1,
+       .event_mask     = SNB_UNC_RAW_EVENT_MASK,
+       .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
+       .constraints    = snb_uncore_cbox_constraints,
+       .ops            = &snb_uncore_msr_ops,
+       .format_group   = &snb_uncore_format_group,
+};
+
+static struct intel_uncore_type *snb_msr_uncores[] = {
+       &snb_uncore_cbox,
+       NULL,
+};
+/* end of Sandy Bridge uncore support */
+
+/* Nehalem uncore support */
+static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
+               NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+}
+
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+               wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+       else
+               wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
+}
+
+static struct attribute *nhm_uncore_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_cmask8.attr,
+       NULL,
+};
+
+static struct attribute_group nhm_uncore_format_group = {
+       .name = "format",
+       .attrs = nhm_uncore_formats_attr,
+};
+
+static struct uncore_event_desc nhm_uncore_events[] = {
+       INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
+       INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
+       INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
+       INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
+       { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhm_uncore_msr_ops = {
+       .disable_box    = nhm_uncore_msr_disable_box,
+       .enable_box     = nhm_uncore_msr_enable_box,
+       .disable_event  = snb_uncore_msr_disable_event,
+       .enable_event   = nhm_uncore_msr_enable_event,
+       .read_counter   = snb_uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type nhm_uncore = {
+       .name           = "",
+       .num_counters   = 8,
+       .num_boxes      = 1,
+       .perf_ctr_bits  = 48,
+       .fixed_ctr_bits = 48,
+       .event_ctl      = NHM_UNC_PERFEVTSEL0,
+       .perf_ctr       = NHM_UNC_UNCORE_PMC0,
+       .fixed_ctr      = NHM_UNC_FIXED_CTR,
+       .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
+       .event_mask     = NHM_UNC_RAW_EVENT_MASK,
+       .event_descs    = nhm_uncore_events,
+       .ops            = &nhm_uncore_msr_ops,
+       .format_group   = &nhm_uncore_format_group,
+};
+
+static struct intel_uncore_type *nhm_msr_uncores[] = {
+       &nhm_uncore,
+       NULL,
+};
+/* end of Nehalem uncore support */
+
+static void uncore_assign_hw_event(struct intel_uncore_box *box,
+                               struct perf_event *event, int idx)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       hwc->idx = idx;
+       hwc->last_tag = ++box->tags[idx];
+
+       if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
+               hwc->event_base = uncore_fixed_ctr(box);
+               hwc->config_base = uncore_fixed_ctl(box);
+               return;
+       }
+
+       hwc->config_base = uncore_event_ctl(box, hwc->idx);
+       hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
+}
+
+static void uncore_perf_event_update(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       u64 prev_count, new_count, delta;
+       int shift;
+
+       if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+               shift = 64 - uncore_fixed_ctr_bits(box);
+       else
+               shift = 64 - uncore_perf_ctr_bits(box);
+
+       /* the hrtimer might modify the previous event value */
+again:
+       prev_count = local64_read(&event->hw.prev_count);
+       new_count = uncore_read_counter(box, event);
+       if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
+               goto again;
+
+       delta = (new_count << shift) - (prev_count << shift);
+       delta >>= shift;
+
+       local64_add(delta, &event->count);
+}
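uncore_perf_event_update() relies on the usual narrow-counter trick: shifting both samples up by (64 - counter_width) bits before subtracting makes the subtraction wrap exactly like the hardware counter does, so a rollover still yields the correct small delta. A standalone sketch:

#include <stdint.h>
#include <assert.h>

static uint64_t counter_delta(uint64_t prev, uint64_t now, int width)
{
        int shift = 64 - width;
        uint64_t delta = (now << shift) - (prev << shift);

        return delta >> shift;
}

int main(void)
{
        /* 44-bit counter wrapped from near its maximum back to 5 */
        assert(counter_delta((1ULL << 44) - 10, 5, 44) == 15);
        /* no wrap */
        assert(counter_delta(100, 250, 44) == 150);
        return 0;
}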
+
+/*
+ * The overflow interrupt is unavailable for SandyBridge-EP and broken
+ * for SandyBridge, so we use an hrtimer to poll the counters
+ * periodically before they can overflow.
+ */
+static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
+{
+       struct intel_uncore_box *box;
+       unsigned long flags;
+       int bit;
+
+       box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
+       if (!box->n_active || box->cpu != smp_processor_id())
+               return HRTIMER_NORESTART;
+       /*
+        * disable local interrupts to prevent uncore_pmu_event_start/stop
+        * from interrupting the update process
+        */
+       local_irq_save(flags);
+
+       for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
+               uncore_perf_event_update(box, box->events[bit]);
+
+       local_irq_restore(flags);
+
+       hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
+       return HRTIMER_RESTART;
+}
+
+static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
+{
+       __hrtimer_start_range_ns(&box->hrtimer,
+                       ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
+                       HRTIMER_MODE_REL_PINNED, 0);
+}
+
+static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
+{
+       hrtimer_cancel(&box->hrtimer);
+}
+
+static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
+{
+       hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       box->hrtimer.function = uncore_pmu_hrtimer;
+}
+
+struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
+                                         int cpu)
+{
+       struct intel_uncore_box *box;
+       int i, size;
+
+       size = sizeof(*box) + type->num_shared_regs *
+               sizeof(struct intel_uncore_extra_reg);
+
+       box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
+       if (!box)
+               return NULL;
+
+       for (i = 0; i < type->num_shared_regs; i++)
+               raw_spin_lock_init(&box->shared_regs[i].lock);
+
+       uncore_pmu_init_hrtimer(box);
+       atomic_set(&box->refcnt, 1);
+       box->cpu = -1;
+       box->phys_id = -1;
+
+       return box;
+}
+
+static struct intel_uncore_box *
+uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
+{
+       static struct intel_uncore_box *box;
+
+       box = *per_cpu_ptr(pmu->box, cpu);
+       if (box)
+               return box;
+
+       raw_spin_lock(&uncore_box_lock);
+       list_for_each_entry(box, &pmu->box_list, list) {
+               if (box->phys_id == topology_physical_package_id(cpu)) {
+                       atomic_inc(&box->refcnt);
+                       *per_cpu_ptr(pmu->box, cpu) = box;
+                       break;
+               }
+       }
+       raw_spin_unlock(&uncore_box_lock);
+
+       return *per_cpu_ptr(pmu->box, cpu);
+}
+
+static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+       return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+       /*
+        * perf core schedules events on the basis of cpu; uncore events are
+        * collected by one of the cpus inside a physical package.
+        */
+       return uncore_pmu_to_box(uncore_event_to_pmu(event),
+                                smp_processor_id());
+}
+
+static int uncore_collect_events(struct intel_uncore_box *box,
+                               struct perf_event *leader, bool dogrp)
+{
+       struct perf_event *event;
+       int n, max_count;
+
+       max_count = box->pmu->type->num_counters;
+       if (box->pmu->type->fixed_ctl)
+               max_count++;
+
+       if (box->n_events >= max_count)
+               return -EINVAL;
+
+       n = box->n_events;
+       box->event_list[n] = leader;
+       n++;
+       if (!dogrp)
+               return n;
+
+       list_for_each_entry(event, &leader->sibling_list, group_entry) {
+               if (event->state <= PERF_EVENT_STATE_OFF)
+                       continue;
+
+               if (n >= max_count)
+                       return -EINVAL;
+
+               box->event_list[n] = event;
+               n++;
+       }
+       return n;
+}
+
+static struct event_constraint *
+uncore_get_event_constraint(struct intel_uncore_box *box,
+                           struct perf_event *event)
+{
+       struct intel_uncore_type *type = box->pmu->type;
+       struct event_constraint *c;
+
+       if (type->ops->get_constraint) {
+               c = type->ops->get_constraint(box, event);
+               if (c)
+                       return c;
+       }
+
+       if (event->hw.config == ~0ULL)
+               return &constraint_fixed;
+
+       if (type->constraints) {
+               for_each_event_constraint(c, type->constraints) {
+                       if ((event->hw.config & c->cmask) == c->code)
+                               return c;
+               }
+       }
+
+       return &type->unconstrainted;
+}
+
+static void uncore_put_event_constraint(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       if (box->pmu->type->ops->put_constraint)
+               box->pmu->type->ops->put_constraint(box, event);
+}
+
+static int uncore_assign_events(struct intel_uncore_box *box,
+                               int assign[], int n)
+{
+       unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+       struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
+       int i, wmin, wmax, ret = 0;
+       struct hw_perf_event *hwc;
+
+       bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
+
+       for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
+               c = uncore_get_event_constraint(box, box->event_list[i]);
+               constraints[i] = c;
+               wmin = min(wmin, c->weight);
+               wmax = max(wmax, c->weight);
+       }
+
+       /* fastpath, try to reuse previous register */
+       for (i = 0; i < n; i++) {
+               hwc = &box->event_list[i]->hw;
+               c = constraints[i];
+
+               /* never assigned */
+               if (hwc->idx == -1)
+                       break;
+
+               /* constraint still honored */
+               if (!test_bit(hwc->idx, c->idxmsk))
+                       break;
+
+               /* not already used */
+               if (test_bit(hwc->idx, used_mask))
+                       break;
+
+               __set_bit(hwc->idx, used_mask);
+               if (assign)
+                       assign[i] = hwc->idx;
+       }
+       /* slow path */
+       if (i != n)
+               ret = perf_assign_events(constraints, n, wmin, wmax, assign);
+
+       if (!assign || ret) {
+               for (i = 0; i < n; i++)
+                       uncore_put_event_constraint(box, box->event_list[i]);
+       }
+       return ret ? -EINVAL : 0;
+}
+
+static void uncore_pmu_event_start(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       int idx = event->hw.idx;
+
+       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+               return;
+
+       if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+               return;
+
+       event->hw.state = 0;
+       box->events[idx] = event;
+       box->n_active++;
+       __set_bit(idx, box->active_mask);
+
+       local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
+       uncore_enable_event(box, event);
+
+       if (box->n_active == 1) {
+               uncore_enable_box(box);
+               uncore_pmu_start_hrtimer(box);
+       }
+}
+
+static void uncore_pmu_event_stop(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
+               uncore_disable_event(box, event);
+               box->n_active--;
+               box->events[hwc->idx] = NULL;
+               WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+               hwc->state |= PERF_HES_STOPPED;
+
+               if (box->n_active == 0) {
+                       uncore_disable_box(box);
+                       uncore_pmu_cancel_hrtimer(box);
+               }
+       }
+
+       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+               /*
+                * Drain the remaining delta count out of an event
+                * that we are disabling:
+                */
+               uncore_perf_event_update(box, event);
+               hwc->state |= PERF_HES_UPTODATE;
+       }
+}
+
+static int uncore_pmu_event_add(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       struct hw_perf_event *hwc = &event->hw;
+       int assign[UNCORE_PMC_IDX_MAX];
+       int i, n, ret;
+
+       if (!box)
+               return -ENODEV;
+
+       ret = n = uncore_collect_events(box, event, false);
+       if (ret < 0)
+               return ret;
+
+       hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (!(flags & PERF_EF_START))
+               hwc->state |= PERF_HES_ARCH;
+
+       ret = uncore_assign_events(box, assign, n);
+       if (ret)
+               return ret;
+
+       /* save events moving to new counters */
+       for (i = 0; i < box->n_events; i++) {
+               event = box->event_list[i];
+               hwc = &event->hw;
+
+               if (hwc->idx == assign[i] &&
+                       hwc->last_tag == box->tags[assign[i]])
+                       continue;
+               /*
+                * Ensure we don't accidentally enable a stopped
+                * counter simply because we rescheduled.
+                */
+               if (hwc->state & PERF_HES_STOPPED)
+                       hwc->state |= PERF_HES_ARCH;
+
+               uncore_pmu_event_stop(event, PERF_EF_UPDATE);
+       }
+
+       /* reprogram moved events into new counters */
+       for (i = 0; i < n; i++) {
+               event = box->event_list[i];
+               hwc = &event->hw;
+
+               if (hwc->idx != assign[i] ||
+                       hwc->last_tag != box->tags[assign[i]])
+                       uncore_assign_hw_event(box, event, assign[i]);
+               else if (i < box->n_events)
+                       continue;
+
+               if (hwc->state & PERF_HES_ARCH)
+                       continue;
+
+               uncore_pmu_event_start(event, 0);
+       }
+       box->n_events = n;
+
+       return 0;
+}
+
+static void uncore_pmu_event_del(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       int i;
+
+       uncore_pmu_event_stop(event, PERF_EF_UPDATE);
+
+       for (i = 0; i < box->n_events; i++) {
+               if (event == box->event_list[i]) {
+                       uncore_put_event_constraint(box, event);
+
+                       while (++i < box->n_events)
+                               box->event_list[i - 1] = box->event_list[i];
+
+                       --box->n_events;
+                       break;
+               }
+       }
+
+       event->hw.idx = -1;
+       event->hw.last_tag = ~0ULL;
+}
+
+static void uncore_pmu_event_read(struct perf_event *event)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       uncore_perf_event_update(box, event);
+}
+
+/*
+ * validation ensures the group can be loaded onto the
+ * PMU if it was the only group available.
+ */
+static int uncore_validate_group(struct intel_uncore_pmu *pmu,
+                               struct perf_event *event)
+{
+       struct perf_event *leader = event->group_leader;
+       struct intel_uncore_box *fake_box;
+       int ret = -EINVAL, n;
+
+       fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+       if (!fake_box)
+               return -ENOMEM;
+
+       fake_box->pmu = pmu;
+       /*
+        * the event is not yet connected with its
+        * siblings, therefore we must first collect
+        * the existing siblings, then add the new event
+        * before we can simulate the scheduling
+        */
+       n = uncore_collect_events(fake_box, leader, true);
+       if (n < 0)
+               goto out;
+
+       fake_box->n_events = n;
+       n = uncore_collect_events(fake_box, event, false);
+       if (n < 0)
+               goto out;
+
+       fake_box->n_events = n;
+
+       ret = uncore_assign_events(fake_box, NULL, n);
+out:
+       kfree(fake_box);
+       return ret;
+}
+
+int uncore_pmu_event_init(struct perf_event *event)
+{
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       struct hw_perf_event *hwc = &event->hw;
+       int ret;
+
+       if (event->attr.type != event->pmu->type)
+               return -ENOENT;
+
+       pmu = uncore_event_to_pmu(event);
+       /* no device found for this pmu */
+       if (pmu->func_id < 0)
+               return -ENOENT;
+
+       /*
+        * The uncore PMU counts at all privilege levels all the time,
+        * so it doesn't make sense to specify any exclude bits.
+        */
+       if (event->attr.exclude_user || event->attr.exclude_kernel ||
+                       event->attr.exclude_hv || event->attr.exclude_idle)
+               return -EINVAL;
+
+       /* Sampling not supported yet */
+       if (hwc->sample_period)
+               return -EINVAL;
+
+       /*
+        * Place all uncore events for a particular physical package
+        * onto a single cpu
+        */
+       if (event->cpu < 0)
+               return -EINVAL;
+       box = uncore_pmu_to_box(pmu, event->cpu);
+       if (!box || box->cpu < 0)
+               return -EINVAL;
+       event->cpu = box->cpu;
+
+       event->hw.idx = -1;
+       event->hw.last_tag = ~0ULL;
+       event->hw.extra_reg.idx = EXTRA_REG_NONE;
+
+       if (event->attr.config == UNCORE_FIXED_EVENT) {
+               /* no fixed counter */
+               if (!pmu->type->fixed_ctl)
+                       return -EINVAL;
+               /*
+                * if there is only one fixed counter, only the first pmu
+                * can access the fixed counter
+                */
+               if (pmu->type->single_fixed && pmu->pmu_idx > 0)
+                       return -EINVAL;
+               hwc->config = ~0ULL;
+       } else {
+               hwc->config = event->attr.config & pmu->type->event_mask;
+               if (pmu->type->ops->hw_config) {
+                       ret = pmu->type->ops->hw_config(box, event);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       if (event->group_leader != event)
+               ret = uncore_validate_group(pmu, event);
+       else
+               ret = 0;
+
+       return ret;
+}
+
+static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
+{
+       int ret;
+
+       pmu->pmu = (struct pmu) {
+               .attr_groups    = pmu->type->attr_groups,
+               .task_ctx_nr    = perf_invalid_context,
+               .event_init     = uncore_pmu_event_init,
+               .add            = uncore_pmu_event_add,
+               .del            = uncore_pmu_event_del,
+               .start          = uncore_pmu_event_start,
+               .stop           = uncore_pmu_event_stop,
+               .read           = uncore_pmu_event_read,
+       };
+
+       if (pmu->type->num_boxes == 1) {
+               if (strlen(pmu->type->name) > 0)
+                       sprintf(pmu->name, "uncore_%s", pmu->type->name);
+               else
+                       sprintf(pmu->name, "uncore");
+       } else {
+               sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
+                       pmu->pmu_idx);
+       }
+
+       ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
+       return ret;
+}
+
+static void __init uncore_type_exit(struct intel_uncore_type *type)
+{
+       int i;
+
+       for (i = 0; i < type->num_boxes; i++)
+               free_percpu(type->pmus[i].box);
+       kfree(type->pmus);
+       type->pmus = NULL;
+       kfree(type->attr_groups[1]);
+       type->attr_groups[1] = NULL;
+}
+
+static void uncore_types_exit(struct intel_uncore_type **types)
+{
+       int i;
+       for (i = 0; types[i]; i++)
+               uncore_type_exit(types[i]);
+}
+
+static int __init uncore_type_init(struct intel_uncore_type *type)
+{
+       struct intel_uncore_pmu *pmus;
+       struct attribute_group *events_group;
+       struct attribute **attrs;
+       int i, j;
+
+       pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
+       if (!pmus)
+               return -ENOMEM;
+
+       type->unconstrainted = (struct event_constraint)
+               __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
+                               0, type->num_counters, 0);
+
+       for (i = 0; i < type->num_boxes; i++) {
+               pmus[i].func_id = -1;
+               pmus[i].pmu_idx = i;
+               pmus[i].type = type;
+               INIT_LIST_HEAD(&pmus[i].box_list);
+               pmus[i].box = alloc_percpu(struct intel_uncore_box *);
+               if (!pmus[i].box)
+                       goto fail;
+       }
+
+       if (type->event_descs) {
+               i = 0;
+               while (type->event_descs[i].attr.attr.name)
+                       i++;
+
+               events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
+                                       sizeof(*events_group), GFP_KERNEL);
+               if (!events_group)
+                       goto fail;
+
+               attrs = (struct attribute **)(events_group + 1);
+               events_group->name = "events";
+               events_group->attrs = attrs;
+
+               for (j = 0; j < i; j++)
+                       attrs[j] = &type->event_descs[j].attr.attr;
+
+               type->attr_groups[1] = events_group;
+       }
+
+       type->pmus = pmus;
+       return 0;
+fail:
+       uncore_type_exit(type);
+       return -ENOMEM;
+}
+
+static int __init uncore_types_init(struct intel_uncore_type **types)
+{
+       int i, ret;
+
+       for (i = 0; types[i]; i++) {
+               ret = uncore_type_init(types[i]);
+               if (ret)
+                       goto fail;
+       }
+       return 0;
+fail:
+       while (--i >= 0)
+               uncore_type_exit(types[i]);
+       return ret;
+}
+
+static struct pci_driver *uncore_pci_driver;
+static bool pcidrv_registered;
+
+/*
+ * add a pci uncore device
+ */
+static int __devinit uncore_pci_add(struct intel_uncore_type *type,
+                                   struct pci_dev *pdev)
+{
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, phys_id;
+
+       phys_id = pcibus_to_physid[pdev->bus->number];
+       if (phys_id < 0)
+               return -ENODEV;
+
+       box = uncore_alloc_box(type, 0);
+       if (!box)
+               return -ENOMEM;
+
+       /*
+        * for a performance monitoring unit with multiple boxes,
+        * each box has a different function id.
+        */
+       for (i = 0; i < type->num_boxes; i++) {
+               pmu = &type->pmus[i];
+               if (pmu->func_id == pdev->devfn)
+                       break;
+               if (pmu->func_id < 0) {
+                       pmu->func_id = pdev->devfn;
+                       break;
+               }
+               pmu = NULL;
+       }
+
+       if (!pmu) {
+               kfree(box);
+               return -EINVAL;
+       }
+
+       box->phys_id = phys_id;
+       box->pci_dev = pdev;
+       box->pmu = pmu;
+       uncore_box_init(box);
+       pci_set_drvdata(pdev, box);
+
+       raw_spin_lock(&uncore_box_lock);
+       list_add_tail(&box->list, &pmu->box_list);
+       raw_spin_unlock(&uncore_box_lock);
+
+       return 0;
+}
+
+static void uncore_pci_remove(struct pci_dev *pdev)
+{
+       struct intel_uncore_box *box = pci_get_drvdata(pdev);
+       struct intel_uncore_pmu *pmu = box->pmu;
+       int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
+
+       if (WARN_ON_ONCE(phys_id != box->phys_id))
+               return;
+
+       raw_spin_lock(&uncore_box_lock);
+       list_del(&box->list);
+       raw_spin_unlock(&uncore_box_lock);
+
+       for_each_possible_cpu(cpu) {
+               if (*per_cpu_ptr(pmu->box, cpu) == box) {
+                       *per_cpu_ptr(pmu->box, cpu) = NULL;
+                       atomic_dec(&box->refcnt);
+               }
+       }
+
+       WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
+       kfree(box);
+}
+
+static int __devinit uncore_pci_probe(struct pci_dev *pdev,
+                               const struct pci_device_id *id)
+{
+       struct intel_uncore_type *type;
+
+       type = (struct intel_uncore_type *)id->driver_data;
+       return uncore_pci_add(type, pdev);
+}
+
+static int __init uncore_pci_init(void)
+{
+       int ret;
+
+       switch (boot_cpu_data.x86_model) {
+       case 45: /* Sandy Bridge-EP */
+               pci_uncores = snbep_pci_uncores;
+               uncore_pci_driver = &snbep_uncore_pci_driver;
+               snbep_pci2phy_map_init();
+               break;
+       default:
+               return 0;
+       }
+
+       ret = uncore_types_init(pci_uncores);
+       if (ret)
+               return ret;
+
+       uncore_pci_driver->probe = uncore_pci_probe;
+       uncore_pci_driver->remove = uncore_pci_remove;
+
+       ret = pci_register_driver(uncore_pci_driver);
+       if (ret == 0)
+               pcidrv_registered = true;
+       else
+               uncore_types_exit(pci_uncores);
+
+       return ret;
+}
+
+static void __init uncore_pci_exit(void)
+{
+       if (pcidrv_registered) {
+               pcidrv_registered = false;
+               pci_unregister_driver(uncore_pci_driver);
+               uncore_types_exit(pci_uncores);
+       }
+}
+
+static void __cpuinit uncore_cpu_dying(int cpu)
+{
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, j;
+
+       for (i = 0; msr_uncores[i]; i++) {
+               type = msr_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       box = *per_cpu_ptr(pmu->box, cpu);
+                       *per_cpu_ptr(pmu->box, cpu) = NULL;
+                       if (box && atomic_dec_and_test(&box->refcnt))
+                               kfree(box);
+               }
+       }
+}
+
+static int __cpuinit uncore_cpu_starting(int cpu)
+{
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box, *exist;
+       int i, j, k, phys_id;
+
+       phys_id = topology_physical_package_id(cpu);
+
+       for (i = 0; msr_uncores[i]; i++) {
+               type = msr_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       box = *per_cpu_ptr(pmu->box, cpu);
+                       /* called by uncore_cpu_init? */
+                       if (box && box->phys_id >= 0) {
+                               uncore_box_init(box);
+                               continue;
+                       }
+
+                       for_each_online_cpu(k) {
+                               exist = *per_cpu_ptr(pmu->box, k);
+                               if (exist && exist->phys_id == phys_id) {
+                                       atomic_inc(&exist->refcnt);
+                                       *per_cpu_ptr(pmu->box, cpu) = exist;
+                                       kfree(box);
+                                       box = NULL;
+                                       break;
+                               }
+                       }
+
+                       if (box) {
+                               box->phys_id = phys_id;
+                               uncore_box_init(box);
+                       }
+               }
+       }
+       return 0;
+}
+
+static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
+{
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, j;
+
+       for (i = 0; msr_uncores[i]; i++) {
+               type = msr_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       if (pmu->func_id < 0)
+                               pmu->func_id = j;
+
+                       box = uncore_alloc_box(type, cpu);
+                       if (!box)
+                               return -ENOMEM;
+
+                       box->pmu = pmu;
+                       box->phys_id = phys_id;
+                       *per_cpu_ptr(pmu->box, cpu) = box;
+               }
+       }
+       return 0;
+}
+
+static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores,
+                                           int old_cpu, int new_cpu)
+{
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, j;
+
+       for (i = 0; uncores[i]; i++) {
+               type = uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       if (old_cpu < 0)
+                               box = uncore_pmu_to_box(pmu, new_cpu);
+                       else
+                               box = uncore_pmu_to_box(pmu, old_cpu);
+                       if (!box)
+                               continue;
+
+                       if (old_cpu < 0) {
+                               WARN_ON_ONCE(box->cpu != -1);
+                               box->cpu = new_cpu;
+                               continue;
+                       }
+
+                       WARN_ON_ONCE(box->cpu != old_cpu);
+                       if (new_cpu >= 0) {
+                               uncore_pmu_cancel_hrtimer(box);
+                               perf_pmu_migrate_context(&pmu->pmu,
+                                               old_cpu, new_cpu);
+                               box->cpu = new_cpu;
+                       } else {
+                               box->cpu = -1;
+                       }
+               }
+       }
+}
+
+static void __cpuinit uncore_event_exit_cpu(int cpu)
+{
+       int i, phys_id, target;
+
+       /* if the exiting cpu was used for collecting uncore events */
+       if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+               return;
+
+       /* find a new cpu to collect uncore events */
+       phys_id = topology_physical_package_id(cpu);
+       target = -1;
+       for_each_online_cpu(i) {
+               if (i == cpu)
+                       continue;
+               if (phys_id == topology_physical_package_id(i)) {
+                       target = i;
+                       break;
+               }
+       }
+
+       /* migrate uncore events to the new cpu */
+       if (target >= 0)
+               cpumask_set_cpu(target, &uncore_cpu_mask);
+
+       uncore_change_context(msr_uncores, cpu, target);
+       uncore_change_context(pci_uncores, cpu, target);
+}
+
+static void __cpuinit uncore_event_init_cpu(int cpu)
+{
+       int i, phys_id;
+
+       phys_id = topology_physical_package_id(cpu);
+       for_each_cpu(i, &uncore_cpu_mask) {
+               if (phys_id == topology_physical_package_id(i))
+                       return;
+       }
+
+       cpumask_set_cpu(cpu, &uncore_cpu_mask);
+
+       uncore_change_context(msr_uncores, -1, cpu);
+       uncore_change_context(pci_uncores, -1, cpu);
+}
+
+static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
+                                        unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (long)hcpu;
+
+       /* allocate/free data structure for uncore box */
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_UP_PREPARE:
+               uncore_cpu_prepare(cpu, -1);
+               break;
+       case CPU_STARTING:
+               uncore_cpu_starting(cpu);
+               break;
+       case CPU_UP_CANCELED:
+       case CPU_DYING:
+               uncore_cpu_dying(cpu);
+               break;
+       default:
+               break;
+       }
+
+       /* select the cpu that collects uncore events */
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_DOWN_FAILED:
+       case CPU_STARTING:
+               uncore_event_init_cpu(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+               uncore_event_exit_cpu(cpu);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block uncore_cpu_nb __cpuinitdata = {
+       .notifier_call = uncore_cpu_notifier,
+       /*
+        * to migrate uncore events, our notifier should be executed
+        * before perf core's notifier.
+        */
+       .priority = CPU_PRI_PERF + 1,
+};
+
+static void __init uncore_cpu_setup(void *dummy)
+{
+       uncore_cpu_starting(smp_processor_id());
+}
+
+static int __init uncore_cpu_init(void)
+{
+       int ret, cpu, max_cores;
+
+       max_cores = boot_cpu_data.x86_max_cores;
+       switch (boot_cpu_data.x86_model) {
+       case 26: /* Nehalem */
+       case 30:
+       case 37: /* Westmere */
+       case 44:
+               msr_uncores = nhm_msr_uncores;
+               break;
+       case 42: /* Sandy Bridge */
+               if (snb_uncore_cbox.num_boxes > max_cores)
+                       snb_uncore_cbox.num_boxes = max_cores;
+               msr_uncores = snb_msr_uncores;
+               break;
+       case 45: /* Sandy Bridge-EP */
+               if (snbep_uncore_cbox.num_boxes > max_cores)
+                       snbep_uncore_cbox.num_boxes = max_cores;
+               msr_uncores = snbep_msr_uncores;
+               break;
+       default:
+               return 0;
+       }
+
+       ret = uncore_types_init(msr_uncores);
+       if (ret)
+               return ret;
+
+       get_online_cpus();
+
+       for_each_online_cpu(cpu) {
+               int i, phys_id = topology_physical_package_id(cpu);
+
+               for_each_cpu(i, &uncore_cpu_mask) {
+                       if (phys_id == topology_physical_package_id(i)) {
+                               phys_id = -1;
+                               break;
+                       }
+               }
+               if (phys_id < 0)
+                       continue;
+
+               uncore_cpu_prepare(cpu, phys_id);
+               uncore_event_init_cpu(cpu);
+       }
+       on_each_cpu(uncore_cpu_setup, NULL, 1);
+
+       register_cpu_notifier(&uncore_cpu_nb);
+
+       put_online_cpus();
+
+       return 0;
+}
+
+static int __init uncore_pmus_register(void)
+{
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_type *type;
+       int i, j;
+
+       for (i = 0; msr_uncores[i]; i++) {
+               type = msr_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       uncore_pmu_register(pmu);
+               }
+       }
+
+       for (i = 0; pci_uncores[i]; i++) {
+               type = pci_uncores[i];
+               for (j = 0; j < type->num_boxes; j++) {
+                       pmu = &type->pmus[j];
+                       uncore_pmu_register(pmu);
+               }
+       }
+
+       return 0;
+}
+
+static int __init intel_uncore_init(void)
+{
+       int ret;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return -ENODEV;
+
+       ret = uncore_pci_init();
+       if (ret)
+               goto fail;
+       ret = uncore_cpu_init();
+       if (ret) {
+               uncore_pci_exit();
+               goto fail;
+       }
+
+       uncore_pmus_register();
+       return 0;
+fail:
+       return ret;
+}
+device_initcall(intel_uncore_init);
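With intel_uncore_init() in place, each registered uncore PMU appears as a dynamic perf event source, and uncore_pmu_event_init() accepts only per-cpu (cpu >= 0), non-sampling events. The sketch below is a hypothetical userspace example, not part of the patch: it assumes the dynamic type id is exported at /sys/bus/event_source/devices/uncore/type, uses the 0xff fixed-event encoding from the header that follows, and typically needs root or a relaxed perf_event_paranoid setting to run.

	/* hypothetical userspace sketch: count the uncore fixed event (commonly the
	 * uncore clock) on CPU 0 for one second via perf_event_open() */
	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		FILE *f;
		int type, fd;

		/* dynamic PMU type id exported after perf_pmu_register() */
		f = fopen("/sys/bus/event_source/devices/uncore/type", "r");
		if (!f)
			return 1;
		if (fscanf(f, "%d", &type) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);
		attr.type   = type;
		attr.config = 0xff;	/* UNCORE_FIXED_EVENT, needs a fixed counter */

		/* pid = -1, cpu = 0: per-cpu counting on one cpu of the package */
		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
		if (fd < 0)
			return 1;

		sleep(1);
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("uncore fixed counter: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}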
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
new file mode 100644 (file)
index 0000000..b13e9ea
--- /dev/null
@@ -0,0 +1,424 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/perf_event.h>
+#include "perf_event.h"
+
+#define UNCORE_PMU_NAME_LEN            32
+#define UNCORE_BOX_HASH_SIZE           8
+
+#define UNCORE_PMU_HRTIMER_INTERVAL    (60 * NSEC_PER_SEC)
+
+#define UNCORE_FIXED_EVENT             0xff
+#define UNCORE_PMC_IDX_MAX_GENERIC     8
+#define UNCORE_PMC_IDX_FIXED           UNCORE_PMC_IDX_MAX_GENERIC
+#define UNCORE_PMC_IDX_MAX             (UNCORE_PMC_IDX_FIXED + 1)
+
+#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
+
+/* SNB event control */
+#define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
+#define SNB_UNC_CTL_UMASK_MASK                 0x0000ff00
+#define SNB_UNC_CTL_EDGE_DET                   (1 << 18)
+#define SNB_UNC_CTL_EN                         (1 << 22)
+#define SNB_UNC_CTL_INVERT                     (1 << 23)
+#define SNB_UNC_CTL_CMASK_MASK                 0x1f000000
+#define NHM_UNC_CTL_CMASK_MASK                 0xff000000
+#define NHM_UNC_FIXED_CTR_CTL_EN               (1 << 0)
+
+#define SNB_UNC_RAW_EVENT_MASK                 (SNB_UNC_CTL_EV_SEL_MASK | \
+                                                SNB_UNC_CTL_UMASK_MASK | \
+                                                SNB_UNC_CTL_EDGE_DET | \
+                                                SNB_UNC_CTL_INVERT | \
+                                                SNB_UNC_CTL_CMASK_MASK)
+
+#define NHM_UNC_RAW_EVENT_MASK                 (SNB_UNC_CTL_EV_SEL_MASK | \
+                                                SNB_UNC_CTL_UMASK_MASK | \
+                                                SNB_UNC_CTL_EDGE_DET | \
+                                                SNB_UNC_CTL_INVERT | \
+                                                NHM_UNC_CTL_CMASK_MASK)
+
+/* SNB global control register */
+#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
+#define SNB_UNC_FIXED_CTR_CTRL                  0x394
+#define SNB_UNC_FIXED_CTR                       0x395
+
+/* SNB uncore global control */
+#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
+#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)
+
+/* SNB Cbo register */
+#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
+#define SNB_UNC_CBO_0_PER_CTR0                  0x706
+#define SNB_UNC_CBO_MSR_OFFSET                  0x10
+
+/* NHM global control register */
+#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
+#define NHM_UNC_FIXED_CTR                       0x394
+#define NHM_UNC_FIXED_CTR_CTRL                  0x395
+
+/* NHM uncore global control */
+#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
+#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)
+
+/* NHM uncore register */
+#define NHM_UNC_PERFEVTSEL0                     0x3c0
+#define NHM_UNC_UNCORE_PMC0                     0x3b0
+
+/* SNB-EP Box level control */
+#define SNBEP_PMON_BOX_CTL_RST_CTRL    (1 << 0)
+#define SNBEP_PMON_BOX_CTL_RST_CTRS    (1 << 1)
+#define SNBEP_PMON_BOX_CTL_FRZ         (1 << 8)
+#define SNBEP_PMON_BOX_CTL_FRZ_EN      (1 << 16)
+#define SNBEP_PMON_BOX_CTL_INT         (SNBEP_PMON_BOX_CTL_RST_CTRL | \
+                                        SNBEP_PMON_BOX_CTL_RST_CTRS | \
+                                        SNBEP_PMON_BOX_CTL_FRZ_EN)
+/* SNB-EP event control */
+#define SNBEP_PMON_CTL_EV_SEL_MASK     0x000000ff
+#define SNBEP_PMON_CTL_UMASK_MASK      0x0000ff00
+#define SNBEP_PMON_CTL_RST             (1 << 17)
+#define SNBEP_PMON_CTL_EDGE_DET                (1 << 18)
+#define SNBEP_PMON_CTL_EV_SEL_EXT      (1 << 21)       /* only for QPI */
+#define SNBEP_PMON_CTL_EN              (1 << 22)
+#define SNBEP_PMON_CTL_INVERT          (1 << 23)
+#define SNBEP_PMON_CTL_TRESH_MASK      0xff000000
+#define SNBEP_PMON_RAW_EVENT_MASK      (SNBEP_PMON_CTL_EV_SEL_MASK | \
+                                        SNBEP_PMON_CTL_UMASK_MASK | \
+                                        SNBEP_PMON_CTL_EDGE_DET | \
+                                        SNBEP_PMON_CTL_INVERT | \
+                                        SNBEP_PMON_CTL_TRESH_MASK)
+
+/* SNB-EP Ubox event control */
+#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK                0x1f000000
+#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK                \
+                               (SNBEP_PMON_CTL_EV_SEL_MASK | \
+                                SNBEP_PMON_CTL_UMASK_MASK | \
+                                SNBEP_PMON_CTL_EDGE_DET | \
+                                SNBEP_PMON_CTL_INVERT | \
+                                SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
+
+#define SNBEP_CBO_PMON_CTL_TID_EN              (1 << 19)
+#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK      (SNBEP_PMON_RAW_EVENT_MASK | \
+                                                SNBEP_CBO_PMON_CTL_TID_EN)
+
+/* SNB-EP PCU event control */
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK    0x0000c000
+#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK      0x1f000000
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT      (1 << 30)
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET    (1 << 31)
+#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK      \
+                               (SNBEP_PMON_CTL_EV_SEL_MASK | \
+                                SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
+                                SNBEP_PMON_CTL_EDGE_DET | \
+                                SNBEP_PMON_CTL_INVERT | \
+                                SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
+                                SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
+                                SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
+
+/* SNB-EP pci control register */
+#define SNBEP_PCI_PMON_BOX_CTL                 0xf4
+#define SNBEP_PCI_PMON_CTL0                    0xd8
+/* SNB-EP pci counter register */
+#define SNBEP_PCI_PMON_CTR0                    0xa0
+
+/* SNB-EP home agent register */
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0       0x40
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1       0x44
+#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH      0x48
+/* SNB-EP memory controller register */
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL                0xf0
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR                0xd0
+/* SNB-EP QPI register */
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0         0x228
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1         0x22c
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0          0x238
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1          0x23c
+
+/* SNB-EP Ubox register */
+#define SNBEP_U_MSR_PMON_CTR0                  0xc16
+#define SNBEP_U_MSR_PMON_CTL0                  0xc10
+
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL                0xc08
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR                0xc09
+
+/* SNB-EP Cbo register */
+#define SNBEP_C0_MSR_PMON_CTR0                 0xd16
+#define SNBEP_C0_MSR_PMON_CTL0                 0xd10
+#define SNBEP_C0_MSR_PMON_BOX_CTL              0xd04
+#define SNBEP_C0_MSR_PMON_BOX_FILTER           0xd14
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK     0xfffffc1f
+#define SNBEP_CBO_MSR_OFFSET                   0x20
+
+/* SNB-EP PCU register */
+#define SNBEP_PCU_MSR_PMON_CTR0                        0xc36
+#define SNBEP_PCU_MSR_PMON_CTL0                        0xc30
+#define SNBEP_PCU_MSR_PMON_BOX_CTL             0xc24
+#define SNBEP_PCU_MSR_PMON_BOX_FILTER          0xc34
+#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK     0xffffffff
+#define SNBEP_PCU_MSR_CORE_C3_CTR              0x3fc
+#define SNBEP_PCU_MSR_CORE_C6_CTR              0x3fd
+
+struct intel_uncore_ops;
+struct intel_uncore_pmu;
+struct intel_uncore_box;
+struct uncore_event_desc;
+
+struct intel_uncore_type {
+       const char *name;
+       int num_counters;
+       int num_boxes;
+       int perf_ctr_bits;
+       int fixed_ctr_bits;
+       unsigned perf_ctr;
+       unsigned event_ctl;
+       unsigned event_mask;
+       unsigned fixed_ctr;
+       unsigned fixed_ctl;
+       unsigned box_ctl;
+       unsigned msr_offset;
+       unsigned num_shared_regs:8;
+       unsigned single_fixed:1;
+       struct event_constraint unconstrainted;
+       struct event_constraint *constraints;
+       struct intel_uncore_pmu *pmus;
+       struct intel_uncore_ops *ops;
+       struct uncore_event_desc *event_descs;
+       const struct attribute_group *attr_groups[3];
+};
+
+#define format_group attr_groups[0]
+
+struct intel_uncore_ops {
+       void (*init_box)(struct intel_uncore_box *);
+       void (*disable_box)(struct intel_uncore_box *);
+       void (*enable_box)(struct intel_uncore_box *);
+       void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
+       void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
+       u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
+       int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
+       struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
+                                                  struct perf_event *);
+       void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
+};
+
+struct intel_uncore_pmu {
+       struct pmu pmu;
+       char name[UNCORE_PMU_NAME_LEN];
+       int pmu_idx;
+       int func_id;
+       struct intel_uncore_type *type;
+       struct intel_uncore_box ** __percpu box;
+       struct list_head box_list;
+};
+
+struct intel_uncore_extra_reg {
+       raw_spinlock_t lock;
+       u64 config1;
+       atomic_t ref;
+};
+
+struct intel_uncore_box {
+       int phys_id;
+       int n_active;   /* number of active events */
+       int n_events;
+       int cpu;        /* cpu to collect events */
+       unsigned long flags;
+       atomic_t refcnt;
+       struct perf_event *events[UNCORE_PMC_IDX_MAX];
+       struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
+       unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+       u64 tags[UNCORE_PMC_IDX_MAX];
+       struct pci_dev *pci_dev;
+       struct intel_uncore_pmu *pmu;
+       struct hrtimer hrtimer;
+       struct list_head list;
+       struct intel_uncore_extra_reg shared_regs[0];
+};
+
+#define UNCORE_BOX_FLAG_INITIATED      0
+
+struct uncore_event_desc {
+       struct kobj_attribute attr;
+       const char *config;
+};
+
+#define INTEL_UNCORE_EVENT_DESC(_name, _config)                        \
+{                                                              \
+       .attr   = __ATTR(_name, 0444, uncore_event_show, NULL), \
+       .config = _config,                                      \
+}
+
+#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                        \
+static ssize_t __uncore_##_var##_show(struct kobject *kobj,            \
+                               struct kobj_attribute *attr,            \
+                               char *page)                             \
+{                                                                      \
+       BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
+       return sprintf(page, _format "\n");                             \
+}                                                                      \
+static struct kobj_attribute format_attr_##_var =                      \
+       __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
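For illustration, an invocation such as DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7") (a plausible use matching the format strings elsewhere in this series; shown here only as an example) would expand to roughly the following, so each format attribute simply echoes its field encoding to sysfs:

	/* rough expansion of DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7") */
	static ssize_t __uncore_event_show(struct kobject *kobj,
					   struct kobj_attribute *attr, char *page)
	{
		BUILD_BUG_ON(sizeof("config:0-7") >= PAGE_SIZE);
		return sprintf(page, "config:0-7" "\n");
	}
	static struct kobj_attribute format_attr_event =
		__ATTR(event, 0444, __uncore_event_show, NULL);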
+
+
+static ssize_t uncore_event_show(struct kobject *kobj,
+                               struct kobj_attribute *attr, char *buf)
+{
+       struct uncore_event_desc *event =
+               container_of(attr, struct uncore_event_desc, attr);
+       return sprintf(buf, "%s", event->config);
+}
+
+static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
+{
+       return box->pmu->type->box_ctl;
+}
+
+static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
+{
+       return box->pmu->type->fixed_ctl;
+}
+
+static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
+{
+       return box->pmu->type->fixed_ctr;
+}
+
+static inline
+unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
+{
+       return idx * 4 + box->pmu->type->event_ctl;
+}
+
+static inline
+unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+       return idx * 8 + box->pmu->type->perf_ctr;
+}
+
+static inline
+unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
+{
+       if (!box->pmu->type->box_ctl)
+               return 0;
+       return box->pmu->type->box_ctl +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
+{
+       if (!box->pmu->type->fixed_ctl)
+               return 0;
+       return box->pmu->type->fixed_ctl +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
+{
+       return box->pmu->type->fixed_ctr +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
+{
+       return idx + box->pmu->type->event_ctl +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+       return idx + box->pmu->type->perf_ctr +
+               box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
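To make the MSR address arithmetic above concrete, assuming a SandyBridge C-box type wired to the SNB_UNC_CBO_* constants defined earlier in this header (the actual type definition lives in the .c file and is an assumption here):

	/* hypothetical example, per uncore_msr_event_ctl():
	 *   idx + event_ctl + msr_offset * pmu_idx
	 *   = 2 + SNB_UNC_CBO_0_PERFEVTSEL0 + SNB_UNC_CBO_MSR_OFFSET * 1
	 *   = 2 + 0x700 + 0x10 = 0x712  (control MSR for counter 2 of C-box 1)
	 */
	unsigned int example_ctl_msr = 2 + 0x700 + 0x10 * 1;	/* == 0x712 */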
+
+static inline
+unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
+{
+       if (box->pci_dev)
+               return uncore_pci_fixed_ctl(box);
+       else
+               return uncore_msr_fixed_ctl(box);
+}
+
+static inline
+unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
+{
+       if (box->pci_dev)
+               return uncore_pci_fixed_ctr(box);
+       else
+               return uncore_msr_fixed_ctr(box);
+}
+
+static inline
+unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
+{
+       if (box->pci_dev)
+               return uncore_pci_event_ctl(box, idx);
+       else
+               return uncore_msr_event_ctl(box, idx);
+}
+
+static inline
+unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+       if (box->pci_dev)
+               return uncore_pci_perf_ctr(box, idx);
+       else
+               return uncore_msr_perf_ctr(box, idx);
+}
+
+static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
+{
+       return box->pmu->type->perf_ctr_bits;
+}
+
+static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
+{
+       return box->pmu->type->fixed_ctr_bits;
+}
+
+static inline int uncore_num_counters(struct intel_uncore_box *box)
+{
+       return box->pmu->type->num_counters;
+}
+
+static inline void uncore_disable_box(struct intel_uncore_box *box)
+{
+       if (box->pmu->type->ops->disable_box)
+               box->pmu->type->ops->disable_box(box);
+}
+
+static inline void uncore_enable_box(struct intel_uncore_box *box)
+{
+       if (box->pmu->type->ops->enable_box)
+               box->pmu->type->ops->enable_box(box);
+}
+
+static inline void uncore_disable_event(struct intel_uncore_box *box,
+                               struct perf_event *event)
+{
+       box->pmu->type->ops->disable_event(box, event);
+}
+
+static inline void uncore_enable_event(struct intel_uncore_box *box,
+                               struct perf_event *event)
+{
+       box->pmu->type->ops->enable_event(box, event);
+}
+
+static inline u64 uncore_read_counter(struct intel_uncore_box *box,
+                               struct perf_event *event)
+{
+       return box->pmu->type->ops->read_counter(box, event);
+}
+
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+               if (box->pmu->type->ops->init_box)
+                       box->pmu->type->ops->init_box(box);
+       }
+}
index 47124a7..92c7e39 100644 (file)
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
         * So at moment let leave metrics turned on forever -- it's
         * ok for now but need to be revisited!
         *
-        * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
-        * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
+        * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
+        * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
         */
 }
 
@@ -909,7 +909,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
         * state we need to clear P4_CCCR_OVF, otherwise interrupt get
         * asserted again and again
         */
-       (void)checking_wrmsrl(hwc->config_base,
+       (void)wrmsrl_safe(hwc->config_base,
                (u64)(p4_config_unpack_cccr(hwc->config)) &
                        ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
@@ -943,8 +943,8 @@ static void p4_pmu_enable_pebs(u64 config)
 
        bind = &p4_pebs_bind_map[idx];
 
-       (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE,     (u64)bind->metric_pebs);
-       (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT,  (u64)bind->metric_vert);
+       (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+       (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT,      (u64)bind->metric_vert);
 }
 
 static void p4_pmu_enable_event(struct perf_event *event)
@@ -978,8 +978,8 @@ static void p4_pmu_enable_event(struct perf_event *event)
         */
        p4_pmu_enable_pebs(hwc->config);
 
-       (void)checking_wrmsrl(escr_addr, escr_conf);
-       (void)checking_wrmsrl(hwc->config_base,
+       (void)wrmsrl_safe(escr_addr, escr_conf);
+       (void)wrmsrl_safe(hwc->config_base,
                                (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
 
@@ -1325,7 +1325,7 @@ __init int p4_pmu_init(void)
        unsigned int low, high;
 
        /* If we get stripped -- indexing fails */
-       BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
+       BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);
 
        rdmsr(MSR_IA32_MISC_ENABLE, low, high);
        if (!(low & (1 << 7))) {
index 32bcfc7..e4dd0f7 100644 (file)
@@ -71,7 +71,7 @@ p6_pmu_disable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base, val);
+       (void)wrmsrl_safe(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
@@ -84,7 +84,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base, val);
+       (void)wrmsrl_safe(hwc->config_base, val);
 }
 
 PMU_FORMAT_ATTR(event, "config:0-7"    );
index addf9e8..ee8e9ab 100644 (file)
@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
        const struct cpuid_bit *cb;
 
        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
-               { X86_FEATURE_DTS,              CR_EAX, 0, 0x00000006, 0 },
+               { X86_FEATURE_DTHERM,           CR_EAX, 0, 0x00000006, 0 },
                { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006, 0 },
                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006, 0 },
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
index 8bfb614..3f61904 100644 (file)
@@ -444,12 +444,12 @@ void kgdb_roundup_cpus(unsigned long flags)
 
 /**
  *     kgdb_arch_handle_exception - Handle architecture specific GDB packets.
- *     @vector: The error vector of the exception that happened.
+ *     @e_vector: The error vector of the exception that happened.
  *     @signo: The signal number of the exception that happened.
  *     @err_code: The error code of the exception that happened.
- *     @remcom_in_buffer: The buffer of the packet we have read.
- *     @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
- *     @regs: The &struct pt_regs of the current process.
+ *     @remcomInBuffer: The buffer of the packet we have read.
+ *     @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
+ *     @linux_regs: The &struct pt_regs of the current process.
  *
  *     This function MUST handle the 'c' and 's' command packets,
  *     as well packets to set / remove a hardware breakpoint, if used.
index c383b3f..4873e62 100644 (file)
@@ -87,6 +87,7 @@
 #include <asm/microcode.h>
 #include <asm/processor.h>
 #include <asm/cpu_device_id.h>
+#include <asm/perf_event.h>
 
 MODULE_DESCRIPTION("Microcode Update Driver");
 MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
@@ -277,7 +278,6 @@ static int reload_for_cpu(int cpu)
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        int err = 0;
 
-       mutex_lock(&microcode_mutex);
        if (uci->valid) {
                enum ucode_state ustate;
 
@@ -288,7 +288,6 @@ static int reload_for_cpu(int cpu)
                        if (ustate == UCODE_ERROR)
                                err = -EINVAL;
        }
-       mutex_unlock(&microcode_mutex);
 
        return err;
 }
@@ -298,19 +297,31 @@ static ssize_t reload_store(struct device *dev,
                            const char *buf, size_t size)
 {
        unsigned long val;
-       int cpu = dev->id;
-       ssize_t ret = 0;
+       int cpu;
+       ssize_t ret = 0, tmp_ret;
 
        ret = kstrtoul(buf, 0, &val);
        if (ret)
                return ret;
 
-       if (val == 1) {
-               get_online_cpus();
-               if (cpu_online(cpu))
-                       ret = reload_for_cpu(cpu);
-               put_online_cpus();
+       if (val != 1)
+               return size;
+
+       get_online_cpus();
+       mutex_lock(&microcode_mutex);
+       for_each_online_cpu(cpu) {
+               tmp_ret = reload_for_cpu(cpu);
+               if (tmp_ret != 0)
+                       pr_warn("Error reloading microcode on CPU %d\n", cpu);
+
+               /* save retval of the first encountered reload error */
+               if (!ret)
+                       ret = tmp_ret;
        }
+       if (!ret)
+               perf_check_microcode();
+       mutex_unlock(&microcode_mutex);
+       put_online_cpus();
 
        if (!ret)
                ret = size;
@@ -339,7 +350,6 @@ static DEVICE_ATTR(version, 0400, version_show, NULL);
 static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
 
 static struct attribute *mc_default_attrs[] = {
-       &dev_attr_reload.attr,
        &dev_attr_version.attr,
        &dev_attr_processor_flags.attr,
        NULL
@@ -516,6 +526,16 @@ static const struct x86_cpu_id __initconst microcode_id[] = {
 MODULE_DEVICE_TABLE(x86cpu, microcode_id);
 #endif
 
+static struct attribute *cpu_root_microcode_attrs[] = {
+       &dev_attr_reload.attr,
+       NULL
+};
+
+static struct attribute_group cpu_root_microcode_group = {
+       .name  = "microcode",
+       .attrs = cpu_root_microcode_attrs,
+};
+
 static int __init microcode_init(void)
 {
        struct cpuinfo_x86 *c = &cpu_data(0);
@@ -540,16 +560,25 @@ static int __init microcode_init(void)
        mutex_lock(&microcode_mutex);
 
        error = subsys_interface_register(&mc_cpu_interface);
-
+       if (!error)
+               perf_check_microcode();
        mutex_unlock(&microcode_mutex);
        put_online_cpus();
 
        if (error)
                goto out_pdev;
 
+       error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
+                                  &cpu_root_microcode_group);
+
+       if (error) {
+               pr_err("Error creating microcode group!\n");
+               goto out_driver;
+       }
+
        error = microcode_dev_init();
        if (error)
-               goto out_driver;
+               goto out_ucode_group;
 
        register_syscore_ops(&mc_syscore_ops);
        register_hotcpu_notifier(&mc_cpu_notifier);
@@ -559,7 +588,11 @@ static int __init microcode_init(void)
 
        return 0;
 
-out_driver:
+ out_ucode_group:
+       sysfs_remove_group(&cpu_subsys.dev_root->kobj,
+                          &cpu_root_microcode_group);
+
+ out_driver:
        get_online_cpus();
        mutex_lock(&microcode_mutex);
 
@@ -568,7 +601,7 @@ out_driver:
        mutex_unlock(&microcode_mutex);
        put_online_cpus();
 
-out_pdev:
+ out_pdev:
        platform_device_unregister(microcode_pdev);
        return error;
 
@@ -584,6 +617,9 @@ static void __exit microcode_exit(void)
        unregister_hotcpu_notifier(&mc_cpu_notifier);
        unregister_syscore_ops(&mc_syscore_ops);
 
+       sysfs_remove_group(&cpu_subsys.dev_root->kobj,
+                          &cpu_root_microcode_group);
+
        get_online_cpus();
        mutex_lock(&microcode_mutex);
 
index 9ce8859..17fff18 100644 (file)
@@ -352,9 +352,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
        .wbinvd = native_wbinvd,
        .read_msr = native_read_msr_safe,
-       .rdmsr_regs = native_rdmsr_safe_regs,
        .write_msr = native_write_msr_safe,
-       .wrmsr_regs = native_wrmsr_safe_regs,
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
        .read_tscp = native_read_tscp,
index 85151f3..0a980c9 100644 (file)
@@ -466,7 +466,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
-                               ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+                               ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
@@ -494,7 +494,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                loadsegment(fs, 0);
-                               ret = checking_wrmsrl(MSR_FS_BASE, addr);
+                               ret = wrmsrl_safe(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
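checking_wrmsrl() is renamed to wrmsrl_safe() here and in the other call sites below; the contract is unchanged: the write goes through the exception-table-protected path and a non-zero return reports the #GP instead of an oops. A small illustrative helper (the wrapper and the warning text are made up for the example):

#include <linux/printk.h>
#include <asm/msr.h>

/* Try an MSR write and report failure instead of faulting. */
static int try_write_msr(unsigned int msr, u64 val)
{
	int err = wrmsrl_safe(msr, val);

	if (err)
		pr_warn("MSR 0x%08x write rejected by the CPU\n", msr);
	return err;
}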
index 10ae9be..d0f81d3 100644 (file)
@@ -455,6 +455,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
                },
        },
+       {       /* Handle problems with rebooting on the Precision M6600. */
+               .callback = set_pci_reboot,
+               .ident = "Dell Precision M6600",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+               },
+       },
        { }
 };
 
index 54e938d..c1a310f 100644 (file)
@@ -257,22 +257,13 @@ notrace static void __cpuinit start_secondary(void *unused)
        check_tsc_sync_target();
 
        /*
-        * We need to hold call_lock, so there is no inconsistency
-        * between the time smp_call_function() determines number of
-        * IPI recipients, and the time when the determination is made
-        * for which cpus receive the IPI. Holding this
-        * lock helps us to not include this cpu in a currently in progress
-        * smp_call_function().
-        *
         * We need to hold vector_lock so that the set of online cpus
         * does not change while we are assigning vectors to cpus.  Holding
         * this lock ensures we don't half assign or remove an irq from a cpu.
         */
-       ipi_call_lock();
        lock_vector_lock();
        set_cpu_online(smp_processor_id(), true);
        unlock_vector_lock();
-       ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        x86_platform.nmi_init();
 
index dc4e910..36fd420 100644 (file)
@@ -409,9 +409,10 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
  * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
  * @mm: the probed address space.
  * @arch_uprobe: the probepoint information.
+ * @addr: virtual address at which to install the probepoint
  * Return 0 on success or a -ve number on error.
  */
-int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
 {
        int ret;
        struct insn insn;
index acdc125..8d141b3 100644 (file)
@@ -136,6 +136,19 @@ static int addr_to_vsyscall_nr(unsigned long addr)
        return nr;
 }
 
+#ifdef CONFIG_SECCOMP
+static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
+{
+       if (!seccomp_mode(&tsk->seccomp))
+               return 0;
+       task_pt_regs(tsk)->orig_ax = syscall_nr;
+       task_pt_regs(tsk)->ax = syscall_nr;
+       return __secure_computing(syscall_nr);
+}
+#else
+#define vsyscall_seccomp(_tsk, _nr) 0
+#endif
+
 static bool write_ok_or_segv(unsigned long ptr, size_t size)
 {
        /*
@@ -171,6 +184,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        int vsyscall_nr;
        int prev_sig_on_uaccess_error;
        long ret;
+       int skip;
 
        /*
         * No point in checking CS -- the only way to get here is a user mode
@@ -202,9 +216,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        }
 
        tsk = current;
-       if (seccomp_mode(&tsk->seccomp))
-               do_exit(SIGKILL);
-
        /*
         * With a real vsyscall, page faults cause SIGSEGV.  We want to
         * preserve that behavior to make writing exploits harder.
@@ -219,8 +230,13 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
         * address 0".
         */
        ret = -EFAULT;
+       skip = 0;
        switch (vsyscall_nr) {
        case 0:
+               skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
                    !write_ok_or_segv(regs->si, sizeof(struct timezone)))
                        break;
@@ -231,6 +247,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
                break;
 
        case 1:
+               skip = vsyscall_seccomp(tsk, __NR_time);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(time_t)))
                        break;
 
@@ -238,6 +258,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
                break;
 
        case 2:
+               skip = vsyscall_seccomp(tsk, __NR_getcpu);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
                    !write_ok_or_segv(regs->si, sizeof(unsigned)))
                        break;
@@ -250,6 +274,12 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
 
+       if (skip) {
+               if ((long)regs->ax <= 0L) /* seccomp errno emulation */
+                       goto do_ret;
+               goto done; /* seccomp trace/trap */
+       }
+
        if (ret == -EFAULT) {
                /* Bad news -- userspace fed a bad pointer to a vsyscall. */
                warn_bad_vsyscall(KERN_INFO, regs,
@@ -268,10 +298,11 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        regs->ax = ret;
 
+do_ret:
        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;
-
+done:
        return true;
 
 sigsegv:
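With the hunks above, a task's seccomp filter is consulted for the three emulated vsyscalls (with the syscall number placed in orig_ax/ax first), instead of the old behaviour of killing any seccomp task that touched the vsyscall page, so SECCOMP_RET_ERRNO, trace and trap results are honoured there as well. For illustration only, a user-space sketch of such a filter; it assumes headers new enough for PR_SET_NO_NEW_PRIVS and SECCOMP_MODE_FILTER, and a production filter would also validate seccomp_data->arch.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	struct sock_filter filter[] = {
		/* load the syscall number */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* gettimeofday -> fail with ENOSYS, everything else allowed */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_gettimeofday, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | ENOSYS),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len    = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};
	struct timeval tv;

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("prctl");
		return 1;
	}

	/* Direct syscall so the filter is definitely consulted. */
	if (syscall(SYS_gettimeofday, &tv, NULL))
		perror("gettimeofday");	/* ENOSYS from the filter */
	return 0;
}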
index 9796c2f..6020f6f 100644 (file)
@@ -28,6 +28,7 @@ EXPORT_SYMBOL(__put_user_8);
 
 EXPORT_SYMBOL(copy_user_generic_string);
 EXPORT_SYMBOL(copy_user_generic_unrolled);
+EXPORT_SYMBOL(copy_user_enhanced_fast_string);
 EXPORT_SYMBOL(__copy_user_nocache);
 EXPORT_SYMBOL(_copy_from_user);
 EXPORT_SYMBOL(_copy_to_user);
index be3cea4..57e168e 100644 (file)
@@ -3934,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
 {
        struct kvm_mmu_page *page;
 
+       if (list_empty(&kvm->arch.active_mmu_pages))
+               return;
+
        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
        kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
index 2e88438..9b7ec11 100644 (file)
@@ -80,10 +80,10 @@ static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
 
 static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
 {
-       if (idx < X86_PMC_IDX_FIXED)
+       if (idx < INTEL_PMC_IDX_FIXED)
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
        else
-               return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
+               return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
 }
 
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
@@ -291,7 +291,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx)
        if (pmc_is_gp(pmc))
                reprogram_gp_counter(pmc, pmc->eventsel);
        else {
-               int fidx = idx - X86_PMC_IDX_FIXED;
+               int fidx = idx - INTEL_PMC_IDX_FIXED;
                reprogram_fixed_counter(pmc,
                                fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
        }
@@ -452,7 +452,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
                return;
 
        pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
-                       X86_PMC_MAX_GENERIC);
+                       INTEL_PMC_MAX_GENERIC);
        pmu->counter_bitmask[KVM_PMC_GP] =
                ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
        bitmap_len = (entry->eax >> 24) & 0xff;
@@ -462,13 +462,13 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
-                               X86_PMC_MAX_FIXED);
+                               INTEL_PMC_MAX_FIXED);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
        }
 
        pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
-               (((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED);
+               (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
        pmu->global_ctrl_mask = ~pmu->global_ctrl;
 }
 
@@ -478,15 +478,15 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
 
        memset(pmu, 0, sizeof(*pmu));
-       for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+       for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
        }
-       for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
+       for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
-               pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
+               pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
        }
        init_irq_work(&pmu->irq_work, trigger_pmi);
        kvm_pmu_cpuid_update(vcpu);
@@ -498,13 +498,13 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
        int i;
 
        irq_work_sync(&pmu->irq_work);
-       for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+       for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];
                stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }
 
-       for (i = 0; i < X86_PMC_MAX_FIXED; i++)
+       for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
                stop_counter(&pmu->fixed_counters[i]);
 
        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
index 911d264..62d02e3 100644 (file)
@@ -710,16 +710,6 @@ TRACE_EVENT(kvm_skinit,
                  __entry->rip, __entry->slb)
 );
 
-#define __print_insn(insn, ilen) ({                             \
-       int i;                                                   \
-       const char *ret = p->buffer + p->len;                    \
-                                                                \
-       for (i = 0; i < ilen; ++i)                               \
-               trace_seq_printf(p, " %02x", insn[i]);           \
-       trace_seq_printf(p, "%c", 0);                            \
-       ret;                                                     \
-       })
-
 #define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
 #define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
 #define KVM_EMUL_INSN_F_CS_D   (1 << 2)
@@ -786,7 +776,7 @@ TRACE_EVENT(kvm_emulate_insn,
 
        TP_printk("%x:%llx:%s (%s)%s",
                  __entry->csbase, __entry->rip,
-                 __print_insn(__entry->insn, __entry->len),
+                 __print_hex(__entry->insn, __entry->len),
                  __print_symbolic(__entry->flags,
                                   kvm_trace_symbol_emul_flags),
                  __entry->failed ? " failed" : ""
index 459b58a..25b7ae8 100644 (file)
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
  * @src: source address
  * @dst: destination address
  * @len: number of bytes to be copied.
- * @isum: initial sum that is added into the result (32bit unfolded)
+ * @sum: initial sum that is added into the result (32bit unfolded)
  *
  * Returns a 32bit unfolded checksum of the buffer.
  */
index a311cc5..8d6ef78 100644 (file)
@@ -1,5 +1,5 @@
 #include <linux/module.h>
 #include <asm/msr.h>
 
-EXPORT_SYMBOL(native_rdmsr_safe_regs);
-EXPORT_SYMBOL(native_wrmsr_safe_regs);
+EXPORT_SYMBOL(rdmsr_safe_regs);
+EXPORT_SYMBOL(wrmsr_safe_regs);
index 69fa106..f6d13ee 100644 (file)
@@ -6,13 +6,13 @@
 
 #ifdef CONFIG_X86_64
 /*
- * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
+ * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
  *
  * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
  *
  */
 .macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
        CFI_STARTPROC
        pushq_cfi %rbx
        pushq_cfi %rbp
@@ -45,13 +45,13 @@ ENTRY(native_\op\()_safe_regs)
 
        _ASM_EXTABLE(1b, 3b)
        CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
 .endm
 
 #else /* X86_32 */
 
 .macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
        CFI_STARTPROC
        pushl_cfi %ebx
        pushl_cfi %ebp
@@ -92,7 +92,7 @@ ENTRY(native_\op\()_safe_regs)
 
        _ASM_EXTABLE(1b, 3b)
        CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
 .endm
 
 #endif
index 303f086..b2b9443 100644 (file)
@@ -312,7 +312,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
                        goto fail;
                }
                /* both registers must be reserved */
-               if (num_counters == AMD64_NUM_COUNTERS_F15H) {
+               if (num_counters == AMD64_NUM_COUNTERS_CORE) {
                        msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
                        msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
                } else {
@@ -514,7 +514,7 @@ static int op_amd_init(struct oprofile_operations *ops)
        ops->create_files = setup_ibs_files;
 
        if (boot_cpu_data.x86 == 0x15) {
-               num_counters = AMD64_NUM_COUNTERS_F15H;
+               num_counters = AMD64_NUM_COUNTERS_CORE;
        } else {
                num_counters = AMD64_NUM_COUNTERS;
        }
index 3c6e328..028454f 100644 (file)
@@ -110,19 +110,16 @@ static struct kmsg_dumper dw_dumper;
 static int dumper_registered;
 
 static void dw_kmsg_dump(struct kmsg_dumper *dumper,
-                       enum kmsg_dump_reason reason,
-                       const char *s1, unsigned long l1,
-                       const char *s2, unsigned long l2)
+                        enum kmsg_dump_reason reason)
 {
-       int i;
+       static char line[1024];
+       size_t len;
 
        /* When we get here, we'd better re-init the HW */
        mrst_early_console_init();
 
-       for (i = 0; i < l1; i++)
-               early_mrst_console.write(&early_mrst_console, s1 + i, 1);
-       for (i = 0; i < l2; i++)
-               early_mrst_console.write(&early_mrst_console, s2 + i, 1);
+       while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
+               early_mrst_console.write(&early_mrst_console, line, len);
 }
 
 /* Set the ratio rate to 115200, 8n1, IRQ disabled */
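The dumper callback above is converted from the old push interface (two raw log-buffer fragments) to the pull interface, where the dumper fetches complete records one line at a time. A minimal standalone dumper using the same pattern; the names and the fixed-size capture buffer are illustrative only.

#include <linux/kmsg_dump.h>
#include <linux/module.h>
#include <linux/string.h>

static char saved[4096];

static void sketch_dump(struct kmsg_dumper *dumper,
			enum kmsg_dump_reason reason)
{
	static char line[256];
	size_t len, pos = 0;

	/* Pull complete records one at a time and stash them away. */
	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
		if (pos + len > sizeof(saved))
			break;
		memcpy(saved + pos, line, len);
		pos += len;
	}
}

static struct kmsg_dumper sketch_dumper = {
	.dump = sketch_dump,
};

static int __init sketch_init(void)
{
	return kmsg_dump_register(&sketch_dumper);
}

static void __exit sketch_exit(void)
{
	kmsg_dump_unregister(&sketch_dumper);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");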
index 66e6d93..0faad64 100644 (file)
@@ -205,9 +205,9 @@ void syscall32_cpu_init(void)
 {
        /* Load these always in case some future AMD CPU supports
           SYSENTER from compat mode too. */
-       checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-       checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-       checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+       wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
 
        wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
index ff962d4..ed7d549 100644 (file)
@@ -1124,9 +1124,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .wbinvd = native_wbinvd,
 
        .read_msr = native_read_msr_safe,
-       .rdmsr_regs = native_rdmsr_safe_regs,
        .write_msr = xen_write_msr_safe,
-       .wrmsr_regs = native_wrmsr_safe_regs,
 
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
index afb250d..f58dca7 100644 (file)
@@ -80,9 +80,7 @@ static void __cpuinit cpu_bringup(void)
 
        notify_cpu_starting(cpu);
 
-       ipi_call_lock();
        set_cpu_online(cpu, true);
-       ipi_call_unlock();
 
        this_cpu_write(cpu_state, CPU_ONLINE);
 
index 7608559..f973754 100644 (file)
@@ -68,8 +68,8 @@ endif
 
 # Only build variant and/or platform if it includes a Makefile
 
-buildvar := $(shell test -a $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
-buildplf := $(shell test -a $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
+buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
+buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
 
 # Find libgcc.a
 
index 9b306e5..2c8d6a3 100644 (file)
@@ -277,7 +277,7 @@ void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
 
        /* Don't leak any random bits. */
 
-       memset(elfregs, 0, sizeof (elfregs));
+       memset(elfregs, 0, sizeof(*elfregs));
 
        /* Note:  PS.EXCM is not set while user task is running; its
         * being set in regs->ps is for exception handling convenience.
index 88ecea3..ee2e208 100644 (file)
@@ -83,7 +83,6 @@ SECTIONS
 
   _text = .;
   _stext = .;
-  _ftext = .;
 
   .text :
   {
@@ -112,7 +111,7 @@ SECTIONS
   EXCEPTION_TABLE(16)
   /* Data section */
 
-  _fdata = .;
+  _sdata = .;
   RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
   _edata = .;
 
index ba150e5..db95517 100644 (file)
 
 #include <asm/bootparam.h>
 #include <asm/page.h>
-
-/* References to section boundaries */
-
-extern char _ftext, _etext, _fdata, _edata, _rodata_end;
-extern char __init_begin, __init_end;
+#include <asm/sections.h>
 
 /*
  * mem_reserve(start, end, must_exist)
@@ -197,9 +193,9 @@ void __init mem_init(void)
                        reservedpages++;
        }
 
-       codesize =  (unsigned long) &_etext - (unsigned long) &_ftext;
-       datasize =  (unsigned long) &_edata - (unsigned long) &_fdata;
-       initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+       codesize =  (unsigned long) _etext - (unsigned long) _stext;
+       datasize =  (unsigned long) _edata - (unsigned long) _sdata;
+       initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;
 
        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
               "%ldk data, %ldk init %ldk highmem)\n",
@@ -237,7 +233,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 
 void free_initmem(void)
 {
-       free_reserved_mem(&__init_begin, &__init_end);
-       printk("Freeing unused kernel memory: %dk freed\n",
-              (&__init_end - &__init_begin) >> 10);
+       free_reserved_mem(__init_begin, __init_end);
+       printk("Freeing unused kernel memory: %zuk freed\n",
+              (__init_end - __init_begin) >> 10);
 }
index 02cf633..e7dee61 100644 (file)
@@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 
                blkg->pd[i] = pd;
                pd->blkg = blkg;
-       }
-
-       /* invoke per-policy init */
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
 
+               /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }
@@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
-       struct request_queue *q = blkg->q;
        struct blkcg *blkcg = blkg->blkcg;
 
-       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
 
        /* Something wrong if we are trying to remove same group twice */
index 3c923a7..93eb3e4 100644 (file)
@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
  */
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
+       int i;
+
        while (true) {
                bool drain = false;
-               int i;
 
                spin_lock_irq(q->queue_lock);
 
@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        break;
                msleep(10);
        }
+
+       /*
+        * With queue marked dead, any woken up waiter will fail the
+        * allocation path, so the wakeup chaining is lost and we're
+        * left with hung waiters. We need to wake up those waiters.
+        */
+       if (q->request_fn) {
+               spin_lock_irq(q->queue_lock);
+               for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
+                       wake_up_all(&q->rq.wait[i]);
+               spin_unlock_irq(q->queue_lock);
+       }
 }
 
 /**
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
        /* mark @q DEAD, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
        spin_lock_irq(lock);
 
        /*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-       if (q->queue_lock != &q->__queue_lock)
-               q->queue_lock = &q->__queue_lock;
-
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
 
+       spin_lock_irq(lock);
+       if (q->queue_lock != &q->__queue_lock)
+               q->queue_lock = &q->__queue_lock;
+       spin_unlock_irq(lock);
+
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
index 7803548..6e4744c 100644 (file)
@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
                mod_timer(&q->timeout, expiry);
 }
 
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue:     pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
-       unsigned long flags;
-       struct request *rq, *tmp;
-       LIST_HEAD(list);
-
-       /*
-        * Not a request based block device, nothing to abort
-        */
-       if (!q->request_fn)
-               return;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       elv_abort_queue(q);
-
-       /*
-        * Splice entries to local list, to avoid deadlocking if entries
-        * get readded to the timeout list by error handling
-        */
-       list_splice_init(&q->timeout_list, &list);
-
-       list_for_each_entry_safe(rq, tmp, &list, timeout_list)
-               blk_abort_request(rq);
-
-       /*
-        * Occasionally, blk_abort_request() will return without
-        * deleting the element from the list. Make sure we add those back
-        * instead of leaving them on the local stack list.
-        */
-       list_splice(&list, &q->timeout_list);
-
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-}
-EXPORT_SYMBOL_GPL(blk_abort_queue);
index 673c977..fb52df9 100644 (file)
@@ -17,8 +17,6 @@
 #include "blk.h"
 #include "blk-cgroup.h"
 
-static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
-
 /*
  * tunables
  */
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 }
 
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
-       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 {
        return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
        return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-#ifndef CONFIG_CFQ_GROUP_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
        kfree(cfqd->root_group);
 #endif
-       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
        kfree(cfqd);
 }
 
@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
        if (!cfq_group_idle)
                cfq_group_idle = 1;
-#else
-               cfq_group_idle = 0;
-#endif
 
        ret = blkcg_policy_register(&blkcg_policy_cfq);
        if (ret)
                return ret;
+#else
+       cfq_group_idle = 0;
+#endif
 
+       ret = -ENOMEM;
        cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
 err_free_pool:
        kmem_cache_destroy(cfq_pool);
 err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        return ret;
 }
 
 static void __exit cfq_exit(void)
 {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        elv_unregister(&iosched_cfq);
        kmem_cache_destroy(cfq_pool);
 }
index 260fa80..9a87daa 100644 (file)
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
                break;
        }
 
+       if (capable(CAP_SYS_RAWIO))
+               return 0;
+
        /* In particular, rule out all resets and host-specific ioctls.  */
        printk_ratelimited(KERN_WARNING
                           "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
 
-       return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+       return -ENOIOCTLCMD;
 }
 EXPORT_SYMBOL(scsi_verify_blk_ioctl);
 
index a43fa1a..1502c50 100644 (file)
@@ -36,6 +36,7 @@
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
 static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
 
 static unsigned long power_saving_mwait_eax;
 
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return;
 
-       mutex_lock(&isolated_cpus_lock);
+       mutex_lock(&round_robin_lock);
        cpumask_clear(tmp);
        for_each_cpu(cpu, pad_busy_cpus)
                cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        if (cpumask_empty(tmp))
                cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
        if (cpumask_empty(tmp)) {
-               mutex_unlock(&isolated_cpus_lock);
+               mutex_unlock(&round_robin_lock);
                return;
        }
        for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        tsk_in_cpu[tsk_index] = preferred_cpu;
        cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
        cpu_weight[preferred_cpu]++;
-       mutex_unlock(&isolated_cpus_lock);
+       mutex_unlock(&round_robin_lock);
 
        set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
 }
index 0ed85ca..615996a 100644 (file)
@@ -95,18 +95,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
                return_ACPI_STATUS(status);
        }
 
-       if (sleep_state != ACPI_STATE_S5) {
-               /*
-                * Disable BM arbitration. This feature is contained within an
-                * optional register (PM2 Control), so ignore a BAD_ADDRESS
-                * exception.
-                */
-               status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-               if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
-                       return_ACPI_STATUS(status);
-               }
-       }
-
        /*
         * 1) Disable/Clear all GPEs
         * 2) Enable all wakeup GPEs
@@ -364,16 +352,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
                                    [ACPI_EVENT_POWER_BUTTON].
                                    status_register_id, ACPI_CLEAR_STATUS);
 
-       /*
-        * Enable BM arbitration. This feature is contained within an
-        * optional register (PM2 Control), so ignore a BAD_ADDRESS
-        * exception.
-        */
-       status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
-       if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
-               return_ACPI_STATUS(status);
-       }
-
        acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
        return_ACPI_STATUS(status);
 }
index 23ce096..fe66260 100644 (file)
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
                        /* Create the new outer package and populate it */
 
                        status =
-                           acpi_ns_wrap_with_package(data, *elements,
+                           acpi_ns_wrap_with_package(data, return_object,
                                                      return_object_ptr);
                        if (ACPI_FAILURE(status)) {
                                return (status);
index 5577762..6686b1e 100644 (file)
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
        u8 ins = entry->instruction;
 
        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-               return acpi_os_map_generic_address(&entry->register_region);
+               return apei_map_generic_address(&entry->register_region);
 
        return 0;
 }
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
        u8 ins = entry->instruction;
 
        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-               acpi_os_unmap_generic_address(&entry->register_region);
+               apei_unmap_generic_address(&entry->register_region);
 
        return 0;
 }
@@ -606,6 +606,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
        return 0;
 }
 
+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+       int rc;
+       u32 access_bit_width;
+       u64 address;
+
+       rc = apei_check_gar(reg, &address, &access_bit_width);
+       if (rc)
+               return rc;
+       return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
 /* read GAR in interrupt (including NMI) or process context */
 int apei_read(u64 *val, struct acpi_generic_address *reg)
 {
index cca240a..f220d64 100644 (file)
@@ -7,6 +7,8 @@
 #define APEI_INTERNAL_H
 
 #include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
 
 struct apei_exec_context;
 
@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
 /* IP has been set in instruction function */
 #define APEI_EXEC_SET_IP       1
 
+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+       acpi_os_unmap_generic_address(reg);
+}
+
 int apei_read(u64 *val, struct acpi_generic_address *reg);
 int apei_write(u64 val, struct acpi_generic_address *reg);
 
index 9b3cac0..1599566 100644 (file)
@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
        if (!ghes)
                return ERR_PTR(-ENOMEM);
        ghes->generic = generic;
-       rc = acpi_os_map_generic_address(&generic->error_status_address);
+       rc = apei_map_generic_address(&generic->error_status_address);
        if (rc)
                goto err_free;
        error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
        return ghes;
 
 err_unmap:
-       acpi_os_unmap_generic_address(&generic->error_status_address);
+       apei_unmap_generic_address(&generic->error_status_address);
 err_free:
        kfree(ghes);
        return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
 static void ghes_fini(struct ghes *ghes)
 {
        kfree(ghes->estatus);
-       acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+       apei_unmap_generic_address(&ghes->generic->error_status_address);
 }
 
 enum {
index c850de4..eff7222 100644 (file)
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
-                * Ignores apic_id and always return 0 for CPU0's handle.
+                * Ignores apic_id and always returns 0 for the processor
+                * handle with acpi id 0 if nr_cpu_ids is 1.
+                * This should be the case if SMP tables are not found.
                 * Return -1 for other CPU's handle.
                 */
-               if (acpi_id == 0)
+               if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
                        return apic_id;
index f3decb3..47a8caa 100644 (file)
@@ -224,6 +224,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 /*
  * Suspend / resume control
  */
+static int acpi_idle_suspend;
 static u32 saved_bm_rld;
 
 static void acpi_idle_bm_rld_save(void)
@@ -242,13 +243,21 @@ static void acpi_idle_bm_rld_restore(void)
 
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
+       if (acpi_idle_suspend == 1)
+               return 0;
+
        acpi_idle_bm_rld_save();
+       acpi_idle_suspend = 1;
        return 0;
 }
 
 int acpi_processor_resume(struct acpi_device * device)
 {
+       if (acpi_idle_suspend == 0)
+               return 0;
+
        acpi_idle_bm_rld_restore();
+       acpi_idle_suspend = 0;
        return 0;
 }
 
@@ -754,6 +763,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EBUSY;
+       }
+
        lapic_timer_state_broadcast(pr, cx, 1);
        kt1 = ktime_get_real();
        acpi_idle_do_entry(cx);
@@ -823,6 +838,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EBUSY;
+       }
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
@@ -907,14 +928,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                                                drv, drv->safe_state_index);
                } else {
                        local_irq_disable();
-                       acpi_safe_halt();
+                       if (!acpi_idle_suspend)
+                               acpi_safe_halt();
                        local_irq_enable();
-                       return -EINVAL;
+                       return -EBUSY;
                }
        }
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EBUSY;
+       }
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
index 9f66181..240a244 100644 (file)
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 {
        int result = 0;
 
-       if (!strncmp(val, "enable", strlen("enable") - 1)) {
+       if (!strncmp(val, "enable", strlen("enable"))) {
                result = acpi_debug_trace(trace_method_name, trace_debug_level,
                                          trace_debug_layer, 0);
                if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
                goto exit;
        }
 
-       if (!strncmp(val, "disable", strlen("disable") - 1)) {
+       if (!strncmp(val, "disable", strlen("disable"))) {
                int name = 0;
                result = acpi_debug_trace((char *)&name, trace_debug_level,
                                          trace_debug_layer, 0);
index a576575..1e0a9e1 100644 (file)
@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
        union acpi_object arg0 = { ACPI_TYPE_INTEGER };
        struct acpi_object_list args = { 1, &arg0 };
 
+       if (!video->cap._DOS)
+               return 0;
 
        if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
                return -EINVAL;
index 3239517..ac6a5be 100644 (file)
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller source file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -959,7 +959,7 @@ static struct platform_driver arasan_cf_driver = {
 
 module_platform_driver(arasan_cf_driver);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
index 1b1cbb5..4b01ab3 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/wait.h>
 #include <linux/async.h>
 #include <linux/pm_runtime.h>
+#include <scsi/scsi_scan.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -100,7 +101,7 @@ static void driver_deferred_probe_add(struct device *dev)
        mutex_lock(&deferred_probe_mutex);
        if (list_empty(&dev->p->deferred_probe)) {
                dev_dbg(dev, "Added to deferred list\n");
-               list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+               list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
        }
        mutex_unlock(&deferred_probe_mutex);
 }
@@ -332,6 +333,7 @@ void wait_for_device_probe(void)
        /* wait for the known devices to complete their probing */
        wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
        async_synchronize_full();
+       scsi_complete_async_scans();
 }
 EXPORT_SYMBOL_GPL(wait_for_device_probe);
 
index e0fb5b0..9cb845e 100644 (file)
@@ -1031,7 +1031,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        dpm_wait_for_children(dev, async);
 
        if (async_error)
-               return 0;
+               goto Complete;
 
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1040,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
-               return 0;
+               goto Complete;
        }
 
        device_lock(dev);
@@ -1097,6 +1097,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        }
 
        device_unlock(dev);
+
+ Complete:
        complete_all(&dev->power.completion);
 
        if (error) {
index b5c5ff5..fcb956b 100644 (file)
@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
                first_word = 0;
                spin_lock_irq(&b->bm_lock);
        }
-
        /* last page (respectively only page, for first page == last page) */
        last_word = MLPP(el >> LN2_BPL);
-       bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+       /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+        * ==> e = 32767, el = 32768, last_page = 2,
+        * and now last_word = 0.
+        * We do not want to touch last_page in this case,
+        * as we did not allocate it, it is not present in bitmap->bm_pages.
+        */
+       if (last_word)
+               bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
 
        /* possibly trailing bits.
         * example: (e & 63) == 63, el will be e+1.
index 9c5c849..8e93a6a 100644 (file)
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
-               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+               if (req->rq_state & RQ_LOCAL_ABORTED) {
+                       _req_may_be_done(req, m);
+                       break;
+               }
 
                __drbd_chk_io_error(mdev, false);
 
        goto_queue_for_net_read:
 
+               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
                /* no point in retrying if there is no good remote data,
                 * or we have no connection. */
                if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
        return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }
 
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+       int congested = 0;
+
+       /* If I don't even have good local storage, we can not reasonably try
+        * to pull ahead of the peer. We also need the local reference to make
+        * sure mdev->act_log is there.
+        * Note: caller has to make sure that net_conf is there.
+        */
+       if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+               return;
+
+       if (mdev->net_conf->cong_fill &&
+           atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+               dev_info(DEV, "Congestion-fill threshold reached\n");
+               congested = 1;
+       }
+
+       if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+               dev_info(DEV, "Congestion-extents threshold reached\n");
+               congested = 1;
+       }
+
+       if (congested) {
+               queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+               if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+                       _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+               else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+                       _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+       }
+       put_ldev(mdev);
+}
+
 static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
        const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
                _req_mod(req, queue_for_send_oos);
 
        if (remote &&
-           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
-               int congested = 0;
-
-               if (mdev->net_conf->cong_fill &&
-                   atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
-                       dev_info(DEV, "Congestion-fill threshold reached\n");
-                       congested = 1;
-               }
-
-               if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
-                       dev_info(DEV, "Congestion-extents threshold reached\n");
-                       congested = 1;
-               }
-
-               if (congested) {
-                       queue_barrier(mdev); /* last barrier, after mirrored writes */
-
-                       if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
-                               _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
-                       else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
-                               _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
-               }
-       }
+           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+               maybe_pull_ahead(mdev);
 
        spin_unlock_irq(&mdev->req_lock);
        kfree(b); /* if someone else has beaten us to it... */
index cce7df3..553f43a 100644 (file)
@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)
 
        if (drive == current_reqD)
                drive = current_drive;
+       __cancel_delayed_work(&fd_timeout);
 
        if (drive < 0 || drive >= N_DRIVE) {
                delay = 20UL * HZ;
index bbca966..3bba655 100644 (file)
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
        struct gendisk *disk;
        int err;
 
+       err = -ENOMEM;
        lo = kzalloc(sizeof(*lo), GFP_KERNEL);
-       if (!lo) {
-               err = -ENOMEM;
+       if (!lo)
                goto out;
-       }
 
-       err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
-       if (err < 0)
+       if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
                goto out_free_dev;
 
        if (i >= 0) {
index 264bc77..a8fddeb 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
+#include <linux/debugfs.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ         (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
  * allocated in mtip_init().
  */
 static int mtip_major;
+static struct dentry *dfs_parent;
 
 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 }
 
 /*
- * Sysfs register/status dump.
+ * Sysfs status dump.
  *
  * @dev  Pointer to the device structure, passed by the kernel.
  * @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
  * return value
  *     The size, in bytes, of the data copied into buf.
  */
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
 {
-       u32 group_allocated;
        struct driver_data *dd = dev_to_disk(dev)->private_data;
        int size = 0;
+
+       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "thermal_shutdown\n");
+       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "write_protect\n");
+       else
+               size += sprintf(buf, "%s", "online\n");
+
+       return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
+{
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       u32 group_allocated;
+       int size = *offset;
        int n;
 
-       size += sprintf(&buf[size], "Hardware\n--------\n");
-       size += sprintf(&buf[size], "S ACTive      : [ 0x");
+       if (!len || size)
+               return 0;
+
+       if (size < 0)
+               return -EINVAL;
+
+       size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                         readl(dd->port->s_active[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Command Issue : [ 0x");
+       size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                        readl(dd->port->cmd_issue[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Completed     : [ 0x");
+       size += sprintf(&buf[size], "H/ Completed     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                readl(dd->port->completed[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->port->mmio + PORT_IRQ_STAT));
-       size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->mmio + HOST_IRQ_STAT));
        size += sprintf(&buf[size], "\n");
 
-       size += sprintf(&buf[size], "Local\n-----\n");
-       size += sprintf(&buf[size], "Allocated    : [ 0x");
+       size += sprintf(&buf[size], "L/ Allocated     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       size += sprintf(&buf[size], "Commands in Q: [ 0x");
+       size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static ssize_t mtip_hw_show_status(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
 {
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       int size = *offset;
 
-       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "thermal_shutdown\n");
-       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "write_protect\n");
-       else
-               size += sprintf(buf, "%s", "online\n");
-
-       return size;
-}
+       if (!len || size)
+               return 0;
 
-static ssize_t mtip_hw_show_flags(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       if (size < 0)
+               return -EINVAL;
 
-       size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
                                                        dd->port->flags);
-       size += sprintf(&buf[size], "Flag in dd struct   : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
                                                        dd->dd_flag);
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
+static const struct file_operations mtip_regs_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_registers,
+       .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_flags,
+       .llseek = no_llseek,
+};
 
 /*
  * Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       if (sysfs_create_file(kobj, &dev_attr_registers.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'registers' sysfs entry\n");
        if (sysfs_create_file(kobj, &dev_attr_status.attr))
                dev_warn(&dd->pdev->dev,
                        "Error creating 'status' sysfs entry\n");
-       if (sysfs_create_file(kobj, &dev_attr_flags.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'flags' sysfs entry\n");
        return 0;
 }
 
@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       sysfs_remove_file(kobj, &dev_attr_registers.attr);
        sysfs_remove_file(kobj, &dev_attr_status.attr);
-       sysfs_remove_file(kobj, &dev_attr_flags.attr);
 
        return 0;
 }
 
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+       if (!dfs_parent)
+               return -1;
+
+       dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+       if (IS_ERR_OR_NULL(dd->dfs_node)) {
+               dev_warn(&dd->pdev->dev,
+                       "Error creating node %s under debugfs\n",
+                                               dd->disk->disk_name);
+               dd->dfs_node = NULL;
+               return -1;
+       }
+
+       debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_flags_fops);
+       debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_regs_fops);
+
+       return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+       debugfs_remove_recursive(dd->dfs_node);
+}
+
+
 /*
  * Perform any init/resume time hardware setup
  *
@@ -3730,6 +3784,7 @@ skip_create_disk:
                mtip_hw_sysfs_init(dd, kobj);
                kobject_put(kobj);
        }
+       mtip_hw_debugfs_init(dd);
 
        if (dd->mtip_svc_handler) {
                set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
        return rv;
 
 kthread_run_error:
+       mtip_hw_debugfs_exit(dd);
+
        /* Delete our gendisk. This also removes the device from /dev */
        del_gendisk(dd->disk);
 
@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
                        kobject_put(kobj);
                }
        }
+       mtip_hw_debugfs_exit(dd);
 
        /*
         * Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
        }
        mtip_major = error;
 
+       if (!dfs_parent) {
+               dfs_parent = debugfs_create_dir("rssd", NULL);
+               if (IS_ERR_OR_NULL(dfs_parent)) {
+                       printk(KERN_WARNING "Error creating debugfs parent\n");
+                       dfs_parent = NULL;
+               }
+       }
+
        /* Register our PCI operations. */
        error = pci_register_driver(&mtip_pci_driver);
-       if (error)
+       if (error) {
+               debugfs_remove(dfs_parent);
                unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+       }
 
        return error;
 }
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
  */
 static void __exit mtip_exit(void)
 {
+       debugfs_remove_recursive(dfs_parent);
+
        /* Release the allocated major block device number. */
        unregister_blkdev(mtip_major, MTIP_DRV_NAME);
 
index b2c88da..f51fc23 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/genhd.h>
-#include <linux/version.h>
 
 /* Offset of Subsystem Device ID in pci configuration space */
 #define PCI_SUBSYSTEM_DEVICEID 0x2E
  #define dbg_printk(format, arg...)
 #endif
 
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
 #define __force_bit2int (unsigned int __force)
 
 enum {
@@ -447,6 +448,8 @@ struct driver_data {
        unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
 
        struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+       struct dentry *dfs_node;
 };
 
 #endif
index 65665c9..8f428a8 100644 (file)
@@ -499,7 +499,7 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
                         / sizeof (*ondisk))
                return -EINVAL;
        header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
-                               snap_count * sizeof (*ondisk),
+                               snap_count * sizeof(u64),
                                gfp_flags);
        if (!header->snapc)
                return -ENOMEM;
@@ -977,7 +977,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
        op = (void *)(replyhead + 1);
        rc = le32_to_cpu(replyhead->result);
        bytes = le64_to_cpu(op->extent.length);
-       read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ);
+       read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
 
        dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
 
index aa27120..9a72277 100644 (file)
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
        }
 }
 
+struct mm_plug_cb {
+       struct blk_plug_cb cb;
+       struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+       struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+       spin_lock_irq(&mmcb->card->lock);
+       activate(mmcb->card);
+       spin_unlock_irq(&mmcb->card->lock);
+       kfree(mmcb);
+}
+
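+/*
+ * Ensure an unplug callback for this card is registered on the current
+ * task's blk_plug. Returns 1 if the request can be deferred to unplug
+ * time, 0 if the caller must activate the card immediately.
+ */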
+static int mm_check_plugged(struct cardinfo *card)
+{
+       struct blk_plug *plug = current->plug;
+       struct mm_plug_cb *mmcb;
+
+       if (!plug)
+               return 0;
+
+       list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+               if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+                       return 1;
+       }
+       /* Not currently on the callback list */
+       mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+       if (!mmcb)
+               return 0;
+
+       mmcb->card = card;
+       mmcb->cb.callback = mm_unplug;
+       list_add(&mmcb->cb.list, &plug->cb_list);
+       return 1;
+}
+
 static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
        *card->biotail = bio;
        bio->bi_next = NULL;
        card->biotail = &bio->bi_next;
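+       /* Start the card now for sync bios or when no unplug callback could be registered */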
+       if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+               activate(card);
        spin_unlock_irq(&card->lock);
 
        return;
index 773cf27..9ad3b5e 100644 (file)
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
index 60eed4b..e4fb337 100644 (file)
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
        return free;
 }
 
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
 {
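+       /* Reject ids the backend should never hand back (stale or never issued) */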
+       if (info->shadow[id].req.u.rw.id != id)
+               return -EINVAL;
+       if (info->shadow[id].request == NULL)
+               return -EINVAL;
        info->shadow[id].req.u.rw.id  = info->shadow_free;
        info->shadow[id].request = NULL;
        info->shadow_free = id;
+       return 0;
 }
 
+static const char *op_name(int op)
+{
+       static const char *const names[] = {
+               [BLKIF_OP_READ] = "read",
+               [BLKIF_OP_WRITE] = "write",
+               [BLKIF_OP_WRITE_BARRIER] = "barrier",
+               [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+               [BLKIF_OP_DISCARD] = "discard" };
+
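+       /* Map out-of-range or unused opcodes to fixed strings for safe logging */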
+       if (op < 0 || op >= ARRAY_SIZE(names))
+               return "unknown";
+
+       if (!names[op])
+               return "reserved";
+
+       return names[op];
+}
+
 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
 {
        unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
+               /*
+                * The backend has messed up and given us an id that we would
+                * never have given to it (we stamp it up to BLK_RING_SIZE -
+                * look in get_id_from_freelist).
+                */
+               if (id >= BLK_RING_SIZE) {
+                       WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       /* We can't safely get the 'struct request' as
+                        * the id is busted. */
+                       continue;
+               }
                req  = info->shadow[id].request;
 
                if (bret->operation != BLKIF_OP_DISCARD)
                        blkif_completion(&info->shadow[id]);
 
-               add_id_to_freelist(info, id);
+               if (add_id_to_freelist(info, id)) {
+                       WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       continue;
+               }
 
                error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
-                               printk(KERN_WARNING "blkfront: %s: discard op failed\n",
-                                          info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                          info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-                               printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
-                               printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(error)) {
index ad591bd..10308cd 100644 (file)
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
 
        /* Atheros AR3011 with sflash firmware*/
        { USB_DEVICE(0x0CF3, 0x3002) },
+       { USB_DEVICE(0x0CF3, 0xE019) },
        { USB_DEVICE(0x13d3, 0x3304) },
        { USB_DEVICE(0x0930, 0x0215) },
        { USB_DEVICE(0x0489, 0xE03D) },
@@ -77,6 +78,7 @@ static struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x3005) },
        { USB_DEVICE(0x13d3, 0x3362) },
        { USB_DEVICE(0x0CF3, 0xE004) },
+       { USB_DEVICE(0x0930, 0x0219) },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE02C) },
@@ -101,6 +103,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU22 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
index 94f2d65..27068d1 100644 (file)
@@ -136,7 +136,7 @@ int btmrvl_remove_card(struct btmrvl_private *priv);
 
 void btmrvl_interrupt(struct btmrvl_private *priv);
 
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
 int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
 
 int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
index 681ca9d..dc304de 100644 (file)
@@ -44,23 +44,33 @@ void btmrvl_interrupt(struct btmrvl_private *priv)
 }
 EXPORT_SYMBOL_GPL(btmrvl_interrupt);
 
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
 {
        struct hci_event_hdr *hdr = (void *) skb->data;
        struct hci_ev_cmd_complete *ec;
-       u16 opcode, ocf;
+       u16 opcode, ocf, ogf;
 
        if (hdr->evt == HCI_EV_CMD_COMPLETE) {
                ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
                opcode = __le16_to_cpu(ec->opcode);
                ocf = hci_opcode_ocf(opcode);
+               ogf = hci_opcode_ogf(opcode);
+
                if (ocf == BT_CMD_MODULE_CFG_REQ &&
                                        priv->btmrvl_dev.sendcmdflag) {
                        priv->btmrvl_dev.sendcmdflag = false;
                        priv->adapter->cmd_complete = true;
                        wake_up_interruptible(&priv->adapter->cmd_wait_q);
                }
+
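+               /* Vendor-specific (OGF) events are consumed here and never passed to the HCI core */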
+               if (ogf == OGF) {
+                       BT_DBG("vendor event skipped: ogf 0x%4.4x", ogf);
+                       kfree_skb(skb);
+                       return false;
+               }
        }
+
+       return true;
 }
 EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt);
 
index a853244..0cd61d9 100644 (file)
@@ -562,10 +562,12 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
                skb_put(skb, buf_len);
                skb_pull(skb, SDIO_HEADER_LEN);
 
-               if (type == HCI_EVENT_PKT)
-                       btmrvl_check_evtpkt(priv, skb);
+               if (type == HCI_EVENT_PKT) {
+                       if (btmrvl_check_evtpkt(priv, skb))
+                               hci_recv_frame(skb);
+               } else {
+                       hci_recv_frame(skb);
+               }
 
-               hci_recv_frame(skb);
                hdev->stat.byte_rx += buf_len;
                break;
 
index c9463af..83ebb24 100644 (file)
@@ -125,6 +125,7 @@ static struct usb_device_id blacklist_table[] = {
 
        /* Atheros 3011 with sflash firmware */
        { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+       { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
@@ -139,6 +140,7 @@ static struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
index 6289f0e..731c904 100644 (file)
@@ -34,7 +34,7 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
        u32 *data = buf;
 
        /* data ready? */
-       if (readl(trng->base + TRNG_ODATA) & 1) {
+       if (readl(trng->base + TRNG_ISR) & 1) {
                *data = readl(trng->base + TRNG_ODATA);
                /*
                  ensure data ready is only set again AFTER the next data
index 687b00d..9a1eb0c 100644 (file)
@@ -850,18 +850,21 @@ static void clk_change_rate(struct clk *clk)
 {
        struct clk *child;
        unsigned long old_rate;
+       unsigned long best_parent_rate = 0;
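+       /* stays 0 when this clk has no parent, i.e. it is a root clock */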
        struct hlist_node *tmp;
 
        old_rate = clk->rate;
 
+       if (clk->parent)
+               best_parent_rate = clk->parent->rate;
+
        if (clk->ops->set_rate)
-               clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);
+               clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
 
        if (clk->ops->recalc_rate)
-               clk->rate = clk->ops->recalc_rate(clk->hw,
-                               clk->parent->rate);
+               clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
        else
-               clk->rate = clk->parent->rate;
+               clk->rate = best_parent_rate;
 
        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
@@ -999,7 +1002,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        if (!clk->parents)
                clk->parents =
-                       kmalloc((sizeof(struct clk*) * clk->num_parents),
+                       kzalloc((sizeof(struct clk*) * clk->num_parents),
                                        GFP_KERNEL);
 
        if (!clk->parents)
@@ -1064,21 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
 
        old_parent = clk->parent;
 
-       /* find index of new parent clock using cached parent ptrs */
-       for (i = 0; i < clk->num_parents; i++)
-               if (clk->parents[i] == parent)
-                       break;
+       if (!clk->parents)
+               clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+                                                               GFP_KERNEL);
 
        /*
-        * find index of new parent clock using string name comparison
-        * also try to cache the parent to avoid future calls to __clk_lookup
+        * find index of new parent clock using cached parent ptrs,
+        * or if not yet cached, use string name comparison and cache
+        * them now to avoid future calls to __clk_lookup.
         */
-       if (i == clk->num_parents)
-               for (i = 0; i < clk->num_parents; i++)
-                       if (!strcmp(clk->parent_names[i], parent->name)) {
+       for (i = 0; i < clk->num_parents; i++) {
+               if (clk->parents && clk->parents[i] == parent)
+                       break;
+               else if (!strcmp(clk->parent_names[i], parent->name)) {
+                       if (clk->parents)
                                clk->parents[i] = __clk_lookup(parent->name);
-                               break;
-                       }
+                       break;
+               }
+       }
 
        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
index f7be225..db2391c 100644 (file)
@@ -71,7 +71,7 @@ static void __init clk_misc_init(void)
        __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
 }
 
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
        { .dev_id = "duart", },
        { .dev_id = "mxs-auart.0", },
        { .dev_id = "mxs-auart.1", },
@@ -80,31 +80,31 @@ static struct clk_lookup uart_lookups[] __initdata = {
        { .dev_id = "80070000.serial", },
 };
 
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
        { .dev_id = "imx23-dma-apbh", },
        { .dev_id = "80004000.dma-apbh", },
 };
 
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
        { .dev_id = "duart", .con_id = "apb_pclk"},
        { .dev_id = "80070000.serial", .con_id = "apb_pclk"},
        { .dev_id = "imx23-dma-apbx", },
        { .dev_id = "80024000.dma-apbx", },
 };
 
-static struct clk_lookup ssp_lookups[] __initdata = {
+static struct clk_lookup ssp_lookups[] = {
        { .dev_id = "imx23-mmc.0", },
        { .dev_id = "imx23-mmc.1", },
        { .dev_id = "80010000.ssp", },
        { .dev_id = "80034000.ssp", },
 };
 
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
        { .dev_id = "imx23-fb", },
        { .dev_id = "80030000.lcdif", },
 };
 
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
        { .dev_id = "imx23-gpmi-nand", },
        { .dev_id = "8000c000.gpmi", },
 };
index 2826a26..7fad6c8 100644 (file)
@@ -120,7 +120,7 @@ static void __init clk_misc_init(void)
        writel_relaxed(val, FRAC0);
 }
 
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
        { .dev_id = "duart", },
        { .dev_id = "mxs-auart.0", },
        { .dev_id = "mxs-auart.1", },
@@ -135,71 +135,71 @@ static struct clk_lookup uart_lookups[] __initdata = {
        { .dev_id = "80074000.serial", },
 };
 
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
        { .dev_id = "imx28-dma-apbh", },
        { .dev_id = "80004000.dma-apbh", },
 };
 
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
        { .dev_id = "duart", .con_id = "apb_pclk"},
        { .dev_id = "80074000.serial", .con_id = "apb_pclk"},
        { .dev_id = "imx28-dma-apbx", },
        { .dev_id = "80024000.dma-apbx", },
 };
 
-static struct clk_lookup ssp0_lookups[] __initdata = {
+static struct clk_lookup ssp0_lookups[] = {
        { .dev_id = "imx28-mmc.0", },
        { .dev_id = "80010000.ssp", },
 };
 
-static struct clk_lookup ssp1_lookups[] __initdata = {
+static struct clk_lookup ssp1_lookups[] = {
        { .dev_id = "imx28-mmc.1", },
        { .dev_id = "80012000.ssp", },
 };
 
-static struct clk_lookup ssp2_lookups[] __initdata = {
+static struct clk_lookup ssp2_lookups[] = {
        { .dev_id = "imx28-mmc.2", },
        { .dev_id = "80014000.ssp", },
 };
 
-static struct clk_lookup ssp3_lookups[] __initdata = {
+static struct clk_lookup ssp3_lookups[] = {
        { .dev_id = "imx28-mmc.3", },
        { .dev_id = "80016000.ssp", },
 };
 
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
        { .dev_id = "imx28-fb", },
        { .dev_id = "80030000.lcdif", },
 };
 
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
        { .dev_id = "imx28-gpmi-nand", },
        { .dev_id = "8000c000.gpmi", },
 };
 
-static struct clk_lookup fec_lookups[] __initdata = {
+static struct clk_lookup fec_lookups[] = {
        { .dev_id = "imx28-fec.0", },
        { .dev_id = "imx28-fec.1", },
        { .dev_id = "800f0000.ethernet", },
        { .dev_id = "800f4000.ethernet", },
 };
 
-static struct clk_lookup can0_lookups[] __initdata = {
+static struct clk_lookup can0_lookups[] = {
        { .dev_id = "flexcan.0", },
        { .dev_id = "80032000.can", },
 };
 
-static struct clk_lookup can1_lookups[] __initdata = {
+static struct clk_lookup can1_lookups[] = {
        { .dev_id = "flexcan.1", },
        { .dev_id = "80034000.can", },
 };
 
-static struct clk_lookup saif0_lookups[] __initdata = {
+static struct clk_lookup saif0_lookups[] = {
        { .dev_id = "mxs-saif.0", },
        { .dev_id = "80042000.saif", },
 };
 
-static struct clk_lookup saif1_lookups[] __initdata = {
+static struct clk_lookup saif1_lookups[] = {
        { .dev_id = "mxs-saif.1", },
        { .dev_id = "80046000.saif", },
 };
@@ -245,8 +245,8 @@ int __init mx28_clocks_init(void)
        clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
        clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
        clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
-       clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2);
-       clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3);
+       clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2);
+       clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3);
        clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
        clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
        clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
index af34074..6756e7c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 4dbdb3f..958aa3a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index b471c97..1afc18c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index dcd4bdf..5f1b6ba 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 376d4e5..7cd6378 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 3321c46..9317376 100644 (file)
@@ -2,7 +2,7 @@
  * Clock framework definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 42b68df..0fcec2a 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1310 machine clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -345,31 +345,30 @@ static struct frac_rate_tbl gen_rtbl[] = {
 /* clock parents */
 static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
 static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
-static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+static const char *uart0_parents[] = { "pll5_clk", "uart_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
        "osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
-       "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
 static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
 static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
        "i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
 static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
        "pll3_clk", };
 static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
        "pll2_clk", };
 static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
-       "ras_pll2_clk", "ras_synth0_clk", };
+       "ras_pll2_clk", "ras_syn0_clk", };
 static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
-       "ras_pll2_clk", "ras_synth0_clk", };
-static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
-static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
-static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
+       "ras_pll2_clk", "ras_syn0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_syn3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_syn1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_syn1_clk",
        "ras_plclk0_clk", };
-static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
-static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_syn2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_syn1_clk", };
 
 void __init spear1310_clk_init(void)
 {
@@ -390,9 +389,9 @@ void __init spear1310_clk_init(void)
                        25000000);
        clk_register_clkdev(clk, "osc_25m_clk", NULL);
 
-       clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
-                       CLK_IS_ROOT, 125000000);
-       clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+       clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+                       125000000);
+       clk_register_clkdev(clk, "gmii_pad_clk", NULL);
 
        clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
                        CLK_IS_ROOT, 12288000);
@@ -406,34 +405,34 @@ void __init spear1310_clk_init(void)
 
        /* clock derived from 24 or 25 MHz osc clk */
        /* vco-pll */
-       clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
                        SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco1_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+       clk_register_clkdev(clk, "vco1_mclk", NULL);
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
                        0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco1_clk", NULL);
        clk_register_clkdev(clk1, "pll1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
                        SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco2_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+       clk_register_clkdev(clk, "vco2_mclk", NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
                        0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco2_clk", NULL);
        clk_register_clkdev(clk1, "pll2_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
                        SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco3_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+       clk_register_clkdev(clk, "vco3_mclk", NULL);
+       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
                        0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco3_clk", NULL);
@@ -473,7 +472,7 @@ void __init spear1310_clk_init(void)
        /* peripherals */
        clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
                        128);
-       clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+       clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -500,177 +499,176 @@ void __init spear1310_clk_init(void)
        clk_register_clkdev(clk, "apb_clk", NULL);
 
        /* gpt clocks */
-       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt0_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
-       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt3_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt3");
 
        /* others */
-       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+       clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1310_UART_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+       clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
                        ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart0_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "e0000000.serial");
 
-       clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+       clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
-       clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+       clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b3000000.sdhci");
 
-       clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
-       clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+       clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1310_CFXD_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+       clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b2800000.cf");
        clk_register_clkdev(clk, NULL, "arasan_xd");
 
-       clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "c3_synth_clk", NULL);
-       clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+       clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1310_C3_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "c3_syn_clk", NULL);
+       clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+       clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
                        ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "c3_mux_clk", NULL);
+       clk_register_clkdev(clk, "c3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "c3");
 
        /* gmac */
-       clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
-                       gmac_phy_input_parents,
+       clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
                        ARRAY_SIZE(gmac_phy_input_parents), 0,
                        SPEAR1310_GMAC_CLK_CFG,
                        SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
                        SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+       clk_register_clkdev(clk, "phy_input_mclk", NULL);
 
-       clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
-                       "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
-                       NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+       clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+                       0, SPEAR1310_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+                       ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "phy_syn_clk", NULL);
+       clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+       clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
                        ARRAY_SIZE(gmac_phy_parents), 0,
                        SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
                        SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "stmmacphy.0");
 
        /* clcd */
-       clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+       clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
                        ARRAY_SIZE(clcd_synth_parents), 0,
                        SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
                        SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
 
-       clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+       clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
                        SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
                        ARRAY_SIZE(clcd_rtbl), &_lock);
-       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+       clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
                        ARRAY_SIZE(clcd_pixel_parents), 0,
                        SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
                        SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
 
-       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, "clcd_clk", NULL);
 
        /* i2s */
-       clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+       clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
                        ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
                        SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_src_clk", NULL);
 
-       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
                        SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
                        ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
        clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+       clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
                        ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
                        SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
                        &_lock);
        clk_register_clkdev(clk, "i2s_ref_clk", NULL);
 
-       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
 
-       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk",
                        "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
                        &i2s_sclk_masks, i2s_sclk_rtbl,
                        ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
        clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
-       clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+       clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
 
        /* clock derived from ahb clk */
        clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -747,13 +745,13 @@ void __init spear1310_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, "sysram1_clk", NULL);
 
-       clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+       clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
                        0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
                        ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "adc_synth_clk", NULL);
-       clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "adc_syn_clk", NULL);
+       clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "adc_clk");
@@ -790,37 +788,37 @@ void __init spear1310_clk_init(void)
        clk_register_clkdev(clk, NULL, "e0300000.kbd");
 
        /* RAS clks */
-       clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
-                       gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
-                       0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+                       ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
+                       SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
                        SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
-                       gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
-                       0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+                       ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
+                       SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
                        SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
 
-       clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_clk", NULL);
 
-       clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn1_clk", NULL);
 
-       clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_clk", NULL);
 
-       clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn3_clk", NULL);
 
        clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
                        SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
@@ -847,7 +845,7 @@ void __init spear1310_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, "ras_pll3_clk", NULL);
 
-       clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
+       clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_pad_clk", 0,
                        SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, "ras_tx125_clk", NULL);
@@ -912,7 +910,7 @@ void __init spear1310_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, NULL, "5c700000.eth");
 
-       clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
+       clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
                        smii_rgmii_phy_parents,
                        ARRAY_SIZE(smii_rgmii_phy_parents), 0,
                        SPEAR1310_RAS_CTRL_REG1,
@@ -922,184 +920,184 @@ void __init spear1310_clk_init(void)
        clk_register_clkdev(clk, NULL, "stmmacphy.2");
        clk_register_clkdev(clk, NULL, "stmmacphy.4");
 
-       clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
+       clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
                        ARRAY_SIZE(rmii_phy_parents), 0,
                        SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
                        SPEAR1310_PHY_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "stmmacphy.3");
 
-       clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5c800000.serial");
 
-       clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart2_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5c900000.serial");
 
-       clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart3_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5ca00000.serial");
 
-       clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart4_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart4_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cb00000.serial");
 
-       clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart5_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart5_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cc00000.serial");
 
-       clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cd00000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5ce00000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cf00000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c4_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d000000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c5_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d100000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c6_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d200000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c7_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d300000.i2c");
 
-       clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
+       clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
                        ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
+       clk_register_clkdev(clk, "ssp1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d400000.spi");
 
-       clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
+       clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
                        ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "pci_mux_clk", NULL);
+       clk_register_clkdev(clk, "pci_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
+       clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "pci");
 
-       clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
+       clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
                        ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
+       clk_register_clkdev(clk, "tdm1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
 
-       clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
+       clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
                        ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
+       clk_register_clkdev(clk, "tdm2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
index f130919..2352cee 100644
@@ -4,7 +4,7 @@
  * SPEAr1340 machine clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -369,27 +369,25 @@ static struct frac_rate_tbl gen_rtbl[] = {
 
 /* clock parents */
 static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
-static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
-       "sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
-static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
+static const char *sys_parents[] = { "pll1_clk", "pll1_clk", "pll1_clk",
+       "pll1_clk", "sys_synth_clk", "sys_synth_clk", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_syn_clk", };
 static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
 static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
-       "uart0_synth_gate_clk", };
+       "uart0_syn_gclk", };
 static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
-       "uart1_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+       "uart1_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
        "osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
-       "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
 static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
 static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
        "i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
-static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
-};
-static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_syn2_clk", };
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_syn3_clk", };
 
 static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
        "pll3_clk", };
@@ -415,9 +413,9 @@ void __init spear1340_clk_init(void)
                        25000000);
        clk_register_clkdev(clk, "osc_25m_clk", NULL);
 
-       clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
-                       CLK_IS_ROOT, 125000000);
-       clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+       clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+                       125000000);
+       clk_register_clkdev(clk, "gmii_pad_clk", NULL);
 
        clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
                        CLK_IS_ROOT, 12288000);
@@ -431,35 +429,35 @@ void __init spear1340_clk_init(void)
 
        /* clock derived from 24 or 25 MHz osc clk */
        /* vco-pll */
-       clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
                        SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco1_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
-                       0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+       clk_register_clkdev(clk, "vco1_mclk", NULL);
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
+                       SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco1_clk", NULL);
        clk_register_clkdev(clk1, "pll1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
                        SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco2_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
-                       0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+       clk_register_clkdev(clk, "vco2_mclk", NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
+                       SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco2_clk", NULL);
        clk_register_clkdev(clk1, "pll2_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
                        SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco3_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
-                       0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+       clk_register_clkdev(clk, "vco3_mclk", NULL);
+       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
+                       SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco3_clk", NULL);
        clk_register_clkdev(clk1, "pll3_clk", NULL);
@@ -498,7 +496,7 @@ void __init spear1340_clk_init(void)
        /* peripherals */
        clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
                        128);
-       clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+       clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -509,23 +507,23 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, "ddr_clk", NULL);
 
        /* clock derived from pll1 clk */
-       clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
+       clk = clk_register_frac("sys_syn_clk", "vco1div2_clk", 0,
                        SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
                        ARRAY_SIZE(sys_synth_rtbl), &_lock);
-       clk_register_clkdev(clk, "sys_synth_clk", NULL);
+       clk_register_clkdev(clk, "sys_syn_clk", NULL);
 
-       clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
+       clk = clk_register_frac("amba_syn_clk", "vco1div2_clk", 0,
                        SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
                        ARRAY_SIZE(amba_synth_rtbl), &_lock);
-       clk_register_clkdev(clk, "amba_synth_clk", NULL);
+       clk_register_clkdev(clk, "amba_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
+       clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
                        ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
                        SPEAR1340_SCLK_SRC_SEL_SHIFT,
                        SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
        clk_register_clkdev(clk, "sys_clk", NULL);
 
-       clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
+       clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mclk", 0, 1,
                        2);
        clk_register_clkdev(clk, "cpu_clk", NULL);
 
@@ -548,194 +546,193 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, "apb_clk", NULL);
 
        /* gpt clocks */
-       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt0_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
-       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt3_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt3");
 
        /* others */
-       clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
+       clk = clk_register_aux("uart0_syn_clk", "uart0_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart0_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "uart0_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+       clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
                        ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart0_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "e0000000.serial");
 
-       clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
+       clk = clk_register_aux("uart1_syn_clk", "uart1_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart1_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "uart1_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
+       clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
                        ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
-                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b4100000.serial");
 
-       clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+       clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
-       clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+       clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b3000000.sdhci");
 
-       clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
-       clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+       clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1340_CFXD_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+       clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b2800000.cf");
        clk_register_clkdev(clk, NULL, "arasan_xd");
 
-       clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "c3_synth_clk", NULL);
-       clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+       clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk", 0,
+                       SPEAR1340_C3_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "c3_syn_clk", NULL);
+       clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+       clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
                        ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "c3_mux_clk", NULL);
+       clk_register_clkdev(clk, "c3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "c3");
 
        /* gmac */
-       clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
-                       gmac_phy_input_parents,
+       clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
                        ARRAY_SIZE(gmac_phy_input_parents), 0,
                        SPEAR1340_GMAC_CLK_CFG,
                        SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
                        SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+       clk_register_clkdev(clk, "phy_input_mclk", NULL);
 
-       clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
-                       "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
-                       NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+       clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+                       0, SPEAR1340_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+                       ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "phy_syn_clk", NULL);
+       clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+       clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
                        ARRAY_SIZE(gmac_phy_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
                        SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "stmmacphy.0");
 
        /* clcd */
-       clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+       clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
                        ARRAY_SIZE(clcd_synth_parents), 0,
                        SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
                        SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
 
-       clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+       clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
                        SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
                        ARRAY_SIZE(clcd_rtbl), &_lock);
-       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+       clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
                        ARRAY_SIZE(clcd_pixel_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
                        SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
 
-       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, "clcd_clk", NULL);
 
        /* i2s */
-       clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+       clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
                        ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
                        SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_src_clk", NULL);
 
-       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
                        SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
                        ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
        clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+       clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
                        ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
                        SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
                        &_lock);
        clk_register_clkdev(clk, "i2s_ref_clk", NULL);
 
-       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
 
-       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
-                       "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
-                       &i2s_sclk_masks, i2s_sclk_rtbl,
-                       ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk", "i2s_ref_mclk",
+                       0, SPEAR1340_I2S_CLK_CFG, &i2s_sclk_masks,
+                       i2s_sclk_rtbl, ARRAY_SIZE(i2s_sclk_rtbl), &_lock,
+                       &clk1);
        clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
-       clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+       clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
 
        /* clock derived from ahb clk */
        clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -744,7 +741,7 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, NULL, "e0280000.i2c");
 
        clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
-                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b4000000.i2c");
 
@@ -800,13 +797,13 @@ void __init spear1340_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, "sysram1_clk", NULL);
 
-       clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+       clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
                        0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
                        ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "adc_synth_clk", NULL);
-       clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "adc_syn_clk", NULL);
+       clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "adc_clk");
@@ -843,39 +840,39 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, NULL, "e0300000.kbd");
 
        /* RAS clks */
-       clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
-                       gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
-                       0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+                       ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
+                       SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
                        SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
-                       gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
-                       0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+                       ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
+                       SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
                        SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
 
-       clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_clk", NULL);
 
-       clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn1_clk", NULL);
 
-       clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_clk", NULL);
 
-       clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn3_clk", NULL);
 
-       clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
+       clk = clk_register_gate(NULL, "mali_clk", "gen_syn3_clk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "mali");
@@ -890,74 +887,74 @@ void __init spear1340_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_cec.1");
 
-       clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
+       clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
                        ARRAY_SIZE(spdif_out_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
                        SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);
+       clk_register_clkdev(clk, "spdif_out_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
+       clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "spdif-out");
 
-       clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
+       clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
                        ARRAY_SIZE(spdif_in_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
                        SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);
+       clk_register_clkdev(clk, "spdif_in_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
+       clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spdif-in");
 
-       clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
+       clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "acp_clk");
 
-       clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
+       clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "plgpio");
 
-       clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
+       clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "video_dec");
 
-       clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
+       clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "video_enc");
 
-       clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
+       clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_vip");
 
-       clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.0");
 
-       clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.1");
 
-       clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.2");
 
-       clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.3");
 
-       clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
+       clk = clk_register_gate(NULL, "pwm_clk", "pwm_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "pwm");
index 440bb3e..c315745 100644
@@ -2,7 +2,7 @@
  * SPEAr3xx machines clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -122,12 +122,12 @@ static struct gpt_rate_tbl gpt_rtbl[] = {
 };
 
 /* clock parents */
-static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk",
 };
-static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
-static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", };
+static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
 static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
 static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
        "pll2_clk", };
@@ -137,7 +137,7 @@ static void __init spear300_clk_init(void)
 {
        struct clk *clk;
 
-       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
                        1, 1);
        clk_register_clkdev(clk, NULL, "60000000.clcd");
 
@@ -219,15 +219,11 @@ static void __init spear310_clk_init(void)
        #define SPEAR320_UARTX_PCLK_VAL_SYNTH1          0x0
        #define SPEAR320_UARTX_PCLK_VAL_APB             0x1
 
-static const char *i2s_ref_parents[] = { "ras_pll2_clk",
-       "ras_gen2_synth_gate_clk", };
-static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
-       "ras_gen3_synth_gate_clk",
-};
+static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", };
+static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", };
 static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
-       "ras_gen0_synth_gate_clk", };
-static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
-};
+       "ras_syn0_gclk", };
+static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
 
 static void __init spear320_clk_init(void)
 {
@@ -237,7 +233,7 @@ static void __init spear320_clk_init(void)
                        CLK_IS_ROOT, 125000000);
        clk_register_clkdev(clk, "smii_125m_pad", NULL);
 
-       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
                        1, 1);
        clk_register_clkdev(clk, NULL, "90000000.clcd");
 
@@ -363,9 +359,9 @@ void __init spear3xx_clk_init(void)
        clk_register_clkdev(clk, NULL, "fc900000.rtc");
 
        /* clock derived from 24 MHz osc clk */
-       clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+       clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
                        48000000);
-       clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+       clk_register_clkdev(clk, "pll3_clk", NULL);
 
        clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
                        1);
@@ -392,98 +388,98 @@ void __init spear3xx_clk_init(void)
                        HCLK_RATIO_MASK, 0, &_lock);
        clk_register_clkdev(clk, "ahb_clk", NULL);
 
-       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
-                       "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+       clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+                       UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+       clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
                        ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
                        UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart0_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
-                       PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "uart0", "uart0_mclk", 0, PERIP1_CLK_ENB,
+                       UART_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "d0000000.serial");
 
-       clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
-                       "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "firda_synth_clk", NULL);
-       clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+       clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0,
+                       FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "firda_syn_clk", NULL);
+       clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+       clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
                        ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
                        FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "firda_mux_clk", NULL);
+       clk_register_clkdev(clk, "firda_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+       clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
                        PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "firda");
 
        /* gpt clocks */
-       clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
-                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
+                       ARRAY_SIZE(gpt_rtbl), &_lock);
        clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
                        ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
                        GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
-                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
+       clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
+                       ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
                        ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
                        GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
-                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+       clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
+                       ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
                        ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
                        GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
        /* general synths clocks */
-       clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
-                       "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen0_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);
-
-       clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
-                       "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen1_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);
-
-       clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
+       clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk",
+                       0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "gen0_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen0_syn_gclk", NULL);
+
+       clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk",
+                       0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "gen1_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
+
+       clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
                        ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
                        GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);
+       clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
 
-       clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
-                       "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+       clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
+                       "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
                        ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen2_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "gen2_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen2_syn_gclk", NULL);
 
-       clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
-                       "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+       clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk",
+                       "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
                        ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen3_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "gen3_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen3_syn_gclk", NULL);
 
        /* clock derived from pll3 clk */
-       clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
-                       PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+                       USBH_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, "usbh_clk", NULL);
 
        clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
@@ -494,8 +490,8 @@ void __init spear3xx_clk_init(void)
                        1);
        clk_register_clkdev(clk, "usbh.1_clk", NULL);
 
-       clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
-                       PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+                       USBD_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "designware_udc");
 
        /* clock derived from ahb clk */
@@ -579,29 +575,25 @@ void __init spear3xx_clk_init(void)
                        RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, "ras_pll2_clk", NULL);
 
-       clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
+       clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
                        RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
-                       "gen0_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT0_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
-                       "gen1_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT1_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
-                       "gen2_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT2_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
-                       "gen3_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT3_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn0_gclk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn1_gclk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn2_gclk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn3_gclk", NULL);
 
        if (of_machine_is_compatible("st,spear300"))
                spear300_clk_init();
index f9a20b3..a98d086 100644
@@ -2,7 +2,7 @@
  * SPEAr6xx machines clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -97,13 +97,12 @@ static struct aux_rate_tbl aux_rtbl[] = {
        {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
 };
 
-static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
-};
-static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
-static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
+static const char *clcd_parents[] = { "pll3_clk", "clcd_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", };
+static const char *uart_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *gpt0_1_parents[] = { "pll3_clk", "gpt0_1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
+static const char *gpt3_parents[] = { "pll3_clk", "gpt3_syn_clk", };
 static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
        "pll2_clk", };
 
@@ -136,9 +135,9 @@ void __init spear6xx_clk_init(void)
        clk_register_clkdev(clk, NULL, "rtc-spear");
 
        /* clock derived from 30 MHz osc clk */
-       clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+       clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
                        48000000);
-       clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+       clk_register_clkdev(clk, "pll3_clk", NULL);
 
        clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
                        0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
@@ -146,9 +145,9 @@ void __init spear6xx_clk_init(void)
        clk_register_clkdev(clk, "vco1_clk", NULL);
        clk_register_clkdev(clk1, "pll1_clk", NULL);
 
-       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
-                       "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
-                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_30m_clk",
+                       0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+                       &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco2_clk", NULL);
        clk_register_clkdev(clk1, "pll2_clk", NULL);
 
@@ -165,111 +164,111 @@ void __init spear6xx_clk_init(void)
                        HCLK_RATIO_MASK, 0, &_lock);
        clk_register_clkdev(clk, "ahb_clk", NULL);
 
-       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
-                       "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+       clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+                       UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
                        UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "uart_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
-                       PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
+                       UART0_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "d0000000.serial");
 
-       clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
-                       PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "uart1", "uart_mclk", 0, PERIP1_CLK_ENB,
+                       UART1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "d0080000.serial");
 
-       clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
-                       "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "firda_synth_clk", NULL);
-       clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+       clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk",
+                       0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "firda_syn_clk", NULL);
+       clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+       clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
                        ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
                        FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "firda_mux_clk", NULL);
+       clk_register_clkdev(clk, "firda_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+       clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
                        PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "firda");
 
-       clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
-                       "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
-       clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
+       clk = clk_register_aux("clcd_syn_clk", "clcd_syn_gclk", "pll1_clk",
+                       0, CLCD_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "clcd_syn_clk", NULL);
+       clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
+       clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
                        ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
                        CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "clcd_mux_clk", NULL);
+       clk_register_clkdev(clk, "clcd_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
                        PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "clcd");
 
        /* gpt clocks */
-       clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+       clk = clk_register_gpt("gpt0_1_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
                        gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
+       clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
+       clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
                        ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
                        GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
                        ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
                        GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+       clk = clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
                        gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
+       clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
                        ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
                        GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
-       clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+       clk = clk_register_gpt("gpt3_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
                        gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
+       clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
+       clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
                        ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
                        GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+       clk_register_clkdev(clk, "gpt3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
                        PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt3");
 
        /* clock derived from pll3 clk */
-       clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
+       clk = clk_register_gate(NULL, "usbh0_clk", "pll3_clk", 0,
                        PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "usbh.0_clk");
 
-       clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
+       clk = clk_register_gate(NULL, "usbh1_clk", "pll3_clk", 0,
                        PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "usbh.1_clk");
 
-       clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
-                       PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+                       USBD_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "designware_udc");
 
        /* clock derived from ahb clk */
@@ -278,9 +277,8 @@ void __init spear6xx_clk_init(void)
        clk_register_clkdev(clk, "ahbmult2_clk", NULL);
 
        clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
-                       ARRAY_SIZE(ddr_parents),
-                       0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
-                       &_lock);
+                       ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+                       MCTR_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, "ddr_clk", NULL);
 
        clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
@@ -298,7 +296,7 @@ void __init spear6xx_clk_init(void)
 
        clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
                        GMAC_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, NULL, "gmac");
+       clk_register_clkdev(clk, NULL, "e0800000.ethernet");
 
        clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
                        I2C_CLK_ENB, 0, &_lock);
index e23dc82..7212961 100644 (file)
@@ -1626,4 +1626,4 @@ module_exit(dw_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
index fb4f499..1dc2a4a 100644 (file)
@@ -815,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
 
        init_completion(&sdmac->done);
 
-       sdmac->buf_tail = 0;
-
        return 0;
 out:
 
@@ -927,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
        sdmac->flags = 0;
 
+       sdmac->buf_tail = 0;
+
        dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
                        sg_len, channel);
 
@@ -1027,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 
        sdmac->status = DMA_IN_PROGRESS;
 
+       sdmac->buf_tail = 0;
+
        sdmac->flags |= IMX_DMA_SG_LOOP;
        sdmac->direction = direction;
        ret = sdma_load_context(sdmac);
index cbcc28e..e4feba6 100644 (file)
@@ -392,6 +392,8 @@ struct pl330_req {
        struct pl330_reqcfg *cfg;
        /* Pointer to first xfer in the request. */
        struct pl330_xfer *x;
+       /* Hook to attach to DMAC's list of reqs with due callback */
+       struct list_head rqd;
 };
 
 /*
@@ -461,8 +463,6 @@ struct _pl330_req {
        /* Number of bytes taken to setup MC for the req */
        u32 mc_len;
        struct pl330_req *r;
-       /* Hook to attach to DMAC's list of reqs with due callback */
-       struct list_head rqd;
 };
 
 /* ToBeDone for tasklet */
@@ -1683,7 +1683,7 @@ static void pl330_dotask(unsigned long data)
 /* Returns 1 if state was updated, 0 otherwise */
 static int pl330_update(const struct pl330_info *pi)
 {
-       struct _pl330_req *rqdone;
+       struct pl330_req *rqdone, *tmp;
        struct pl330_dmac *pl330;
        unsigned long flags;
        void __iomem *regs;
@@ -1750,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi)
                        if (active == -1) /* Aborted */
                                continue;
 
-                       rqdone = &thrd->req[active];
+                       /* Detach the req */
+                       rqdone = thrd->req[active].r;
+                       thrd->req[active].r = NULL;
+
                        mark_free(thrd, active);
 
                        /* Get going again ASAP */
@@ -1762,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi)
        }
 
        /* Now that we are in no hurry, do the callbacks */
-       while (!list_empty(&pl330->req_done)) {
-               struct pl330_req *r;
-
-               rqdone = container_of(pl330->req_done.next,
-                                       struct _pl330_req, rqd);
-
-               list_del_init(&rqdone->rqd);
-
-               /* Detach the req */
-               r = rqdone->r;
-               rqdone->r = NULL;
+       list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
+               list_del(&rqdone->rqd);
 
                spin_unlock_irqrestore(&pl330->lock, flags);
-               _callback(r, PL330_ERR_NONE);
+               _callback(rqdone, PL330_ERR_NONE);
                spin_lock_irqsave(&pl330->lock, flags);
        }
 
@@ -2321,7 +2315,7 @@ static void pl330_tasklet(unsigned long data)
        /* Pick up ripe tomatoes */
        list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
                if (desc->status == DONE) {
-                       if (pch->cyclic)
+                       if (!pch->cyclic)
                                dma_cookie_complete(&desc->txd);
                        list_move_tail(&desc->node, &list);
                }
@@ -2539,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
 }
 
 /* Returns the number of descriptors added to the DMAC pool */
-int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
 {
        struct dma_pl330_desc *desc;
        unsigned long flags;
index 10f3750..de5ba86 100644 (file)
@@ -164,7 +164,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
        else
                return (char *)ptr;
 
-       r = size % align;
+       r = (unsigned long)p % align;
 
        if (r == 0)
                return (char *)ptr;
index d27778f..a499c7e 100644 (file)
@@ -1814,12 +1814,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
        if (mce->bank != 8)
                return NOTIFY_DONE;
 
-#ifdef CONFIG_SMP
-       /* Only handle if it is the right mc controller */
-       if (mce->socketid != pvt->i7core_dev->socket)
-               return NOTIFY_DONE;
-#endif
-
        smp_rmb();
        if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
                smp_wmb();
@@ -2116,8 +2110,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
        if (pvt->enable_scrub)
                disable_sdram_scrub_setting(mci);
 
-       mce_unregister_decode_chain(&i7_mce_dec);
-
        /* Disable EDAC polling */
        i7core_pci_ctl_release(pvt);
 
@@ -2222,8 +2214,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
        /* DCLK for scrub rate setting */
        pvt->dclk_freq = get_dclk_freq();
 
-       mce_register_decode_chain(&i7_mce_dec);
-
        return 0;
 
 fail0:
@@ -2367,8 +2357,10 @@ static int __init i7core_init(void)
 
        pci_rc = pci_register_driver(&i7core_driver);
 
-       if (pci_rc >= 0)
+       if (pci_rc >= 0) {
+               mce_register_decode_chain(&i7_mce_dec);
                return 0;
+       }
 
        i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
                      pci_rc);
@@ -2384,6 +2376,7 @@ static void __exit i7core_exit(void)
 {
        debugf2("MC: " __FILE__ ": %s()\n", __func__);
        pci_unregister_driver(&i7core_driver);
+       mce_unregister_decode_chain(&i7_mce_dec);
 }
 
 module_init(i7core_init);
index 4c40235..0e37462 100644 (file)
@@ -980,7 +980,8 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 1;
        layers[1].is_virt_csrow = false;
-       mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata));
+       mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+                           sizeof(*pdata));
        if (!mci) {
                devres_release_group(&op->dev, mpc85xx_mc_err_probe);
                return -ENOMEM;
index 4adaf4b..36ad17e 100644 (file)
@@ -555,7 +555,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
                pvt->is_close_pg = false;
        }
 
-       pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg);
+       pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
        if (IS_RDIMM_ENABLED(reg)) {
                /* FIXME: Can also be LRDIMM */
                debugf0("Memory is registered\n");
@@ -1604,8 +1604,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
        debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
                __func__, mci, &sbridge_dev->pdev[0]->dev);
 
-       mce_unregister_decode_chain(&sbridge_mce_dec);
-
        /* Remove MC sysfs nodes */
        edac_mc_del_mc(mci->dev);
 
@@ -1682,7 +1680,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
                goto fail0;
        }
 
-       mce_register_decode_chain(&sbridge_mce_dec);
        return 0;
 
 fail0:
@@ -1811,8 +1808,10 @@ static int __init sbridge_init(void)
 
        pci_rc = pci_register_driver(&sbridge_driver);
 
-       if (pci_rc >= 0)
+       if (pci_rc >= 0) {
+               mce_register_decode_chain(&sbridge_mce_dec);
                return 0;
+       }
 
        sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
                      pci_rc);
@@ -1828,6 +1827,7 @@ static void __exit sbridge_exit(void)
 {
        debugf2("MC: " __FILE__ ": %s()\n", __func__);
        pci_unregister_driver(&sbridge_driver);
+       mce_unregister_decode_chain(&sbridge_mce_dec);
 }
 
 module_init(sbridge_init);
index 23416e4..a4ed30b 100644 (file)
@@ -116,8 +116,8 @@ const char *max8997_extcon_cable[] = {
        [5] = "Charge-downstream",
        [6] = "MHL",
        [7] = "Dock-desk",
-       [7] = "Dock-card",
-       [8] = "JIG",
+       [8] = "Dock-card",
+       [9] = "JIG",
 
        NULL,
 };
@@ -514,6 +514,7 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev)
 
        extcon_dev_unregister(info->edev);
 
+       kfree(info->edev);
        kfree(info);
 
        return 0;
index f598a70..159aeb0 100644 (file)
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
 #if defined(CONFIG_ANDROID)
        if (switch_class)
                ret = class_compat_create_link(switch_class, edev->dev,
-                                              dev);
+                                              NULL);
 #endif /* CONFIG_ANDROID */
 
        spin_lock_init(&edev->lock);
index fe7a07b..8a0dcc1 100644 (file)
@@ -125,6 +125,7 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_request_irq;
 
+       platform_set_drvdata(pdev, extcon_data);
        /* Perform initial detection */
        gpio_extcon_work(&extcon_data->work.work);
 
@@ -146,6 +147,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
        struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
 
        cancel_delayed_work_sync(&extcon_data->work);
+       free_irq(extcon_data->irq, extcon_data);
        gpio_free(extcon_data->gpio);
        extcon_dev_unregister(&extcon_data->edev);
        devm_kfree(&pdev->dev, extcon_data);
index c4067d0..542f0c0 100644 (file)
@@ -136,7 +136,7 @@ config GPIO_MPC8XXX
 
 config GPIO_MSM_V1
        tristate "Qualcomm MSM GPIO v1"
-       depends on GPIOLIB && ARCH_MSM
+       depends on GPIOLIB && ARCH_MSM && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
        help
          Say yes here to support the GPIO interface on ARM v6 based
          Qualcomm MSM chips.  Most of the pins on the MSM can be
index 9e9947c..1077754 100644 (file)
@@ -98,6 +98,7 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
 
        return 0;
 }
+EXPORT_SYMBOL(devm_gpio_request_one);
 
 /**
  *      devm_gpio_free - free an interrupt
index c337143..c89c4c1 100644 (file)
@@ -398,10 +398,12 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
        writel(~0, port->base + GPIO_ISR);
 
        if (mxc_gpio_hwtype == IMX21_GPIO) {
-               /* setup one handler for all GPIO interrupts */
-               if (pdev->id == 0)
-                       irq_set_chained_handler(port->irq,
-                                               mx2_gpio_irq_handler);
+               /*
+                * Set up one handler for all GPIO interrupts. Actually setting
+                * the handler is needed only once, but doing it for every port
+                * is more robust and easier.
+                */
+               irq_set_chained_handler(port->irq, mx2_gpio_irq_handler);
        } else {
                /* setup one handler for each entry */
                irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
index c4ed172..4fbc208 100644 (file)
@@ -174,12 +174,22 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
        if (bank->dbck_enable_mask && !bank->dbck_enabled) {
                clk_enable(bank->dbck);
                bank->dbck_enabled = true;
+
+               __raw_writel(bank->dbck_enable_mask,
+                            bank->base + bank->regs->debounce_en);
        }
 }
 
 static inline void _gpio_dbck_disable(struct gpio_bank *bank)
 {
        if (bank->dbck_enable_mask && bank->dbck_enabled) {
+               /*
+                * Disable debounce before cutting its clock. If debounce is
+                * enabled but the clock is not, GPIO module seems to be unable
+                * to detect events and generate interrupts at least on OMAP3.
+                */
+               __raw_writel(0, bank->base + bank->regs->debounce_en);
+
                clk_disable(bank->dbck);
                bank->dbck_enabled = false;
        }
@@ -1081,7 +1091,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
        bank->is_mpuio = pdata->is_mpuio;
        bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
        bank->loses_context = pdata->loses_context;
-       bank->get_context_loss_count = pdata->get_context_loss_count;
        bank->regs = pdata->regs;
 #ifdef CONFIG_OF_GPIO
        bank->chip.of_node = of_node_get(node);
@@ -1135,6 +1144,9 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
        omap_gpio_chip_init(bank);
        omap_gpio_show_rev(bank);
 
+       if (bank->loses_context)
+               bank->get_context_loss_count = pdata->get_context_loss_count;
+
        pm_runtime_put(bank->dev);
 
        list_add_tail(&bank->node, &omap_gpio_list);
index 38416be..6064fb3 100644 (file)
@@ -383,8 +383,9 @@ static int __devinit gsta_probe(struct platform_device *dev)
        }
        spin_lock_init(&chip->lock);
        gsta_gpio_setup(chip);
-       for (i = 0; i < GSTA_NR_GPIO; i++)
-               gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+       if (gpio_pdata)
+               for (i = 0; i < GSTA_NR_GPIO; i++)
+                       gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
 
        /* 384 was used in previous code: be compatible for other drivers */
        err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
index c1ad288..11f29c8 100644 (file)
@@ -149,6 +149,9 @@ static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
        tps65910_gpio->gpio_chip.set    = tps65910_gpio_set;
        tps65910_gpio->gpio_chip.get    = tps65910_gpio_get;
        tps65910_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+       tps65910_gpio->gpio_chip.of_node = tps65910->dev->of_node;
+#endif
        if (pdata && pdata->gpio_base)
                tps65910_gpio->gpio_chip.base = pdata->gpio_base;
        else
index 92ea535..aa61ad2 100644 (file)
@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
        struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
        struct wm8994 *wm8994 = wm8994_gpio->wm8994;
 
+       if (value)
+               value = WM8994_GPN_LVL;
+
        return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
-                              WM8994_GPN_DIR, 0);
+                              WM8994_GPN_DIR | WM8994_GPN_LVL, value);
 }
 
 static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
index eb92fe2..a8743c3 100644 (file)
@@ -610,7 +610,7 @@ static bool
 drm_monitor_supports_rb(struct edid *edid)
 {
        if (edid->revision >= 4) {
-               bool ret;
+               bool ret = false;
                drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
                return ret;
        }
@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
        return true;
 }
 
+static bool valid_inferred_mode(const struct drm_connector *connector,
+                               const struct drm_display_mode *mode)
+{
+       struct drm_display_mode *m;
+       bool ok = false;
+
+       list_for_each_entry(m, &connector->probed_modes, head) {
+               if (mode->hdisplay == m->hdisplay &&
+                   mode->vdisplay == m->vdisplay &&
+                   drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+                       return false; /* duplicated */
+               if (mode->hdisplay <= m->hdisplay &&
+                   mode->vdisplay <= m->vdisplay)
+                       ok = true;
+       }
+       return ok;
+}
+
 static int
 drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
        struct drm_device *dev = connector->dev;
 
        for (i = 0; i < drm_num_dmt_modes; i++) {
-               if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+               if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+                   valid_inferred_mode(connector, drm_dmt_modes + i)) {
                        newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
                        if (newmode) {
                                drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
index 9764045..b7e7b49 100644 (file)
@@ -78,21 +78,6 @@ static int cdv_backlight_combination_mode(struct drm_device *dev)
        return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
 }
 
-static int cdv_get_brightness(struct backlight_device *bd)
-{
-       struct drm_device *dev = bl_get_data(bd);
-       u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-
-       if (cdv_backlight_combination_mode(dev)) {
-               u8 lbpc;
-
-               val &= ~1;
-               pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
-               val *= lbpc;
-       }
-       return val;
-}
-
 static u32 cdv_get_max_backlight(struct drm_device *dev)
 {
        u32 max = REG_READ(BLC_PWM_CTL);
@@ -110,6 +95,22 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
        return max;
 }
 
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+       struct drm_device *dev = bl_get_data(bd);
+       u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+
+       if (cdv_backlight_combination_mode(dev)) {
+               u8 lbpc;
+
+               val &= ~1;
+               pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+               val *= lbpc;
+       }
+       return (val * 100)/cdv_get_max_backlight(dev);
+
+}
+
 static int cdv_set_brightness(struct backlight_device *bd)
 {
        struct drm_device *dev = bl_get_data(bd);
@@ -120,6 +121,9 @@ static int cdv_set_brightness(struct backlight_device *bd)
        if (level < 1)
                level = 1;
 
+       level *= cdv_get_max_backlight(dev);
+       level /= 100;
+
        if (cdv_backlight_combination_mode(dev)) {
                u32 max = cdv_get_max_backlight(dev);
                u8 lbpc;
@@ -157,7 +161,6 @@ static int cdv_backlight_init(struct drm_device *dev)
 
        cdv_backlight_device->props.brightness =
                        cdv_get_brightness(cdv_backlight_device);
-       cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
        backlight_update_status(cdv_backlight_device);
        dev_priv->backlight_device = cdv_backlight_device;
        return 0;
index 4f186ec..c430bd4 100644 (file)
@@ -144,6 +144,8 @@ struct opregion_asle {
 
 #define ASLE_CBLV_VALID         (1<<31)
 
+static struct psb_intel_opregion *system_opregion;
+
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
@@ -205,7 +207,7 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct opregion_asle *asle = dev_priv->opregion.asle;
 
-       if (asle) {
+       if (asle && system_opregion ) {
                /* Don't do this on Medfield or other non PC like devices, they
                   use the bit for something different altogether */
                psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
@@ -221,7 +223,6 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
 #define ACPI_EV_LID            (1<<1)
 #define ACPI_EV_DOCK           (1<<2)
 
-static struct psb_intel_opregion *system_opregion;
 
 static int psb_intel_opregion_video_event(struct notifier_block *nb,
                                          unsigned long val, void *data)
@@ -266,9 +267,6 @@ void psb_intel_opregion_init(struct drm_device *dev)
                system_opregion = opregion;
                register_acpi_notifier(&psb_intel_opregion_notifier);
        }
-
-       if (opregion->asle)
-               psb_intel_opregion_enable_asle(dev);
 }
 
 void psb_intel_opregion_fini(struct drm_device *dev)
index 72dc6b9..4a90f8b 100644 (file)
@@ -27,6 +27,7 @@ extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
 extern void psb_intel_opregion_init(struct drm_device *dev);
 extern void psb_intel_opregion_fini(struct drm_device *dev);
 extern int psb_intel_opregion_setup(struct drm_device *dev);
+extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
 
 #else
 
@@ -46,4 +47,8 @@ extern inline int psb_intel_opregion_setup(struct drm_device *dev)
 {
        return 0;
 }
+
+extern inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+}
 #endif
index eff039b..5971bc8 100644 (file)
@@ -144,6 +144,10 @@ static int psb_backlight_init(struct drm_device *dev)
        psb_backlight_device->props.max_brightness = 100;
        backlight_update_status(psb_backlight_device);
        dev_priv->backlight_device = psb_backlight_device;
+
+       /* This must occur after the backlight is properly initialised */
+       psb_lid_timer_init(dev_priv);
+
        return 0;
 }
 
@@ -354,13 +358,6 @@ static int psb_chip_setup(struct drm_device *dev)
        return 0;
 }
 
-/* Not exactly an erratum more an irritation */
-static void psb_chip_errata(struct drm_device *dev)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-       psb_lid_timer_init(dev_priv);
-}
-
 static void psb_chip_teardown(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
@@ -379,7 +376,6 @@ const struct psb_ops psb_chip_ops = {
        .sgx_offset = PSB_SGX_OFFSET,
        .chip_setup = psb_chip_setup,
        .chip_teardown = psb_chip_teardown,
-       .errata = psb_chip_errata,
 
        .crtc_helper = &psb_intel_helper_funcs,
        .crtc_funcs = &psb_intel_crtc_funcs,
index caba6e0..a8858a9 100644 (file)
@@ -374,6 +374,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 
        if (ret)
                return ret;
+       psb_intel_opregion_enable_asle(dev);
 #if 0
        /*enable runtime pm at last*/
        pm_runtime_enable(&dev->pdev->dev);
index f947926..36822b9 100644 (file)
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
        }
 }
 
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       struct apertures_struct *ap;
+       struct pci_dev *pdev = dev_priv->dev->pdev;
+       bool primary;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       ap->ranges[0].base = dev_priv->dev->agp->base;
+       ap->ranges[0].size =
+               dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+       primary =
+               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+       remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+       kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       dev_priv->mm.gtt = intel_gtt_get();
+       if (!dev_priv->mm.gtt) {
+               DRM_ERROR("Failed to initialize GTT\n");
+               ret = -ENODEV;
+               goto put_bridge;
+       }
+
+       i915_kick_out_firmware_fb(dev_priv);
+
        pci_set_master(dev->pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_bridge;
        }
 
-       dev_priv->mm.gtt = intel_gtt_get();
-       if (!dev_priv->mm.gtt) {
-               DRM_ERROR("Failed to initialize GTT\n");
-               ret = -ENODEV;
-               goto out_rmmap;
-       }
-
        aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
        dev_priv->mm.gtt_mapping =
index b1fe0ed..ed3224c 100644 (file)
@@ -412,7 +412,6 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
         */
 
        spin_lock_irqsave(&dev_priv->rps_lock, flags);
-       WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
        dev_priv->pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
        POSTING_READ(GEN6_PMIMR);
index 0ede02a..a748e5c 100644 (file)
@@ -740,8 +740,11 @@ static void i915_restore_display(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
                I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
-               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+               /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+                * otherwise we get blank eDP screen after S3 on some machines
+                */
                I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
                I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
                I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
                I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
index e0aa064..a8538ac 100644 (file)
@@ -6558,7 +6558,7 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(HDMIC) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMIC);
 
-               if (I915_READ(HDMID) & PORT_DETECTED)
+               if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMID);
 
                if (I915_READ(PCH_DP_C) & DP_DETECTED)
@@ -6921,19 +6921,6 @@ static void i915_disable_vga(struct drm_device *dev)
        POSTING_READ(vga_reg);
 }
 
-static void ivb_pch_pwm_override(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /*
-        * IVB has CPU eDP backlight regs too, set things up to let the
-        * PCH regs control the backlight
-        */
-       I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
-       I915_WRITE(BLC_PWM_CPU_CTL, 0);
-       I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
-}
-
 void intel_modeset_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6950,9 +6937,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
                gen6_enable_rps(dev_priv);
                gen6_update_ring_freq(dev_priv);
        }
-
-       if (IS_IVYBRIDGE(dev))
-               ivb_pch_pwm_override(dev);
 }
 
 void intel_modeset_init(struct drm_device *dev)
index 296cfc2..c044932 100644 (file)
@@ -32,6 +32,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_edid.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
@@ -67,6 +68,8 @@ struct intel_dp {
        struct drm_display_mode *panel_fixed_mode;  /* for eDP */
        struct delayed_work panel_vdd_work;
        bool want_panel_vdd;
+       struct edid *edid; /* cached EDID for eDP */
+       int edid_mode_count;
 };
 
 /**
@@ -371,7 +374,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        int recv_bytes;
        uint32_t status;
        uint32_t aux_clock_divider;
-       int try, precharge = 5;
+       int try, precharge;
 
        intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
@@ -391,6 +394,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        else
                aux_clock_divider = intel_hrawclk(dev) / 2;
 
+       if (IS_GEN6(dev))
+               precharge = 3;
+       else
+               precharge = 5;
+
        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ(ch_ctl);
@@ -1973,6 +1981,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
 
+       ironlake_edp_panel_vdd_on(intel_dp);
+
        if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
@@ -1980,6 +1990,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
+
+       ironlake_edp_panel_vdd_off(intel_dp, false);
 }
 
 static bool
@@ -2116,10 +2128,22 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct edid     *edid;
+       int size;
+
+       if (is_edp(intel_dp)) {
+               if (!intel_dp->edid)
+                       return NULL;
+
+               size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+               edid = kmalloc(size, GFP_KERNEL);
+               if (!edid)
+                       return NULL;
+
+               memcpy(edid, intel_dp->edid, size);
+               return edid;
+       }
 
-       ironlake_edp_panel_vdd_on(intel_dp);
        edid = drm_get_edid(connector, adapter);
-       ironlake_edp_panel_vdd_off(intel_dp, false);
        return edid;
 }
 
@@ -2129,9 +2153,17 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        int     ret;
 
-       ironlake_edp_panel_vdd_on(intel_dp);
+       if (is_edp(intel_dp)) {
+               drm_mode_connector_update_edid_property(connector,
+                                                       intel_dp->edid);
+               ret = drm_add_edid_modes(connector, intel_dp->edid);
+               drm_edid_to_eld(connector,
+                               intel_dp->edid);
+               connector->display_info.raw_edid = NULL;
+               return intel_dp->edid_mode_count;
+       }
+
        ret = intel_ddc_get_modes(connector, adapter);
-       ironlake_edp_panel_vdd_off(intel_dp, false);
        return ret;
 }
 
@@ -2321,6 +2353,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        i2c_del_adapter(&intel_dp->adapter);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
+               kfree(intel_dp->edid);
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                ironlake_panel_vdd_off_sync(intel_dp);
        }
@@ -2504,11 +2537,14 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                        break;
        }
 
+       intel_dp_i2c_init(intel_dp, intel_connector, name);
+
        /* Cache some DPCD data in the eDP case */
        if (is_edp(intel_dp)) {
                bool ret;
                struct edp_power_seq    cur, vbt;
                u32 pp_on, pp_off, pp_div;
+               struct edid *edid;
 
                pp_on = I915_READ(PCH_PP_ON_DELAYS);
                pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2576,9 +2612,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                        intel_dp_destroy(&intel_connector->base);
                        return;
                }
-       }
 
-       intel_dp_i2c_init(intel_dp, intel_connector, name);
+               ironlake_edp_panel_vdd_on(intel_dp);
+               edid = drm_get_edid(connector, &intel_dp->adapter);
+               if (edid) {
+                       drm_mode_connector_update_edid_property(connector,
+                                                               edid);
+                       intel_dp->edid_mode_count =
+                               drm_add_edid_modes(connector, edid);
+                       drm_edid_to_eld(connector, edid);
+                       intel_dp->edid = edid;
+               }
+               ironlake_edp_panel_vdd_off(intel_dp, false);
+       }
 
        intel_encoder->hot_plug = intel_dp_hot_plug;
 
index 153b9a1..1074bc5 100644 (file)
@@ -467,7 +467,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
        nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
 
        ret = drm_fb_helper_init(dev, &nfbdev->helper,
-                                nv_two_heads(dev) ? 2 : 1, 4);
+                                dev->mode_config.num_crtc, 4);
        if (ret) {
                kfree(nfbdev);
                return ret;
index a89240e..a25cf2c 100644 (file)
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
 
 #include "drmP.h"
 #include "drm.h"
index 01d77d1..3904d79 100644 (file)
@@ -1149,7 +1149,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        }
 
        if (tiling_flags & RADEON_TILING_MACRO) {
-               if (rdev->family >= CHIP_CAYMAN)
+               if (rdev->family >= CHIP_TAHITI)
+                       tmp = rdev->config.si.tile_config;
+               else if (rdev->family >= CHIP_CAYMAN)
                        tmp = rdev->config.cayman.tile_config;
                else
                        tmp = rdev->config.evergreen.tile_config;
@@ -1177,6 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        } else if (tiling_flags & RADEON_TILING_MICRO)
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
+       if ((rdev->family == CHIP_TAHITI) ||
+           (rdev->family == CHIP_PITCAIRN))
+               fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+       else if (rdev->family == CHIP_VERDE)
+               fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
        switch (radeon_crtc->crtc_id) {
        case 0:
                WREG32(AVIVO_D1VGA_CONTROL, 0);
index e7b1ec5..486ccdf 100644 (file)
@@ -1926,7 +1926,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
                r600_hdmi_enable(encoder);
-               if (ASIC_IS_DCE4(rdev))
+               if (ASIC_IS_DCE6(rdev))
+                       ; /* TODO (use pointers instead of if-s?) */
+               else if (ASIC_IS_DCE4(rdev))
                        evergreen_hdmi_setmode(encoder, adjusted_mode);
                else
                        r600_hdmi_setmode(encoder, adjusted_mode);
index 01550d0..7fb3d2e 100644 (file)
@@ -1932,6 +1932,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
+       if (rdev->family <= CHIP_SUMO2)
+               WREG32(SMX_SAR_CTL0, 0x00010000);
+
        WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
                                        POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
                                        SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
index a51f880..65c5416 100644 (file)
@@ -156,9 +156,6 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t offset;
 
-       if (ASIC_IS_DCE5(rdev))
-               return;
-
        /* Silent, r600_hdmi_enable will raise WARN for us */
        if (!dig->afmt->enabled)
                return;
index 2773039..b50b15c 100644 (file)
 #define        SCRATCH_UMSK                                    0x8540
 #define        SCRATCH_ADDR                                    0x8544
 
+#define        SMX_SAR_CTL0                                    0xA008
 #define        SMX_DC_CTL0                                     0xA020
 #define                USE_HASH_FUNCTION                               (1 << 0)
 #define                NUMBER_OF_SETS(x)                               ((x) << 1)
index 3186522..b7bf18e 100644 (file)
@@ -1303,6 +1303,10 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r)
+               return r;
+
        return 0;
 }
 
@@ -1329,6 +1333,7 @@ int cayman_resume(struct radeon_device *rdev)
 
 int cayman_suspend(struct radeon_device *rdev)
 {
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        radeon_ib_pool_suspend(rdev);
        radeon_vm_manager_suspend(rdev);
index f30dc95..bff6272 100644 (file)
@@ -1839,6 +1839,7 @@ void r600_gpu_init(struct radeon_device *rdev)
        WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
                               NUM_CLIP_SEQ(3)));
        WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+       WREG32(VC_ENHANCE, 0);
 }
 
 
index 7479a5c..79b5591 100644 (file)
@@ -57,7 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-       return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
+       return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
                || rdev->family == CHIP_RS600
                || rdev->family == CHIP_RS690
                || rdev->family == CHIP_RS740;
index 0133f5f..ca87f7a 100644 (file)
@@ -2079,6 +2079,48 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
                break;
+       case PACKET3_STRMOUT_BASE_UPDATE:
+               if (p->family < CHIP_RV770) {
+                       DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+                       return -EINVAL;
+               }
+               if (pkt->count != 1) {
+                       DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+                       return -EINVAL;
+               }
+               if (idx_value > 3) {
+                       DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+                       return -EINVAL;
+               }
+               {
+                       u64 offset;
+
+                       r = r600_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+                               return -EINVAL;
+                       }
+
+                       if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+                               return -EINVAL;
+                       }
+
+                       offset = radeon_get_ib_value(p, idx+1) << 8;
+                       if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+                                         offset, track->vgt_strmout_bo_offset[idx_value]);
+                               return -EINVAL;
+                       }
+
+                       if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+                                         offset + 4, radeon_bo_size(reloc->robj));
+                               return -EINVAL;
+                       }
+                       ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               }
+               break;
        case PACKET3_SURFACE_BASE_UPDATE:
                if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
                        DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
index 969c275..82a0a4c 100644 (file)
@@ -322,9 +322,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t offset;
 
-       if (ASIC_IS_DCE5(rdev))
-               return;
-
        /* Silent, r600_hdmi_enable will raise WARN for us */
        if (!dig->afmt->enabled)
                return;
@@ -483,7 +480,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        uint32_t offset;
        u32 hdmi;
 
-       if (ASIC_IS_DCE5(rdev))
+       if (ASIC_IS_DCE6(rdev))
                return;
 
        /* Silent, r600_hdmi_enable will raise WARN for us */
@@ -543,7 +540,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t offset;
 
-       if (ASIC_IS_DCE5(rdev))
+       if (ASIC_IS_DCE6(rdev))
                return;
 
        /* Called for ATOM_ENCODER_MODE_HDMI only */
index a0dbf1f..025fd5b 100644 (file)
 #define                TC_L2_SIZE(x)                                   ((x)<<5)
 #define                L2_DISABLE_LATE_HIT                             (1<<9)
 
+#define        VC_ENHANCE                                      0x9714
 
 #define        VGT_CACHE_INVALIDATION                          0x88C4
 #define                CACHE_INVALIDATION(x)                           ((x)<<0)
 #define        PACKET3_SET_CTL_CONST                           0x6F
 #define                PACKET3_SET_CTL_CONST_OFFSET                    0x0003cff0
 #define                PACKET3_SET_CTL_CONST_END                       0x0003e200
+#define        PACKET3_STRMOUT_BASE_UPDATE                     0x72 /* r7xx */
 #define        PACKET3_SURFACE_BASE_UPDATE                     0x73
 
 
index 03e5f5d..2c4d53f 100644 (file)
  *   2.14.0 - add evergreen tiling informations
  *   2.15.0 - add max_pipes query
  *   2.16.0 - fix evergreen 2D tiled surface calculation
+ *   2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       16
+#define KMS_DRIVER_MINOR       17
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 59d4493..84b648a 100644 (file)
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
        rdev->vm_manager.enabled = false;
 
        /* mark first vm as always in use, it's the system one */
+       /* allocate enough for 2 full VM pts */
        r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-                                     rdev->vm_manager.max_pfn * 8,
+                                     rdev->vm_manager.max_pfn * 8 * 2,
                                      RADEON_GEM_DOMAIN_VRAM);
        if (r) {
                dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -633,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->list);
        INIT_LIST_HEAD(&vm->va);
-       vm->last_pfn = 0;
+       /* SI requires equal sized PTs for all VMs, so always set
+        * last_pfn to max_pfn.  cayman allows variable sized
+        * pts so we can grow them as needed.  Once we switch
+        * to two level pts we can unify this again.
+        */
+       if (rdev->family >= CHIP_TAHITI)
+               vm->last_pfn = rdev->vm_manager.max_pfn;
+       else
+               vm->last_pfn = 0;
        /* map the ib pool buffer at 0 in virtual address space, set
         * read only
         */
index f28bd4b..21ec9f5 100644 (file)
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* callback hw specific functions if any */
-       if (robj->rdev->asic->ioctl_wait_idle)
-               robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+       if (rdev->asic->ioctl_wait_idle)
+               robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
index 0882554..5b37e28 100644 (file)
@@ -801,9 +801,13 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
                int i;
 
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-                       not_processed += radeon_fence_count_emitted(rdev, i);
-                       if (not_processed >= 3)
-                               break;
+                       struct radeon_ring *ring = &rdev->ring[i];
+
+                       if (ring->ready) {
+                               not_processed += radeon_fence_count_emitted(rdev, i);
+                               if (not_processed >= 3)
+                                       break;
+                       }
                }
 
                if (not_processed >= 3) { /* should upclock */
index 8ddab4c..6bef46a 100644 (file)
@@ -169,11 +169,17 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
        struct radeon_bo *bo = gem_to_radeon_bo(obj);
        int ret = 0;
 
+       ret = radeon_bo_reserve(bo, false);
+       if (unlikely(ret != 0))
+               return ERR_PTR(ret);
+
        /* pin buffer into GTT */
        ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
-       if (ret)
+       if (ret) {
+               radeon_bo_unreserve(bo);
                return ERR_PTR(ret);
-
+       }
+       radeon_bo_unreserve(bo);
        return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
 }
 
index 4ad0281..b4f51c5 100644 (file)
@@ -616,6 +616,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
                                       ACK_FLUSH_CTL(3) |
                                       SYNC_FLUSH_CTL));
 
+       if (rdev->family != CHIP_RV770)
+               WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
        db_debug3 = RREG32(DB_DEBUG3);
        db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
        switch (rdev->family) {
@@ -792,7 +795,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
        WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
                                          NUM_CLIP_SEQ(3)));
-
+       WREG32(VC_ENHANCE, 0);
 }
 
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
index fdc0898..b0adfc5 100644 (file)
 #define        SCRATCH_UMSK                                    0x8540
 #define        SCRATCH_ADDR                                    0x8544
 
+#define        SMX_SAR_CTL0                                    0xA008
 #define        SMX_DC_CTL0                                     0xA020
 #define                USE_HASH_FUNCTION                               (1 << 0)
 #define                CACHE_DEPTH(x)                                  ((x) << 1)
 #define        TCP_CNTL                                        0x9610
 #define        TCP_CHAN_STEER                                  0x9614
 
+#define        VC_ENHANCE                                      0x9714
+
 #define        VGT_CACHE_INVALIDATION                          0x88C4
 #define                CACHE_INVALIDATION(x)                           ((x)<<0)
 #define                        VC_ONLY                                         0
index c7b61f1..0b02792 100644 (file)
@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(0x15DC, 0);
 
        /* empty context1-15 */
-       /* FIXME start with 1G, once using 2 level pt switch to full
+       /* FIXME start with 4G, once using 2 level pt switch to full
         * vm size space
         */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
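
For scale: the old hard-coded end address covered (1 << 30) / 4096 = 262,144 GPU pages, i.e. 1 GiB of VM address space (assuming the usual 4 KiB RADEON_GPU_PAGE_SIZE), whereas rdev->vm_manager.max_pfn extends the page-table range to the full configured VM size.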
index eda938a..501f9d4 100644 (file)
 #define SI_DC_GPIO_HPD_EN                        0x65b8
 #define SI_DC_GPIO_HPD_Y                         0x65bc
 
+#define SI_GRPH_CONTROL                          0x6804
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)           (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2                  0
+#       define SI_ADDR_SURF_P4_8x16             4
+#       define SI_ADDR_SURF_P4_16x16            5
+#       define SI_ADDR_SURF_P4_16x32            6
+#       define SI_ADDR_SURF_P4_32x32            7
+#       define SI_ADDR_SURF_P8_16x16_8x16       8
+#       define SI_ADDR_SURF_P8_16x32_8x16       9
+#       define SI_ADDR_SURF_P8_32x32_8x16       10
+#       define SI_ADDR_SURF_P8_16x32_16x16      11
+#       define SI_ADDR_SURF_P8_32x32_16x16      12
+#       define SI_ADDR_SURF_P8_32x32_16x32      13
+#       define SI_ADDR_SURF_P8_32x64_32x32      14
+
 #endif
index 30d98d1..dd14cd1 100644 (file)
@@ -47,9 +47,9 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv == NULL)
                return -ENOMEM;
 
+       idr_init(&dev_priv->object_idr);
        dev->dev_private = (void *)dev_priv;
        dev_priv->chipset = chipset;
-       idr_init(&dev->object_name_idr);
 
        return 0;
 }
index 4d02c46..6e52069 100644 (file)
 
 static struct drm_driver driver;
 
+/*
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff).
+ * We also require a match on SubClass (0x00) and Protocol (0x00),
+ * which is compatible with all known USB 2.0 era graphics chips and firmware,
+ * but allows DisplayLink to increment those for any future incompatible chips.

+ */
 static struct usb_device_id id_table[] = {
-       {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
+       {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
+        .bInterfaceSubClass = 0x00,
+        .bInterfaceProtocol = 0x00,
+        .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+                       USB_DEVICE_ID_MATCH_INT_CLASS |
+                       USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+                       USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
        {},
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index a8d5f09..4c2d836 100644 (file)
@@ -61,7 +61,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
                        u8 length;
                        u16 key;
 
-                       key = *((u16 *) desc);
+                       key = le16_to_cpu(*((u16 *) desc));
                        desc += sizeof(u16);
                        length = *desc;
                        desc++;
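
The key field lives in a little-endian descriptor byte stream, so the previous raw 16-bit load only gave correct values on little-endian hosts; le16_to_cpu() makes the byte order explicit. An alternative illustration (not what the driver uses) that also sidesteps alignment concerns is get_unaligned_le16():

#include <asm/unaligned.h>

/* Illustrative: read a 16-bit little-endian field at an arbitrary byte
 * offset, independent of host endianness and pointer alignment. */
static u16 example_read_le16_key(const u8 *desc)
{
        return get_unaligned_le16(desc);
}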
index 1f18225..c126182 100644 (file)
@@ -100,12 +100,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv == NULL)
                return -ENOMEM;
 
+       idr_init(&dev_priv->object_idr);
        dev->dev_private = (void *)dev_priv;
 
        dev_priv->chipset = chipset;
 
-       idr_init(&dev->object_name_idr);
-
        pci_set_master(dev->pdev);
 
        ret = drm_vblank_init(dev, 1);
index 034c80a..3fda8c8 100644 (file)
@@ -1,20 +1,11 @@
 #
 # HID driver configuration
 #
-menuconfig HID_SUPPORT
-       bool "HID Devices"
-       depends on INPUT
-       default y
-       ---help---
-         Say Y here to get to see options for various computer-human interface
-         device drivers. This option alone does not add any kernel code.
-
-         If you say N, all options in this submenu will be skipped and disabled.
-
-if HID_SUPPORT
+menu "HID support"
+     depends on INPUT
 
 config HID
-       tristate "Generic HID support"
+       tristate "HID bus support"
        depends on INPUT
        default y
        ---help---
@@ -23,14 +14,17 @@ config HID
          most commonly used to refer to the USB-HID specification, but other
          devices (such as, but not strictly limited to, Bluetooth) are
          designed using HID specification (this involves certain keyboards,
-         mice, tablets, etc). This option compiles into kernel the generic
-         HID layer code (parser, usages, etc.), which can then be used by
-         transport-specific HID implementation (like USB or Bluetooth).
+         mice, tablets, etc). This option adds the HID bus to the kernel,
+         together with the generic HID layer code. HID devices are added to
+         and removed from the HID bus by transport-layer drivers such as
+         usbhid (USB_HID) and hidp (BT_HIDP).
 
          For docs and specs, see http://www.usb.org/developers/hidpage/
 
          If unsure, say Y.
 
+if HID
+
 config HID_BATTERY_STRENGTH
        bool "Battery level reporting for HID devices"
        depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
@@ -59,23 +53,22 @@ config HIDRAW
 
        If unsure, say Y.
 
-source "drivers/hid/usbhid/Kconfig"
-
-menu "Special HID drivers"
-       depends on HID
-
 config HID_GENERIC
        tristate "Generic HID driver"
        depends on HID
-       default y
+       default HID
        ---help---
-       Support for generic HID devices.
+       Support for generic devices on the HID bus. This includes most
+       keyboards and mice, joysticks, tablets and digitizers.
 
        To compile this driver as a module, choose M here: the module
        will be called hid-generic.
 
        If unsure, say Y.
 
+menu "Special HID drivers"
+       depends on HID
+
 config HID_A4TECH
        tristate "A4 tech mice" if EXPERT
        depends on USB_HID
@@ -393,6 +386,7 @@ config HID_MULTITOUCH
          - Unitec Panels
          - XAT optical touch panels
          - Xiroku optical touch panels
+         - Zytronic touch panels
 
          If unsure, say N.
 
@@ -662,4 +656,8 @@ config HID_ZYDACRON
 
 endmenu
 
-endif # HID_SUPPORT
+endif # HID
+
+source "drivers/hid/usbhid/Kconfig"
+
+endmenu
index fa10f84..585344b 100644 (file)
@@ -517,6 +517,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
index 8e3a6b2..4c87276 100644 (file)
@@ -1503,6 +1503,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1880,6 +1883,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
        { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
@@ -1994,6 +1998,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2088,6 +2093,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
        { }
index 9373f53..3203923 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI   0x024c
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO    0x024d
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS    0x024e
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
 #define USB_VENDOR_ID_AVERMEDIA                0x07ca
 #define USB_DEVICE_ID_AVER_FM_MR800    0xb800
 
+#define USB_VENDOR_ID_AXENTIA          0x12cf
+#define USB_DEVICE_ID_AXENTIA_FM_RADIO 0x7111
+
 #define USB_VENDOR_ID_BAANTO           0x2453
 #define USB_DEVICE_ID_BAANTO_MT_190W2  0x0100
 
 #define USB_DEVICE_ID_CRYSTALTOUCH     0x0006
 #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL        0x0007
 
+#define USB_VENDOR_ID_MADCATZ          0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD  0x4540
+
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_DEVICE_ID_MCC_PMD1208LS    0x007a
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE       0x0600
 
+#define USB_VENDOR_ID_SENNHEISER       0x1395
+#define USB_DEVICE_ID_SENNHEISER_BTD500USB     0x002c
+
 #define USB_VENDOR_ID_SIGMA_MICRO      0x1c4f
 #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD     0x0002
 
 #define USB_VENDOR_ID_ZYDACRON 0x13EC
 #define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL  0x0006
 
+#define USB_VENDOR_ID_ZYTRONIC         0x14c8
+#define USB_DEVICE_ID_ZYTRONIC_ZXY100  0x0005
+
 #define USB_VENDOR_ID_PRIMAX   0x0461
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD  0x4e05
 
index 132b001..5301006 100644 (file)
@@ -301,6 +301,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                               USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+               USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        {}
 };
 
index 5e8a7ed..0f9c146 100644 (file)
@@ -436,27 +436,37 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
 
 static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
 {
-       struct dj_report dj_report;
+       struct dj_report *dj_report;
+       int retval;
 
-       memset(&dj_report, 0, sizeof(dj_report));
-       dj_report.report_id = REPORT_ID_DJ_SHORT;
-       dj_report.device_index = 0xFF;
-       dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
-       return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+       dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+       if (!dj_report)
+               return -ENOMEM;
+       dj_report->report_id = REPORT_ID_DJ_SHORT;
+       dj_report->device_index = 0xFF;
+       dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+       retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+       kfree(dj_report);
+       return retval;
 }
 
 static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
                                          unsigned timeout)
 {
-       struct dj_report dj_report;
+       struct dj_report *dj_report;
+       int retval;
 
-       memset(&dj_report, 0, sizeof(dj_report));
-       dj_report.report_id = REPORT_ID_DJ_SHORT;
-       dj_report.device_index = 0xFF;
-       dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
-       dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
-       dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
-       return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+       dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+       if (!dj_report)
+               return -ENOMEM;
+       dj_report->report_id = REPORT_ID_DJ_SHORT;
+       dj_report->device_index = 0xFF;
+       dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+       dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+       dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+       retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+       kfree(dj_report);
+       return retval;
 }
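
The two hunks above move the dj_report off the stack: buffers handed to the HID/USB transport for output reports generally need to come from the heap, because the lower layers may DMA-map them. A minimal sketch of the allocate/send/free pattern, assuming the surrounding logi_dj helpers; note sizeof(*dj_report), which sizes the structure rather than the pointer:

/* Sketch only: heap-allocate the report, send it, free it. */
static int example_send_short_report(struct dj_receiver_dev *djrcv_dev)
{
        struct dj_report *dj_report;
        int retval;

        dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
        if (!dj_report)
                return -ENOMEM;

        dj_report->report_id = REPORT_ID_DJ_SHORT;
        dj_report->device_index = 0xFF;
        retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
        kfree(dj_report);
        return retval;
}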
 
 
index 7cf3ffe..40ac665 100644 (file)
@@ -426,8 +426,10 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
                __set_bit(EV_ABS, input->evbit);
 
                input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
-               input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
-               input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
+               input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
+                                    4, 0);
+               input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
+                                    4, 0);
                input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
 
                /* Note: Touch Y position from the device is inverted relative
index 6e3332a..7647924 100644 (file)
@@ -1048,6 +1048,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
                        USB_DEVICE_ID_XIROKU_CSR2) },
 
+       /* Zytronic panels */
+       { .driver_data = MT_CLS_SERIAL,
+               MT_USB_DEVICE(USB_VENDOR_ID_ZYTRONIC,
+                       USB_DEVICE_ID_ZYTRONIC_ZXY100) },
+
        /* Generic MT device */
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
        { }
index 0f20fd1..0108c59 100644 (file)
@@ -1,13 +1,13 @@
-comment "USB Input Devices"
+menu "USB HID support"
        depends on USB
 
 config USB_HID
-       tristate "USB Human Interface Device (full HID) support"
+       tristate "USB HID transport layer"
        default y
        depends on USB && INPUT
        select HID
        ---help---
-         Say Y here if you want full HID support to connect USB keyboards,
+         Say Y here if you want to connect USB keyboards,
          mice, joysticks, graphic tablets, or any other HID based devices
          to your computer via USB, as well as Uninterruptible Power Supply
          (UPS) and monitor control devices.
@@ -81,4 +81,4 @@ config USB_MOUSE
 
 endmenu
 
-
+endmenu
index 0597ee6..903eef3 100644 (file)
@@ -76,6 +76,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
index f082e48..2cde9ec 100644 (file)
@@ -8,7 +8,7 @@
  *
  * Based on hdaps.c driver:
  * Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
  *
  * Fan control based on smcFanControl:
  * Copyright (C) 2006 Hendrik Holtmann <holtmann@mac.com>
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
        int i;
 
        if (send_command(cmd) || send_argument(key)) {
-               pr_warn("%s: read arg fail\n", key);
+               pr_warn("%.4s: read arg fail\n", key);
                return -EIO;
        }
 
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
 
        for (i = 0; i < len; i++) {
                if (__wait_status(0x05)) {
-                       pr_warn("%s: read data fail\n", key);
+                       pr_warn("%.4s: read data fail\n", key);
                        return -EIO;
                }
                buffer[i] = inb(APPLESMC_DATA_PORT);
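
SMC keys are four-character identifiers with no terminating NUL, so a plain %s would read past the key; the %.4s precision caps the output at four bytes. A self-contained illustration of the precision specifier:

#include <stdio.h>

int main(void)
{
        /* a four-byte key with no terminating NUL, as in the SMC protocol */
        const char key[4] = { 'T', 'C', '0', 'P' };

        printf("%.4s: read arg fail\n", key);   /* prints at most four bytes */
        return 0;
}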
index b9d5123..637c51c 100644 (file)
@@ -191,6 +191,24 @@ static ssize_t show_temp(struct device *dev,
        return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
 }
 
+struct tjmax {
+       char const *id;
+       int tjmax;
+};
+
+static struct tjmax __cpuinitconst tjmax_table[] = {
+       { "CPU D410", 100000 },
+       { "CPU D425", 100000 },
+       { "CPU D510", 100000 },
+       { "CPU D525", 100000 },
+       { "CPU N450", 100000 },
+       { "CPU N455", 100000 },
+       { "CPU N470", 100000 },
+       { "CPU N475", 100000 },
+       { "CPU  230", 100000 },
+       { "CPU  330", 125000 },
+};
+
 static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
                                  struct device *dev)
 {
@@ -202,6 +220,13 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
        int err;
        u32 eax, edx;
        struct pci_dev *host_bridge;
+       int i;
+
+       /* explicit tjmax table entries override heuristics */
+       for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
+               if (strstr(c->x86_model_id, tjmax_table[i].id))
+                       return tjmax_table[i].tjmax;
+       }
 
        /* Early chips have no MSR for TjMax */
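
The new tjmax_table lets explicit per-model values short-circuit the MSR/heuristic path; the lookup is a plain substring match against the CPU model string. A standalone sketch of the same table-override pattern, with model strings taken from the hunk above:

#include <stdio.h>
#include <string.h>

struct tjmax {
        const char *id;
        int tjmax;              /* millidegrees Celsius */
};

static const struct tjmax tjmax_table[] = {
        { "CPU N450", 100000 },
        { "CPU  330", 125000 },
};

/* Return the table override for a model string, or -1 to fall back to
 * the heuristic/MSR path. */
static int lookup_tjmax(const char *model_id)
{
        size_t i;

        for (i = 0; i < sizeof(tjmax_table) / sizeof(tjmax_table[0]); i++)
                if (strstr(model_id, tjmax_table[i].id))
                        return tjmax_table[i].tjmax;
        return -1;
}

int main(void)
{
        printf("%d\n", lookup_tjmax("Intel(R) Atom(TM) CPU N450   @ 1.66GHz"));
        return 0;
}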
 
@@ -210,7 +235,8 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
 
        /* Atom CPUs */
 
-       if (c->x86_model == 0x1c) {
+       if (c->x86_model == 0x1c || c->x86_model == 0x26
+           || c->x86_model == 0x27) {
                usemsr_ee = 0;
 
                host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
@@ -223,6 +249,9 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
                        tjmax = 90000;
 
                pci_dev_put(host_bridge);
+       } else if (c->x86_model == 0x36) {
+               usemsr_ee = 0;
+               tjmax = 100000;
        }
 
        if (c->x86_model > 0xe && usemsr_ee) {
@@ -664,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
         * sensors. We check this bit only, all the early CPUs
         * without thermal sensors will be filtered out.
         */
-       if (!cpu_has(c, X86_FEATURE_DTS))
+       if (!cpu_has(c, X86_FEATURE_DTHERM))
                return;
 
        if (!pdev) {
@@ -765,14 +794,14 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
 };
 
 static const struct x86_cpu_id coretemp_ids[] = {
-       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
 
 static int __init coretemp_init(void)
 {
-       int i, err = -ENODEV;
+       int i, err;
 
        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
index 9691f66..e7d234b 100644 (file)
@@ -451,11 +451,15 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
                data->fan_rpm_control = true;
                break;
        default:
-               mutex_unlock(&data->update_lock);
-               return -EINVAL;
+               count = -EINVAL;
+               goto err;
        }
 
-       read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+       result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+       if (result) {
+               count = result;
+               goto err;
+       }
 
        if (data->fan_rpm_control)
                conf_reg |= 0x80;
@@ -463,7 +467,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
                conf_reg &= ~0x80;
 
        i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
-
+err:
        mutex_unlock(&data->update_lock);
        return count;
 }
index e7701d9..f1de397 100644 (file)
@@ -2341,7 +2341,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
 
        /* Start monitoring */
        it87_write_value(data, IT87_REG_CONFIG,
-                        (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
+                        (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
                         | (update_vbat ? 0x41 : 0x01));
 }
 
index a9bfd67..e72ba5d 100644 (file)
@@ -590,6 +590,6 @@ abort:
 
 module_i2c_driver(jc42_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("JC42 driver");
 MODULE_LICENSE("GPL");
index d264937..bd75d24 100644 (file)
@@ -567,6 +567,6 @@ static struct i2c_driver pem_driver = {
 
 module_i2c_driver(pem_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
 MODULE_LICENSE("GPL");
index 069b7d3..77476a5 100644 (file)
@@ -292,6 +292,6 @@ static struct i2c_driver ltc4261_driver = {
 
 module_i2c_driver(ltc4261_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("LTC4261 driver");
 MODULE_LICENSE("GPL");
index 822261b..019427d 100644 (file)
@@ -692,6 +692,6 @@ static struct i2c_driver max16065_driver = {
 
 module_i2c_driver(max16065_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("MAX16065 driver");
 MODULE_LICENSE("GPL");
index 61c9cf1..1201a15 100644 (file)
@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                spin_lock_init(&hwlock->lock);
                hwlock->bank = bank;
 
-               ret = hwspin_lock_register_single(hwlock, i);
+               ret = hwspin_lock_register_single(hwlock, base_id + i);
                if (ret)
                        goto reg_failed;
        }
@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
 
 reg_failed:
        while (--i >= 0)
-               hwspin_lock_unregister_single(i);
+               hwspin_lock_unregister_single(base_id + i);
        return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
index 56eecef..2ec93da 100644 (file)
@@ -8,8 +8,7 @@ menuconfig IIO
        help
          The industrial I/O subsystem provides a unified framework for
          drivers for many different types of embedded sensors using a
-         number of different physical interfaces (i2c, spi, etc). See
-         Documentation/iio for more information.
+         number of different physical interfaces (i2c, spi, etc).
 
 if IIO
 
index 1ddd886..4f947e4 100644 (file)
@@ -661,7 +661,6 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
         * New channel registration method - relies on the fact a group does
         * not need to be initialized if it is name is NULL.
         */
-       INIT_LIST_HEAD(&indio_dev->channel_attr_list);
        if (indio_dev->channels)
                for (i = 0; i < indio_dev->num_channels; i++) {
                        ret = iio_device_add_channel_sysfs(indio_dev,
@@ -725,12 +724,16 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
 static void iio_dev_release(struct device *device)
 {
        struct iio_dev *indio_dev = dev_to_iio_dev(device);
-       cdev_del(&indio_dev->chrdev);
+       if (indio_dev->chrdev.dev)
+               cdev_del(&indio_dev->chrdev);
        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
                iio_device_unregister_trigger_consumer(indio_dev);
        iio_device_unregister_eventset(indio_dev);
        iio_device_unregister_sysfs(indio_dev);
        iio_device_unregister_debugfs(indio_dev);
+
+       ida_simple_remove(&iio_ida, indio_dev->id);
+       kfree(indio_dev);
 }
 
 static struct device_type iio_dev_type = {
@@ -761,6 +764,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
                dev_set_drvdata(&dev->dev, (void *)dev);
                mutex_init(&dev->mlock);
                mutex_init(&dev->info_exist_lock);
+               INIT_LIST_HEAD(&dev->channel_attr_list);
 
                dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
                if (dev->id < 0) {
@@ -778,10 +782,8 @@ EXPORT_SYMBOL(iio_device_alloc);
 
 void iio_device_free(struct iio_dev *dev)
 {
-       if (dev) {
-               ida_simple_remove(&iio_ida, dev->id);
-               kfree(dev);
-       }
+       if (dev)
+               put_device(&dev->dev);
 }
 EXPORT_SYMBOL(iio_device_free);
 
@@ -902,7 +904,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
        mutex_lock(&indio_dev->info_exist_lock);
        indio_dev->info = NULL;
        mutex_unlock(&indio_dev->info_exist_lock);
-       device_unregister(&indio_dev->dev);
+       device_del(&indio_dev->dev);
 }
 EXPORT_SYMBOL(iio_device_unregister);
 subsys_initcall(iio_init);
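
The net effect of the hunks above is reference-counted teardown: iio_device_free() becomes a put_device(), the IDA id and the allocation are released from the device's release callback, and the unregister path calls device_del() only (device_unregister() would be device_del() plus put_device()). A hedged sketch of the general release-callback pattern, with hypothetical names:

/* Illustrative only: a device embedded in a larger structure is freed
 * from its release callback, never kfree()d directly. */
struct example_dev {
        struct device dev;      /* dev.release must point at example_release */
        int id;
};

static void example_release(struct device *dev)
{
        struct example_dev *edev = container_of(dev, struct example_dev, dev);

        kfree(edev);            /* runs once the last reference is gone */
}

static void example_free(struct example_dev *edev)
{
        if (edev)
                put_device(&edev->dev); /* release() fires when the refcount hits zero */
}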
index 55d5642..2e826f9 100644 (file)
@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
 
 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
 {
-       return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
+       return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
                 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
                ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
                 (id->qp_type == IB_QPT_UD)) ||
index 037f5ce..48970af 100644 (file)
@@ -61,6 +61,7 @@ struct ocrdma_dev_attr {
        u32 max_inline_data;
        int max_send_sge;
        int max_recv_sge;
+       int max_srq_sge;
        int max_mr;
        u64 max_mr_size;
        u32 max_num_mr_pbl;
index 9343a15..71942af 100644 (file)
@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
                              struct ocrdma_dev_attr *attr,
                              struct ocrdma_mbx_query_config *rsp)
 {
-       int max_q_mem;
-
        attr->max_pd =
            (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_recv_sge = (rsp->max_write_send_sge &
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+       attr->max_srq_sge = (rsp->max_srq_rqe_sge &
+                             OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
        attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_inline_data =
            attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
                              sizeof(struct ocrdma_sge));
-       max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
-       /* hw can queue one less then the configured size,
-        * so publish less by one to stack.
-        */
        if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
-               dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
                attr->ird = 1;
                attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
                attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
-       } else
-               dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
-       dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
+       }
+       dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
+                OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
+       dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
+               OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
 }
 
 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
index 04fef3d..b050e62 100644 (file)
@@ -97,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
        sgid->raw[15] = mac_addr[5];
 }
 
-static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
                            bool is_vlan, u16 vlan_id)
 {
        int i;
-       bool found = false;
        union ib_gid new_sgid;
-       int free_idx = OCRDMA_MAX_SGID;
        unsigned long flags;
 
        memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -115,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
                if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
                            sizeof(union ib_gid))) {
                        /* found free entry */
-                       if (!found) {
-                               free_idx = i;
-                               found = true;
-                               break;
-                       }
+                       memcpy(&dev->sgid_tbl[i], &new_sgid,
+                              sizeof(union ib_gid));
+                       spin_unlock_irqrestore(&dev->sgid_lock, flags);
+                       return true;
                } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
                                   sizeof(union ib_gid))) {
                        /* entry already present, no addition is required. */
                        spin_unlock_irqrestore(&dev->sgid_lock, flags);
-                       return;
+                       return false;
                }
        }
-       /* if entry doesn't exist and if table has some space, add entry */
-       if (found)
-               memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
-                      sizeof(union ib_gid));
        spin_unlock_irqrestore(&dev->sgid_lock, flags);
+       return false;
 }
 
 static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -167,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
        ocrdma_get_guid(dev, &sgid->raw[8]);
 }
 
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
 {
        struct net_device *netdev, *tmp;
        u16 vlan_id;
@@ -175,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
 
        netdev = dev->nic_info.netdev;
 
-       ocrdma_add_default_sgid(dev);
-
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, tmp) {
                if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -194,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
                }
        }
        rcu_read_unlock();
+}
+#else
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
+{
+
+}
+#endif /* VLAN */
+
+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+{
+       ocrdma_add_default_sgid(dev);
+       ocrdma_add_vlan_sgids(dev);
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
+defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
 static int ocrdma_inet6addr_event(struct notifier_block *notifier,
                                  unsigned long event, void *ptr)
@@ -208,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
        struct ib_event gid_event;
        struct ocrdma_dev *dev;
        bool found = false;
+       bool updated = false;
        bool is_vlan = false;
        u16 vid = 0;
 
@@ -233,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
        mutex_lock(&dev->dev_lock);
        switch (event) {
        case NETDEV_UP:
-               ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+               updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
                break;
        case NETDEV_DOWN:
-               found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
-               if (found) {
-                       /* found the matching entry, notify
-                        * the consumers about it
-                        */
-                       gid_event.device = &dev->ibdev;
-                       gid_event.element.port_num = 1;
-                       gid_event.event = IB_EVENT_GID_CHANGE;
-                       ib_dispatch_event(&gid_event);
-               }
+               updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
                break;
        default:
                break;
        }
+       if (updated) {
+               /* GID table updated, notify the consumers about it */
+               gid_event.device = &dev->ibdev;
+               gid_event.element.port_num = 1;
+               gid_event.event = IB_EVENT_GID_CHANGE;
+               ib_dispatch_event(&gid_event);
+       }
        mutex_unlock(&dev->dev_lock);
        return NOTIFY_OK;
 }
@@ -258,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = {
        .notifier_call = ocrdma_inet6addr_event
 };
 
-#endif /* IPV6 */
+#endif /* IPV6 and VLAN */
 
 static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
                                              u8 port_num)
index 7fd80cc..c75cbdf 100644 (file)
@@ -418,6 +418,9 @@ enum {
 
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT         = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK          = 0xFFFF,
+       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT        = 16,
+       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK         = 0xFFFF <<
+                               OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
 
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT       = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK        = 0xFFFF,
@@ -458,7 +461,7 @@ enum {
                                OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
        OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET     = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK       = 0xFFFF <<
-                               OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+                               OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
 
        OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET              = 16,
        OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK                = 0xFFFF <<
index d16d172..2e2e7ae 100644 (file)
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
 
        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
-       if (index > OCRDMA_MAX_SGID)
+       if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;
 
        memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY;
-       attr->max_sge = dev->attr.max_send_sge;
-       attr->max_sge_rd = dev->attr.max_send_sge;
+       attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
+       attr->max_sge_rd = 0;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = (dev->attr.max_qp - 1);
-       attr->max_srq_sge = attr->max_sge;
+       attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = 0;
@@ -2301,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                        *stop = true;
                        expand = false;
                }
-       } else
+       } else {
+               *polled = true;
                expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+       }
        return expand;
 }
 
index 5c1bc99..f10221f 100644 (file)
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
 
                skb_frag_size_set(frag, size);
                skb->data_len += size;
-               skb->truesize += size;
+               skb->truesize += PAGE_SIZE;
        } else
                skb_put(skb, length);
 
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int buf_size;
+       int tailroom;
        u64 *mapping;
 
-       if (ipoib_ud_need_sg(priv->max_ib_mtu))
+       if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                buf_size = IPOIB_UD_HEAD_SIZE;
-       else
+               tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+       } else {
                buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+               tailroom = 0;
+       }
 
-       skb = dev_alloc_skb(buf_size + 4);
+       skb = dev_alloc_skb(buf_size + tailroom + 4);
        if (unlikely(!skb))
                return NULL;
 
index 57d19d4..c96653b 100644 (file)
@@ -282,7 +282,8 @@ static int __devinit as5011_probe(struct i2c_client *client,
 
        error = request_threaded_irq(as5011->button_irq,
                                     NULL, as5011_button_interrupt,
-                                    IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_RISING |
+                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     "as5011_button", as5011);
        if (error < 0) {
                dev_err(&client->dev,
@@ -296,7 +297,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
 
        error = request_threaded_irq(as5011->axis_irq, NULL,
                                     as5011_axis_interrupt,
-                                    plat_data->axis_irqflags,
+                                    plat_data->axis_irqflags | IRQF_ONESHOT,
                                     "as5011_joystick", as5011);
        if (error) {
                dev_err(&client->dev,
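
This and the following input-driver hunks add IRQF_ONESHOT: when request_threaded_irq() is passed a NULL primary handler, the genirq core requires IRQF_ONESHOT so the line stays masked until the threaded handler has finished, and it generally rejects the request otherwise (unless the irq chip is marked one-shot safe). A minimal sketch with hypothetical handler and device names:

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
        /* sleepable work runs here, with the line still masked */
        return IRQ_HANDLED;
}

static int example_request_irq(struct i2c_client *client, void *data)
{
        /* NULL primary handler => IRQF_ONESHOT is mandatory */
        return request_threaded_irq(client->irq, NULL, example_thread_fn,
                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                    "example-dev", data);
}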
index ee16fb6..83811e4 100644 (file)
@@ -142,6 +142,7 @@ static const struct xpad_device {
        { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
        { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
        { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+       { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
        { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
        { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
@@ -164,6 +165,7 @@ static const struct xpad_device {
        { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+       { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
        { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
 };
@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
        XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x046d),            /* Logitech X-Box 360 style controllers */
        XPAD_XBOX360_VENDOR(0x0738),            /* Mad Catz X-Box 360 controllers */
+       { USB_DEVICE(0x0738, 0x4540) },         /* Mad Catz Beat Pad */
        XPAD_XBOX360_VENDOR(0x0e6f),            /* 0x0e6f X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x12ab),            /* X-Box 360 dance pads */
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x146b),            /* BigBen Interactive Controllers */
        XPAD_XBOX360_VENDOR(0x1bad),            /* Harminix Rock Band Guitar and Drums */
-       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x1689),            /* Razer Onza */
        { }
 };
 
index 64a0ca4..0d77f6c 100644 (file)
@@ -178,7 +178,8 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
        }
 
        error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
-                       IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                    client->dev.driver->name, data);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
                goto err_free_mem;
index caa218a..7613f1c 100644 (file)
@@ -248,7 +248,7 @@ static int __devinit mpr_touchkey_probe(struct i2c_client *client,
 
        error = request_threaded_irq(client->irq, NULL,
                                     mpr_touchkey_interrupt,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->dev.driver->name, mpr121);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
index 0b7b2f8..ca68f29 100644 (file)
@@ -201,7 +201,8 @@ static int __devinit qt1070_probe(struct i2c_client *client,
        msleep(QT1070_RESET_TIME);
 
        err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
-               IRQF_TRIGGER_NONE, client->dev.driver->name, data);
+                                  IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+                                  client->dev.driver->name, data);
        if (err) {
                dev_err(&client->dev, "fail to request irq\n");
                goto err_free_mem;
index 3afea3f..c355cdd 100644 (file)
@@ -278,7 +278,8 @@ static int __devinit tca6416_keypad_probe(struct i2c_client *client,
 
                error = request_threaded_irq(chip->irqnum, NULL,
                                             tca6416_keys_isr,
-                                            IRQF_TRIGGER_FALLING,
+                                            IRQF_TRIGGER_FALLING |
+                                               IRQF_ONESHOT,
                                             "tca6416-keypad", chip);
                if (error) {
                        dev_dbg(&client->dev,
index 5f87b28..893869b 100644 (file)
@@ -360,7 +360,7 @@ static int __devinit tca8418_keypad_probe(struct i2c_client *client,
                client->irq = gpio_to_irq(client->irq);
 
        error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->name, keypad_data);
        if (error) {
                dev_dbg(&client->dev,
index a4a445f..4c34f21 100644 (file)
@@ -227,15 +227,15 @@ static int __devinit keypad_probe(struct platform_device *pdev)
                goto error_clk;
        }
 
-       error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
-                                    dev_name(dev), kp);
+       error = request_threaded_irq(kp->irq_press, NULL, keypad_irq,
+                                    IRQF_ONESHOT, dev_name(dev), kp);
        if (error < 0) {
                dev_err(kp->dev, "Could not allocate keypad press key irq\n");
                goto error_irq_press;
        }
 
-       error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
-                                    dev_name(dev), kp);
+       error = request_threaded_irq(kp->irq_release, NULL, keypad_irq,
+                                    IRQF_ONESHOT, dev_name(dev), kp);
        if (error < 0) {
                dev_err(kp->dev, "Could not allocate keypad release key irq\n");
                goto error_irq_release;
index 0ac75bb..2e5d5e1 100644 (file)
@@ -972,6 +972,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
        struct ad714x_platform_data *plat_data = dev->platform_data;
        struct ad714x_chip *ad714x;
        void *drv_mem;
+       unsigned long irqflags;
 
        struct ad714x_button_drv *bt_drv;
        struct ad714x_slider_drv *sd_drv;
@@ -1162,10 +1163,11 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
                alloc_idx++;
        }
 
+       irqflags = plat_data->irqflags ?: IRQF_TRIGGER_FALLING;
+       irqflags |= IRQF_ONESHOT;
+
        error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
-                               plat_data->irqflags ?
-                                       plat_data->irqflags : IRQF_TRIGGER_FALLING,
-                               "ad714x_captouch", ad714x);
+                                    irqflags, "ad714x_captouch", ad714x);
        if (error) {
                dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
                goto err_unreg_dev;
index 35083c6..c1313d8 100644 (file)
@@ -213,7 +213,8 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
        /* REVISIT:  flush the event queue? */
 
        status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
-                       IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys);
+                                     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                     dev_name(&pdev->dev), keys);
        if (status < 0)
                goto fail2;
 
index 2cf681d..d528c23 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI  0x0252
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO   0x0253
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS   0x0254
+/* MacbookPro10,1 (unibody, June 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 
 #define BCM5974_DEVICE(prod) {                                 \
        .match_flags = (USB_DEVICE_ID_MATCH_DEVICE |            \
@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+       /* MacbookPro10,1 */
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
        /* Terminating entry */
        {}
 };
@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
                { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
        },
+       {
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
+               HAS_INTEGRATED_BUTTON,
+               0x84, sizeof(struct bt_data),
+               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+               { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+               { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+               { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+       },
        {}
 };
 
index cad5602..8b31473 100644 (file)
@@ -216,7 +216,7 @@ static void wacom_retrieve_report_data(struct usb_interface *intf,
 
                rep_data[0] = 12;
                result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
-                                         rep_data[0], &rep_data, 2,
+                                         rep_data[0], rep_data, 2,
                                          WAC_MSG_RETRIES);
 
                if (result >= 0 && rep_data[1] > 2)
@@ -401,7 +401,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
                                break;
 
                        case HID_USAGE_CONTACTMAX:
-                               wacom_retrieve_report_data(intf, features);
+                               /* leave touch_max as is if predefined */
+                               if (!features->touch_max)
+                                       wacom_retrieve_report_data(intf, features);
                                i++;
                                break;
                        }
index e2482b4..bd4eb42 100644 (file)
@@ -597,7 +597,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
                        AD7879_TMR(ts->pen_down_acc_interval);
 
        err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
-                                  IRQF_TRIGGER_FALLING,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                   dev_name(dev), ts);
        if (err) {
                dev_err(dev, "irq %d busy?\n", ts->irq);
index 42e6450..25fd056 100644 (file)
@@ -1149,7 +1149,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
                goto err_free_object;
 
        error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
-                       pdata->irqflags, client->dev.driver->name, data);
+                                    pdata->irqflags | IRQF_ONESHOT,
+                                    client->dev.driver->name, data);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
                goto err_free_object;
index f2d03c0..5c487d2 100644 (file)
@@ -509,7 +509,8 @@ static int __devinit bu21013_probe(struct i2c_client *client,
        input_set_drvdata(in_dev, bu21013_data);
 
        error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
-                                    IRQF_TRIGGER_FALLING | IRQF_SHARED,
+                                    IRQF_TRIGGER_FALLING | IRQF_SHARED |
+                                       IRQF_ONESHOT,
                                     DRIVER_TP, bu21013_data);
        if (error) {
                dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
index 237753a..464f1bf 100644 (file)
@@ -251,7 +251,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
        }
 
        err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread,
-                                  IRQF_TRIGGER_RISING, "touch_reset_key", ts);
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                  "touch_reset_key", ts);
        if (err < 0) {
                dev_err(&client->dev,
                        "irq %d busy? error %d\n", client->irq, err);
index 3cd7a83..cf29937 100644 (file)
@@ -620,7 +620,7 @@ static int __devinit mrstouch_probe(struct platform_device *pdev)
                             MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
 
        err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
-                                  0, "mrstouch", tsdev);
+                                  IRQF_ONESHOT, "mrstouch", tsdev);
        if (err) {
                dev_err(tsdev->dev, "unable to allocate irq\n");
                goto err_free_mem;
index 72f6ba3..953b4c1 100644 (file)
@@ -165,7 +165,7 @@ static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
        input_set_drvdata(input, tsdata);
 
        error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->name, tsdata);
        if (error) {
                dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
index 7e74880..368d2c6 100644 (file)
@@ -297,7 +297,7 @@ static int __devinit tsc_probe(struct platform_device *pdev)
                goto error_clk;
        }
 
-       error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
+       error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
                                     dev_name(dev), ts);
        if (error < 0) {
                dev_err(ts->dev, "Could not allocate ts irq\n");
index b6adeae..5ce3fa8 100644 (file)
@@ -650,7 +650,8 @@ static int __devinit tsc2005_probe(struct spi_device *spi)
        tsc2005_stop_scan(ts);
 
        error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
-                                    IRQF_TRIGGER_RISING, "tsc2005", ts);
+                                    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                    "tsc2005", ts);
        if (error) {
                dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
                goto err_free_mem;
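
The run of touchscreen hunks above all apply the same fix: each driver passes a NULL primary handler to request_threaded_irq(), and the genirq core rejects such purely threaded requests unless IRQF_ONESHOT is set, since the line must stay masked until the thread handler finishes. A minimal sketch of the resulting call, with hypothetical driver names:

#include <linux/interrupt.h>

/* hypothetical thread handler: the slow, sleeping half of the IRQ */
static irqreturn_t example_ts_irq_thread(int irq, void *dev_id)
{
        /* talk to the controller over I2C/SPI and report input events */
        return IRQ_HANDLED;
}

/* hypothetical probe fragment */
static int example_ts_request_irq(int irq, void *priv)
{
        return request_threaded_irq(irq, NULL, example_ts_irq_thread,
                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                    "example-ts", priv);
}
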
index a2e418c..6256263 100644 (file)
@@ -83,6 +83,8 @@ static struct iommu_ops amd_iommu_ops;
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
 
+static struct dma_map_ops amd_iommu_dma_ops;
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -402,7 +404,7 @@ static void amd_iommu_stats_init(void)
                return;
 
        de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
-                                        (u32 *)&amd_iommu_unmap_flush);
+                                        &amd_iommu_unmap_flush);
 
        amd_iommu_stats_add(&compl_wait);
        amd_iommu_stats_add(&cnt_map_single);
@@ -2267,6 +2269,13 @@ static int device_change_notifier(struct notifier_block *nb,
                list_add_tail(&dma_domain->list, &iommu_pd_list);
                spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 
+               dev_data = get_dev_data(dev);
+
+               if (!dev_data->passthrough)
+                       dev->archdata.dma_ops = &amd_iommu_dma_ops;
+               else
+                       dev->archdata.dma_ops = &nommu_dma_ops;
+
                break;
        case BUS_NOTIFY_DEL_DEVICE:
 
index 542024b..a33612f 100644 (file)
@@ -129,7 +129,7 @@ u16 amd_iommu_last_bdf;                     /* largest PCI device id we have
                                           to handle */
 LIST_HEAD(amd_iommu_unity_map);                /* a list of required unity mappings
                                           we find in ACPI */
-bool amd_iommu_unmap_flush;            /* if true, flush on every unmap */
+u32 amd_iommu_unmap_flush;             /* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);             /* list of all AMD IOMMUs in the
                                           system */
@@ -1641,6 +1641,8 @@ static int __init amd_iommu_init(void)
 
        amd_iommu_init_api();
 
+       x86_platform.iommu_shutdown = disable_iommus;
+
        if (iommu_pass_through)
                goto out;
 
@@ -1649,8 +1651,6 @@ static int __init amd_iommu_init(void)
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
-       x86_platform.iommu_shutdown = disable_iommus;
-
 out:
        return ret;
 
index 2435555..c1b1d48 100644 (file)
@@ -652,7 +652,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
  */
-extern bool amd_iommu_unmap_flush;
+extern u32 amd_iommu_unmap_flush;
 
 /* Smallest number of PASIDs supported by any IOMMU in the system */
 extern u32 amd_iommu_max_pasids;
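
The amd_iommu hunks above widen amd_iommu_unmap_flush from bool to u32 because debugfs_create_bool() in this kernel takes a u32 *; exporting a bool through a (u32 *) cast lets debugfs read and write more bytes than the bool occupies. A sketch of the intended usage, with illustrative names:

#include <linux/debugfs.h>

static u32 example_flush_on_unmap;      /* treated as a boolean: 0 or 1 */

static void example_create_debugfs(struct dentry *dir)
{
        /* with this kernel's API the backing variable must be a u32 */
        debugfs_create_bool("fullflush", 0444, dir, &example_flush_on_unmap);
}
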
index 3a74e44..86e2f4a 100644 (file)
@@ -26,6 +26,8 @@
  * These routines are used by both DMA-remapping and Interrupt-remapping
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
+
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/iova.h>
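
This hunk defines pr_fmt() ahead of the printk.h include so the pr_warn()/pr_info()/pr_err() conversions later in the file pick up a uniform prefix automatically, replacing the hand-rolled PREFIX string. A standalone sketch of the mechanism (the literal prefix and function name here are only examples; the hunk itself uses KBUILD_MODNAME so the prefix tracks the module name):

/* must be defined before printk.h is pulled in */
#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/printk.h>

static void example_report_missing_bus(int bus)
{
        /* emits: "DMAR: Device scope bus [3] not found" for bus == 3 */
        pr_warn("Device scope bus [%d] not found\n", bus);
}
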
@@ -39,8 +41,6 @@
 #include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>
 
-#define PREFIX "DMAR: "
-
 /* No locks are needed as DMA remapping hardware unit
  * list is constructed at boot time and hotplug of
  * these units are not supported by the architecture.
@@ -83,16 +83,12 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                 * ignore it
                 */
                if (!bus) {
-                       printk(KERN_WARNING
-                       PREFIX "Device scope bus [%d] not found\n",
-                       scope->bus);
+                       pr_warn("Device scope bus [%d] not found\n", scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
                if (!pdev) {
-                       printk(KERN_WARNING PREFIX
-                       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
-                               segment, bus->number, path->dev, path->fn);
+                       /* warning will be printed below */
                        break;
                }
                path ++;
@@ -100,9 +96,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                bus = pdev->subordinate;
        }
        if (!pdev) {
-               printk(KERN_WARNING PREFIX
-               "Device scope device [%04x:%02x:%02x.%02x] not found\n",
-               segment, scope->bus, path->dev, path->fn);
+               pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
+                       segment, scope->bus, path->dev, path->fn);
                *dev = NULL;
                return 0;
        }
@@ -110,9 +105,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                        pdev->subordinate) || (scope->entry_type == \
                        ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
                pci_dev_put(pdev);
-               printk(KERN_WARNING PREFIX
-                       "Device scope type does not match for %s\n",
-                        pci_name(pdev));
+               pr_warn("Device scope type does not match for %s\n",
+                       pci_name(pdev));
                return -EINVAL;
        }
        *dev = pdev;
@@ -134,8 +128,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
-                       printk(KERN_WARNING PREFIX
-                              "Unsupported device scope\n");
+                       pr_warn("Unsupported device scope\n");
                }
                start += scope->length;
        }
@@ -261,25 +254,23 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
-               printk (KERN_INFO PREFIX
-                       "DRHD base: %#016Lx flags: %#x\n",
+               pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
-               printk (KERN_INFO PREFIX
-                       "RMRR base: %#016Lx end: %#016Lx\n",
+               pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
-               printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+               pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
-               printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+               pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                       (unsigned long long)rhsa->base_address,
                       rhsa->proximity_domain);
                break;
@@ -299,7 +290,7 @@ static int __init dmar_table_detect(void)
                                &dmar_tbl_size);
 
        if (ACPI_SUCCESS(status) && !dmar_tbl) {
-               printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+               pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }
 
@@ -333,20 +324,18 @@ parse_dmar_table(void)
                return -ENODEV;
 
        if (dmar->width < PAGE_SHIFT - 1) {
-               printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
+               pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }
 
-       printk (KERN_INFO PREFIX "Host address width %d\n",
-               dmar->width + 1);
+       pr_info("Host address width %d\n", dmar->width + 1);
 
        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
-                       printk(KERN_WARNING PREFIX
-                               "Invalid 0-length structure\n");
+                       pr_warn("Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }
@@ -369,8 +358,7 @@ parse_dmar_table(void)
 #endif
                        break;
                default:
-                       printk(KERN_WARNING PREFIX
-                               "Unknown DMAR structure type %d\n",
+                       pr_warn("Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
@@ -469,12 +457,12 @@ int __init dmar_table_init(void)
        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
-                       printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+                       pr_info("parse DMAR table failure.\n");
                return ret;
        }
 
        if (list_empty(&dmar_drhd_units)) {
-               printk(KERN_INFO PREFIX "No DMAR devices found\n");
+               pr_info("No DMAR devices found\n");
                return -ENODEV;
        }
 
@@ -506,8 +494,7 @@ int __init check_zero_address(void)
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
-                       printk(KERN_WARNING PREFIX
-                               "Invalid 0-length structure\n");
+                       pr_warn("Invalid 0-length structure\n");
                        return 0;
                }
 
@@ -558,8 +545,7 @@ int __init detect_intel_iommu(void)
 
                if (ret && irq_remapping_enabled && cpu_has_x2apic &&
                    dmar->flags & 0x1)
-                       printk(KERN_INFO
-                              "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+                       pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
 
                if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                        iommu_detected = 1;
@@ -579,14 +565,89 @@ int __init detect_intel_iommu(void)
 }
 
 
+static void unmap_iommu(struct intel_iommu *iommu)
+{
+       iounmap(iommu->reg);
+       release_mem_region(iommu->reg_phys, iommu->reg_size);
+}
+
+/**
+ * map_iommu: map the iommu's registers
+ * @iommu: the iommu to map
+ * @phys_addr: the physical address of the base register
+ *
+ * Memory map the iommu's registers.  Start w/ a single page, and
+ * possibly expand if that turns out to be insufficient.
+ */
+static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
+{
+       int map_size, err=0;
+
+       iommu->reg_phys = phys_addr;
+       iommu->reg_size = VTD_PAGE_SIZE;
+
+       if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
+               pr_err("IOMMU: can't reserve memory\n");
+               err = -EBUSY;
+               goto out;
+       }
+
+       iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+       if (!iommu->reg) {
+               pr_err("IOMMU: can't map the region\n");
+               err = -ENOMEM;
+               goto release;
+       }
+
+       iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+       iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+       if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+               err = -EINVAL;
+               warn_invalid_dmar(phys_addr, " returns all ones");
+               goto unmap;
+       }
+
+       /* the registers might be more than one page */
+       map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+                        cap_max_fault_reg_offset(iommu->cap));
+       map_size = VTD_PAGE_ALIGN(map_size);
+       if (map_size > iommu->reg_size) {
+               iounmap(iommu->reg);
+               release_mem_region(iommu->reg_phys, iommu->reg_size);
+               iommu->reg_size = map_size;
+               if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
+                                       iommu->name)) {
+                       pr_err("IOMMU: can't reserve memory\n");
+                       err = -EBUSY;
+                       goto out;
+               }
+               iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+               if (!iommu->reg) {
+                       pr_err("IOMMU: can't map the region\n");
+                       err = -ENOMEM;
+                       goto release;
+               }
+       }
+       err = 0;
+       goto out;
+
+unmap:
+       iounmap(iommu->reg);
+release:
+       release_mem_region(iommu->reg_phys, iommu->reg_size);
+out:
+       return err;
+}
+
 int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
        struct intel_iommu *iommu;
-       int map_size;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;
+       int err;
 
        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
@@ -600,30 +661,22 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
        iommu->seq_id = iommu_allocated++;
        sprintf (iommu->name, "dmar%d", iommu->seq_id);
 
-       iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
-       if (!iommu->reg) {
-               printk(KERN_ERR "IOMMU: can't map the region\n");
+       err = map_iommu(iommu, drhd->reg_base_addr);
+       if (err) {
+               pr_err("IOMMU: failed to map %s\n", iommu->name);
                goto error;
        }
-       iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
-       iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
-       if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
-               warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
-               goto err_unmap;
-       }
 
+       err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
-               printk(KERN_ERR
-                      "Cannot get a valid agaw for iommu (seq_id = %d)\n",
-                      iommu->seq_id);
+               pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
+                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
-               printk(KERN_ERR
-                       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+               pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
                goto err_unmap;
        }
@@ -632,19 +685,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
        iommu->node = -1;
 
-       /* the registers might be more than one page */
-       map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
-               cap_max_fault_reg_offset(iommu->cap));
-       map_size = VTD_PAGE_ALIGN(map_size);
-       if (map_size > VTD_PAGE_SIZE) {
-               iounmap(iommu->reg);
-               iommu->reg = ioremap(drhd->reg_base_addr, map_size);
-               if (!iommu->reg) {
-                       printk(KERN_ERR "IOMMU: can't map the region\n");
-                       goto error;
-               }
-       }
-
        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->seq_id,
@@ -659,10 +699,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
        return 0;
 
  err_unmap:
-       iounmap(iommu->reg);
+       unmap_iommu(iommu);
  error:
        kfree(iommu);
-       return -1;
+       return err;
 }
 
 void free_iommu(struct intel_iommu *iommu)
@@ -673,7 +713,8 @@ void free_iommu(struct intel_iommu *iommu)
        free_dmar_iommu(iommu);
 
        if (iommu->reg)
-               iounmap(iommu->reg);
+               unmap_iommu(iommu);
+
        kfree(iommu);
 }
 
@@ -710,7 +751,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
-                       printk(KERN_ERR "VT-d detected invalid descriptor: "
+                       pr_err("VT-d detected invalid descriptor: "
                                "low=%llx, high=%llx\n",
                                (unsigned long long)qi->desc[index].low,
                                (unsigned long long)qi->desc[index].high);
@@ -1129,15 +1170,14 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
        reason = dmar_get_fault_reason(fault_reason, &fault_type);
 
        if (fault_type == INTR_REMAP)
-               printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
+               pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
                       "fault index %llx\n"
                        "INTR-REMAP:[fault reason %02d] %s\n",
                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                        PCI_FUNC(source_id & 0xFF), addr >> 48,
                        fault_reason, reason);
        else
-               printk(KERN_ERR
-                      "DMAR:[%s] Request device [%02x:%02x.%d] "
+               pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx \n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
@@ -1157,8 +1197,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
-               printk(KERN_ERR "DRHD: handling fault status reg %x\n",
-                      fault_status);
+               pr_err("DRHD: handling fault status reg %x\n", fault_status);
 
        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
@@ -1224,7 +1263,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 
        irq = create_irq();
        if (!irq) {
-               printk(KERN_ERR "IOMMU: no free vectors\n");
+               pr_err("IOMMU: no free vectors\n");
                return -EINVAL;
        }
 
@@ -1241,7 +1280,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 
        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
-               printk(KERN_ERR "IOMMU: can't request irq\n");
+               pr_err("IOMMU: can't request irq\n");
        return ret;
 }
 
@@ -1258,8 +1297,7 @@ int __init enable_drhd_fault_handling(void)
                ret = dmar_set_interrupt(iommu);
 
                if (ret) {
-                       printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
-                              " interrupt, ret %d\n",
+                       pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }
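
Beyond the printk conversions, the dmar.c changes above pull the register mapping into map_iommu(), which reserves and maps a single page first, checks the capability registers, and remaps a larger window only if needed; alloc_iommu() now propagates the real errno instead of returning -1. A stripped-down sketch of the acquire/unwind shape, with invented names:

#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>

struct example_dev {
        void __iomem *regs;
};

/* acquire resources in order, release in reverse on failure,
 * and hand back a real errno rather than -1 */
static int example_map_regs(struct example_dev *d, u64 phys, resource_size_t size)
{
        int err = 0;

        if (!request_mem_region(phys, size, "example")) {
                err = -EBUSY;
                goto out;
        }

        d->regs = ioremap(phys, size);
        if (!d->regs) {
                err = -ENOMEM;
                goto release;
        }
        goto out;

release:
        release_mem_region(phys, size);
out:
        return err;
}
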
index ecd6790..3f3d09d 100644 (file)
@@ -550,13 +550,13 @@ static int alloc_pdir(struct smmu_as *as)
                return 0;
 
        as->pte_count = devm_kzalloc(smmu->dev,
-                    sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
+                    sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
        if (!as->pte_count) {
                dev_err(smmu->dev,
                        "failed to allocate smmu_device PTE cunters\n");
                return -ENOMEM;
        }
-       as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
+       as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
        if (!as->pdir_page) {
                dev_err(smmu->dev,
                        "failed to allocate smmu_device page directory\n");
index 1a0ae44..5f21f62 100644 (file)
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
                        skb = NULL;
                else if (*debug & DEBUG_SEND_ERR)
                        printk(KERN_DEBUG
-                              "%s ch%d mgr prim(%x) addr(%x) err %d\n",
-                              __func__, ch->nr, hh->prim, ch->addr, ret);
+                              "%s mgr prim(%x) err %d\n",
+                              __func__, hh->prim, ret);
        }
 out:
        mutex_unlock(&st->lmutex);
index 41dc76d..a019fbb 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/reboot.h>
 #include "leds.h"
 
+static int panic_heartbeats;
+
 struct heartbeat_trig_data {
        unsigned int phase;
        unsigned int period;
@@ -34,6 +36,11 @@ static void led_heartbeat_function(unsigned long data)
        unsigned long brightness = LED_OFF;
        unsigned long delay = 0;
 
+       if (unlikely(panic_heartbeats)) {
+               led_set_brightness(led_cdev, LED_OFF);
+               return;
+       }
+
        /* acts like an actual heart beat -- ie thump-thump-pause... */
        switch (heartbeat_data->phase) {
        case 0:
@@ -111,12 +118,19 @@ static int heartbeat_reboot_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static int heartbeat_panic_notifier(struct notifier_block *nb,
+                                    unsigned long code, void *unused)
+{
+       panic_heartbeats = 1;
+       return NOTIFY_DONE;
+}
+
 static struct notifier_block heartbeat_reboot_nb = {
        .notifier_call = heartbeat_reboot_notifier,
 };
 
 static struct notifier_block heartbeat_panic_nb = {
-       .notifier_call = heartbeat_reboot_notifier,
+       .notifier_call = heartbeat_panic_notifier,
 };
 
 static int __init heartbeat_trig_init(void)
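
The heartbeat trigger previously reused its reboot callback for the panic notifier; the hunk above gives panic its own callback that only sets a flag, which the timer function checks so the LED is left off once the system has panicked. A sketch of wiring such a callback to the panic notifier chain, with placeholder names:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int example_in_panic;

/* a panic callback should only do what is safe in panic context;
 * here it just records the fact so the timer backs off */
static int example_panic_notifier(struct notifier_block *nb,
                                  unsigned long code, void *unused)
{
        example_in_panic = 1;
        return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
        .notifier_call = example_panic_notifier,
};

static int __init example_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &example_panic_nb);
        return 0;
}
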
index d039de8..b58b7a3 100644 (file)
@@ -1084,6 +1084,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        ti->split_io = dm_rh_get_region_size(ms->rh);
        ti->num_flush_requests = 1;
        ti->num_discard_requests = 1;
+       ti->discard_zeroes_data_unsupported = 1;
 
        ms->kmirrord_wq = alloc_workqueue("kmirrord",
                                          WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
@@ -1214,7 +1215,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE) {
-               if (!(bio->bi_rw & REQ_FLUSH))
+               if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
                        dm_rh_dec(ms->rh, map_context->ll);
                return error;
        }
index 7771ed2..69732e0 100644 (file)
@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
                return;
        }
 
+       if (bio->bi_rw & REQ_DISCARD)
+               return;
+
        /* We must inform the log that the sync count has changed. */
        log->type->set_region_sync(log, region, 0);
 
@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
        struct bio *bio;
 
        for (bio = bios->head; bio; bio = bio->bi_next) {
-               if (bio->bi_rw & REQ_FLUSH)
+               if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
                        continue;
                rh_inc(rh, dm_rh_bio_to_region(rh, bio));
        }
index 37fdaf8..68694da 100644 (file)
@@ -1245,7 +1245,10 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 
                        cell_release_singleton(cell, bio);
                        cell_release_singleton(cell2, bio);
-                       remap_and_issue(tc, bio, lookup_result.block);
+                       if ((!lookup_result.shared) && pool->pf.discard_passdown)
+                               remap_and_issue(tc, bio, lookup_result.block);
+                       else
+                               bio_endio(bio, 0);
                }
                break;
 
@@ -2292,6 +2295,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
        if (r)
                return r;
 
+       r = dm_pool_commit_metadata(pool->pmd);
+       if (r) {
+               DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+                     __func__, r);
+               return r;
+       }
+
        r = dm_pool_reserve_metadata_snap(pool->pmd);
        if (r)
                DMWARN("reserve_metadata_snap message failed.");
@@ -2621,6 +2631,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = 1;
                ti->num_discard_requests = 1;
+               ti->discard_zeroes_data_unsupported = 1;
        }
 
        dm_put(pool_md);
index 1c2f904..d5ab449 100644 (file)
@@ -2931,6 +2931,7 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len)
                 * can be sane */
                return -EBUSY;
        rdev->data_offset = offset;
+       rdev->new_data_offset = offset;
        return len;
 }
 
@@ -3926,8 +3927,8 @@ array_state_show(struct mddev *mddev, char *page)
        return sprintf(page, "%s\n", array_states[st]);
 }
 
-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
-static int md_set_readonly(struct mddev * mddev, int is_open);
+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
 static int do_md_run(struct mddev * mddev);
 static int restart_array(struct mddev *mddev);
 
@@ -3943,14 +3944,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                /* stopping an active array */
                if (atomic_read(&mddev->openers) > 0)
                        return -EBUSY;
-               err = do_md_stop(mddev, 0, 0);
+               err = do_md_stop(mddev, 0, NULL);
                break;
        case inactive:
                /* stopping an active array */
                if (mddev->pers) {
                        if (atomic_read(&mddev->openers) > 0)
                                return -EBUSY;
-                       err = do_md_stop(mddev, 2, 0);
+                       err = do_md_stop(mddev, 2, NULL);
                } else
                        err = 0; /* already inactive */
                break;
@@ -3958,7 +3959,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                break; /* not supported yet */
        case readonly:
                if (mddev->pers)
-                       err = md_set_readonly(mddev, 0);
+                       err = md_set_readonly(mddev, NULL);
                else {
                        mddev->ro = 1;
                        set_disk_ro(mddev->gendisk, 1);
@@ -3968,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        case read_auto:
                if (mddev->pers) {
                        if (mddev->ro == 0)
-                               err = md_set_readonly(mddev, 0);
+                               err = md_set_readonly(mddev, NULL);
                        else if (mddev->ro == 1)
                                err = restart_array(mddev);
                        if (err == 0) {
@@ -5351,15 +5352,17 @@ void md_stop(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_stop);
 
-static int md_set_readonly(struct mddev *mddev, int is_open)
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 {
        int err = 0;
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open) {
+       if (atomic_read(&mddev->openers) > !!bdev) {
                printk("md: %s still in use.\n",mdname(mddev));
                err = -EBUSY;
                goto out;
        }
+       if (bdev)
+               sync_blockdev(bdev);
        if (mddev->pers) {
                __md_stop_writes(mddev);
 
@@ -5381,18 +5384,26 @@ out:
  *   0 - completely stop and dis-assemble array
  *   2 - stop but do not disassemble array
  */
-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
+static int do_md_stop(struct mddev * mddev, int mode,
+                     struct block_device *bdev)
 {
        struct gendisk *disk = mddev->gendisk;
        struct md_rdev *rdev;
 
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open ||
+       if (atomic_read(&mddev->openers) > !!bdev ||
            mddev->sysfs_active) {
                printk("md: %s still in use.\n",mdname(mddev));
                mutex_unlock(&mddev->open_mutex);
                return -EBUSY;
        }
+       if (bdev)
+               /* It is possible IO was issued on some other
+                * open file which was closed before we took ->open_mutex.
+                * As that was not the last close __blkdev_put will not
+                * have called sync_blockdev, so we must.
+                */
+               sync_blockdev(bdev);
 
        if (mddev->pers) {
                if (mddev->ro)
@@ -5466,7 +5477,7 @@ static void autorun_array(struct mddev *mddev)
        err = do_md_run(mddev);
        if (err) {
                printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-               do_md_stop(mddev, 0, 0);
+               do_md_stop(mddev, 0, NULL);
        }
 }
 
@@ -5784,8 +5795,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
                        super_types[mddev->major_version].
                                validate_super(mddev, rdev);
                if ((info->state & (1<<MD_DISK_SYNC)) &&
-                   (!test_bit(In_sync, &rdev->flags) ||
-                    rdev->raid_disk != info->raid_disk)) {
+                    rdev->raid_disk != info->raid_disk) {
                        /* This was a hot-add request, but events doesn't
                         * match, so reject it.
                         */
@@ -6482,11 +6492,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        goto done_unlock;
 
                case STOP_ARRAY:
-                       err = do_md_stop(mddev, 0, 1);
+                       err = do_md_stop(mddev, 0, bdev);
                        goto done_unlock;
 
                case STOP_ARRAY_RO:
-                       err = md_set_readonly(mddev, 1);
+                       err = md_set_readonly(mddev, bdev);
                        goto done_unlock;
 
                case BLKROSET:
@@ -6751,7 +6761,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
        thread->tsk = kthread_run(md_thread, thread,
                                  "%s_%s",
                                  mdname(thread->mddev),
-                                 name ?: mddev->pers->name);
+                                 name);
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
@@ -7298,6 +7308,7 @@ void md_do_sync(struct mddev *mddev)
        int skipped = 0;
        struct md_rdev *rdev;
        char *desc;
+       struct blk_plug plug;
 
        /* just incase thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7458,7 @@ void md_do_sync(struct mddev *mddev)
        }
        mddev->curr_resync_completed = j;
 
+       blk_start_plug(&plug);
        while (j < max_sectors) {
                sector_t sectors;
 
@@ -7552,6 +7564,7 @@ void md_do_sync(struct mddev *mddev)
         * this also signals 'finished resyncing' to md_stop
         */
  out:
+       blk_finish_plug(&plug);
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
        /* tell personality that we are finished */
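
Two recurring themes in the md.c changes above: do_md_stop()/md_set_readonly() now receive the opening block_device so they can sync_blockdev() writes issued through other file descriptors before stopping the array, and md_do_sync() wraps its submission loop in a blk_plug so resync I/O is batched before reaching the queue. A sketch of the plugging idiom, with illustrative names:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* issue a batch of bios under one plug so the block layer
 * can merge and sort them before they hit the device queue */
static void example_submit_batch(struct bio **bios, int n)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < n; i++)
                generic_make_request(bios[i]);
        blk_finish_plug(&plug);
}
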
index 9339e67..61a1833 100644 (file)
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
        }
 
        {
-               mddev->thread = md_register_thread(multipathd, mddev, NULL);
+               mddev->thread = md_register_thread(multipathd, mddev,
+                                                  "multipath");
                if (!mddev->thread) {
                        printk(KERN_ERR "multipath: couldn't allocate thread"
                                " for %s\n", mdname(mddev));
index 50ed53b..fc90c11 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/device-mapper.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
 
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
 
        ca->nr = nr_blocks;
        ca->nr_free = nr_blocks;
-       ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
-       if (!ca->counts)
-               return -ENOMEM;
+
+       if (!nr_blocks)
+               ca->counts = NULL;
+       else {
+               ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+               if (!ca->counts)
+                       return -ENOMEM;
+       }
 
        return 0;
 }
 
+static void ca_destroy(struct count_array *ca)
+{
+       vfree(ca->counts);
+}
+
 static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 {
        int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
 {
        dm_block_t nr_blocks = ca->nr + extra_blocks;
-       uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+       uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
        if (!counts)
                return -ENOMEM;
 
-       memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
-       kfree(ca->counts);
+       if (ca->counts) {
+               memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+               ca_destroy(ca);
+       }
        ca->nr = nr_blocks;
        ca->nr_free += extra_blocks;
        ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
        return 0;
 }
 
-static void ca_destroy(struct count_array *ca)
-{
-       kfree(ca->counts);
-}
-
 /*----------------------------------------------------------------*/
 
 struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
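
The space-map checker constructors above switch from returning NULL on any failure to returning ERR_PTR()-encoded errnos, so the transaction-manager code can propagate the specific error. A self-contained sketch of the convention, with made-up names:

#include <linux/err.h>
#include <linux/slab.h>

struct example_obj {
        int placeholder;
};

/* encode the errno in the returned pointer so callers can
 * distinguish -ENOMEM from -EINVAL */
static struct example_obj *example_create(void *inner)
{
        struct example_obj *obj;

        if (IS_ERR_OR_NULL(inner))
                return ERR_PTR(-EINVAL);

        obj = kmalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        return obj;
}

/* caller side */
static int example_use(void *inner)
{
        struct example_obj *obj = example_create(inner);

        if (IS_ERR(obj))
                return PTR_ERR(obj);
        kfree(obj);
        return 0;
}
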
index fc469ba..3d0ed53 100644 (file)
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
                                       dm_block_t nr_blocks)
 {
        struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
-       return dm_sm_checker_create_fresh(sm);
+       struct dm_space_map *smc;
+
+       if (IS_ERR_OR_NULL(sm))
+               return sm;
+
+       smc = dm_sm_checker_create_fresh(sm);
+       if (IS_ERR(smc))
+               dm_sm_destroy(sm);
+
+       return smc;
 }
 EXPORT_SYMBOL_GPL(dm_sm_disk_create);
 
index 400fe14..e5604b3 100644 (file)
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 
 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+       if (!tm->is_clone)
+               wipe_shadow_table(tm);
+
        kfree(tm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
 
        } else {
                r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
        }
 
        return 0;
index a9c7981..cacd008 100644 (file)
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                int bad_sectors;
 
                int disk = start_disk + i;
-               if (disk >= conf->raid_disks)
-                       disk -= conf->raid_disks;
+               if (disk >= conf->raid_disks * 2)
+                       disk -= conf->raid_disks * 2;
 
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        struct md_rdev *blocked_rdev;
-       int plugged;
        int first_clone;
        int sectors_handled;
        int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
         * the bad blocks.  Each set of writes gets it's own r1bio
         * with a set of bios attached.
         */
-       plugged = mddev_check_plugged(mddev);
 
        disks = conf->raid_disks * 2;
  retry_write:
@@ -1191,6 +1189,8 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
 
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -1821,8 +1818,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 
        if (atomic_dec_and_test(&r1_bio->remaining)) {
                /* if we're here, all write(s) have completed, so clean up */
-               md_done_sync(mddev, r1_bio->sectors, 1);
-               put_buf(r1_bio);
+               int s = r1_bio->sectors;
+               if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+                   test_bit(R1BIO_WriteError, &r1_bio->state))
+                       reschedule_retry(r1_bio);
+               else {
+                       put_buf(r1_bio);
+                       md_done_sync(mddev, s, 1);
+               }
        }
 }
 
@@ -2488,9 +2491,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
         */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                atomic_set(&r1_bio->remaining, read_targets);
-               for (i = 0; i < conf->raid_disks * 2; i++) {
+               for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
                        bio = r1_bio->bios[i];
                        if (bio->bi_end_io == end_sync_read) {
+                               read_targets--;
                                md_sync_acct(bio->bi_bdev, nr_sectors);
                                generic_make_request(bio);
                        }
@@ -2621,7 +2625,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                goto abort;
        }
        err = -ENOMEM;
-       conf->thread = md_register_thread(raid1d, mddev, NULL);
+       conf->thread = md_register_thread(raid1d, mddev, "raid1");
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid1:%s: couldn't allocate thread\n",
index 99ae606..8da6282 100644 (file)
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
-       int plugged;
        int sectors_handled;
        int max_sectors;
        int sectors;
@@ -1239,7 +1238,6 @@ read_again:
         * of r10_bios is recored in bio->bi_phys_segments just as with
         * the read case.
         */
-       plugged = mddev_check_plugged(mddev);
 
        r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
        raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
 
                if (!r10_bio->devs[i].repl_bio)
                        continue;
@@ -1423,6 +1423,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
 
        /* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !mddev->bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        if (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage, WRITE)
+                                            s, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        switch (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage,
+                                            s, conf->tmppage,
                                                 READ)) {
                        case 0:
                                /* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
        slot = r10_bio->read_slot;
        printk_ratelimited(
                KERN_ERR
-               "md/raid10:%s: %s: redirecting"
+               "md/raid10:%s: %s: redirecting "
                "sector %llu to another mirror\n",
                mdname(mddev),
                bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
        blk_start_plug(&plug);
        for (;;) {
 
-               flush_pending_writes(conf);
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        /* want to reconstruct this device */
                        rb2 = r10_bio;
                        sect = raid10_find_virt(conf, sector_nr, i);
+                       if (sect >= mddev->resync_max_sectors) {
+                               /* last stripe is not complete - don't
+                                * try to recover this sector.
+                                */
+                               continue;
+                       }
                        /* Unless we are doing a full sync, or a replacement
                         * we only need to recover the block if it is set in
                         * the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        spin_lock_init(&conf->resync_lock);
        init_waitqueue_head(&conf->wait_barrier);
 
-       conf->thread = md_register_thread(raid10d, mddev, NULL);
+       conf->thread = md_register_thread(raid10d, mddev, "raid10");
        if (!conf->thread)
                goto out;
 
index d267672..04348d7 100644 (file)
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state))
+                       if (test_bit(STRIPE_DELAYED, &sh->state) &&
+                           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                   sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
+                               clear_bit(STRIPE_DELAYED, &sh->state);
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                         * a chance*/
                                        md_check_recovery(conf->mddev);
                                }
+                               /*
+                                * Because md_wait_for_blocked_rdev
+                                * will dec nr_pending, we must
+                                * increment it first.
+                                */
+                               atomic_inc(&rdev->nr_pending);
                                md_wait_for_blocked_rdev(rdev, conf->mddev);
                        } else {
                                /* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
        } else {
                const char *bdn = bdevname(rdev->bdev, b);
                int retry = 0;
+               int set_bad = 0;
 
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
                atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (conf->mddev->degraded >= conf->max_degraded)
+               else if (conf->mddev->degraded >= conf->max_degraded) {
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+               } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
                        /* Oh, no!!! */
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (atomic_read(&rdev->read_errors)
+               else if (atomic_read(&rdev->read_errors)
                         > conf->max_nr_stripes)
                        printk(KERN_WARNING
                               "md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
                else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
-                       md_error(conf->mddev, rdev);
+                       if (!(set_bad
+                             && test_bit(In_sync, &rdev->flags)
+                             && rdev_set_badblocks(
+                                     rdev, sh->sector, STRIPE_SECTORS, 0)))
+                               md_error(conf->mddev, rdev);
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
        /* wait for this device to become unblocked */
-       if (conf->mddev->external && unlikely(s.blocked_rdev))
-               md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+       if (unlikely(s.blocked_rdev)) {
+               if (conf->mddev->external)
+                       md_wait_for_blocked_rdev(s.blocked_rdev,
+                                                conf->mddev);
+               else
+                       /* Internal metadata will immediately
+                        * be written by raid5d, so we don't
+                        * need to wait here.
+                        */
+                       rdev_dec_pending(s.blocked_rdev,
+                                        conf->mddev);
+       }
 
        if (s.handle_bad_blocks)
                for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                raid_bio->bi_next = (void*)rdev;
                align_bi->bi_bdev =  rdev->bdev;
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-               /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
 
                if (!bio_fits_rdev(align_bi) ||
                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                        return 0;
                }
 
+               /* No reshape active, so we can trust rdev->data_offset */
+               align_bi->bi_sector += rdev->data_offset;
+
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
-       int plugged;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
-       plugged = mddev_check_plugged(mddev);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        if ((bi->bi_rw & REQ_SYNC) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
+                       mddev_check_plugged(mddev);
                        release_stripe(sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        break;
                }
-                       
        }
-       if (!plugged)
-               md_wakeup_thread(mddev->thread);
 
        spin_lock_irq(&conf->device_lock);
        remaining = raid5_dec_bi_phys_segments(bi);
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        int raid_disk, memory, max_disks;
        struct md_rdev *rdev;
        struct disk_info *disk;
+       char pers_name[6];
 
        if (mddev->new_level != 5
            && mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
                       mdname(mddev), memory);
 
-       conf->thread = md_register_thread(raid5d, mddev, NULL);
+       sprintf(pers_name, "raid%d", mddev->new_level);
+       conf->thread = md_register_thread(raid5d, mddev, pers_name);
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (rdev->saved_raid_disk >= 0 &&
            rdev->saved_raid_disk >= first &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
-               disk = rdev->saved_raid_disk;
-       else
-               disk = first;
-       for ( ; disk <= last ; disk++) {
+               first = rdev->saved_raid_disk;
+
+       for (disk = first; disk <= last; disk++) {
                p = conf->disks + disk;
                if (p->rdev == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
-                       break;
+                       goto out;
                }
+       }
+       for (disk = first; disk <= last; disk++) {
+               p = conf->disks + disk;
                if (test_bit(WantReplacement, &p->rdev->flags) &&
                    p->replacement == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
+out:
        print_raid5_conf(conf);
        return err;
 }
index 7d42c11..0cdbd74 100644
@@ -198,7 +198,6 @@ static int fops_open(struct file *file)
        struct saa7146_dev *dev = video_drvdata(file);
        struct saa7146_fh *fh = NULL;
        int result = 0;
-       enum v4l2_buf_type type;
 
        DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
 
@@ -207,10 +206,6 @@ static int fops_open(struct file *file)
 
        DEB_D("using: %p\n", dev);
 
-       type = vdev->vfl_type == VFL_TYPE_GRABBER
-            ? V4L2_BUF_TYPE_VIDEO_CAPTURE
-            : V4L2_BUF_TYPE_VBI_CAPTURE;
-
        /* check if an extension is registered */
        if( NULL == dev->ext ) {
                DEB_S("no extension registered for this device\n");
index 00a6732..39eab73 100644
@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
        if (minor == MAX_DVB_MINORS) {
                kfree(dvbdevfops);
                kfree(dvbdev);
+               up_write(&minor_rwsem);
                mutex_unlock(&dvbdev_register_lock);
                return -EINVAL;
        }
index 98ecaf0..3180f5b 100644
@@ -516,9 +516,9 @@ static int cx24110_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
        if(cx24110_readreg(state,0x10)&0x40) {
                /* the RS error counter has finished one counting window */
                cx24110_writereg(state,0x10,0x60); /* select the byer reg */
-               cx24110_readreg(state, 0x12) |
+               (void)(cx24110_readreg(state, 0x12) |
                        (cx24110_readreg(state, 0x13) << 8) |
-                       (cx24110_readreg(state, 0x14) << 16);
+                       (cx24110_readreg(state, 0x14) << 16));
                cx24110_writereg(state,0x10,0x70); /* select the bler reg */
                state->lastbler=cx24110_readreg(state,0x12)|
                        (cx24110_readreg(state,0x13)<<8)|
index 9454049..ed3b0ba 100644
@@ -121,7 +121,7 @@ int cxd2820r_get_frontend_c(struct dvb_frontend *fe)
        if (ret)
                goto error;
 
-       switch ((buf[0] >> 0) & 0x03) {
+       switch ((buf[0] >> 0) & 0x07) {
        case 0:
                c->modulation = QAM_16;
                break;
index a3ab1a5..cc11260 100644
@@ -126,7 +126,7 @@ static int lg216x_write_regs(struct lg216x_state *state,
 
        lg_reg("writing %d registers...\n", len);
 
-       for (i = 0; i < len - 1; i++) {
+       for (i = 0; i < len; i++) {
                ret = lg216x_write_reg(state, regs[i].reg, regs[i].val);
                if (lg_fail(ret))
                        return ret;
index 63c004a..664e460 100644
@@ -544,6 +544,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { USB_DEVICE(0x2040, 0xc0a0),
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+       { USB_DEVICE(0x2040, 0xf5a0),
+               .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { } /* Terminating entry */
        };
 
index 740a3d5..b415211 100644
@@ -157,7 +157,7 @@ static int __devinit maxiradio_probe(struct pci_dev *pdev, const struct pci_devi
                goto err_out_free_region;
 
        dev->io = pci_resource_start(pdev, 0);
-       if (snd_tea575x_init(&dev->tea)) {
+       if (snd_tea575x_init(&dev->tea, THIS_MODULE)) {
                printk(KERN_ERR "radio-maxiradio: Unable to detect TEA575x tuner\n");
                goto err_out_free_region;
        }
index 52b8011..4efcbec 100644
@@ -238,7 +238,7 @@ static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
        snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "%s:%s",
                        fmr2->is_fmd2 ? "PnP" : "ISA", dev_name(pdev));
 
-       if (snd_tea575x_init(&fmr2->tea)) {
+       if (snd_tea575x_init(&fmr2->tea, THIS_MODULE)) {
                printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
                release_region(fmr2->io, 2);
                return -ENODEV;
index e9f6387..f412f7a 100644
@@ -51,6 +51,8 @@ static struct usb_device_id si470x_usb_driver_id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) },
        /* Sanei Electric, Inc. FM USB Radio (sold as DealExtreme.com PCear) */
        { USB_DEVICE_AND_INTERFACE_INFO(0x10c5, 0x819a, USB_CLASS_HID, 0, 0) },
+       /* Axentia ALERT FM USB Receiver */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x12cf, 0x7111, USB_CLASS_HID, 0, 0) },
        /* Terminating entry */
        { }
 };
index 342c2c8..54ee348 100644
@@ -232,7 +232,7 @@ MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
 
 static bool txandrx; /* default = 0 */
 module_param(txandrx, bool, 0444);
-MODULE_PARM_DESC(invert, "Allow simultaneous TX and RX");
+MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
 
 static unsigned int wake_sc = 0x800F040C;
 module_param(wake_sc, uint, 0644);
@@ -1032,6 +1032,8 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
        data->dev->tx_ir = wbcir_tx;
        data->dev->priv = data;
        data->dev->dev.parent = &device->dev;
+       data->dev->timeout = MS_TO_NS(100);
+       data->dev->allowed_protos = RC_TYPE_ALL;
 
        if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
                dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
index ff2933a..856ab96 100644
@@ -371,7 +371,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -384,7 +383,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 3 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -398,7 +396,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomux        = { 4, 0, 2, 3 },
                .gpiomute       = 1,
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_NTSC,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -414,7 +411,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -427,7 +423,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0, 1, 0, 1 },
                .gpiomute       = 3,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -440,7 +435,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x0f,
                .gpiomux        = { 0x0c, 0x04, 0x08, 0x04 },
                /*                0x04 for some cards ?? */
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
                .audio_mode_gpio= avermedia_tvphone_audio,
@@ -454,7 +448,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -469,7 +462,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0xc00, 0x800, 0x400 },
                .gpiomute       = 0xc00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -482,7 +474,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 3,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 1, 1, 2, 3 },
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TEMIC_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -496,7 +487,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 0, 1, 1),
                .gpiomux        = { 0, 1, 2, 3 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -510,7 +500,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20001,0x10001, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -524,7 +513,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 15,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 13, 14, 11, 7 },
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -536,7 +524,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 15,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 13, 14, 11, 7 },
-               .needs_tvaudio  = 1,
                .msp34xx_alt    = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -553,7 +540,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 2, 1, 3 }, /* old: {0, 1, 2, 3, 4} */
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -567,7 +553,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 1, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -583,7 +568,6 @@ struct tvcard bttv_tvcards[] = {
                /* 2003-10-20 by "Anton A. Arapov" <arapov@mail.ru> */
                .gpiomux        = { 0x001e00, 0, 0x018000, 0x014000 },
                .gpiomute       = 0x002000,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -597,7 +581,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1, 0),
                .gpiomux        = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 },
                .gpiomute       = 0xcfa007,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
                .volume_gpio    = winview_volume,
@@ -611,7 +594,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 1, 0, 0, 0 },
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -660,7 +642,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 0x800, 0x400 },
                .gpiomute       = 0xc00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -691,7 +672,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = {0x400, 0x400, 0x400, 0x400 },
                .gpiomute       = 0xc00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -706,7 +686,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20000, 0x30000, 0x10000, 0 },
                .gpiomute       = 0x40000,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .audio_mode_gpio= terratv_audio,
@@ -720,7 +699,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 0, 1, 1),
                .gpiomux        = { 0, 1, 2, 3 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -748,7 +726,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20000, 0x30000, 0x10000, 0x00000 },
                .gpiomute       = 0x40000,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .audio_mode_gpio= terratv_audio,
@@ -793,7 +770,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
                .muxsel_hook    = PXC200_muxsel,
@@ -834,7 +810,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -847,7 +822,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x500, 0, 0x300, 0x900 },
                .gpiomute       = 0x900,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -874,7 +848,6 @@ struct tvcard bttv_tvcards[] = {
                Note: There exists another variant "Winfast 2000" with tv stereo !?
                Note: eeprom only contains FF and pci subsystem id 107d:6606
                */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .has_radio      = 1,
                .tuner_type     = TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */
@@ -934,7 +907,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0x551400, 0x551200, 0, 0 },
                .gpiomute       = 0x551c00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
                .tuner_addr     = ADDR_UNSET,
@@ -949,7 +921,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0xd0001, 0, 0 },
                .gpiomute       = 1,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -966,7 +937,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomux        = { 4, 0, 2, 3 },
                .gpiomute       = 1,
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_NTSC,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -980,7 +950,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 15,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 13, 4, 11, 7 },
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -995,7 +964,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 0, 0},
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
@@ -1066,7 +1034,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20000, 0x30000, 0x10000, 0 },
                .gpiomute       = 0x40000,
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_35,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
@@ -1084,7 +1051,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = {2,0,0,0 },
                .gpiomute       = 1,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -1163,7 +1129,6 @@ struct tvcard bttv_tvcards[] = {
                                MUX2 (mask 0x30000):
                                        0,2,3= from MSP34xx
                                        1= FM stereo Radio from Tuner */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -1179,7 +1144,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 0x10, 8 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1218,7 +1182,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TEMIC_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1250,7 +1213,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(3, 1),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_35,
                .tuner_type     = TUNER_ABSENT,
@@ -1266,7 +1228,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x400, 0x400, 0x400, 0x400 },
                .gpiomute       = 0x800,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TEMIC_4036FY5_NTSC,
                .tuner_addr     = ADDR_UNSET,
@@ -1312,7 +1273,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2),
                .gpiomux        = { },
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -1329,7 +1289,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 1, 0, 4, 4 },
                .gpiomute       = 9,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1379,7 +1338,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomute       = 0x1800,
                .audio_mode_gpio= fv2000s_audio,
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1393,7 +1351,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x500, 0x500, 0x300, 0x900 },
                .gpiomute       = 0x900,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1477,7 +1434,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 11, 7 }, /* TV and Radio with same GPIO ! */
                .gpiomute       = 13,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_LG_PAL_I_FM,
                .tuner_addr     = ADDR_UNSET,
@@ -1514,7 +1470,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x01, 0x00, 0x03, 0x03 },
                .gpiomute       = 0x09,
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -1540,7 +1495,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -1567,7 +1521,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -1597,7 +1550,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -1619,7 +1571,6 @@ struct tvcard bttv_tvcards[] = {
                                                * btwincap uses 0x80000/0x80003
                                                */
                .gpiomute       = 4,
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -1655,7 +1606,6 @@ struct tvcard bttv_tvcards[] = {
                /* .audio_inputs= 1, */
                .svhs           = 2,
                .muxsel         = MUXSEL(2, 0, 1, 1),
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -1875,7 +1825,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 3},
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -1902,7 +1851,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -1920,7 +1868,6 @@ struct tvcard bttv_tvcards[] = {
                /*                  Tuner, Radio, external, internal, off,  on */
                .gpiomux        = { 0x08,  0x0f,  0x0a,     0x08 },
                .gpiomute       = 0x0f,
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_NTSC,
@@ -1936,7 +1883,6 @@ struct tvcard bttv_tvcards[] = {
                .svhs           = 2,
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1, 1),
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -2034,7 +1980,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -2049,7 +1994,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2062,7 +2006,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2079,7 +2022,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 0),
                .muxsel_hook    = phytec_muxsel,
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2094,7 +2036,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 1),
                .muxsel_hook    = phytec_muxsel,
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2118,7 +2059,6 @@ struct tvcard bttv_tvcards[] = {
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
                .svhs           = NO_SVHS,   /* card has no svhs */
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .no_tda7432     = 1,
                .gpiomask       = 0x00,
@@ -2168,7 +2108,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 3,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 1, 1, 1, 1 },
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_35,
@@ -2210,7 +2149,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .no_msp34xx     = 1,
                .no_tda7432     = 1,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -2222,7 +2160,6 @@ struct tvcard bttv_tvcards[] = {
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .svhs           = 2,
-               .needs_tvaudio  = 0,
                .gpiomask       = 0x68,
                .muxsel         = MUXSEL(2, 3, 1),
                .gpiomux        = { 0x68, 0x68, 0x61, 0x61 },
@@ -2241,7 +2178,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 },
                .gpiomute       = 3,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -2265,7 +2201,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2, 2, 2),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
                .pll            = PLL_28,
-               .needs_tvaudio  = 0,
                .muxsel_hook    = picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2358,7 +2293,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -2405,7 +2339,6 @@ struct tvcard bttv_tvcards[] = {
                .tuner_addr     = ADDR_UNSET,
                .gpiomask       = 0x008007,
                .gpiomux        = { 0, 0x000001,0,0 },
-               .needs_tvaudio  = 1,
                .has_radio      = 1,
        },
        [BTTV_BOARD_TIBET_CS16] = {
@@ -2518,7 +2451,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x001e00, 0, 0x018000, 0x014000 },
                .gpiomute       = 0x002000,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_YMEC_TVF66T5_B_DFF,
                .tuner_addr     = 0xc1 >>1,
@@ -2534,7 +2466,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 },
                .gpiomute       = 3,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TENA_9533_DI,
                .tuner_addr     = ADDR_UNSET,
@@ -2615,7 +2546,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 1,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_NTSC,
                .tuner_addr     = ADDR_UNSET,
@@ -2714,7 +2644,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20001,0x10001, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
                .tuner_addr     = ADDR_UNSET,
@@ -2746,7 +2675,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 }, /* CONTVFMi */
                .gpiomute       = 3, /* CONTVFMi */
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MK3 */
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -2785,7 +2713,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(0, 2, 3, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2799,7 +2726,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2813,7 +2739,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(3, 2, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2877,7 +2802,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -3649,7 +3573,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
                struct tuner_setup tun_setup;
 
                /* Load tuner module before issuing tuner config call! */
-               if (bttv_tvcards[btv->c.type].has_radio)
+               if (btv->has_radio)
                        v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
                                &btv->c.i2c_adap, "tuner",
                                0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
@@ -3664,7 +3588,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
                tun_setup.type = btv->tuner_type;
                tun_setup.addr = addr;
 
-               if (bttv_tvcards[btv->c.type].has_radio)
+               if (btv->has_radio)
                        tun_setup.mode_mask |= T_RADIO;
 
                bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
@@ -3724,6 +3648,10 @@ static void __devinit hauppauge_eeprom(struct bttv *btv)
                        bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
                btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
        }
+
+       /* The 61334 needs the msp3410 to do the radio demod to get sound */
+       if (tv.model == 61334)
+               btv->radio_uses_msp_demodulator = 1;
 }
 
 static int terratec_active_radio_upgrade(struct bttv *btv)
index a9cfb0f..ff7a589 100644
@@ -1218,6 +1218,11 @@ audio_mux(struct bttv *btv, int input, int mute)
                   For now this is sufficient. */
                switch (input) {
                case TVAUDIO_INPUT_RADIO:
+                       /* Some boards need the msp to do the radio demod */
+                       if (btv->radio_uses_msp_demodulator) {
+                               in = MSP_INPUT_DEFAULT;
+                               break;
+                       }
                        in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
                                    MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
                        break;
index c517161..acfe2f3 100644
@@ -236,7 +236,6 @@ struct tvcard {
        /* i2c audio flags */
        unsigned int no_msp34xx:1;
        unsigned int no_tda7432:1;
-       unsigned int needs_tvaudio:1;
        unsigned int msp34xx_alt:1;
        /* Note: currently no card definition needs to mark the presence
           of a RDS saa6588 chip. If this is ever needed, then add a new
index db943a8..70fd4f2 100644
@@ -440,6 +440,7 @@ struct bttv {
        /* radio data/state */
        int has_radio;
        int radio_user;
+       int radio_uses_msp_demodulator;
 
        /* miro/pinnacle + Aimslab VHX
           philips matchbox (tea5757 radio tuner) support */
index 2520219..5b75a64 100644
@@ -607,8 +607,9 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
                                }
                                o = i * pixels_per_line + pixels_read + k;
                                if (o < len) {
+                                       u8 ch = invert - buffer[k];
                                        got++;
-                                       put_user((invert - buffer[k]) << shift, buf + o);
+                                       put_user(ch << shift, buf + o);
                                }
                        }
                        pixels_read += bytes;
@@ -648,8 +649,8 @@ static int qcam_querycap(struct file *file, void  *priv,
        struct qcam *qcam = video_drvdata(file);
 
        strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
-       strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
-       strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+       strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
+       strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
        vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
        vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
        return 0;
@@ -688,8 +689,8 @@ static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
        pix->height = qcam->height / qcam->transfer_scale;
        pix->pixelformat = (qcam->bpp == 4) ? V4L2_PIX_FMT_Y4 : V4L2_PIX_FMT_Y6;
        pix->field = V4L2_FIELD_NONE;
-       pix->bytesperline = qcam->width;
-       pix->sizeimage = qcam->width * qcam->height;
+       pix->bytesperline = pix->width;
+       pix->sizeimage = pix->width * pix->height;
        /* Just a guess */
        pix->colorspace = V4L2_COLORSPACE_SRGB;
        return 0;
@@ -757,7 +758,7 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
                  "4-Bit Monochrome", V4L2_PIX_FMT_Y4,
                  { 0, 0, 0, 0 }
                },
-               { 0, 0, 0,
+               { 1, 0, 0,
                  "6-Bit Monochrome", V4L2_PIX_FMT_Y6,
                  { 0, 0, 0, 0 }
                },
@@ -772,6 +773,25 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
        return 0;
 }
 
+static int qcam_enum_framesizes(struct file *file, void *fh,
+                                        struct v4l2_frmsizeenum *fsize)
+{
+       static const struct v4l2_frmsize_discrete sizes[] = {
+               {  80,  60 },
+               { 160, 120 },
+               { 320, 240 },
+       };
+
+       if (fsize->index > 2)
+               return -EINVAL;
+       if (fsize->pixel_format != V4L2_PIX_FMT_Y4 &&
+           fsize->pixel_format != V4L2_PIX_FMT_Y6)
+               return -EINVAL;
+       fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+       fsize->discrete = sizes[fsize->index];
+       return 0;
+}
+
 static ssize_t qcam_read(struct file *file, char __user *buf,
                size_t count, loff_t *ppos)
 {
@@ -795,6 +815,11 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
        return len;
 }
 
+static unsigned int qcam_poll(struct file *filp, poll_table *wait)
+{
+       return v4l2_ctrl_poll(filp, wait) | POLLIN | POLLRDNORM;
+}
+
 static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct qcam *qcam =
@@ -828,7 +853,7 @@ static const struct v4l2_file_operations qcam_fops = {
        .owner          = THIS_MODULE,
        .open           = v4l2_fh_open,
        .release        = v4l2_fh_release,
-       .poll           = v4l2_ctrl_poll,
+       .poll           = qcam_poll,
        .unlocked_ioctl = video_ioctl2,
        .read           = qcam_read,
 };
@@ -839,6 +864,7 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
        .vidioc_s_input                     = qcam_s_input,
        .vidioc_enum_input                  = qcam_enum_input,
        .vidioc_enum_fmt_vid_cap            = qcam_enum_fmt_vid_cap,
+       .vidioc_enum_framesizes             = qcam_enum_framesizes,
        .vidioc_g_fmt_vid_cap               = qcam_g_fmt_vid_cap,
        .vidioc_s_fmt_vid_cap               = qcam_s_fmt_vid_cap,
        .vidioc_try_fmt_vid_cap             = qcam_try_fmt_vid_cap,
@@ -864,9 +890,9 @@ static struct qcam *qcam_init(struct parport *port)
                return NULL;
 
        v4l2_dev = &qcam->v4l2_dev;
-       strlcpy(v4l2_dev->name, "bw-qcam", sizeof(v4l2_dev->name));
+       snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "bw-qcam%d", num_cams);
 
-       if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+       if (v4l2_device_register(port->dev, v4l2_dev) < 0) {
                v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
                kfree(qcam);
                return NULL;
@@ -886,7 +912,7 @@ static struct qcam *qcam_init(struct parport *port)
                return NULL;
        }
        qcam->pport = port;
-       qcam->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
+       qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
                        NULL, 0, NULL);
        if (qcam->pdev == NULL) {
                v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
@@ -975,6 +1001,7 @@ static int init_bwqcam(struct parport *port)
                return -ENODEV;
        }
        qc_calibrate(qcam);
+       v4l2_ctrl_handler_setup(&qcam->hdl);
 
        parport_release(qcam->pdev);
 
index b55d57c..7e5ffd6 100644
@@ -838,10 +838,10 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
        }
 
        CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
-                  "irq: %d, latency: %d, memory: 0x%lx\n",
+                  "irq: %d, latency: %d, memory: 0x%llx\n",
                   cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
                   PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
-                  cx->pci_dev->irq, pci_latency, (unsigned long)cx->base_addr);
+                  cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
 
        return 0;
 }
@@ -938,7 +938,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
        if (retval)
                goto err;
 
-       CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
+       CX18_DEBUG_INFO("base addr: 0x%llx\n", (u64)cx->base_addr);
 
        /* PCI Device Setup */
        retval = cx18_setup_pci(cx, pci_dev, pci_id);
@@ -946,8 +946,8 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
                goto free_workqueues;
 
        /* map io memory */
-       CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                  cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
+       CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                  (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
        cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
                                       CX18_MEM_SIZE);
        if (!cx->enc_mem) {
index 7a37e0e..2767c64 100644
@@ -622,7 +622,7 @@ struct cx18 {
                                   unique ID. Starts at 1, so 0 can be used as
                                   uninitialized value in the stream->id. */
 
-       u32 base_addr;
+       resource_size_t base_addr;
 
        u8 card_rev;
        void __iomem *enc_mem, *reg_mem;
index 1b3fb50..b85c292 100644
@@ -164,8 +164,13 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
 
        apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
        while (offset + sizeof(seghdr) < fw->size) {
-               /* TODO: byteswapping */
-               memcpy(&seghdr, src + offset / 4, sizeof(seghdr));
+               const u32 *shptr = src + offset / 4;
+
+               seghdr.sync1 = le32_to_cpu(shptr[0]);
+               seghdr.sync2 = le32_to_cpu(shptr[1]);
+               seghdr.addr = le32_to_cpu(shptr[2]);
+               seghdr.size = le32_to_cpu(shptr[3]);
+
                offset += sizeof(seghdr);
                if (seghdr.sync1 != APU_ROM_SYNC1 ||
                    seghdr.sync2 != APU_ROM_SYNC2) {
index ed81183..eabf00c 100644
@@ -434,6 +434,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
        u32 handle, mdl_ack_offset, mdl_ack_count;
        struct cx18_mailbox *mb;
+       int i;
 
        mb = &order->mb;
        handle = mb->args[0];
@@ -447,8 +448,9 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
                return -1;
        }
 
-       cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
-                          sizeof(struct cx18_mdl_ack) * mdl_ack_count);
+       for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
+               ((u32 *)order->mdl_ack)[i / sizeof(u32)] =
+                       cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);
 
        if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
                mb_ack_irq(cx, order);
@@ -538,6 +540,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
        struct cx18_mailbox *order_mb;
        struct cx18_in_work_order *order;
        int submit;
+       int i;
 
        switch (rpu) {
        case CPU:
@@ -562,10 +565,12 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
        order_mb = &order->mb;
 
        /* mb->cmd and mb->args[0] through mb->args[2] */
-       cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
+       for (i = 0; i < 4; i++)
+               (&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);
+
        /* mb->request and mb->ack.  N.B. we want to read mb->ack last */
-       cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
-                          2 * sizeof(u32));
+       for (i = 0; i < 2; i++)
+               (&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
 
        if (order_mb->request == order_mb->ack) {
                CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
index 068f78d..b4c99c7 100644
@@ -307,7 +307,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
                urb->context = dev;
                urb->pipe = usb_rcvisocpipe(dev->udev,
                                                dev->adev.end_point_addr);
-               urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = URB_ISO_ASAP;
                urb->transfer_buffer = dev->adev.transfer_buffer[i];
                urb->interval = 1;
                urb->complete = cx231xx_audio_isocirq;
@@ -368,7 +368,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
                urb->context = dev;
                urb->pipe = usb_rcvbulkpipe(dev->udev,
                                                dev->adev.end_point_addr);
-               urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = 0;
                urb->transfer_buffer = dev->adev.transfer_buffer[i];
                urb->complete = cx231xx_audio_bulkirq;
                urb->transfer_buffer_length = sb_size;
index 3d15314..ac7db52 100644
@@ -448,7 +448,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
                        return -ENOMEM;
                }
                dev->vbi_mode.bulk_ctl.urb[i] = urb;
-               urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = 0;
 
                dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
                    kzalloc(sb_size, GFP_KERNEL);
index 13739e0..080e111 100644
@@ -127,22 +127,37 @@ struct cx23885_board cx23885_boards[] = {
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1250] = {
                .name           = "Hauppauge WinTV-HVR1250",
+               .porta          = CX23885_ANALOG_VIDEO,
                .portc          = CX23885_MPEG_DVB,
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
+               .tuner_type     = TUNER_PHILIPS_TDA8290,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .tuner_bus      = 1,
+#endif
+               .force_bff      = 1,
                .input          = {{
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
                        .type   = CX23885_VMUX_TELEVISION,
-                       .vmux   = 0,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1,
+                       .amux   = CX25840_AUDIO8,
                        .gpio0  = 0xff00,
                }, {
-                       .type   = CX23885_VMUX_DEBUG,
-                       .vmux   = 0,
-                       .gpio0  = 0xff01,
-               }, {
+#endif
                        .type   = CX23885_VMUX_COMPOSITE1,
-                       .vmux   = 1,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN6_CH1,
+                       .amux   = CX25840_AUDIO7,
                        .gpio0  = 0xff02,
                }, {
                        .type   = CX23885_VMUX_SVIDEO,
-                       .vmux   = 2,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
                        .gpio0  = 0xff02,
                } },
        },
@@ -267,7 +282,55 @@ struct cx23885_board cx23885_boards[] = {
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1255] = {
                .name           = "Hauppauge WinTV-HVR1255",
+               .porta          = CX23885_ANALOG_VIDEO,
+               .portc          = CX23885_MPEG_DVB,
+               .tuner_type     = TUNER_ABSENT,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .force_bff      = 1,
+               .input          = {{
+                       .type   = CX23885_VMUX_TELEVISION,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1 |
+                                       CX25840_DIF_ON,
+                       .amux   = CX25840_AUDIO8,
+               }, {
+                       .type   = CX23885_VMUX_COMPOSITE1,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN6_CH1,
+                       .amux   = CX25840_AUDIO7,
+               }, {
+                       .type   = CX23885_VMUX_SVIDEO,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
+               } },
+       },
+       [CX23885_BOARD_HAUPPAUGE_HVR1255_22111] = {
+               .name           = "Hauppauge WinTV-HVR1255",
+               .porta          = CX23885_ANALOG_VIDEO,
                .portc          = CX23885_MPEG_DVB,
+               .tuner_type     = TUNER_ABSENT,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .force_bff      = 1,
+               .input          = {{
+                       .type   = CX23885_VMUX_TELEVISION,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1 |
+                                       CX25840_DIF_ON,
+                       .amux   = CX25840_AUDIO8,
+               }, {
+                       .type   = CX23885_VMUX_SVIDEO,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
+               } },
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1210] = {
                .name           = "Hauppauge WinTV-HVR1210",
@@ -624,7 +687,7 @@ struct cx23885_subid cx23885_subids[] = {
        }, {
                .subvendor = 0x0070,
                .subdevice = 0x2259,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR1255,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR1255_22111,
        }, {
                .subvendor = 0x0070,
                .subdevice = 0x2291,
@@ -900,7 +963,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
        struct cx23885_dev *dev = port->dev;
        u32 bitmask = 0;
 
-       if (command == XC2028_RESET_CLK)
+       if ((command == XC2028_RESET_CLK) || (command == XC2028_I2C_FLUSH))
                return 0;
 
        if (command != 0) {
@@ -1130,6 +1193,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
                /* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
                /* GPIO-6 I2C Gate which can isolate the demod from the bus */
@@ -1267,6 +1331,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1400:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
                /* FIXME: Implement me */
                break;
@@ -1424,6 +1489,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1511,6 +1577,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
        case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1526,10 +1593,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
         */
        switch (dev->board) {
        case CX23885_BOARD_TEVII_S470:
-       case CX23885_BOARD_HAUPPAUGE_HVR1250:
                /* Currently only enabled for the integrated IR controller */
                if (!enable_885_ir)
                        break;
+       case CX23885_BOARD_HAUPPAUGE_HVR1250:
        case CX23885_BOARD_HAUPPAUGE_HVR1800:
        case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
        case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1539,6 +1606,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
        case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
        case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_MYGICA_X8506:
index a80a92c..cd54268 100644 (file)
@@ -712,6 +712,7 @@ static int dvb_register(struct cx23885_tsport *port)
                }
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
                i2c_bus = &dev->i2c_bus[0];
                fe0->dvb.frontend = dvb_attach(s5h1411_attach,
                                               &hcw_s5h1411_config,
@@ -721,6 +722,11 @@ static int dvb_register(struct cx23885_tsport *port)
                                   0x60, &dev->i2c_bus[1].i2c_adap,
                                   &hauppauge_tda18271_config);
                }
+
+               tda18271_attach(&dev->ts1.analog_fe,
+                       0x60, &dev->i2c_bus[1].i2c_adap,
+                       &hauppauge_tda18271_config);
+
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR1800:
                i2c_bus = &dev->i2c_bus[0];
index c654bdc..22f8e7f 100644 (file)
@@ -505,6 +505,9 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
 
        if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) ||
                (dev->board == CX23885_BOARD_MPX885) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
                (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) {
                /* Configure audio routing */
                v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
@@ -1578,7 +1581,9 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
 
        fe = vfe->dvb.frontend;
 
-       if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)
+       if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
+           (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+           (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111))
                fe = &dev->ts1.analog_fe;
 
        if (fe && fe->ops.tuner_ops.set_analog_params) {
@@ -1608,6 +1613,8 @@ int cx23885_set_frequency(struct file *file, void *priv,
        int ret;
 
        switch (dev->board) {
+       case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
                ret = cx23885_set_freq_via_ops(dev, f);
                break;
index d884784..13c37ec 100644 (file)
@@ -90,6 +90,7 @@
 #define CX23885_BOARD_MYGICA_X8507             33
 #define CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL 34
 #define CX23885_BOARD_TEVII_S471               35
+#define CX23885_BOARD_HAUPPAUGE_HVR1255_22111  36
 
 #define GPIO_0 0x00000001
 #define GPIO_1 0x00000002
index 83c1aa6..f11f6f0 100644 (file)
@@ -904,9 +904,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
        list_add_tail(&dev->devlist, &cx25821_devlist);
        mutex_unlock(&cx25821_devlist_mutex);
 
-       strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
-       strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
-
        if (dev->pci->device != 0x8210) {
                pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
                        __func__, dev->pci->device);
index b9aa801..029f293 100644 (file)
@@ -187,7 +187,7 @@ enum port {
 };
 
 struct cx25821_board {
-       char *name;
+       const char *name;
        enum port porta;
        enum port portb;
        enum port portc;
index fc1ff69..d8eac3e 100644 (file)
@@ -84,7 +84,7 @@ MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
 
 
 /* ----------------------------------------------------------------------- */
-static void cx23885_std_setup(struct i2c_client *client);
+static void cx23888_std_setup(struct i2c_client *client);
 
 int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
 {
@@ -638,10 +638,13 @@ static void cx23885_initialize(struct i2c_client *client)
        finish_wait(&state->fw_wait, &wait);
        destroy_workqueue(q);
 
-       /* Call the cx23885 specific std setup func, we no longer rely on
+       /* Call the cx23888 specific std setup func, we no longer rely on
          * the generic cx25840 func.
         */
-       cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
+       else
+               cx25840_std_setup(client);
 
        /* (re)set input */
        set_input(client, state->vid_input, state->aud_input);
@@ -1103,9 +1106,23 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
 
                        cx25840_write4(client, 0x410, 0xffff0dbf);
                        cx25840_write4(client, 0x414, 0x00137d03);
-                       cx25840_write4(client, 0x418, 0x01008080);
+
+                       /* on the 887, 0x418 is HSCALE_CTRL, on the 888 it is 
+                          CHROMA_CTRL */
+                       if (is_cx23888(state))
+                               cx25840_write4(client, 0x418, 0x01008080);
+                       else
+                               cx25840_write4(client, 0x418, 0x01000000);
+
                        cx25840_write4(client, 0x41c, 0x00000000);
-                       cx25840_write4(client, 0x420, 0x001c3e0f);
+
+                       /* on the 887, 0x420 is CHROMA_CTRL, on the 888 it is 
+                          CRUSH_CTRL */
+                       if (is_cx23888(state))
+                               cx25840_write4(client, 0x420, 0x001c3e0f);
+                       else
+                               cx25840_write4(client, 0x420, 0x001c8282);
+
                        cx25840_write4(client, 0x42c, 0x42600000);
                        cx25840_write4(client, 0x430, 0x0000039b);
                        cx25840_write4(client, 0x438, 0x00000000);
@@ -1233,7 +1250,7 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
                cx25840_write4(client, 0x8d0, 0x1f063870);
        }
 
-       if (is_cx2388x(state)) {
+       if (is_cx23888(state)) {
                /* HVR1850 */
                /* AUD_IO_CTRL - I2S Input, Parallel1*/
                /*  - Channel 1 src - Parallel1 (Merlin out) */
@@ -1298,8 +1315,8 @@ static int set_v4lstd(struct i2c_client *client)
        }
        cx25840_and_or(client, 0x400, ~0xf, fmt);
        cx25840_and_or(client, 0x403, ~0x3, pal_m);
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
        else
                cx25840_std_setup(client);
        if (!is_cx2583x(state))
@@ -1312,6 +1329,7 @@ static int set_v4lstd(struct i2c_client *client)
 static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct v4l2_subdev *sd = to_sd(ctrl);
+       struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
        switch (ctrl->id) {
@@ -1324,12 +1342,20 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
                break;
 
        case V4L2_CID_SATURATION:
-               cx25840_write(client, 0x420, ctrl->val << 1);
-               cx25840_write(client, 0x421, ctrl->val << 1);
+               if (is_cx23888(state)) {
+                       cx25840_write(client, 0x418, ctrl->val << 1);
+                       cx25840_write(client, 0x419, ctrl->val << 1);
+               } else {
+                       cx25840_write(client, 0x420, ctrl->val << 1);
+                       cx25840_write(client, 0x421, ctrl->val << 1);
+               }
                break;
 
        case V4L2_CID_HUE:
-               cx25840_write(client, 0x422, ctrl->val);
+               if (is_cx23888(state))
+                       cx25840_write(client, 0x41a, ctrl->val);
+               else
+                       cx25840_write(client, 0x422, ctrl->val);
                break;
 
        default:
@@ -1354,11 +1380,21 @@ static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
        fmt->field = V4L2_FIELD_INTERLACED;
        fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
-       Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
-       Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+       if (is_cx23888(state)) {
+               Vsrc = (cx25840_read(client, 0x42a) & 0x3f) << 4;
+               Vsrc |= (cx25840_read(client, 0x429) & 0xf0) >> 4;
+       } else {
+               Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
+               Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+       }
 
-       Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
-       Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+       if (is_cx23888(state)) {
+               Hsrc = (cx25840_read(client, 0x426) & 0x3f) << 4;
+               Hsrc |= (cx25840_read(client, 0x425) & 0xf0) >> 4;
+       } else {
+               Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
+               Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+       }
 
        Vlines = fmt->height + (is_50Hz ? 4 : 7);
 
@@ -1782,8 +1818,8 @@ static int cx25840_s_video_routing(struct v4l2_subdev *sd,
        struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
 
        return set_input(client, input, state->aud_input);
 }
@@ -1794,8 +1830,8 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
        struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
        return set_input(client, state->vid_input, input);
 }
 
@@ -4939,7 +4975,7 @@ void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
        }
 }
 
-static void cx23885_std_setup(struct i2c_client *client)
+static void cx23888_std_setup(struct i2c_client *client)
 {
        struct cx25840_state *state = to_state(i2c_get_clientdata(client));
        v4l2_std_id std = state->std;
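
The cx25840 hunks above gate register usage on is_cx23888(): saturation/hue land at 0x418/0x419/0x41a on the CX23888 but at 0x420/0x421/0x422 on the other variants, and the source-size counters move accordingly. The register values below are taken straight from those hunks, but the lookup helper itself is only an illustrative sketch (the driver open-codes the if/else) and the ctl_regs names are invented.

/*
 * Sketch only: per-variant register map for the controls patched above.
 * Register numbers come from the diff; struct and function names are made up.
 */
struct ctl_regs {
	u16 sat_u;	/* Cb saturation */
	u16 sat_v;	/* Cr saturation */
	u16 hue;
};

static const struct ctl_regs cx23888_ctl_regs = {
	.sat_u = 0x418, .sat_v = 0x419, .hue = 0x41a,
};

static const struct ctl_regs cx2584x_ctl_regs = {
	.sat_u = 0x420, .sat_v = 0x421, .hue = 0x422,
};

static const struct ctl_regs *ctl_regs_for(struct cx25840_state *state)
{
	return is_cx23888(state) ? &cx23888_ctl_regs : &cx2584x_ctl_regs;
}
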
index e46446a..ed7b2aa 100644 (file)
@@ -471,7 +471,7 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
        dprintk(1,"Loading firmware ...\n");
        dataptr = (u32*)firmware->data;
        for (i = 0; i < (firmware->size >> 2); i++) {
-               value = *dataptr;
+               value = le32_to_cpu(*dataptr);
                checksum += ~value;
                memory_write(dev->core, i, value);
                dataptr++;
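
The single-line change above converts each 32-bit firmware word from the file's little-endian layout to CPU byte order before checksumming and writing it, which keeps the checksum correct on big-endian hosts. A minimal sketch of the same pattern, assuming a caller-supplied write_word() callback in place of the driver's memory_write():

#include <linux/firmware.h>
#include <linux/kernel.h>

/* Sketch: walk a firmware image as little-endian 32-bit words. */
static u32 load_fw_words(const struct firmware *fw,
			 void (*write_word)(unsigned int idx, u32 val))
{
	const __le32 *p = (const __le32 *)fw->data;
	u32 checksum = 0;
	unsigned int i;

	for (i = 0; i < fw->size / 4; i++) {
		u32 val = le32_to_cpu(p[i]);	/* file order -> CPU order */

		checksum += ~val;		/* same checksum rule as above */
		write_word(i, val);
	}
	return checksum;
}
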
index 20a7e24..862c657 100644 (file)
@@ -974,6 +974,7 @@ struct em28xx_board em28xx_boards[] = {
        [EM2884_BOARD_CINERGY_HTC_STICK] = {
                .name         = "Terratec Cinergy HTC Stick",
                .has_dvb      = 1,
+               .ir_codes     = RC_MAP_NEC_TERRATEC_CINERGY_XS,
 #if 0
                .tuner_type   = TUNER_PHILIPS_TDA8290,
                .tuner_addr   = 0x41,
@@ -2892,7 +2893,7 @@ static void request_module_async(struct work_struct *work)
 
        if (dev->board.has_dvb)
                request_module("em28xx-dvb");
-       if (dev->board.has_ir_i2c && !disable_ir)
+       if (dev->board.ir_codes && !disable_ir)
                request_module("em28xx-rc");
 }
 
index fce5f76..5e30c4f 100644 (file)
@@ -527,6 +527,8 @@ static int em28xx_ir_init(struct em28xx *dev)
 
        if (dev->board.ir_codes == NULL) {
                /* No remote control support */
+               em28xx_warn("Remote control support is not available for "
+                               "this card.\n");
                return 0;
        }
 
index 137166d..31721ea 100644 (file)
@@ -1653,7 +1653,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type buf_type)
 {
        struct gspca_dev *gspca_dev = video_drvdata(file);
-       int ret;
+       int i, ret;
 
        if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;
@@ -1678,6 +1678,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
        wake_up_interruptible(&gspca_dev->wq);
 
        /* empty the transfer queues */
+       for (i = 0; i < gspca_dev->nframes; i++)
+               gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
        atomic_set(&gspca_dev->fr_q, 0);
        atomic_set(&gspca_dev->fr_i, 0);
        gspca_dev->fr_o = 0;
index b5acb1e..80c81dd 100644 (file)
@@ -96,7 +96,7 @@ static void setbrightness(struct gspca_dev *gspca_dev);
 static void setcontrast(struct gspca_dev *gspca_dev);
 static void setgain(struct gspca_dev *gspca_dev);
 static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static void setagc(struct gspca_dev *gspca_dev);
 static void setawb(struct gspca_dev *gspca_dev);
 static void setaec(struct gspca_dev *gspca_dev);
 static void setsharpness(struct gspca_dev *gspca_dev);
@@ -189,7 +189,7 @@ static const struct ctrl sd_ctrls[] = {
                        .step    = 1,
                        .default_value = 1,
                },
-               .set = sd_setagc
+               .set_control = setagc
        },
 [AWB] = {
                {
@@ -851,6 +851,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
        int i;
 
        for (i = 0; i < 5; i++) {
+               msleep(10);
                data = ov534_reg_read(gspca_dev, OV534_REG_STATUS);
 
                switch (data) {
@@ -1242,10 +1243,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
 
        cam->ctrls = sd->ctrls;
 
-       /* the auto white balance control works only when auto gain is set */
-       if (sd_ctrls[AGC].qctrl.default_value == 0)
-               gspca_dev->ctrl_inac |= (1 << AWB);
-
        cam->cam_mode = ov772x_mode;
        cam->nmodes = ARRAY_SIZE(ov772x_mode);
 
@@ -1486,29 +1483,6 @@ scan_next:
        } while (remaining_len > 0);
 }
 
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
-{
-       struct sd *sd = (struct sd *) gspca_dev;
-
-       sd->ctrls[AGC].val = val;
-
-       /* the auto white balance control works only
-        * when auto gain is set */
-       if (val) {
-               gspca_dev->ctrl_inac &= ~(1 << AWB);
-       } else {
-               gspca_dev->ctrl_inac |= (1 << AWB);
-               if (sd->ctrls[AWB].val) {
-                       sd->ctrls[AWB].val = 0;
-                       if (gspca_dev->streaming)
-                               setawb(gspca_dev);
-               }
-       }
-       if (gspca_dev->streaming)
-               setagc(gspca_dev);
-       return gspca_dev->usb_err;
-}
-
 static int sd_querymenu(struct gspca_dev *gspca_dev,
                struct v4l2_querymenu *menu)
 {
index b579730..1fd41f0 100644 (file)
@@ -1008,6 +1008,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
        int i;
 
        for (i = 0; i < 5; i++) {
+               msleep(10);
                data = reg_r(gspca_dev, OV534_REG_STATUS);
 
                switch (data) {
index 2cb7d95..115da16 100644 (file)
@@ -418,7 +418,7 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
        struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
 
        gspca_dev->vdev.ctrl_handler = hdl;
-       v4l2_ctrl_handler_init(hdl, 4);
+       v4l2_ctrl_handler_init(hdl, 5);
 
        sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                                        V4L2_CID_CONTRAST, 0, 15, 1, 7);
index ad09820..b9c6f17 100644 (file)
@@ -1761,7 +1761,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_SATURATION, 0, 255, 1, 127);
        sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_HUE, -180, 180, 1, 0);
-       v4l2_ctrl_cluster(4, &sd->brightness);
 
        sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_GAMMA, 0, 255, 1, 0x10);
@@ -1770,7 +1769,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0x28);
        sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_RED_BALANCE, 0, 127, 1, 0x28);
-       v4l2_ctrl_cluster(2, &sd->blue);
 
        if (sd->sensor != SENSOR_OV9655 && sd->sensor != SENSOR_SOI968 &&
            sd->sensor != SENSOR_OV7670 && sd->sensor != SENSOR_MT9M001 &&
@@ -1779,7 +1777,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_HFLIP, 0, 1, 1, 0);
                sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_VFLIP, 0, 1, 1, 0);
-               v4l2_ctrl_cluster(2, &sd->hflip);
        }
 
        if (sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_MT9VPRB &&
@@ -1794,6 +1791,20 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_GAIN, 0, 28, 1, 0);
                sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+       }
+
+       sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+                       V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+       if (hdl->error) {
+               pr_err("Could not initialize controls\n");
+               return hdl->error;
+       }
+
+       v4l2_ctrl_cluster(4, &sd->brightness);
+       v4l2_ctrl_cluster(2, &sd->blue);
+       if (sd->hflip)
+               v4l2_ctrl_cluster(2, &sd->hflip);
+       if (sd->autogain) {
                if (sd->sensor == SENSOR_SOI968)
                        /* this sensor doesn't have the exposure control and
                           autogain is clustered with gain instead. This works
@@ -1803,13 +1814,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        /* Otherwise autogain is clustered with exposure. */
                        v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false);
        }
-
-       sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
-                       V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
-       if (hdl->error) {
-               pr_err("Could not initialize controls\n");
-               return hdl->error;
-       }
        return 0;
 }
 
@@ -2066,10 +2070,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
        set_gamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
        set_redblue(gspca_dev, v4l2_ctrl_g_ctrl(sd->blue),
                        v4l2_ctrl_g_ctrl(sd->red));
-       set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
-       set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
-       set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
-                       v4l2_ctrl_g_ctrl(sd->vflip));
+       if (sd->gain)
+               set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
+       if (sd->exposure)
+               set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
+       if (sd->hflip)
+               set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
+                               v4l2_ctrl_g_ctrl(sd->vflip));
 
        reg_w1(gspca_dev, 0x1007, 0x20);
        reg_w1(gspca_dev, 0x1061, 0x03);
@@ -2172,7 +2179,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
        int avg_lum;
 
-       if (!v4l2_ctrl_g_ctrl(sd->autogain))
+       if (sd->autogain == NULL || !v4l2_ctrl_g_ctrl(sd->autogain))
                return;
 
        avg_lum = atomic_read(&sd->avg_lum);
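
The sn9c20x reordering above creates every control, including the JPEG compression quality one, and checks hdl->error once before any clustering; the sd_start and sd_dqcallback hunks then NULL-check the optional gain/exposure/hflip/autogain controls, since v4l2_ctrl_new_std() returns NULL when a control was not created. A rough sketch of that ordering with invented names, not the driver's actual layout:

#include <media/v4l2-ctrls.h>

struct sketch_ctrls {
	/* cluster members must be adjacent: the cluster helpers take the
	 * address of the first element of an array of controls */
	struct v4l2_ctrl *autogain;
	struct v4l2_ctrl *exposure;
};

static int sketch_init_controls(struct v4l2_ctrl_handler *hdl,
				const struct v4l2_ctrl_ops *ops,
				struct sketch_ctrls *c)
{
	v4l2_ctrl_handler_init(hdl, 2);

	/* may legitimately be NULL for sensors lacking these controls */
	c->autogain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	c->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE, 0, 255, 1, 0x40);

	/* one error check after all controls have been created ... */
	if (hdl->error)
		return hdl->error;

	/* ... and only then build clusters, from controls that exist */
	if (c->autogain && c->exposure)
		v4l2_ctrl_auto_cluster(2, &c->autogain, 0, false);

	return 0;
}
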
index 4d1696d..f38faa9 100644 (file)
@@ -3120,7 +3120,7 @@ static const struct sd_desc sd_desc = {
                        | (SENSOR_ ## sensor << 8) \
                        | (flags)
 static const struct usb_device_id device_table[] = {
-       {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
+       {USB_DEVICE(0x0458, 0x7025), BSF(SN9C120, MI0360B, F_PDN_INV)},
        {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
        {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
        {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
index 057929e..5462ce2 100644 (file)
@@ -866,10 +866,10 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
        pci_write_config_dword(pdev, 0x40, 0xffff);
 
        IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
-                  "irq: %d, latency: %d, memory: 0x%lx\n",
+                  "irq: %d, latency: %d, memory: 0x%llx\n",
                   pdev->device, pdev->revision, pdev->bus->number,
                   PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
-                  pdev->irq, pci_latency, (unsigned long)itv->base_addr);
+                  pdev->irq, pci_latency, (u64)itv->base_addr);
 
        return 0;
 }
@@ -1007,7 +1007,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
        itv->cxhdl.priv = itv;
        itv->cxhdl.func = ivtv_api_func;
 
-       IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
+       IVTV_DEBUG_INFO("base addr: 0x%llx\n", (u64)itv->base_addr);
 
        /* PCI Device Setup */
        retval = ivtv_setup_pci(itv, pdev, pci_id);
@@ -1017,8 +1017,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
                goto free_mem;
 
        /* map io memory */
-       IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                  itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
+       IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                  (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
        itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
                                       IVTV_ENCODER_SIZE);
        if (!itv->enc_mem) {
@@ -1034,8 +1034,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
        }
 
        if (itv->has_cx23415) {
-               IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                               itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
+               IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                               (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
                itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
                                IVTV_DECODER_SIZE);
                if (!itv->dec_mem) {
@@ -1056,8 +1056,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
        }
 
        /* map registers memory */
-       IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                  itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+       IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                  (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        itv->reg_mem =
            ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        if (!itv->reg_mem) {
index 2e22002..a7e00f8 100644 (file)
@@ -622,7 +622,7 @@ struct ivtv {
        struct v4l2_subdev *sd_video;   /* controlling video decoder subdev */
        struct v4l2_subdev *sd_audio;   /* controlling audio subdev */
        struct v4l2_subdev *sd_muxer;   /* controlling audio muxer subdev */
-       u32 base_addr;                  /* PCI resource base address */
+       resource_size_t base_addr;      /* PCI resource base address */
        volatile void __iomem *enc_mem; /* pointer to mapped encoder memory */
        volatile void __iomem *dec_mem; /* pointer to mapped decoder memory */
        volatile void __iomem *reg_mem; /* pointer to mapped registers */
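
base_addr becomes resource_size_t, which is 32 or 64 bits wide depending on the platform's physical address size, so the debug prints above cast it to u64 and switch the format to %llx instead of truncating through unsigned long. The same idea in isolation (a sketch, not ivtv code):

#include <linux/printk.h>
#include <linux/types.h>

/* Sketch: print a resource_size_t portably by widening it explicitly. */
static void report_bar(resource_size_t base, u32 len)
{
	pr_info("BAR at 0x%llx, len 0x%08x\n", (u64)base, len);
}
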
index d2dec58..3945556 100644 (file)
@@ -110,22 +110,6 @@ enum {
        V4L2_M2M_DST = 1,
 };
 
-/* Source and destination queue data */
-static struct m2mtest_q_data q_data[2];
-
-static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
-{
-       switch (type) {
-       case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-               return &q_data[V4L2_M2M_SRC];
-       case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-               return &q_data[V4L2_M2M_DST];
-       default:
-               BUG();
-       }
-       return NULL;
-}
-
 #define V4L2_CID_TRANS_TIME_MSEC       V4L2_CID_PRIVATE_BASE
 #define V4L2_CID_TRANS_NUM_BUFS                (V4L2_CID_PRIVATE_BASE + 1)
 
@@ -198,8 +182,26 @@ struct m2mtest_ctx {
        int                     aborting;
 
        struct v4l2_m2m_ctx     *m2m_ctx;
+
+       /* Source and destination queue data */
+       struct m2mtest_q_data   q_data[2];
 };
 
+static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+                                        enum v4l2_buf_type type)
+{
+       switch (type) {
+       case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+               return &ctx->q_data[V4L2_M2M_SRC];
+       case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+               return &ctx->q_data[V4L2_M2M_DST];
+       default:
+               BUG();
+       }
+       return NULL;
+}
+
+
 static struct v4l2_queryctrl *get_ctrl(int id)
 {
        int i;
@@ -223,7 +225,7 @@ static int device_process(struct m2mtest_ctx *ctx,
        int tile_w, bytes_left;
        int width, height, bytesperline;
 
-       q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+       q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
 
        width   = q_data->width;
        height  = q_data->height;
@@ -436,7 +438,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
        if (!vq)
                return -EINVAL;
 
-       q_data = get_q_data(f->type);
+       q_data = get_q_data(ctx, f->type);
 
        f->fmt.pix.width        = q_data->width;
        f->fmt.pix.height       = q_data->height;
@@ -535,7 +537,7 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
        if (!vq)
                return -EINVAL;
 
-       q_data = get_q_data(f->type);
+       q_data = get_q_data(ctx, f->type);
        if (!q_data)
                return -EINVAL;
 
@@ -747,7 +749,7 @@ static int m2mtest_queue_setup(struct vb2_queue *vq,
        struct m2mtest_q_data *q_data;
        unsigned int size, count = *nbuffers;
 
-       q_data = get_q_data(vq->type);
+       q_data = get_q_data(ctx, vq->type);
 
        size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
 
@@ -775,7 +777,7 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
 
        dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
 
-       q_data = get_q_data(vb->vb2_queue->type);
+       q_data = get_q_data(ctx, vb->vb2_queue->type);
 
        if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
                dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
@@ -860,6 +862,9 @@ static int m2mtest_open(struct file *file)
        ctx->transtime = MEM2MEM_DEF_TRANSTIME;
        ctx->num_processed = 0;
 
+       ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+       ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+
        ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
 
        if (IS_ERR(ctx->m2m_ctx)) {
@@ -986,9 +991,6 @@ static int m2mtest_probe(struct platform_device *pdev)
                goto err_m2m;
        }
 
-       q_data[V4L2_M2M_SRC].fmt = &formats[0];
-       q_data[V4L2_M2M_DST].fmt = &formats[0];
-
        return 0;
 
        v4l2_m2m_release(dev->m2m_dev);
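
The m2mtest change above moves the source/destination queue descriptors from file-scope globals into struct m2mtest_ctx, so concurrently opened contexts no longer overwrite each other's formats, and get_q_data() gains a context argument. A self-contained sketch of the per-context pattern (all names invented):

#include <linux/videodev2.h>

enum { SKETCH_SRC, SKETCH_DST };

struct sketch_q_data {
	unsigned int width, height, sizeimage;
};

struct sketch_ctx {
	struct sketch_q_data q_data[2];		/* one per direction, per open() */
};

static struct sketch_q_data *sketch_get_q_data(struct sketch_ctx *ctx,
					       enum v4l2_buf_type type)
{
	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:	/* data fed in by userspace */
		return &ctx->q_data[SKETCH_SRC];
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:	/* data produced by the device */
		return &ctx->q_data[SKETCH_DST];
	default:
		return NULL;
	}
}
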
index ded26b7..637bde8 100644 (file)
@@ -83,6 +83,7 @@
 #define CSICR1_INV_DATA                (1 << 3)
 #define CSICR1_INV_PCLK                (1 << 2)
 #define CSICR1_REDGE           (1 << 1)
+#define CSICR1_FMT_MASK                (CSICR1_PACK_DIR | CSICR1_SWAP16_EN)
 
 #define SHIFT_STATFF_LEVEL     22
 #define SHIFT_RXFF_LEVEL       19
@@ -230,6 +231,7 @@ struct mx2_prp_cfg {
        u32 src_pixel;
        u32 ch1_pixel;
        u32 irq_flags;
+       u32 csicr1;
 };
 
 /* prp resizing parameters */
@@ -330,6 +332,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .ch1_pixel      = 0x2ca00565, /* RGB565 */
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
                                                PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+                       .csicr1         = 0,
                }
        },
        {
@@ -343,6 +346,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
                                        PRP_INTR_CH2FC | PRP_INTR_LBOVF |
                                        PRP_INTR_CH2OVF,
+                       .csicr1         = CSICR1_PACK_DIR,
                }
        },
        {
@@ -356,6 +360,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
                                        PRP_INTR_CH2FC | PRP_INTR_LBOVF |
                                        PRP_INTR_CH2OVF,
+                       .csicr1         = CSICR1_SWAP16_EN,
                }
        },
 };
@@ -984,7 +989,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct mx2_camera_dev *pcdev = ici->priv;
        struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
-       const struct soc_camera_format_xlate *xlate;
        unsigned long common_flags;
        int ret;
        int bytesperline;
@@ -1029,24 +1033,7 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
                return ret;
        }
 
-       xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
-       if (!xlate) {
-               dev_warn(icd->parent, "Format %x not found\n", pixfmt);
-               return -EINVAL;
-       }
-
-       if (xlate->code == V4L2_MBUS_FMT_YUYV8_2X8) {
-               csicr1 |= CSICR1_PACK_DIR;
-               csicr1 &= ~CSICR1_SWAP16_EN;
-               dev_dbg(icd->parent, "already yuyv format, don't convert\n");
-       } else if (xlate->code == V4L2_MBUS_FMT_UYVY8_2X8) {
-               csicr1 &= ~CSICR1_PACK_DIR;
-               csicr1 |= CSICR1_SWAP16_EN;
-               dev_dbg(icd->parent, "convert uyvy mbus format into yuyv\n");
-       } else {
-               dev_warn(icd->parent, "mbus format not supported\n");
-               return -EINVAL;
-       }
+       csicr1 = (csicr1 & ~CSICR1_FMT_MASK) | pcdev->emma_prp->cfg.csicr1;
 
        if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
                csicr1 |= CSICR1_REDGE;
@@ -1155,18 +1142,6 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
                }
        }
 
-       if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
-               formats++;
-               if (xlate) {
-                       xlate->host_fmt =
-                               soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
-                       xlate->code     = code;
-                       dev_dbg(dev, "Providing host format %s for sensor code %d\n",
-                               xlate->host_fmt->name, code);
-                       xlate++;
-               }
-       }
-
        /* Generic pass-through */
        formats++;
        if (xlate) {
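
Rather than re-deriving the CSI pack/swap bits from the mbus code in set_bus_param(), the mx2_camera patch stores a ready-made csicr1 value in each mx27_emma_prp_table entry and merges it under CSICR1_FMT_MASK. A sketch of that table-driven masking; FMT_MASK, fmt_cfg and the bit values here are placeholders, not the driver's real register layout:

#include <linux/bitops.h>
#include <linux/videodev2.h>

#define FMT_MASK	(BIT(0) | BIT(1))	/* bits owned by the format table */

struct fmt_cfg {
	u32 fourcc;
	u32 csicr1_bits;			/* value to merge under FMT_MASK */
};

static const struct fmt_cfg fmt_table[] = {
	{ .fourcc = V4L2_PIX_FMT_YUYV, .csicr1_bits = BIT(0) },
	{ .fourcc = V4L2_PIX_FMT_UYVY, .csicr1_bits = BIT(1) },
};

/* Merge the per-format bits without disturbing the rest of the register. */
static u32 apply_fmt_bits(u32 csicr1, const struct fmt_cfg *cfg)
{
	return (csicr1 & ~FMT_MASK) | (cfg->csicr1_bits & FMT_MASK);
}
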
index 8a4935e..dd91da2 100644 (file)
@@ -888,12 +888,12 @@ static const struct preview_update update_attrs[] = {
                preview_config_contrast,
                NULL,
                offsetof(struct prev_params, contrast),
-               0, true,
+               0, 0, true,
        }, /* OMAP3ISP_PREV_BRIGHTNESS */ {
                preview_config_brightness,
                NULL,
                offsetof(struct prev_params, brightness),
-               0, true,
+               0, 0, true,
        },
 };
 
@@ -1102,7 +1102,7 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
        unsigned int elv = prev->crop.top + prev->crop.height - 1;
        u32 features;
 
-       if (format->code == V4L2_MBUS_FMT_Y10_1X10) {
+       if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
                sph -= 2;
                eph += 2;
                slv -= 2;
index af2d908..b4c679b 100644 (file)
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/uaccess.h>
 #include <linux/isa.h>
 #include <asm/io.h>
index 3545745..725812a 100644 (file)
@@ -350,7 +350,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
                if (pixm)
                        sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
                else
-                       sizes[i] = size;
+                       sizes[i] = max_t(u32, size, frame->payload[i]);
+
                allocators[i] = ctx->fimc_dev->alloc_ctx;
        }
 
@@ -479,37 +480,39 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc);
 static int fimc_capture_open(struct file *file)
 {
        struct fimc_dev *fimc = video_drvdata(file);
-       int ret = v4l2_fh_open(file);
-
-       if (ret)
-               return ret;
+       int ret;
 
        dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
 
-       /* Return if the corresponding video mem2mem node is already opened. */
        if (fimc_m2m_active(fimc))
                return -EBUSY;
 
        set_bit(ST_CAPT_BUSY, &fimc->state);
-       pm_runtime_get_sync(&fimc->pdev->dev);
+       ret = pm_runtime_get_sync(&fimc->pdev->dev);
+       if (ret < 0)
+               return ret;
 
-       if (++fimc->vid_cap.refcnt == 1) {
-               ret = fimc_pipeline_initialize(&fimc->pipeline,
-                              &fimc->vid_cap.vfd->entity, true);
-               if (ret < 0) {
-                       dev_err(&fimc->pdev->dev,
-                               "Video pipeline initialization failed\n");
-                       pm_runtime_put_sync(&fimc->pdev->dev);
-                       fimc->vid_cap.refcnt--;
-                       v4l2_fh_release(file);
-                       clear_bit(ST_CAPT_BUSY, &fimc->state);
-                       return ret;
-               }
-               ret = fimc_capture_ctrls_create(fimc);
+       ret = v4l2_fh_open(file);
+       if (ret)
+               return ret;
 
-               if (!ret && !fimc->vid_cap.user_subdev_api)
-                       ret = fimc_capture_set_default_format(fimc);
+       if (++fimc->vid_cap.refcnt != 1)
+               return 0;
+
+       ret = fimc_pipeline_initialize(&fimc->pipeline,
+                                      &fimc->vid_cap.vfd->entity, true);
+       if (ret < 0) {
+               clear_bit(ST_CAPT_BUSY, &fimc->state);
+               pm_runtime_put_sync(&fimc->pdev->dev);
+               fimc->vid_cap.refcnt--;
+               v4l2_fh_release(file);
+               return ret;
        }
+       ret = fimc_capture_ctrls_create(fimc);
+
+       if (!ret && !fimc->vid_cap.user_subdev_api)
+               ret = fimc_capture_set_default_format(fimc);
+
        return ret;
 }
 
@@ -818,9 +821,6 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
        struct fimc_dev *fimc = video_drvdata(file);
        struct fimc_ctx *ctx = fimc->vid_cap.ctx;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
-
        return fimc_fill_format(&ctx->d_frame, f);
 }
 
@@ -833,9 +833,6 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
        struct v4l2_mbus_framefmt mf;
        struct fimc_fmt *ffmt = NULL;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
-
        if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
                fimc_capture_try_format(ctx, &pix->width, &pix->height,
                                        NULL, &pix->pixelformat,
@@ -887,8 +884,6 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
        struct fimc_fmt *s_fmt = NULL;
        int ret, i;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
        if (vb2_is_busy(&fimc->vid_cap.vbq))
                return -EBUSY;
 
@@ -924,10 +919,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
                pix->width  = mf->width;
                pix->height = mf->height;
        }
+
        fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
        for (i = 0; i < ff->fmt->colplanes; i++)
-               ff->payload[i] =
-                       (pix->width * pix->height * ff->fmt->depth[i]) / 8;
+               ff->payload[i] = pix->plane_fmt[i].sizeimage;
 
        set_frame_bounds(ff, pix->width, pix->height);
        /* Reset the composition rectangle if not yet configured */
@@ -1045,18 +1040,22 @@ static int fimc_cap_streamon(struct file *file, void *priv,
 {
        struct fimc_dev *fimc = video_drvdata(file);
        struct fimc_pipeline *p = &fimc->pipeline;
+       struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
        int ret;
 
        if (fimc_capture_active(fimc))
                return -EBUSY;
 
-       media_entity_pipeline_start(&p->subdevs[IDX_SENSOR]->entity,
-                                   p->m_pipeline);
+       ret = media_entity_pipeline_start(&sd->entity, p->m_pipeline);
+       if (ret < 0)
+               return ret;
 
        if (fimc->vid_cap.user_subdev_api) {
                ret = fimc_pipeline_validate(fimc);
-               if (ret)
+               if (ret < 0) {
+                       media_entity_pipeline_stop(&sd->entity);
                        return ret;
+               }
        }
        return vb2_streamon(&fimc->vid_cap.vbq, type);
 }
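
fimc_cap_streamon() now checks the result of media_entity_pipeline_start() and, when the optional pipeline validation fails afterwards, undoes it with media_entity_pipeline_stop() before returning, so an error path never leaves the pipeline marked as streaming. The shape of that error handling, reduced to a sketch with placeholder callbacks:

#include <media/media-entity.h>

static int sketch_streamon(struct media_entity *me, struct media_pipeline *pipe,
			   int (*validate)(void *), int (*start)(void *), void *priv)
{
	int ret;

	ret = media_entity_pipeline_start(me, pipe);
	if (ret < 0)
		return ret;			/* nothing to unwind yet */

	ret = validate(priv);
	if (ret < 0) {
		media_entity_pipeline_stop(me);	/* undo the start */
		return ret;
	}

	return start(priv);
}
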
index fedcd56..a4646ca 100644 (file)
@@ -153,7 +153,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 2,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
+               .name           = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
                .fourcc         = V4L2_PIX_FMT_NV12M,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 4 },
@@ -161,7 +161,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 2,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
+               .name           = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
                .fourcc         = V4L2_PIX_FMT_YUV420M,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 2, 2 },
@@ -169,7 +169,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 3,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
+               .name           = "YUV 4:2:0 non-contig. 2p, tiled",
                .fourcc         = V4L2_PIX_FMT_NV12MT,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 4 },
@@ -615,7 +615,7 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
        ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
 
        if (!handler->error) {
-               v4l2_ctrl_cluster(3, &ctrls->colorfx);
+               v4l2_ctrl_cluster(2, &ctrls->colorfx);
                ctrls->ready = true;
        }
 
@@ -641,7 +641,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
        if (!ctrls->ready)
                return;
 
-       mutex_lock(&ctrls->handler.lock);
+       mutex_lock(ctrls->handler.lock);
        v4l2_ctrl_activate(ctrls->rotate, active);
        v4l2_ctrl_activate(ctrls->hflip, active);
        v4l2_ctrl_activate(ctrls->vflip, active);
@@ -660,7 +660,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
                ctx->hflip    = 0;
                ctx->vflip    = 0;
        }
-       mutex_unlock(&ctrls->handler.lock);
+       mutex_unlock(ctrls->handler.lock);
 }
 
 /* Update maximum value of the alpha color control */
@@ -741,8 +741,8 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
        pix->width = width;
 
        for (i = 0; i < pix->num_planes; ++i) {
-               u32 bpl = pix->plane_fmt[i].bytesperline;
-               u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
+               struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+               u32 bpl = plane_fmt->bytesperline;
 
                if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
                        bpl = pix->width; /* Planar */
@@ -754,8 +754,9 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
                if (i == 0) /* Same bytesperline for each plane. */
                        bytesperline = bpl;
 
-               pix->plane_fmt[i].bytesperline = bytesperline;
-               *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
+               plane_fmt->bytesperline = bytesperline;
+               plane_fmt->sizeimage = max((pix->width * pix->height *
+                                  fmt->depth[i]) / 8, plane_fmt->sizeimage);
        }
 }
 
index 400d701..74ff310 100644 (file)
@@ -451,34 +451,44 @@ static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
 static int fimc_lite_open(struct file *file)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       int ret = v4l2_fh_open(file);
+       int ret;
 
-       if (ret)
-               return ret;
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
 
        set_bit(ST_FLITE_IN_USE, &fimc->state);
-       pm_runtime_get_sync(&fimc->pdev->dev);
+       ret = pm_runtime_get_sync(&fimc->pdev->dev);
+       if (ret < 0)
+               goto done;
 
-       if (++fimc->ref_count != 1 || fimc->out_path != FIMC_IO_DMA)
-               return ret;
+       ret = v4l2_fh_open(file);
+       if (ret < 0)
+               goto done;
 
-       ret = fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity,
-                                      true);
-       if (ret < 0) {
-               v4l2_err(fimc->vfd, "Video pipeline initialization failed\n");
-               pm_runtime_put_sync(&fimc->pdev->dev);
-               fimc->ref_count--;
-               v4l2_fh_release(file);
-               clear_bit(ST_FLITE_IN_USE, &fimc->state);
-       }
+       if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+               ret = fimc_pipeline_initialize(&fimc->pipeline,
+                                              &fimc->vfd->entity, true);
+               if (ret < 0) {
+                       pm_runtime_put_sync(&fimc->pdev->dev);
+                       fimc->ref_count--;
+                       v4l2_fh_release(file);
+                       clear_bit(ST_FLITE_IN_USE, &fimc->state);
+               }
 
-       fimc_lite_clear_event_counters(fimc);
+               fimc_lite_clear_event_counters(fimc);
+       }
+done:
+       mutex_unlock(&fimc->lock);
        return ret;
 }
 
 static int fimc_lite_close(struct file *file)
 {
        struct fimc_lite *fimc = video_drvdata(file);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
 
        if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
                clear_bit(ST_FLITE_IN_USE, &fimc->state);
@@ -492,20 +502,39 @@ static int fimc_lite_close(struct file *file)
        if (fimc->ref_count == 0)
                vb2_queue_release(&fimc->vb_queue);
 
-       return v4l2_fh_release(file);
+       ret = v4l2_fh_release(file);
+
+       mutex_unlock(&fimc->lock);
+       return ret;
 }
 
 static unsigned int fimc_lite_poll(struct file *file,
                                   struct poll_table_struct *wait)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       return vb2_poll(&fimc->vb_queue, file, wait);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return POLLERR;
+
+       ret = vb2_poll(&fimc->vb_queue, file, wait);
+       mutex_unlock(&fimc->lock);
+
+       return ret;
 }
 
 static int fimc_lite_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       return vb2_mmap(&fimc->vb_queue, vma);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
+
+       ret = vb2_mmap(&fimc->vb_queue, vma);
+       mutex_unlock(&fimc->lock);
+
+       return ret;
 }
 
 static const struct v4l2_file_operations fimc_lite_fops = {
@@ -762,7 +791,9 @@ static int fimc_lite_streamon(struct file *file, void *priv,
        if (fimc_lite_active(fimc))
                return -EBUSY;
 
-       media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+       ret = media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+       if (ret < 0)
+               return ret;
 
        ret = fimc_pipeline_validate(fimc);
        if (ret) {
@@ -1508,7 +1539,7 @@ static int fimc_lite_suspend(struct device *dev)
                return 0;
 
        ret = fimc_lite_stop_capture(fimc, suspend);
-       if (ret)
+       if (ret < 0 || !fimc_lite_active(fimc))
                return ret;
 
        return fimc_pipeline_shutdown(&fimc->pipeline);
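
fimc-lite's open/close/poll/mmap are now serialized on the driver mutex with mutex_lock_interruptible(), and open() acquires its resources in a fixed order (runtime-PM reference, file handle, pipeline) so each failure can release what was already taken. A generic sketch of that acquire-in-order/unwind-in-reverse shape, not the driver's exact error handling:

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-fh.h>

static int sketch_open(struct device *dev, struct mutex *lock,
		       struct file *file, int (*init_pipeline)(void *), void *priv)
{
	int ret;

	if (mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto unlock;

	ret = v4l2_fh_open(file);
	if (ret < 0)
		goto put_pm;

	ret = init_pipeline(priv);
	if (ret < 0)
		goto release_fh;

	mutex_unlock(lock);
	return 0;

release_fh:
	v4l2_fh_release(file);
put_pm:
	pm_runtime_put_sync(dev);
unlock:
	mutex_unlock(lock);
	return ret;
}
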
index 6753c45..52cef48 100644 (file)
@@ -193,9 +193,13 @@ int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
 
 int fimc_pipeline_shutdown(struct fimc_pipeline *p)
 {
-       struct media_entity *me = &p->subdevs[IDX_SENSOR]->entity;
+       struct media_entity *me;
        int ret;
 
+       if (!p || !p->subdevs[IDX_SENSOR])
+               return -EINVAL;
+
+       me = &p->subdevs[IDX_SENSOR]->entity;
        mutex_lock(&me->parent->graph_mutex);
        ret = __fimc_pipeline_shutdown(p);
        mutex_unlock(&me->parent->graph_mutex);
@@ -498,12 +502,12 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
  * @source: the source entity to create links to all fimc entities from
  * @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
  * @pad: the source entity pad index
- * @fimc_id: index of the fimc device for which link should be enabled
+ * @link_mask: bitmask of the fimc devices for which link should be enabled
  */
 static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                                            struct media_entity *source,
                                            struct v4l2_subdev *sensor,
-                                           int pad, int fimc_id)
+                                           int pad, int link_mask)
 {
        struct fimc_sensor_info *s_info;
        struct media_entity *sink;
@@ -520,7 +524,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                if (!fmd->fimc[i]->variant->has_cam_if)
                        continue;
 
-               flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+               flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
 
                sink = &fmd->fimc[i]->vid_cap.subdev.entity;
                ret = media_entity_create_link(source, pad, sink,
@@ -552,7 +556,10 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                if (!fmd->fimc_lite[i])
                        continue;
 
-               flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+               if (link_mask & (1 << (i + FIMC_MAX_DEVS)))
+                       flags = MEDIA_LNK_FL_ENABLED;
+               else
+                       flags = 0;
 
                sink = &fmd->fimc_lite[i]->subdev.entity;
                ret = media_entity_create_link(source, pad, sink,
@@ -614,9 +621,8 @@ static int fimc_md_create_links(struct fimc_md *fmd)
        struct s5p_fimc_isp_info *pdata;
        struct fimc_sensor_info *s_info;
        struct media_entity *source, *sink;
-       int i, pad, fimc_id = 0;
-       int ret = 0;
-       u32 flags;
+       int i, pad, fimc_id = 0, ret = 0;
+       u32 flags, link_mask = 0;
 
        for (i = 0; i < fmd->num_sensors; i++) {
                if (fmd->sensor[i].subdev == NULL)
@@ -668,19 +674,20 @@ static int fimc_md_create_links(struct fimc_md *fmd)
                if (source == NULL)
                        continue;
 
+               link_mask = 1 << fimc_id++;
                ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
-                                                      pad, fimc_id++);
+                                                      pad, link_mask);
        }
 
-       fimc_id = 0;
        for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
                if (fmd->csis[i].sd == NULL)
                        continue;
                source = &fmd->csis[i].sd->entity;
                pad = CSIS_PAD_SOURCE;
 
+               link_mask = 1 << fimc_id++;
                ret = __fimc_md_create_fimc_sink_links(fmd, source, NULL,
-                                                      pad, fimc_id++);
+                                                      pad, link_mask);
        }
 
        /* Create immutable links between each FIMC's subdev and video node */
@@ -734,8 +741,8 @@ static void fimc_md_put_clocks(struct fimc_md *fmd)
 }
 
 static int __fimc_md_set_camclk(struct fimc_md *fmd,
-                                        struct fimc_sensor_info *s_info,
-                                        bool on)
+                               struct fimc_sensor_info *s_info,
+                               bool on)
 {
        struct s5p_fimc_isp_info *pdata = s_info->pdata;
        struct fimc_camclk_info *camclk;
@@ -744,12 +751,10 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
        if (WARN_ON(pdata->clk_id >= FIMC_MAX_CAMCLKS) || fmd == NULL)
                return -EINVAL;
 
-       if (s_info->clk_on == on)
-               return 0;
        camclk = &fmd->camclk[pdata->clk_id];
 
-       dbg("camclk %d, f: %lu, clk: %p, on: %d",
-           pdata->clk_id, pdata->clk_frequency, camclk, on);
+       dbg("camclk %d, f: %lu, use_count: %d, on: %d",
+           pdata->clk_id, pdata->clk_frequency, camclk->use_count, on);
 
        if (on) {
                if (camclk->use_count > 0 &&
@@ -760,11 +765,9 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
                        clk_set_rate(camclk->clock, pdata->clk_frequency);
                        camclk->frequency = pdata->clk_frequency;
                        ret = clk_enable(camclk->clock);
+                       dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
+                           clk_get_rate(camclk->clock));
                }
-               s_info->clk_on = 1;
-               dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
-                   clk_get_rate(camclk->clock));
-
                return ret;
        }
 
@@ -773,7 +776,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
 
        if (--camclk->use_count == 0) {
                clk_disable(camclk->clock);
-               s_info->clk_on = 0;
                dbg("Disabled camclk %d", pdata->clk_id);
        }
        return ret;
@@ -789,8 +791,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
  * devices to which sensors can be attached, either directly or through
  * the MIPI CSI receiver. The clock is allowed here to be used by
  * multiple sensors concurrently if they use same frequency.
- * The per sensor subdev clk_on attribute helps to synchronize accesses
- * to the sclk_cam clocks from the video and media device nodes.
  * This function should only be called when the graph mutex is held.
  */
 int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
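
__fimc_md_create_fimc_sink_links() now takes a bitmask instead of a single FIMC index: the low bits select which FIMC capture entities get an enabled link, and bits offset by FIMC_MAX_DEVS do the same for the FIMC-LITE entities. A small sketch of picking the link flags from that mask (helper name invented, bit layout as in the hunk above):

#include <linux/media.h>

static u32 sketch_sink_link_flags(unsigned int i, bool is_lite,
				  unsigned long link_mask)
{
	unsigned int bit = is_lite ? i + FIMC_MAX_DEVS : i;

	return (link_mask & (1UL << bit)) ? MEDIA_LNK_FL_ENABLED : 0;
}
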
index 3b8a349..1f5dbaf 100644 (file)
@@ -47,7 +47,6 @@ struct fimc_camclk_info {
  * @pdata: sensor's atrributes passed as media device's platform data
  * @subdev: image sensor v4l2 subdev
  * @host: fimc device the sensor is currently linked to
- * @clk_on: sclk_cam clock's state associated with this subdev
  *
  * This data structure applies to image sensor and the writeback subdevs.
  */
@@ -55,7 +54,6 @@ struct fimc_sensor_info {
        struct s5p_fimc_isp_info *pdata;
        struct v4l2_subdev *subdev;
        struct fimc_dev *host;
-       bool clk_on;
 };
 
 /**
index 053a8a8..a19bece 100644 (file)
                                                                decoded pic */
 #define S5P_FIMV_SI_DISPLAY_Y_ADR      0x2010 /* luma addr of displayed pic */
 #define S5P_FIMV_SI_DISPLAY_C_ADR      0x2014 /* chroma addrof displayed pic */
+
 #define S5P_FIMV_SI_CONSUMED_BYTES     0x2018 /* Consumed number of bytes to
                                                        decode a frame */
 #define S5P_FIMV_SI_DISPLAY_STATUS     0x201c /* status of decoded picture */
 
+#define S5P_FIMV_SI_DECODE_Y_ADR       0x2024 /* luma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_C_ADR       0x2028 /* chroma addrof decoded pic */
+#define S5P_FIMV_SI_DECODE_STATUS      0x202c /* status of decoded picture */
+
 #define S5P_FIMV_SI_CH0_SB_ST_ADR      0x2044 /* start addr of stream buf */
 #define S5P_FIMV_SI_CH0_SB_FRM_SIZE    0x2048 /* size of stream buf */
 #define S5P_FIMV_SI_CH0_DESC_ADR       0x204c /* addr of descriptor buf */
index c25ec02..feea867 100644 (file)
@@ -627,13 +627,13 @@ static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
 
        switch (ctrl->id) {
        case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
-               ctx->loop_filter_mpeg4 = ctrl->val;
+               ctx->display_delay = ctrl->val;
                break;
        case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
                ctx->display_delay_enable = ctrl->val;
                break;
        case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
-               ctx->display_delay = ctrl->val;
+               ctx->loop_filter_mpeg4 = ctrl->val;
                break;
        case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
                ctx->slice_interface = ctrl->val;
@@ -996,6 +996,7 @@ int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
 
        for (i = 0; i < NUM_CTRLS; i++) {
                if (IS_MFC51_PRIV(controls[i].id)) {
+                       memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
                        cfg.ops = &s5p_mfc_dec_ctrl_ops;
                        cfg.id = controls[i].id;
                        cfg.min = controls[i].minimum;
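
The added memset() matters because the on-stack v4l2_ctrl_config is reused on every loop iteration: without it, fields set while registering one private control (or stack garbage on the first pass) would leak into the next v4l2_ctrl_new_custom() call. A sketch of the safe loop; the mfc_control fields used here are the ones visible in this diff, the function name is invented:

#include <linux/string.h>
#include <media/v4l2-ctrls.h>

static void sketch_register_priv_ctrls(struct v4l2_ctrl_handler *hdl,
				       const struct v4l2_ctrl_ops *ops,
				       const struct mfc_control *tbl, int n)
{
	struct v4l2_ctrl_config cfg;
	int i;

	for (i = 0; i < n; i++) {
		/* clear all fields before reusing cfg for the next control */
		memset(&cfg, 0, sizeof(cfg));
		cfg.ops = ops;
		cfg.id = tbl[i].id;
		cfg.min = tbl[i].minimum;
		cfg.max = tbl[i].maximum;
		cfg.def = tbl[i].default_value;
		v4l2_ctrl_new_custom(hdl, &cfg, NULL);
	}
}
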
index acedb20..158b789 100644 (file)
@@ -243,12 +243,6 @@ static struct mfc_control controls[] = {
                .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
                .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
                .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
-               .menu_skip_mask = ~(
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
-                               ),
        },
        {
                .id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
@@ -494,7 +488,7 @@ static struct mfc_control controls[] = {
                .type = V4L2_CTRL_TYPE_MENU,
                .minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
                .maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
-               .default_value = 0,
+               .default_value = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
                .menu_skip_mask = 0,
        },
        {
@@ -534,7 +528,7 @@ static struct mfc_control controls[] = {
                .type = V4L2_CTRL_TYPE_MENU,
                .minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
                .maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
-               .default_value = 0,
+               .default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
                .menu_skip_mask = 0,
        },
        {
@@ -907,6 +901,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
                        mfc_err("failed to try output format\n");
                        return -EINVAL;
                }
+               v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+                       &pix_fmt_mp->height, 4, 1080, 1, 0);
        } else {
                mfc_err("invalid buf type\n");
                return -EINVAL;
@@ -1777,6 +1773,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
        }
        for (i = 0; i < NUM_CTRLS; i++) {
                if (IS_MFC51_PRIV(controls[i].id)) {
+                       memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
                        cfg.ops = &s5p_mfc_enc_ctrl_ops;
                        cfg.id = controls[i].id;
                        cfg.min = controls[i].minimum;
index db83836..5932d1c 100644 (file)
@@ -57,10 +57,12 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
                                        S5P_FIMV_SI_DISPLAY_Y_ADR) << \
                                        MFC_OFFSET_SHIFT)
 #define s5p_mfc_get_dec_y_adr()                (readl(dev->regs_base + \
-                                       S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+                                       S5P_FIMV_SI_DECODE_Y_ADR) << \
                                        MFC_OFFSET_SHIFT)
 #define s5p_mfc_get_dspl_status()      readl(dev->regs_base + \
                                                S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_dec_status()       readl(dev->regs_base + \
+                                               S5P_FIMV_SI_DECODE_STATUS)
 #define s5p_mfc_get_frame_type()       (readl(dev->regs_base + \
                                                S5P_FIMV_DECODE_FRAME_TYPE) \
                                        & S5P_FIMV_DECODE_FRAME_MASK)
index 764eac6..cf962a4 100644 (file)
@@ -13,8 +13,7 @@
 #ifndef S5P_MFC_SHM_H_
 #define S5P_MFC_SHM_H_
 
-enum MFC_SHM_OFS
-{
+enum MFC_SHM_OFS {
        EXTENEDED_DECODE_STATUS = 0x00, /* D */
        SET_FRAME_TAG           = 0x04, /* D */
        GET_FRAME_TAG_TOP       = 0x08, /* D */
index f7b35ff..fb99ff1 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_SMIAPP
        tristate "SMIA++/SMIA sensor support"
-       depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
        select VIDEO_SMIAPP_PLL
        ---help---
          This is a generic driver for SMIA++/SMIA camera modules.
index f518026..9cf5bda 100644 (file)
@@ -31,7 +31,8 @@
 #include <linux/device.h>
 #include <linux/gpio.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/regulator/consumer.h>
 #include <linux/v4l2-mediabus.h>
 #include <media/v4l2-device.h>
 
index 3e050e1..1ad5ab6 100644 (file)
@@ -1178,7 +1178,7 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
                return 0;
        if (vt->type == t->mode && analog_ops->get_afc)
                vt->afc = analog_ops->get_afc(&t->fe);
-       if (t->mode != V4L2_TUNER_RADIO) {
+       if (vt->type != V4L2_TUNER_RADIO) {
                vt->capability |= V4L2_TUNER_CAP_NORM;
                vt->rangelow = tv_range[0] * 16;
                vt->rangehigh = tv_range[1] * 16;
index 5ccbd46..0cbada1 100644 (file)
@@ -656,7 +656,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
        SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
        SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
        SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
-       if (ops->vidioc_g_parm || vdev->current_norm)
+       if (ops->vidioc_g_parm || vdev->vfl_type == VFL_TYPE_GRABBER)
                set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
        SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
        SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
@@ -679,6 +679,9 @@ static void determine_valid_ioctls(struct video_device *vdev)
        SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
        SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
        SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+       SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
+       SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+       SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
        /* yes, really vidioc_subscribe_event */
        SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
        SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
index 91be4e8..d7fa896 100644 (file)
@@ -1680,6 +1680,7 @@ static long __video_do_ioctl(struct file *file,
                                break;
 
                        ret = 0;
+                       p->parm.capture.readbuffers = 2;
                        if (ops->vidioc_g_std)
                                ret = ops->vidioc_g_std(file, fh, &std);
                        if (ret == 0)
index 4d7391e..aae1720 100644 (file)
@@ -2561,7 +2561,7 @@ static int vino_acquire_input(struct vino_channel_settings *vcs)
        } else if (vino_drvdata->decoder
                   && (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
                int input;
-               int data_norm;
+               int data_norm = 0;
                v4l2_std_id norm;
 
                input = VINO_INPUT_COMPOSITE;
@@ -2651,7 +2651,7 @@ static int vino_set_input(struct vino_channel_settings *vcs, int input)
                }
 
                if (vino_drvdata->decoder_owner == vcs->channel) {
-                       int data_norm;
+                       int data_norm = 0;
                        v4l2_std_id norm;
 
                        ret = decoder_call(video, s_routing,
index 0960d7f..08c1024 100644 (file)
@@ -1149,10 +1149,14 @@ static ssize_t
 vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
 {
        struct vivi_dev *dev = video_drvdata(file);
+       int err;
 
        dprintk(dev, 1, "read called\n");
-       return vb2_read(&dev->vb_vidq, data, count, ppos,
+       mutex_lock(&dev->mutex);
+       err = vb2_read(&dev->vb_vidq, data, count, ppos,
                       file->f_flags & O_NONBLOCK);
+       mutex_unlock(&dev->mutex);
+       return err;
 }
 
 static unsigned int
index e129c82..92144ed 100644 (file)
@@ -286,6 +286,7 @@ config TWL6040_CORE
        depends on I2C=y && GENERIC_HARDIRQS
        select MFD_CORE
        select REGMAP_I2C
+       select IRQ_DOMAIN
        default n
        help
          Say yes here if you want support for Texas Instruments TWL6040 audio
diff --git a/drivers/mfd/ab5500-core.h b/drivers/mfd/ab5500-core.h
deleted file mode 100644 (file)
index 63b30b1..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2011 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Shared definitions and data structures for the AB5500 MFD driver
- */
-
-/* Read/write operation values. */
-#define AB5500_PERM_RD (0x01)
-#define AB5500_PERM_WR (0x02)
-
-/* Read/write permissions. */
-#define AB5500_PERM_RO (AB5500_PERM_RD)
-#define AB5500_PERM_RW (AB5500_PERM_RD | AB5500_PERM_WR)
-
-#define AB5500_MASK_BASE (0x60)
-#define AB5500_MASK_END (0x79)
-#define AB5500_CHIP_ID (0x20)
-
-/**
- * struct ab5500_reg_range
- * @first: the first address of the range
- * @last: the last address of the range
- * @perm: access permissions for the range
- */
-struct ab5500_reg_range {
-       u8 first;
-       u8 last;
-       u8 perm;
-};
-
-/**
- * struct ab5500_i2c_ranges
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_ranges {
-       u8 nranges;
-       u8 bankid;
-       const struct ab5500_reg_range *range;
-};
-
-/**
- * struct ab5500_i2c_banks
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_banks {
-       u8 nbanks;
-       const struct ab5500_i2c_ranges *bank;
-};
-
-/**
- * struct ab5500_bank
- * @slave_addr: I2C slave_addr found in AB5500 specification
- * @name: Documentation name of the bank. For reference
- */
-struct ab5500_bank {
-       u8 slave_addr;
-       const char *name;
-};
-
-static const struct ab5500_bank bankinfo[AB5500_NUM_BANKS] = {
-       [AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP] = {
-               AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP, "VIT_IO_I2C_CLK_TST_OTP"},
-       [AB5500_BANK_VDDDIG_IO_I2C_CLK_TST] = {
-               AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST, "VDDDIG_IO_I2C_CLK_TST"},
-       [AB5500_BANK_VDENC] = {AB5500_ADDR_VDENC, "VDENC"},
-       [AB5500_BANK_SIM_USBSIM] = {AB5500_ADDR_SIM_USBSIM, "SIM_USBSIM"},
-       [AB5500_BANK_LED] = {AB5500_ADDR_LED, "LED"},
-       [AB5500_BANK_ADC] = {AB5500_ADDR_ADC, "ADC"},
-       [AB5500_BANK_RTC] = {AB5500_ADDR_RTC, "RTC"},
-       [AB5500_BANK_STARTUP] = {AB5500_ADDR_STARTUP, "STARTUP"},
-       [AB5500_BANK_DBI_ECI] = {AB5500_ADDR_DBI_ECI, "DBI-ECI"},
-       [AB5500_BANK_CHG] = {AB5500_ADDR_CHG, "CHG"},
-       [AB5500_BANK_FG_BATTCOM_ACC] = {
-               AB5500_ADDR_FG_BATTCOM_ACC, "FG_BATCOM_ACC"},
-       [AB5500_BANK_USB] = {AB5500_ADDR_USB, "USB"},
-       [AB5500_BANK_IT] = {AB5500_ADDR_IT, "IT"},
-       [AB5500_BANK_VIBRA] = {AB5500_ADDR_VIBRA, "VIBRA"},
-       [AB5500_BANK_AUDIO_HEADSETUSB] = {
-               AB5500_ADDR_AUDIO_HEADSETUSB, "AUDIO_HEADSETUSB"},
-};
-
-int ab5500_get_register_interruptible_raw(struct ab5500 *ab, u8 bank, u8 reg,
-       u8 *value);
-int ab5500_mask_and_set_register_interruptible_raw(struct ab5500 *ab, u8 bank,
-       u8 reg, u8 bitmask, u8 bitvalues);
index 3fcdab3..03df422 100644 (file)
@@ -49,10 +49,72 @@ static struct regmap_config mc13xxx_regmap_spi_config = {
        .reg_bits = 7,
        .pad_bits = 1,
        .val_bits = 24,
+       .write_flag_mask = 0x80,
 
        .max_register = MC13XXX_NUMREGS,
 
        .cache_type = REGCACHE_NONE,
+       .use_single_rw = 1,
+};
+
+static int mc13xxx_spi_read(void *context, const void *reg, size_t reg_size,
+                               void *val, size_t val_size)
+{
+       unsigned char w[4] = { *((unsigned char *) reg), 0, 0, 0};
+       unsigned char r[4];
+       unsigned char *p = val;
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+       struct spi_transfer t = {
+               .tx_buf = w,
+               .rx_buf = r,
+               .len = 4,
+       };
+
+       struct spi_message m;
+       int ret;
+
+       if (val_size != 3 || reg_size != 1)
+               return -ENOTSUPP;
+
+       spi_message_init(&m);
+       spi_message_add_tail(&t, &m);
+       ret = spi_sync(spi, &m);
+
+       memcpy(p, &r[1], 3);
+
+       return ret;
+}
+
+static int mc13xxx_spi_write(void *context, const void *data, size_t count)
+{
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+
+       if (count != 4)
+               return -ENOTSUPP;
+
+       return spi_write(spi, data, count);
+}
+
+/*
+ * We cannot use regmap-spi generic bus implementation here.
+ * The MC13783 chip will get corrupted if the CS signal is deasserted
+ * and on the i.MX31 SoC (the target SoC for the MC13783 PMIC) the SPI controller
+ * has the following errata (DSPhl22960):
+ * "The CSPI negates SS when the FIFO becomes empty with
+ * SSCTL= 0. Software cannot guarantee that the FIFO will not
+ * drain because of higher priority interrupts and the
+ * non-realtime characteristics of the operating system. As a
+ * result, the SS will negate before all of the data has been
+ * transferred to/from the peripheral."
+ * We work around this by accessing the SPI controller with a
+ * single transfer.
+ */
+
+static struct regmap_bus regmap_mc13xxx_bus = {
+       .write = mc13xxx_spi_write,
+       .read = mc13xxx_spi_read,
 };
 
 static int mc13xxx_spi_probe(struct spi_device *spi)
@@ -73,12 +135,13 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
 
        dev_set_drvdata(&spi->dev, mc13xxx);
        spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
-       spi->bits_per_word = 32;
 
        mc13xxx->dev = &spi->dev;
        mutex_init(&mc13xxx->lock);
 
-       mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+       mc13xxx->regmap = regmap_init(&spi->dev, &regmap_mc13xxx_bus, &spi->dev,
+                                       &mc13xxx_regmap_spi_config);
+
        if (IS_ERR(mc13xxx->regmap)) {
                ret = PTR_ERR(mc13xxx->regmap);
                dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
index 7e96bb2..41088ec 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
+#include <linux/gpio.h>
 #include <plat/cpu.h>
 #include <plat/usb.h>
 #include <linux/pm_runtime.h>
@@ -500,8 +501,21 @@ static void omap_usbhs_init(struct device *dev)
        dev_dbg(dev, "starting TI HSUSB Controller\n");
 
        pm_runtime_get_sync(dev);
-       spin_lock_irqsave(&omap->lock, flags);
 
+       if (pdata->ehci_data->phy_reset) {
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+                                        GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+                                        GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+               /* Hold the PHY in RESET for enough time till DIR is high */
+               udelay(10);
+       }
+
+       spin_lock_irqsave(&omap->lock, flags);
        omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
        dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
 
@@ -581,9 +595,39 @@ static void omap_usbhs_init(struct device *dev)
        }
 
        spin_unlock_irqrestore(&omap->lock, flags);
+
+       if (pdata->ehci_data->phy_reset) {
+               /* Hold the PHY in RESET for enough time till
+                * PHY is settled and ready
+                */
+               udelay(10);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_set_value_cansleep
+                               (pdata->ehci_data->reset_gpio_port[0], 1);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_set_value_cansleep
+                               (pdata->ehci_data->reset_gpio_port[1], 1);
+       }
+
        pm_runtime_put_sync(dev);
 }
 
+static void omap_usbhs_deinit(struct device *dev)
+{
+       struct usbhs_hcd_omap           *omap = dev_get_drvdata(dev);
+       struct usbhs_omap_platform_data *pdata = &omap->platdata;
+
+       if (pdata->ehci_data->phy_reset) {
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+       }
+}
+
 
 /**
  * usbhs_omap_probe - initialize TI-based HCDs
@@ -767,6 +811,7 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
        goto end_probe;
 
 err_alloc:
+       omap_usbhs_deinit(&pdev->dev);
        iounmap(omap->tll_base);
 
 err_tll:
@@ -818,6 +863,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
 {
        struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
 
+       omap_usbhs_deinit(&pdev->dev);
        iounmap(omap->tll_base);
        iounmap(omap->uhh_base);
        clk_put(omap->init_60m_fclk);
index 00c0aba..c4a69f1 100644 (file)
@@ -356,7 +356,14 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
                }
        }
 
-       ret = regmap_add_irq_chip(palmas->regmap[1], palmas->irq,
+       /* Change IRQ into clear on read mode for efficiency */
+       slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE);
+       addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL);
+       reg = PALMAS_INT_CTRL_INT_CLEAR;
+
+       regmap_write(palmas->regmap[slave], addr, reg);
+
+       ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq,
                        IRQF_ONESHOT | IRQF_TRIGGER_LOW, -1, &palmas_irq_chip,
                        &palmas->irq_data);
        if (ret < 0)
@@ -441,6 +448,9 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
                goto err;
        }
 
+       children[PALMAS_PMIC_ID].platform_data = pdata->pmic_pdata;
+       children[PALMAS_PMIC_ID].pdata_size = sizeof(*pdata->pmic_pdata);
+
        ret = mfd_add_devices(palmas->dev, -1,
                              children, ARRAY_SIZE(palmas_children),
                              NULL, regmap_irq_chip_get_base(palmas->irq_data));
@@ -472,6 +482,7 @@ static const struct i2c_device_id palmas_i2c_id[] = {
        { "twl6035", },
        { "twl6037", },
        { "tps65913", },
+       { /* end */ }
 };
 MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
 
index 373f423..947a06a 100644 (file)
@@ -6,7 +6,7 @@
  *
  * License Terms: GNU General Public License, version 2
  * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
  */
 
 #include <linux/i2c.h>
index afd4590..9edfe86 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) ST Microelectronics SA 2011
  *
  * License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
  */
 
 #include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
index 93936f1..23f5463 100644 (file)
@@ -835,7 +835,7 @@ static int _mei_irq_thread_read(struct mei_device *dev,     s32 *slots,
                        struct mei_cl *cl,
                        struct mei_io_list *cmpl_list)
 {
-       if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
+       if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
                        sizeof(struct hbm_flow_control))) {
                /* return the cancel routine */
                list_del(&cb_pos->cb_list);
index c703332..783fcd7 100644 (file)
@@ -982,7 +982,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
-                       0, mei_driver_name, dev);
+                       IRQF_ONESHOT, mei_driver_name, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
@@ -992,7 +992,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                       pdev->irq);
-               goto unmap_memory;
+               goto disable_msi;
        }
        INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
        if (mei_hw_init(dev)) {
@@ -1023,8 +1023,8 @@ release_irq:
        mei_disable_interrupts(dev);
        flush_scheduled_work();
        free_irq(pdev->irq, dev);
+disable_msi:
        pci_disable_msi(pdev);
-unmap_memory:
        pci_iounmap(pdev, dev->mem_addr);
 free_device:
        kfree(dev);
@@ -1101,6 +1101,8 @@ static void __devexit mei_remove(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
+
+       misc_deregister(&mei_misc_device);
 }
 #ifdef CONFIG_PM
 static int mei_pci_suspend(struct device *device)
@@ -1145,7 +1147,7 @@ static int mei_pci_resume(struct device *device)
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
-                       0, mei_driver_name, dev);
+                       IRQF_ONESHOT, mei_driver_name, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
@@ -1216,7 +1218,6 @@ module_init(mei_init_module);
  */
 static void __exit mei_exit_module(void)
 {
-       misc_deregister(&mei_misc_device);
        pci_unregister_driver(&mei_driver);
 
        pr_debug("unloaded successfully.\n");
index 6be5605..e2ec050 100644 (file)
@@ -341,7 +341,7 @@ static const struct watchdog_ops wd_ops = {
 };
 static const struct watchdog_info wd_info = {
                .identity = INTEL_AMT_WATCHDOG_ID,
-               .options = WDIOF_KEEPALIVEPING,
+               .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
 };
 
 static struct watchdog_device amt_wd_dev = {
index 17bbacb..87b251a 100644 (file)
@@ -452,9 +452,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
 
                if (msg->activate_gru_mq_desc_gpa !=
                    part_uv->activate_gru_mq_desc_gpa) {
-                       spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+                       spin_lock(&part_uv->flags_lock);
                        part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
-                       spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+                       spin_unlock(&part_uv->flags_lock);
                        part_uv->activate_gru_mq_desc_gpa =
                            msg->activate_gru_mq_desc_gpa;
                }
index dd2d374..276d21c 100644 (file)
@@ -554,7 +554,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
-       unsigned int timeout_us;
 
        struct scatterlist sg;
 
@@ -574,23 +573,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 
-       data.timeout_ns = card->csd.tacc_ns * 100;
-       data.timeout_clks = card->csd.tacc_clks * 100;
-
-       timeout_us = data.timeout_ns / 1000;
-       timeout_us += data.timeout_clks * 1000 /
-               (card->host->ios.clock / 1000);
-
-       if (timeout_us > 100000) {
-               data.timeout_ns = 100000000;
-               data.timeout_clks = 0;
-       }
-
        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
+       mmc_set_data_timeout(&data, card);
 
        mrq.cmd = &cmd;
        mrq.data = &data;
index f13e38d..8f5dc08 100644 (file)
@@ -50,8 +50,8 @@ int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio)
                goto egpioreq;
 
        ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
-                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                                  cd->label, host);
+                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                                  IRQF_ONESHOT, cd->label, host);
        if (ret < 0)
                goto eirqreq;
 
index 2d4a4b7..4f4489a 100644 (file)
@@ -717,10 +717,6 @@ static int mmc_select_powerclass(struct mmc_card *card,
                                 card->ext_csd.generic_cmd6_time);
        }
 
-       if (err)
-               pr_err("%s: power class selection for ext_csd_bus_width %d"
-                      " failed\n", mmc_hostname(card->host), bus_width);
-
        return err;
 }
 
@@ -1104,7 +1100,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                                EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
                err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
                if (err)
-                       goto err;
+                       pr_warning("%s: power class selection to bus width %d"
+                                  " failed\n", mmc_hostname(card->host),
+                                  1 << bus_width);
        }
 
        /*
@@ -1136,7 +1134,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
                                                    ext_csd);
                        if (err)
-                               goto err;
+                               pr_warning("%s: power class selection to "
+                                          "bus width %d failed\n",
+                                          mmc_hostname(card->host),
+                                          1 << bus_width);
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
@@ -1164,7 +1165,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
                                                    ext_csd);
                        if (err)
-                               goto err;
+                               pr_warning("%s: power class selection to "
+                                          "bus width %d ddr %d failed\n",
+                                          mmc_hostname(card->host),
+                                          1 << bus_width, ddr);
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
@@ -1326,7 +1330,7 @@ static int mmc_suspend(struct mmc_host *host)
                if (!err)
                        mmc_card_set_sleep(host->card);
        } else if (!mmc_host_is_spi(host))
-               mmc_deselect_cards(host);
+               err = mmc_deselect_cards(host);
        host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
        mmc_release_host(host);
 
index c272c68..b2b43f6 100644 (file)
@@ -1075,16 +1075,18 @@ static void mmc_sd_detect(struct mmc_host *host)
  */
 static int mmc_sd_suspend(struct mmc_host *host)
 {
+       int err = 0;
+
        BUG_ON(!host);
        BUG_ON(!host->card);
 
        mmc_claim_host(host);
        if (!mmc_host_is_spi(host))
-               mmc_deselect_cards(host);
+               err = mmc_deselect_cards(host);
        host->card->state &= ~MMC_STATE_HIGHSPEED;
        mmc_release_host(host);
 
-       return 0;
+       return err;
 }
 
 /*
index 13d0e95..41c5fd8 100644 (file)
@@ -218,6 +218,12 @@ static int sdio_enable_wide(struct mmc_card *card)
        if (ret)
                return ret;
 
+       if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
+               pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+                          mmc_hostname(card->host), ctrl);
+
+       /* set as 4-bit bus width */
+       ctrl &= ~SDIO_BUS_WIDTH_MASK;
        ctrl |= SDIO_BUS_WIDTH_4BIT;
 
        ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
index 787aba1..ab56f7d 100644 (file)
 #define atmci_writel(port,reg,value)                   \
        __raw_writel((value), (port)->regs + reg)
 
+/*
+ * Convert sconfig's burst size to the Atmel MCI CHKSIZE encoding:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * This can be done by finding the most significant bit set.
+ */
+static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
+{
+       if (maxburst > 1)
+               return fls(maxburst) - 2;
+       else
+               return 0;
+}
+
 #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
index 420aca6..f2c115e 100644 (file)
@@ -910,6 +910,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        enum dma_data_direction         direction;
        enum dma_transfer_direction     slave_dirn;
        unsigned int                    sglen;
+       u32                             maxburst;
        u32 iflags;
 
        data->error = -EINPROGRESS;
@@ -943,17 +944,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        if (!chan)
                return -ENODEV;
 
-       if (host->caps.has_dma)
-               atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
-
        if (data->flags & MMC_DATA_READ) {
                direction = DMA_FROM_DEVICE;
                host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
+               maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
        } else {
                direction = DMA_TO_DEVICE;
                host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
+               maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
        }
 
+       atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
+
        sglen = dma_map_sg(chan->device->dev, data->sg,
                        data->sg_len, direction);
 
@@ -2314,6 +2316,8 @@ static int __init atmci_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, host);
 
+       setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
        /* We need at least one slot to succeed */
        nr_slots = 0;
        ret = -ENODEV;
@@ -2352,8 +2356,6 @@ static int __init atmci_probe(struct platform_device *pdev)
                }
        }
 
-       setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
-
        dev_info(&pdev->dev,
                        "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
                        host->mapbase, irq, nr_slots);
index 9bbf45f..1ca5e72 100644 (file)
@@ -418,6 +418,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
        p->des3 = host->sg_dma;
        p->des0 = IDMAC_DES0_ER;
 
+       mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
+
        /* Mask out interrupts - get Tx & Rx complete only */
        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
                   SDMMC_IDMAC_INT_TI);
@@ -615,14 +617,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
        u32 div;
 
        if (slot->clock != host->current_speed) {
-               if (host->bus_hz % slot->clock)
+               div = host->bus_hz / slot->clock;
+               if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
                        /*
                         * move the + 1 after the divide to prevent
                         * over-clocking the card.
                         */
-                       div = ((host->bus_hz / slot->clock) >> 1) + 1;
-               else
-                       div = (host->bus_hz  / slot->clock) >> 1;
+                       div += 1;
+
+               div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
 
                dev_info(&slot->mmc->class_dev,
                         "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
@@ -939,8 +942,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
                        mdelay(20);
 
                if (cmd->data) {
-                       host->data = NULL;
                        dw_mci_stop_dma(host);
+                       host->data = NULL;
                }
        }
 }
@@ -1623,7 +1626,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
        if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
-               set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
                host->dma_ops->complete(host);
        }
 #endif
@@ -1725,7 +1727,8 @@ static void dw_mci_work_routine_card(struct work_struct *work)
 
 #ifdef CONFIG_MMC_DW_IDMAC
                                ctrl = mci_readl(host, BMOD);
-                               ctrl |= 0x01; /* Software reset of DMA */
+                               /* Software reset of DMA */
+                               ctrl |= SDMMC_IDMAC_SWRESET;
                                mci_writel(host, BMOD, ctrl);
 #endif
 
@@ -1950,10 +1953,6 @@ int dw_mci_probe(struct dw_mci *host)
        spin_lock_init(&host->lock);
        INIT_LIST_HEAD(&host->queue);
 
-
-       host->dma_ops = host->pdata->dma_ops;
-       dw_mci_init_dma(host);
-
        /*
         * Get the host data width - this assumes that HCON has been set with
         * the correct values.
@@ -1981,10 +1980,11 @@ int dw_mci_probe(struct dw_mci *host)
        }
 
        /* Reset all blocks */
-       if (!mci_wait_reset(&host->dev, host)) {
-               ret = -ENODEV;
-               goto err_dmaunmap;
-       }
+       if (!mci_wait_reset(&host->dev, host))
+               return -ENODEV;
+
+       host->dma_ops = host->pdata->dma_ops;
+       dw_mci_init_dma(host);
 
        /* Clear the interrupts for the host controller */
        mci_writel(host, RINTSTS, 0xFFFFFFFF);
@@ -2170,14 +2170,14 @@ int dw_mci_resume(struct dw_mci *host)
        if (host->vmmc)
                regulator_enable(host->vmmc);
 
-       if (host->dma_ops->init)
-               host->dma_ops->init(host);
-
        if (!mci_wait_reset(&host->dev, host)) {
                ret = -ENODEV;
                return ret;
        }
 
+       if (host->dma_ops->init)
+               host->dma_ops->init(host);
+
        /* Restore the old value at FIFOTH register */
        mci_writel(host, FIFOTH, host->fifoth_val);
 
index f0fcce4..50ff19a 100644 (file)
@@ -1216,12 +1216,7 @@ static void mmci_dt_populate_generic_pdata(struct device_node *np,
        int bus_width = 0;
 
        pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
-       if (!pdata->gpio_wp)
-               pdata->gpio_wp = -1;
-
        pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
-       if (!pdata->gpio_cd)
-               pdata->gpio_cd = -1;
 
        if (of_get_property(np, "cd-inverted", NULL))
                pdata->cd_invert = true;
@@ -1276,6 +1271,12 @@ static int __devinit mmci_probe(struct amba_device *dev,
                return -EINVAL;
        }
 
+       if (!plat) {
+               plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
+               if (!plat)
+                       return -ENOMEM;
+       }
+
        if (np)
                mmci_dt_populate_generic_pdata(np, plat);
 
@@ -1424,6 +1425,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);
 
+       if (plat->gpio_cd == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto err_gpio_cd;
+       }
        if (gpio_is_valid(plat->gpio_cd)) {
                ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
                if (ret == 0)
@@ -1447,6 +1452,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
                if (ret >= 0)
                        host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
        }
+       if (plat->gpio_wp == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto err_gpio_wp;
+       }
        if (gpio_is_valid(plat->gpio_wp)) {
                ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
                if (ret == 0)
index 34a9026..277161d 100644 (file)
@@ -894,8 +894,8 @@ static struct platform_driver mxs_mmc_driver = {
                .owner  = THIS_MODULE,
 #ifdef CONFIG_PM
                .pm     = &mxs_mmc_pm_ops,
-               .of_match_table = mxs_mmc_dt_ids,
 #endif
+               .of_match_table = mxs_mmc_dt_ids,
        },
 };
 
index 552196c..3e8dcf8 100644 (file)
@@ -1300,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = {
        .set_ios        = mmc_omap_set_ios,
 };
 
-static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
 {
        struct mmc_omap_slot *slot = NULL;
        struct mmc_host *mmc;
@@ -1485,24 +1485,26 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
        }
 
        host->nr_slots = pdata->nr_slots;
+       host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
+       host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+       if (!host->mmc_omap_wq)
+               goto err_plat_cleanup;
+
        for (i = 0; i < pdata->nr_slots; i++) {
                ret = mmc_omap_new_slot(host, i);
                if (ret < 0) {
                        while (--i >= 0)
                                mmc_omap_remove_slot(host->slots[i]);
 
-                       goto err_plat_cleanup;
+                       goto err_destroy_wq;
                }
        }
 
-       host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
-
-       host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
-       if (!host->mmc_omap_wq)
-               goto err_plat_cleanup;
-
        return 0;
 
+err_destroy_wq:
+       destroy_workqueue(host->mmc_omap_wq);
 err_plat_cleanup:
        if (pdata->cleanup)
                pdata->cleanup(&pdev->dev);
index 9a7a60a..389a3ee 100644 (file)
@@ -85,7 +85,6 @@
 #define BRR_ENABLE             (1 << 5)
 #define DTO_ENABLE             (1 << 20)
 #define INIT_STREAM            (1 << 1)
-#define ACEN_ACMD12            (1 << 2)
 #define DP_SELECT              (1 << 21)
 #define DDIR                   (1 << 4)
 #define DMA_EN                 0x1
 #define OMAP_MMC_MAX_CLOCK     52000000
 #define DRIVER_NAME            "omap_hsmmc"
 
-#define AUTO_CMD12             (1 << 0)        /* Auto CMD12 support */
 /*
  * One controller can have multiple slots, like on some omap boards using
  * omap.c controller driver. Luckily this is not currently done on any known
@@ -177,7 +175,6 @@ struct omap_hsmmc_host {
        int                     reqs_blocked;
        int                     use_reg;
        int                     req_in_progress;
-       unsigned int            flags;
        struct omap_hsmmc_next  next_data;
 
        struct  omap_mmc_platform_data  *pdata;
@@ -773,8 +770,6 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
                cmdtype = 0x3;
 
        cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
-       if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
-               cmdreg |= ACEN_ACMD12;
 
        if (data) {
                cmdreg |= DP_SELECT | MSBS | BCE;
@@ -847,14 +842,11 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
        else
                data->bytes_xfered = 0;
 
-       if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
-               omap_hsmmc_start_command(host, data->stop, NULL);
-       } else {
-               if (data->stop)
-                       data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
-                                                       RSP76);
+       if (!data->stop) {
                omap_hsmmc_request_done(host, data->mrq);
+               return;
        }
+       omap_hsmmc_start_command(host, data->stop, NULL);
 }
 
 /*
@@ -1859,7 +1851,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
        host->mapbase   = res->start + pdata->reg_offset;
        host->base      = ioremap(host->mapbase, SZ_4K);
        host->power_mode = MMC_POWER_OFF;
-       host->flags     = AUTO_CMD12;
        host->next_data.cookie = 1;
 
        platform_set_drvdata(pdev, host);
index 55a164f..a50c205 100644 (file)
@@ -404,7 +404,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
                if (sc->ext_cd_irq &&
                    request_threaded_irq(sc->ext_cd_irq, NULL,
                                         sdhci_s3c_gpio_card_detect_thread,
-                                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                         dev_name(dev), sc) == 0) {
                        int status = gpio_get_value(sc->ext_cd_gpio);
                        if (pdata->ext_cd_gpio_invert)
index 1fe32df..423da81 100644 (file)
@@ -4,7 +4,7 @@
  * Support of SDHCI platform devices for spear soc family
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * Inspired by sdhci-pltfm.c
  *
@@ -289,5 +289,5 @@ static struct platform_driver sdhci_driver = {
 module_platform_driver(sdhci_driver);
 
 MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_LICENSE("GPL v2");
index e626732..f4b8b4d 100644 (file)
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
        }
 
        if (count >= 0xF) {
-               pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
-                          mmc_hostname(host->mmc), count, cmd->opcode);
+               DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+                   mmc_hostname(host->mmc), count, cmd->opcode);
                count = 0xE;
        }
 
index ae36d7e..551e316 100644 (file)
@@ -304,32 +304,17 @@ static void find_next_position(struct mtdoops_context *cxt)
 }
 
 static void mtdoops_do_dump(struct kmsg_dumper *dumper,
-               enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
-               const char *s2, unsigned long l2)
+                           enum kmsg_dump_reason reason)
 {
        struct mtdoops_context *cxt = container_of(dumper,
                        struct mtdoops_context, dump);
-       unsigned long s1_start, s2_start;
-       unsigned long l1_cpy, l2_cpy;
-       char *dst;
-
-       if (reason != KMSG_DUMP_OOPS &&
-           reason != KMSG_DUMP_PANIC)
-               return;
 
        /* Only dump oopses if dump_oops is set */
        if (reason == KMSG_DUMP_OOPS && !dump_oops)
                return;
 
-       dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
-       l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
-       l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
-
-       s2_start = l2 - l2_cpy;
-       s1_start = l1 - l1_cpy;
-
-       memcpy(dst, s1 + s1_start, l1_cpy);
-       memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+       kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+                            record_size - MTDOOPS_HEADER_SIZE, NULL);
 
        /* Panics must be written immediately */
        if (reason != KMSG_DUMP_OOPS)
@@ -375,6 +360,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
                return;
        }
 
+       cxt->dump.max_reason = KMSG_DUMP_OOPS;
        cxt->dump.dump = mtdoops_do_dump;
        err = kmsg_dump_register(&cxt->dump);
        if (err) {
index 41371ba..f3f6cfe 100644 (file)
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 static int cafe_device_ready(struct mtd_info *mtd)
 {
        struct cafe_priv *cafe = mtd->priv;
-       int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
+       int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
        uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
 
        cafe_writel(cafe, irqs, NAND_IRQ);
index a05b7b4..a6cad5c 100644 (file)
@@ -920,12 +920,12 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                 */
                memset(chip->oob_poi, ~0, mtd->oobsize);
                chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
-
-               read_page_swap_end(this, buf, mtd->writesize,
-                               this->payload_virt, this->payload_phys,
-                               nfc_geo->payload_size,
-                               payload_virt, payload_phys);
        }
+
+       read_page_swap_end(this, buf, mtd->writesize,
+                       this->payload_virt, this->payload_phys,
+                       nfc_geo->payload_size,
+                       payload_virt, payload_phys);
 exit_nfc:
        return ret;
 }
index c58e6a9..6acc790 100644 (file)
@@ -273,6 +273,26 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
 
 static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
 
+static void memcpy32_fromio(void *trg, const void __iomem  *src, size_t size)
+{
+       int i;
+       u32 *t = trg;
+       const __iomem u32 *s = src;
+
+       for (i = 0; i < (size >> 2); i++)
+               *t++ = __raw_readl(s++);
+}
+
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+       int i;
+       u32 __iomem *t = trg;
+       const u32 *s = src;
+
+       for (i = 0; i < (size >> 2); i++)
+               __raw_writel(*s++, t++);
+}
+
 static int check_int_v3(struct mxc_nand_host *host)
 {
        uint32_t tmp;
@@ -519,7 +539,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
 
        wait_op_done(host, true);
 
-       memcpy_fromio(host->data_buf, host->main_area0, 16);
+       memcpy32_fromio(host->data_buf, host->main_area0, 16);
 }
 
 /* Request the NANDFC to perform a read of the NAND device ID. */
@@ -535,7 +555,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
        /* Wait for operation to complete */
        wait_op_done(host, true);
 
-       memcpy_fromio(host->data_buf, host->main_area0, 16);
+       memcpy32_fromio(host->data_buf, host->main_area0, 16);
 
        if (this->options & NAND_BUSWIDTH_16) {
                /* compress the ID info */
@@ -797,16 +817,16 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
 
        if (bfrom) {
                for (i = 0; i < n - 1; i++)
-                       memcpy_fromio(d + i * j, s + i * t, j);
+                       memcpy32_fromio(d + i * j, s + i * t, j);
 
                /* the last section */
-               memcpy_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
+               memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
        } else {
                for (i = 0; i < n - 1; i++)
-                       memcpy_toio(&s[i * t], &d[i * j], j);
+                       memcpy32_toio(&s[i * t], &d[i * j], j);
 
                /* the last section */
-               memcpy_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+               memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
        }
 }
 
@@ -1070,7 +1090,8 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
 
                host->devtype_data->send_page(mtd, NFC_OUTPUT);
 
-               memcpy_fromio(host->data_buf, host->main_area0, mtd->writesize);
+               memcpy32_fromio(host->data_buf, host->main_area0,
+                               mtd->writesize);
                copy_spare(mtd, true);
                break;
 
@@ -1086,7 +1107,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
                break;
 
        case NAND_CMD_PAGEPROG:
-               memcpy_toio(host->main_area0, host->data_buf, mtd->writesize);
+               memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
                copy_spare(mtd, false);
                host->devtype_data->send_page(mtd, NFC_INPUT);
                host->devtype_data->send_cmd(host, command, true);
index d47586c..a11253a 100644 (file)
@@ -3501,6 +3501,13 @@ int nand_scan_tail(struct mtd_info *mtd)
        /* propagate ecc info to mtd_info */
        mtd->ecclayout = chip->ecc.layout;
        mtd->ecc_strength = chip->ecc.strength;
+       /*
+        * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
+        * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+        * properly set.
+        */
+       if (!mtd->bitflip_threshold)
+               mtd->bitflip_threshold = mtd->ecc_strength;
 
        /* Check, if we should skip the bad block table scan */
        if (chip->options & NAND_SKIP_BBTSCAN)
index 6cc8fbf..cf0cd31 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -546,12 +546,6 @@ static char *get_partition_name(int i)
        return kstrdup(buf, GFP_KERNEL);
 }
 
-static uint64_t divide(uint64_t n, uint32_t d)
-{
-       do_div(n, d);
-       return n;
-}
-
 /*
  * Initialize the nandsim structure.
  *
@@ -580,7 +574,7 @@ static int init_nandsim(struct mtd_info *mtd)
        ns->geom.oobsz    = mtd->oobsize;
        ns->geom.secsz    = mtd->erasesize;
        ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
-       ns->geom.pgnum    = divide(ns->geom.totsz, ns->geom.pgsz);
+       ns->geom.pgnum    = div_u64(ns->geom.totsz, ns->geom.pgsz);
        ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
        ns->geom.secshift = ffs(ns->geom.secsz) - 1;
        ns->geom.pgshift  = chip->page_shift;
@@ -921,7 +915,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
 
        if (!rptwear)
                return 0;
-       wear_eb_count = divide(mtd->size, mtd->erasesize);
+       wear_eb_count = div_u64(mtd->size, mtd->erasesize);
        mem = wear_eb_count * sizeof(unsigned long);
        if (mem / sizeof(unsigned long) != wear_eb_count) {
                NS_ERR("Too many erase blocks for wear reporting\n");
index 09d4f8d..7c13803 100644 (file)
@@ -264,7 +264,7 @@ static struct dentry *dfs_rootdir;
  */
 int ubi_debugfs_init(void)
 {
-       if (!IS_ENABLED(DEBUG_FS))
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;
 
        dfs_rootdir = debugfs_create_dir("ubi", NULL);
@@ -284,7 +284,7 @@ int ubi_debugfs_init(void)
  */
 void ubi_debugfs_exit(void)
 {
-       if (IS_ENABLED(DEBUG_FS))
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
                debugfs_remove(dfs_rootdir);
 }
 
@@ -407,7 +407,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
        struct dentry *dent;
        struct ubi_debug_info *d = ubi->dbg;
 
-       if (!IS_ENABLED(DEBUG_FS))
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;
 
        n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
@@ -477,6 +477,6 @@ out:
  */
 void ubi_debugfs_exit_dev(struct ubi_device *ubi)
 {
-       if (IS_ENABLED(DEBUG_FS))
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
                debugfs_remove_recursive(ubi->dbg->dfs_dir);
 }
index 3680aa2..2cf084e 100644 (file)
@@ -6,7 +6,7 @@
 #include "bonding.h"
 #include "bond_alb.h"
 
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
index b9c2ae6..2ee7699 100644 (file)
@@ -3227,6 +3227,12 @@ static int bond_master_netdev_event(unsigned long event,
        switch (event) {
        case NETDEV_CHANGENAME:
                return bond_event_changename(event_bond);
+       case NETDEV_UNREGISTER:
+               bond_remove_proc_entry(event_bond);
+               break;
+       case NETDEV_REGISTER:
+               bond_create_proc_entry(event_bond);
+               break;
        default:
                break;
        }
@@ -4411,8 +4417,6 @@ static void bond_uninit(struct net_device *bond_dev)
 
        bond_work_cancel_all(bond);
 
-       bond_remove_proc_entry(bond);
-
        bond_debug_unregister(bond);
 
        __hw_addr_flush(&bond->mc_list);
@@ -4814,7 +4818,6 @@ static int bond_init(struct net_device *bond_dev)
 
        bond_set_lockdep_class(bond_dev);
 
-       bond_create_proc_entry(bond);
        list_add_tail(&bond->bond_list, &bn->dev_list);
 
        bond_prepare_sysfs_group(bond);
index ad284ba..3cea38d 100644 (file)
@@ -150,14 +150,25 @@ static void bond_info_show_master(struct seq_file *seq)
        }
 }
 
+static const char *bond_slave_link_status(s8 link)
+{
+       static const char * const status[] = {
+               [BOND_LINK_UP] = "up",
+               [BOND_LINK_FAIL] = "going down",
+               [BOND_LINK_DOWN] = "down",
+               [BOND_LINK_BACK] = "going back",
+       };
+
+       return status[link];
+}
+
 static void bond_info_show_slave(struct seq_file *seq,
                                 const struct slave *slave)
 {
        struct bonding *bond = seq->private;
 
        seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
-       seq_printf(seq, "MII Status: %s\n",
-                  (slave->link == BOND_LINK_UP) ?  "up" : "down");
+       seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
        if (slave->speed == SPEED_UNKNOWN)
                seq_printf(seq, "Speed: %s\n", "Unknown");
        else
index 1520814..4a27adb 100644 (file)
@@ -693,8 +693,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
                         */
                        memcpy(rx_buf, (u8 *)piggy_desc,
                                        CFHSI_DESC_SHORT_SZ);
-                       /* Mark no embedded frame here */
-                       piggy_desc->offset = 0;
                        if (desc_pld_len == -EPROTO)
                                goto out_of_sync;
                }
@@ -737,6 +735,8 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
                        /* Extract any payload in piggyback descriptor. */
                        if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
                                goto out_of_sync;
+                       /* Mark no embedded frame after extracting it */
+                       piggy_desc->offset = 0;
                }
        }
 
@@ -1178,6 +1178,7 @@ int cfhsi_probe(struct platform_device *pdev)
                dev_err(&ndev->dev, "%s: Registration error: %d.\n",
                        __func__, res);
                free_netdev(ndev);
+               return -ENODEV;
        }
        /* Add CAIF HSI device to list. */
        spin_lock(&cfhsi_list_lock);
index 8dc84d6..86cd532 100644 (file)
@@ -590,8 +590,8 @@ static void c_can_chip_config(struct net_device *dev)
        priv->write_reg(priv, &priv->regs->control,
                        CONTROL_ENABLE_AR);
 
-       if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
-                                       CAN_CTRLMODE_LOOPBACK)) {
+       if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+           (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
                /* loopback + silent mode : useful for hot self-test */
                priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
                                CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
index 38c0690..81d4741 100644 (file)
@@ -939,12 +939,12 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
                return PTR_ERR(pinctrl);
 
        if (pdev->dev.of_node) {
-               const u32 *clock_freq_p;
+               const __be32 *clock_freq_p;
 
                clock_freq_p = of_get_property(pdev->dev.of_node,
                                                "clock-frequency", NULL);
                if (clock_freq_p)
-                       clock_freq = *clock_freq_p;
+                       clock_freq = be32_to_cpup(clock_freq_p);
        }
 
        if (!clock_freq) {
index 9cc1570..1f78b63 100644 (file)
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
        if ((phy_data & BMSR_LSTATUS) == 0) {
                /* link down */
                netif_carrier_off(netdev);
-               netif_stop_queue(netdev);
                hw->hibernate = true;
                if (atl1c_reset_mac(hw) != 0)
                        if (netif_msg_hw(adapter))
index 46b8b7d..d09c6b5 100644 (file)
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
                        dma_unmap_single(bp->sdev->dma_dev, mapping,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
-               skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+               skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                             DMA_TO_DEVICE);
 
-               bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+               bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;
 
index ac7b744..1fa4927 100644 (file)
@@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        int k, last;
 
                        if (skb == NULL) {
-                               j++;
+                               j = NEXT_TX_BD(j);
                                continue;
                        }
 
@@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        tx_buf->skb = NULL;
 
                        last = tx_buf->nr_frags;
-                       j++;
-                       for (k = 0; k < last; k++, j++) {
+                       j = NEXT_TX_BD(j);
+                       for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
                                tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
                                dma_unmap_page(&bp->pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
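
Editor's note: the bnx2 TX ring reserves the last descriptor of each page as a link to the next page, so advancing the index with a plain j++ eventually lands on a link slot and unmaps a buffer that was never mapped; NEXT_TX_BD() steps over those slots. A simplified sketch of that kind of index advance (geometry and names are illustrative, not the driver's actual macro):

    /* Illustrative ring: 256 descriptors per page, the last slot of each
     * page is a "next page" link and never carries data. */
    #define DESC_PER_PAGE   256

    static unsigned int next_data_idx(unsigned int idx)
    {
            idx++;
            if ((idx % DESC_PER_PAGE) == DESC_PER_PAGE - 1)
                    idx++;          /* skip the link descriptor */
            return idx;
    }

    /* usage, mirroring the loop above:
     *   for (k = 0; k < last; k++, j = next_data_idx(j))
     *           ...unmap tx_buf_ring[ring_idx(j)]...
     */
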
index cbc56f2..8098eea 100644 (file)
@@ -190,7 +190,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 
                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
-                   (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+                   (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
                        netif_tx_wake_queue(txq);
 
                __netif_tx_unlock(txq);
@@ -2516,8 +2516,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
  */
 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
                                   struct bnx2x_fp_txdata *txdata,
@@ -3171,7 +3169,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        txdata->tx_bd_prod += nbd;
 
-       if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+       if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
                netif_tx_stop_queue(txq);
 
                /* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3180,7 +3178,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                smp_mb();
 
                fp->eth_q_stats.driver_xoff++;
-               if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+               if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
                        netif_tx_wake_queue(txq);
        }
        txdata->tx_pkt++;
index a3fb721..6e7d5c0 100644 (file)
@@ -40,6 +40,7 @@
 #define I2C_BSC0                       0
 #define I2C_BSC1                       1
 #define I2C_WA_RETRY_CNT               3
+#define I2C_WA_PWR_ITER                        (I2C_WA_RETRY_CNT - 1)
 #define MCPR_IMC_COMMAND_READ_OP       1
 #define MCPR_IMC_COMMAND_WRITE_OP      2
 
@@ -7659,6 +7660,28 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        return -EINVAL;
 }
 
+static void bnx2x_warpcore_power_module(struct link_params *params,
+                                       struct bnx2x_phy *phy,
+                                       u8 power)
+{
+       u32 pin_cfg;
+       struct bnx2x *bp = params->bp;
+
+       pin_cfg = (REG_RD(bp, params->shmem_base +
+                         offsetof(struct shmem_region,
+                       dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+                       PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+                       PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+       if (pin_cfg == PIN_CFG_NA)
+               return;
+       DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+                      power, pin_cfg);
+       /* Low ==> corresponding SFP+ module is powered
+        * high ==> the SFP+ module is powered down
+        */
+       bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                                                 struct link_params *params,
                                                 u16 addr, u8 byte_cnt,
@@ -7678,6 +7701,12 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        /* 4 byte aligned address */
        addr32 = addr & (~0x3);
        do {
+               if (cnt == I2C_WA_PWR_ITER) {
+                       bnx2x_warpcore_power_module(params, phy, 0);
+                       /* Note that 100us is not enough here */
+                       usleep_range(1000, 1000);
+                       bnx2x_warpcore_power_module(params, phy, 1);
+               }
                rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
                                    data_array);
        } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -8200,29 +8229,6 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
                bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
 }
 
-static void bnx2x_warpcore_power_module(struct link_params *params,
-                                       struct bnx2x_phy *phy,
-                                       u8 power)
-{
-       u32 pin_cfg;
-       struct bnx2x *bp = params->bp;
-
-       pin_cfg = (REG_RD(bp, params->shmem_base +
-                         offsetof(struct shmem_region,
-                       dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
-                       PORT_HW_CFG_E3_PWR_DIS_MASK) >>
-                       PORT_HW_CFG_E3_PWR_DIS_SHIFT;
-
-       if (pin_cfg == PIN_CFG_NA)
-               return;
-       DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
-                      power, pin_cfg);
-       /* Low ==> corresponding SFP+ module is powered
-        * high ==> the SFP+ module is powered down
-        */
-       bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
-}
-
 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
                                    struct link_params *params)
 {
@@ -9748,7 +9754,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        msleep(1);
 
-       if (!(CHIP_IS_E1(bp)))
+       if (!(CHIP_IS_E1x(bp)))
                port = BP_PATH(bp);
        else
                port = params->port;
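
Editor's note: bnx2x_warpcore_power_module() is moved above its new caller and used as a recovery step: if the first SFP+ EEPROM reads on the I2C workaround path fail, the module is powered off, left down for about a millisecond, and powered back on before the final retry. A generic sketch of that retry-with-recovery shape, with hypothetical callback names (the driver's own helpers appear in the hunks above):

    #include <linux/delay.h>

    static int example_read_with_recovery(int (*read_fn)(void *ctx),
                                          void (*power_fn)(void *ctx, int on),
                                          void *ctx, int max_tries)
    {
            int rc, tries = 0;

            do {
                    if (tries == max_tries - 1) {
                            /* last chance: power-cycle the module first */
                            power_fn(ctx, 0);
                            usleep_range(1000, 2000);   /* 100us is too short */
                            power_fn(ctx, 1);
                    }
                    rc = read_fn(ctx);
            } while (rc != 0 && ++tries < max_tries);

            return rc;
    }
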
index c95e7b5..2c89d17 100644 (file)
@@ -534,7 +534,8 @@ int cnic_unregister_driver(int ulp_type)
        }
 
        if (atomic_read(&ulp_ops->ref_count) != 0)
-               netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+               pr_warn("%s: Failed waiting for ref count to go to zero\n",
+                       __func__);
        return 0;
 
 out_unlock:
@@ -1053,12 +1054,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
        uinfo = &udev->cnic_uinfo;
 
-       uinfo->mem[0].addr = dev->netdev->base_addr;
+       uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
        uinfo->mem[0].internal_addr = dev->regview;
-       uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
        uinfo->mem[0].memtype = UIO_MEM_PHYS;
 
        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+               uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+                                                    TX_MAX_TSS_RINGS + 1);
                uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
                                        PAGE_MASK;
                if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1070,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
                uinfo->name = "bnx2_cnic";
        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+               uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
                uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
                        PAGE_MASK;
                uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
index 8d06ea3..921c208 100644 (file)
@@ -122,15 +122,15 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
                        goto done;
 
                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
-                       dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
-                               "permitted to execute this cmd (opcode %d)\n",
-                               opcode);
+                       dev_warn(&adapter->pdev->dev,
+                                "opcode %d-%d is not permitted\n",
+                                opcode, subsystem);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;
-                       dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
-                               "status %d, extd-status %d\n",
-                               opcode, compl_status, extd_status);
+                       dev_err(&adapter->pdev->dev,
+                               "opcode %d-%d failed:status %d-%d\n",
+                               opcode, subsystem, compl_status, extd_status);
                }
        }
 done:
index 9625bf4..b3f3fc3 100644 (file)
@@ -1566,7 +1566,7 @@ struct be_hw_stats_v1 {
        u32 rsvd0[BE_TXP_SW_SZ];
        struct be_erx_stats_v1 erx;
        struct be_pmem_stats pmem;
-       u32 rsvd1[3];
+       u32 rsvd1[18];
 };
 
 struct be_cmd_req_get_stats_v1 {
index fdb50ce..501dfa9 100644 (file)
@@ -3237,7 +3237,7 @@ static void be_netdev_init(struct net_device *netdev)
 
        netdev->flags |= IFF_MULTICAST;
 
-       netif_set_gso_max_size(netdev, 65535);
+       netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
 
        netdev->netdev_ops = &be_netdev_ops;
 
index 0741ade..ab1d80f 100644 (file)
@@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
        if (priv->mode == MQ_MG_MODE) {
                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
-                       if (likely(priv->tx_queue[i]->txcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->tx_queue[i]->txcoalescing))
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
-                       }
                }
 
                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
-                       if (likely(priv->rx_queue[i]->rxcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->rx_queue[i]->rxcoalescing))
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
-                       }
                }
        }
 }
@@ -2065,10 +2063,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        return NETDEV_TX_OK;
                }
 
-               /* Steal sock reference for processing TX time stamps */
-               swap(skb_new->sk, skb->sk);
-               swap(skb_new->destructor, skb->destructor);
-               kfree_skb(skb);
+               if (skb->sk)
+                       skb_set_owner_w(skb_new, skb->sk);
+               consume_skb(skb);
                skb = skb_new;
        }
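
Editor's note: instead of hand-swapping skb->sk and skb->destructor onto the reallocated skb, the new code uses skb_set_owner_w(), which transfers write ownership (socket pointer, destructor and sk_wmem_alloc accounting) in one step, and consume_skb(), which frees the old skb without counting it as a drop. A minimal sketch of that pattern, assuming the usual net/sock.h helpers; the function name is illustrative:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    /* Replace an skb that needs more headroom with a copy, keeping the
     * sending socket's write accounting attached to the new skb. */
    static struct sk_buff *example_replace_skb(struct sk_buff *skb,
                                               unsigned int headroom)
    {
            struct sk_buff *nskb = skb_realloc_headroom(skb, headroom);

            if (!nskb)
                    return NULL;

            if (skb->sk)
                    skb_set_owner_w(nskb, skb->sk); /* charge the socket */
            consume_skb(skb);                       /* not an error drop */

            return nskb;
    }
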
 
index 79b07ec..0cafe4f 100644 (file)
@@ -122,8 +122,10 @@ config IGB_DCA
 
 config IGB_PTP
        bool "PTP Hardware Clock (PHC)"
-       default y
-       depends on IGB && PTP_1588_CLOCK
+       default n
+       depends on IGB && EXPERIMENTAL
+       select PPS
+       select PTP_1588_CLOCK
        ---help---
          Say Y here if you want to use PTP Hardware Clock (PHC) in the
          driver.  Only the basic clock operations have been implemented.
@@ -223,7 +225,9 @@ config IXGBE_DCB
 config IXGBE_PTP
        bool "PTP Clock Support"
        default n
-       depends on IXGBE && PTP_1588_CLOCK
+       depends on IXGBE && EXPERIMENTAL
+       select PPS
+       select PTP_1588_CLOCK
        ---help---
          Say Y here if you want support for 1588 Timestamping with a
          PHC device, using the PTP 1588 Clock support. This is
index 36db4df..1f063dc 100644 (file)
@@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
        ctrl = er32(CTRL);
        status = er32(STATUS);
        rxcw = er32(RXCW);
+       /* SYNCH bit and IV bit are sticky */
+       udelay(10);
+       rxcw = er32(RXCW);
 
        if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
 
index 351a409..76edbc1 100644 (file)
 #define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
 #define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
 
index 238ab2f..e3a7b07 100644 (file)
@@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
  **/
 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
-       u16 phy_reg;
-       u32 phy_id;
+       u16 phy_reg = 0;
+       u32 phy_id = 0;
+       s32 ret_val;
+       u16 retry_count;
+
+       for (retry_count = 0; retry_count < 2; retry_count++) {
+               ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+               if (ret_val || (phy_reg == 0xFFFF))
+                       continue;
+               phy_id = (u32)(phy_reg << 16);
 
-       e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
-       phy_id = (u32)(phy_reg << 16);
-       e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
-       phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+               ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+               if (ret_val || (phy_reg == 0xFFFF)) {
+                       phy_id = 0;
+                       continue;
+               }
+               phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+               break;
+       }
 
        if (hw->phy.id) {
                if (hw->phy.id == phy_id)
                        return true;
-       } else {
-               if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
-                       hw->phy.id = phy_id;
+       } else if (phy_id) {
+               hw->phy.id = phy_id;
+               hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
                return true;
        }
 
-       return false;
+       /*
+        * In case the PHY needs to be in mdio slow mode,
+        * set slow mode and try to get the PHY id again.
+        */
+       hw->phy.ops.release(hw);
+       ret_val = e1000_set_mdio_slow_mode_hv(hw);
+       if (!ret_val)
+               ret_val = e1000e_get_phy_id(hw);
+       hw->phy.ops.acquire(hw);
+
+       return !ret_val;
 }
 
 /**
index 31d37a2..623e30b 100644 (file)
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-                             __le16 csum, struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
        if (status & E1000_RXD_STAT_IXSM)
                return;
 
-       /* TCP/UDP checksum error bit is set */
-       if (errors & E1000_RXD_ERR_TCPE) {
+       /* TCP/UDP checksum error bit or IP checksum error bit is set */
+       if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                return;
 
        /* It must be a TCP or UDP packet with a valid checksum */
-       if (status & E1000_RXD_STAT_TCPCS) {
-               /* TCP checksum is good */
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               /*
-                * IP fragment with UDP payload
-                * Hardware complements the payload checksum, so we undo it
-                * and then put the value in host order for further stack use.
-                */
-               __sum16 sum = (__force __sum16)swab16((__force u16)csum);
-               skb->csum = csum_unfold(~sum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-       }
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_good++;
 }
 
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                skb_put(skb, length);
 
                /* Receive Checksum Offload */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1341,8 +1328,7 @@ copydone:
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                        }
                }
 
-               /* Receive Checksum Offload XXX recompute due to CRC strip? */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               /* Receive Checksum Offload */
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
-       if (adapter->netdev->features & NETIF_F_RXCSUM) {
+       if (adapter->netdev->features & NETIF_F_RXCSUM)
                rxcsum |= E1000_RXCSUM_TUOFL;
-
-               /*
-                * IPv4 payload checksum for UDP fragments must be
-                * used in conjunction with packet-split.
-                */
-               if (adapter->rx_ps_pages)
-                       rxcsum |= E1000_RXCSUM_IPPCSE;
-       } else {
+       else
                rxcsum &= ~E1000_RXCSUM_TUOFL;
-               /* no need to clear IPPCSE as it defaults to 0 */
-       }
        ew32(RXCSUM, rxcsum);
 
        if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
-       if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-               if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-                       e_err("Jumbo Frames not supported.\n");
-                       return -EINVAL;
-               }
-
-               /*
-                * IP payload checksum (enabled with jumbos/packet-split when
-                * Rx checksum is enabled) and generation of RSS hash is
-                * mutually exclusive in the hardware.
-                */
-               if ((netdev->features & NETIF_F_RXCSUM) &&
-                   (netdev->features & NETIF_F_RXHASH)) {
-                       e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
-                       return -EINVAL;
-               }
+       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+           !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+               e_err("Jumbo Frames not supported.\n");
+               return -EINVAL;
        }
 
        /* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
                         NETIF_F_RXALL)))
                return 0;
 
-       /*
-        * IP payload checksum (enabled with jumbos/packet-split when Rx
-        * checksum is enabled) and generation of RSS hash is mutually
-        * exclusive in the hardware.
-        */
-       if (adapter->rx_ps_pages &&
-           (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-               e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
-               return -EINVAL;
-       }
-
        if (changed & NETIF_F_RXFCS) {
                if (features & NETIF_F_RXFCS) {
                        adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
index e650839..5e84eaa 100644 (file)
@@ -206,8 +206,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
                break;
        case e1000_i350:
-       case e1000_i210:
-       case e1000_i211:
                mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
                break;
        default:
index 8ce6706..90eef07 100644 (file)
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
-           ((ec->rx_coalesce_usecs > 3) &&
-            (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
-           (ec->rx_coalesce_usecs == 2))
-               return -EINVAL;
-
-       /* convert to rate of irq's per second */
-       if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+       if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+            (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+               adapter->requested_itr = 1000000000 /
+                                       (adapter->current_itr * 256);
+       } else if ((ec->rx_coalesce_usecs == 3) ||
+                  (ec->rx_coalesce_usecs == 2)) {
                adapter->current_itr = IGBVF_START_ITR;
                adapter->requested_itr = ec->rx_coalesce_usecs;
-       } else {
-               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+       } else if (ec->rx_coalesce_usecs == 0) {
+               /*
+                * The user's desire is to turn off interrupt throttling
+                * altogether, but due to HW limitations, we can't do that.
+                * Instead we set a very small value in EITR, which would
+                * allow ~967k interrupts per second, but allow the adapter's
+                * internal clocking to still function properly.
+                */
+               adapter->current_itr = 4;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
-       }
+       } else
+               return -EINVAL;
 
        writel(adapter->current_itr,
               hw->hw_addr + adapter->rx_ring->itr_register);
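
Editor's note: the reordered branches make the mapping explicit: a value within [IGBVF_MIN_ITR_USECS, IGBVF_MAX_ITR_USECS] programs the throttle directly, 2 and 3 select the adaptive default, 0 picks the smallest throttle the hardware allows, and everything else is rejected. The bookkeeping rate is derived from the ITR value as 10^9 / (itr * 256). A small userspace illustration of that arithmetic (the unit interpretation is the driver's; the numbers are only an example):

    #include <stdio.h>

    int main(void)
    {
            unsigned int usecs = 10;                     /* ethtool rx-usecs */
            unsigned int itr   = usecs << 2;             /* register value */
            unsigned int rate  = 1000000000u / (itr * 256u);

            /* rx-usecs=10 -> itr=40 -> ~97656 interrupts per second */
            printf("rx-usecs=%u -> itr=%u -> ~%u interrupts/s\n",
                   usecs, itr, rate);
            return 0;
    }
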
index 3ef3c52..7af291e 100644 (file)
@@ -196,7 +196,7 @@ enum ixgbe_ring_state_t {
        __IXGBE_HANG_CHECK_ARMED,
        __IXGBE_RX_RSC_ENABLED,
        __IXGBE_RX_CSUM_UDP_ZERO_ERR,
-       __IXGBE_RX_FCOE_BUFSZ,
+       __IXGBE_RX_FCOE,
 };
 
 #define check_for_tx_hang(ring) \
@@ -290,7 +290,7 @@ struct ixgbe_ring_feature {
 #if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-       return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+       return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
 }
 #else
 #define ixgbe_rx_pg_order(_ring) 0
index af1a531..c377706 100644 (file)
@@ -634,7 +634,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                        f = &adapter->ring_feature[RING_F_FCOE];
                        if ((rxr_idx >= f->mask) &&
                            (rxr_idx < f->mask + f->indices))
-                               set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+                               set_bit(__IXGBE_RX_FCOE, &ring->state);
                }
 
 #endif /* IXGBE_FCOE */
index 17ad6a3..e242104 100644 (file)
@@ -1058,17 +1058,17 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
  * @rx_desc: advanced rx descriptor
  *
  * Returns : true if it is FCoE pkt
  */
-static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
                                    union ixgbe_adv_rx_desc *rx_desc)
 {
        __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 
-       return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+       return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
               ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
                (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
                             IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1148,7 +1148,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 
        /* alloc new page for storage */
        if (likely(!page)) {
-               page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
+               page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
                                   ixgbe_rx_pg_order(rx_ring));
                if (unlikely(!page)) {
                        rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1549,6 +1549,12 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
                skb->truesize -= ixgbe_rx_bufsz(rx_ring);
        }
 
+#ifdef IXGBE_FCOE
+       /* do not attempt to pad FCoE Frames as this will disrupt DDP */
+       if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
+               return false;
+
+#endif
        /* if skb_pad returns an error the skb was freed */
        if (unlikely(skb->len < 60)) {
                int pad_len = 60 - skb->len;
@@ -1775,7 +1781,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 #ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
-               if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+               if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
                        ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
                        if (!ddp_bytes) {
                                dev_kfree_skb_any(skb);
@@ -6641,6 +6647,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
                return -EINVAL;
        }
 
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               e_err(drv, "Enable failed, SR-IOV enabled\n");
+               return -EINVAL;
+       }
+
        /* Hardware supports up to 8 traffic classes */
        if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
            (hw->mac.type == ixgbe_mac_82598EB &&
index ddc6a4d..dcebd12 100644 (file)
@@ -708,6 +708,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 incval = 0;
+       u32 timinca = 0;
        u32 shift = 0;
        u32 cycle_speed;
        unsigned long flags;
@@ -730,8 +731,16 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
                break;
        }
 
-       /* Bail if the cycle speed didn't change */
-       if (adapter->cycle_speed == cycle_speed)
+       /*
+        * grab the current TIMINCA value from the register so that it can be
+        * double checked. If the register value has been cleared, it must be
+        * reset to the correct value for generating a cyclecounter. If
+        * TIMINCA is zero, the SYSTIME registers do not increment at all.
+        */
+       timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
+
+       /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
+       if (adapter->cycle_speed == cycle_speed && timinca)
                return;
 
        /* disable the SDP clock out */
index f69ec42..41e3225 100644 (file)
@@ -201,6 +201,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
 
+       if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+               return true;
+
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -969,8 +972,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
-               tx_ring->total_bytes = 0;
-               tx_ring->total_packets = 0;
                ixgbevf_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
@@ -994,16 +995,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbevf_ring  *rx_ring;
        int r_idx;
-       int i;
-
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = &(adapter->rx_ring[r_idx]);
-               rx_ring->total_bytes = 0;
-               rx_ring->total_packets = 0;
-               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
 
        if (!q_vector->rxr_count)
                return IRQ_HANDLED;
index 926d8aa..073b85b 100644 (file)
@@ -929,15 +929,20 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
                if (priv->rx_cq[i].buf)
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }
+
+       if (priv->base_tx_qpn) {
+               mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+               priv->base_tx_qpn = 0;
+       }
 }
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;
-       int base_tx_qpn, err;
+       int err;
 
-       err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+       err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
        if (err) {
                en_err(priv, "failed reserving range for TX rings\n");
                return err;
@@ -949,7 +954,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
                                      prof->tx_ring_size, i, TX))
                        goto err;
 
-               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
                                           prof->tx_ring_size, TXBB_SIZE))
                        goto err;
        }
@@ -969,7 +974,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
        en_err(priv, "Failed to allocate NIC resources\n");
-       mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
        return -ENOMEM;
 }
 
@@ -1204,9 +1208,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
        /* Configure port */
+       mlx4_en_calc_rx_buf(dev);
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-                                   MLX4_EN_MIN_MTU,
-                                   0, 0, 0, 0);
+                                   priv->rx_skb_size + ETH_FCS_LEN,
+                                   prof->tx_pause, prof->tx_ppp,
+                                   prof->rx_pause, prof->rx_ppp);
        if (err) {
                en_err(priv, "Failed setting port general configurations "
                       "for port %d, with error %d\n", priv->port, err);
index ee6f4fe..a0313de 100644 (file)
@@ -1975,6 +1975,8 @@ slave_start:
        if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
            !mlx4_is_mfunc(dev)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
+               dev->caps.num_comp_vectors = 1;
+               dev->caps.comp_pool        = 0;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
        }
index 6ae3509..225c20d 100644 (file)
@@ -495,6 +495,7 @@ struct mlx4_en_priv {
        int vids[128];
        bool wol;
        struct device *ddev;
+       int base_tx_qpn;
 
 #ifdef CONFIG_MLX4_EN_DCB
        struct ieee_ets ets;
index 46e77a2..ad98f4d 100644 (file)
@@ -479,7 +479,7 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 
        for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
                pfn = pci_info[i].id;
-               if (pfn > QLCNIC_MAX_PCI_FUNC) {
+               if (pfn >= QLCNIC_MAX_PCI_FUNC) {
                        ret = QL_STATUS_INVALID_PARAM;
                        goto err_eswitch;
                }
index 7260aa7..d7a04e0 100644 (file)
@@ -3894,6 +3894,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_22:
        case RTL_GIGA_MAC_VER_23:
        case RTL_GIGA_MAC_VER_24:
+       case RTL_GIGA_MAC_VER_34:
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        default:
index 667169b..79bf09b 100644 (file)
@@ -1011,7 +1011,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 }
 
 /* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;
@@ -1102,9 +1102,11 @@ static int sh_eth_rx(struct net_device *ndev)
        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
        if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
-               /* fix the values for the next receiving */
-               mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
-                                              sh_eth_read(ndev, RDLAR)) >> 4;
+               /* fix the values for the next receiving if RDE is set */
+               if (intr_status & EESR_RDE)
+                       mdp->cur_rx = mdp->dirty_rx =
+                               (sh_eth_read(ndev, RDFAR) -
+                                sh_eth_read(ndev, RDLAR)) >> 4;
                sh_eth_write(ndev, EDRRR_R, EDRRR);
        }
 
@@ -1273,7 +1275,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                        EESR_RTSF | /* short frame recv */
                        EESR_PRE  | /* PHY-LSI recv error */
                        EESR_CERF)){ /* recv frame CRC error */
-               sh_eth_rx(ndev);
+               sh_eth_rx(ndev, intr_status);
        }
 
        /* Tx Check */
index fb8377d..4b785e1 100644 (file)
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
                                                csum);
-
+               wmb();
                entry = (++priv->cur_tx) % txsize;
                desc = priv->dma_tx + entry;
 
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                                            len, DMA_TO_DEVICE);
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+               wmb();
                priv->hw->desc->set_tx_owner(desc);
                priv->tx_skbuff[entry] = NULL;
        } else {
index 51b3b68..ea3003e 100644 (file)
@@ -1212,6 +1212,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
                wmb();
                priv->hw->desc->set_tx_owner(desc);
+               wmb();
        }
 
        /* Interrupt on completition only for the latest segment */
@@ -1227,6 +1228,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* To avoid raise condition */
        priv->hw->desc->set_tx_owner(first);
+       wmb();
 
        priv->cur_tx++;
 
@@ -1290,6 +1292,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                }
                wmb();
                priv->hw->desc->set_rx_owner(p + entry);
+               wmb();
        }
 }
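
Editor's note: the added wmb() calls enforce two orderings: descriptor fields must be globally visible before the OWN bit hands the descriptor to the DMA engine, and the OWN bit must be visible before the hardware is poked to scan the ring again. A hedged sketch of the general pattern (descriptor layout, bit position and register are illustrative, not stmmac's):

    #include <linux/io.h>
    #include <linux/bitops.h>
    #include <asm/barrier.h>

    struct example_desc {
            u32 buf;
            u32 ctrl;
            u32 status;                 /* bit 31: owned by DMA */
    };

    #define DESC_OWN        BIT(31)

    static void example_queue_tx(struct example_desc *desc, u32 buf, u32 len,
                                 void __iomem *poll_reg)
    {
            desc->buf  = buf;
            desc->ctrl = len;

            wmb();                      /* fields visible before ownership flip */
            desc->status |= DESC_OWN;

            wmb();                      /* ownership visible before the doorbell */
            writel(1, poll_reg);
    }
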
 
index d614c37..3b5c457 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
index 39ea067..5c12018 100644 (file)
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
        struct mdio_mux_parent_bus *pb = cb->parent;
        int r;
 
-       mutex_lock(&pb->mii_bus->mdio_lock);
+       /* In theory multiple mdio_mux could be stacked, thus creating
+        * more than a single level of nesting.  But in practice,
+        * SINGLE_DEPTH_NESTING will cover the vast majority of use
+        * cases.  We use it, instead of trying to handle the general
+        * case.
+        */
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
        r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
        if (r)
                goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
 
        int r;
 
-       mutex_lock(&pb->mii_bus->mdio_lock);
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
        r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
        if (r)
                goto out;
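
Editor's note: every MDIO bus mutex shares one lock class, and a muxed child bus must take its parent's mdio_lock while its own is already held further up the call chain, which plain mutex_lock() reports as recursive locking under lockdep. mutex_lock_nested() with SINGLE_DEPTH_NESTING declares that single extra level of same-class nesting as intentional. A minimal sketch of the annotation (the structure is illustrative):

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    struct example_bus {
            struct mutex lock;          /* same lock class on every bus */
            struct example_bus *parent; /* NULL for the root bus */
    };

    static void example_nested_access(struct example_bus *child)
    {
            mutex_lock(&child->lock);

            /* One intentional level of same-class nesting; tell lockdep. */
            mutex_lock_nested(&child->parent->lock, SINGLE_DEPTH_NESTING);

            /* ... access the hardware through the parent bus ... */

            mutex_unlock(&child->parent->lock);
            mutex_unlock(&child->lock);
    }
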
index 590f902..9d6c80c 100644 (file)
@@ -161,7 +161,7 @@ static struct phy_driver ks8051_driver = {
 static struct phy_driver ks8001_driver = {
        .phy_id         = PHY_ID_KS8001,
        .name           = "Micrel KS8001 or KS8721",
-       .phy_id_mask    = 0x00fffff0,
+       .phy_id_mask    = 0x00ffffff,
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = kszphy_config_init,
@@ -174,7 +174,7 @@ static struct phy_driver ks8001_driver = {
 
 static struct phy_driver ksz9021_driver = {
        .phy_id         = PHY_ID_KSZ9021,
-       .phy_id_mask    = 0x000fff10,
+       .phy_id_mask    = 0x000ffffe,
        .name           = "Micrel KSZ9021 Gigabit PHY",
        .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
@@ -240,8 +240,8 @@ MODULE_AUTHOR("David J. Choi");
 MODULE_LICENSE("GPL");
 
 static struct mdio_device_id __maybe_unused micrel_tbl[] = {
-       { PHY_ID_KSZ9021, 0x000fff10 },
-       { PHY_ID_KS8001, 0x00fffff0 },
+       { PHY_ID_KSZ9021, 0x000ffffe },
+       { PHY_ID_KS8001, 0x00ffffff },
        { PHY_ID_KS8737, 0x00fffff0 },
        { PHY_ID_KS8041, 0x00fffff0 },
        { PHY_ID_KS8051, 0x00fffff0 },
index 964031e..a28a983 100644 (file)
@@ -59,6 +59,7 @@
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4   0x1297
+#define USB_PRODUCT_IPAD 0x129a
 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
 #define USB_PRODUCT_IPHONE_4S  0x12a0
 
@@ -100,6 +101,10 @@ static struct usb_device_id ipheth_table[] = {
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
                IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
        { USB_DEVICE_AND_INTERFACE_INFO(
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
index 3b20678..a051ced 100644 (file)
@@ -197,6 +197,10 @@ err:
 static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
+
+       /* can be called while disconnecting */
+       if (!dev)
+               return 0;
        return qmi_wwan_manage_power(dev, on);
 }
 
@@ -257,29 +261,6 @@ err:
        return rv;
 }
 
-/* Gobi devices uses identical class/protocol codes for all interfaces regardless
- * of function. Some of these are CDC ACM like and have the exact same endpoints
- * we are looking for. This leaves two possible strategies for identifying the
- * correct interface:
- *   a) hardcoding interface number, or
- *   b) use the fact that the wwan interface is the only one lacking additional
- *      (CDC functional) descriptors
- *
- * Let's see if we can get away with the generic b) solution.
- */
-static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf)
-{
-       int rv = -EINVAL;
-
-       /* ignore any interface with additional descriptors */
-       if (intf->cur_altsetting->extralen)
-               goto err;
-
-       rv = qmi_wwan_bind_shared(dev, intf);
-err:
-       return rv;
-}
-
 static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
 {
        struct usb_driver *subdriver = (void *)dev->data[0];
@@ -347,15 +328,15 @@ static const struct driver_info   qmi_wwan_shared = {
        .manage_power   = qmi_wwan_manage_power,
 };
 
-static const struct driver_info        qmi_wwan_gobi = {
-       .description    = "Qualcomm Gobi wwan/QMI device",
+static const struct driver_info        qmi_wwan_force_int0 = {
+       .description    = "Qualcomm WWAN/QMI device",
        .flags          = FLAG_WWAN,
-       .bind           = qmi_wwan_bind_gobi,
+       .bind           = qmi_wwan_bind_shared,
        .unbind         = qmi_wwan_unbind_shared,
        .manage_power   = qmi_wwan_manage_power,
+       .data           = BIT(0), /* interface whitelist bitmap */
 };
 
-/* ZTE suck at making USB descriptors */
 static const struct driver_info        qmi_wwan_force_int1 = {
        .description    = "Qualcomm WWAN/QMI device",
        .flags          = FLAG_WWAN,
@@ -365,6 +346,24 @@ static const struct driver_info    qmi_wwan_force_int1 = {
        .data           = BIT(1), /* interface whitelist bitmap */
 };
 
+static const struct driver_info qmi_wwan_force_int2 = {
+       .description    = "Qualcomm WWAN/QMI device",
+       .flags          = FLAG_WWAN,
+       .bind           = qmi_wwan_bind_shared,
+       .unbind         = qmi_wwan_unbind_shared,
+       .manage_power   = qmi_wwan_manage_power,
+       .data           = BIT(2), /* interface whitelist bitmap */
+};
+
+static const struct driver_info        qmi_wwan_force_int3 = {
+       .description    = "Qualcomm WWAN/QMI device",
+       .flags          = FLAG_WWAN,
+       .bind           = qmi_wwan_bind_shared,
+       .unbind         = qmi_wwan_unbind_shared,
+       .manage_power   = qmi_wwan_manage_power,
+       .data           = BIT(3), /* interface whitelist bitmap */
+};
+
 static const struct driver_info        qmi_wwan_force_int4 = {
        .description    = "Qualcomm WWAN/QMI device",
        .flags          = FLAG_WWAN,
@@ -390,16 +389,23 @@ static const struct driver_info   qmi_wwan_force_int4 = {
 static const struct driver_info        qmi_wwan_sierra = {
        .description    = "Sierra Wireless wwan/QMI device",
        .flags          = FLAG_WWAN,
-       .bind           = qmi_wwan_bind_gobi,
+       .bind           = qmi_wwan_bind_shared,
        .unbind         = qmi_wwan_unbind_shared,
        .manage_power   = qmi_wwan_manage_power,
        .data           = BIT(8) | BIT(19), /* interface whitelist bitmap */
 };
 
 #define HUAWEI_VENDOR_ID       0x12D1
+
+/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
+#define QMI_GOBI1K_DEVICE(vend, prod) \
+       USB_DEVICE(vend, prod), \
+       .driver_info = (unsigned long)&qmi_wwan_force_int3
+
+/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
 #define QMI_GOBI_DEVICE(vend, prod) \
        USB_DEVICE(vend, prod), \
-       .driver_info = (unsigned long)&qmi_wwan_gobi
+       .driver_info = (unsigned long)&qmi_wwan_force_int0
 
 static const struct usb_device_id products[] = {
        {       /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -501,6 +507,15 @@ static const struct usb_device_id products[] = {
                .bInterfaceProtocol = 0xff,
                .driver_info        = (unsigned long)&qmi_wwan_force_int4,
        },
+       {       /* ZTE MF60 */
+               .match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+               .idVendor           = 0x19d2,
+               .idProduct          = 0x1402,
+               .bInterfaceClass    = 0xff,
+               .bInterfaceSubClass = 0xff,
+               .bInterfaceProtocol = 0xff,
+               .driver_info        = (unsigned long)&qmi_wwan_force_int2,
+       },
        {       /* Sierra Wireless MC77xx in QMI mode */
                .match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
                .idVendor           = 0x1199,
@@ -510,20 +525,24 @@ static const struct usb_device_id products[] = {
                .bInterfaceProtocol = 0xff,
                .driver_info        = (unsigned long)&qmi_wwan_sierra,
        },
-       {QMI_GOBI_DEVICE(0x05c6, 0x9212)},      /* Acer Gobi Modem Device */
-       {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)},      /* HP un2400 Gobi Modem Device */
-       {QMI_GOBI_DEVICE(0x03f0, 0x371d)},      /* HP un2430 Mobile Broadband Module */
-       {QMI_GOBI_DEVICE(0x04da, 0x250d)},      /* Panasonic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x413c, 0x8172)},      /* Dell Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x1410, 0xa001)},      /* Novatel Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x0b05, 0x1776)},      /* Asus Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x19d2, 0xfff3)},      /* ONDA Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9001)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9002)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9202)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9203)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9222)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9009)},      /* Generic Gobi Modem device */
+
+       /* Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
+       {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},    /* HP un2400 Gobi Modem Device */
+       {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)},    /* HP un2430 Mobile Broadband Module */
+       {QMI_GOBI1K_DEVICE(0x04da, 0x250d)},    /* Panasonic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x413c, 0x8172)},    /* Dell Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa001)},    /* Novatel Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)},    /* Asus Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)},    /* ONDA Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9002)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9202)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9203)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)},    /* Generic Gobi Modem device */
+
+       /* Gobi 2000 and 3000 devices */
        {QMI_GOBI_DEVICE(0x413c, 0x8186)},      /* Dell Gobi 2000 Modem device (N0218, VU936) */
        {QMI_GOBI_DEVICE(0x05c6, 0x920b)},      /* Generic Gobi 2000 Modem device */
        {QMI_GOBI_DEVICE(0x05c6, 0x9225)},      /* Sony Gobi 2000 Modem device (N0279, VU730) */
index 9f58330..aba769d 100644 (file)
@@ -796,11 +796,13 @@ int usbnet_open (struct net_device *net)
        if (info->manage_power) {
                retval = info->manage_power(dev, 1);
                if (retval < 0)
-                       goto done;
+                       goto done_manage_power_error;
                usb_autopm_put_interface(dev->intf);
        }
        return retval;
 
+done_manage_power_error:
+       clear_bit(EVENT_DEV_OPEN, &dev->flags);
 done:
        usb_autopm_put_interface(dev->intf);
 done_nopm:
@@ -876,9 +878,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
 {
        struct usbnet *dev = netdev_priv(net);
 
-       strncpy (info->driver, dev->driver_name, sizeof info->driver);
-       strncpy (info->version, DRIVER_VERSION, sizeof info->version);
-       strncpy (info->fw_version, dev->driver_info->description,
+       strlcpy (info->driver, dev->driver_name, sizeof info->driver);
+       strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
+       strlcpy (info->fw_version, dev->driver_info->description,
                sizeof info->fw_version);
        usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
 }
@@ -1202,6 +1204,21 @@ deferred:
 }
 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
 
+static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
+{
+       struct urb      *urb;
+       int             i;
+
+       /* don't refill the queue all at once */
+       for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
+               urb = usb_alloc_urb(0, flags);
+               if (urb != NULL) {
+                       if (rx_submit(dev, urb, flags) == -ENOLINK)
+                               return;
+               }
+       }
+}
+
 /*-------------------------------------------------------------------------*/
 
 // tasklet (work deferred from completions, in_irq) or timer
@@ -1241,26 +1258,14 @@ static void usbnet_bh (unsigned long param)
                   !timer_pending (&dev->delay) &&
                   !test_bit (EVENT_RX_HALT, &dev->flags)) {
                int     temp = dev->rxq.qlen;
-               int     qlen = RX_QLEN (dev);
-
-               if (temp < qlen) {
-                       struct urb      *urb;
-                       int             i;
-
-                       // don't refill the queue all at once
-                       for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
-                               urb = usb_alloc_urb (0, GFP_ATOMIC);
-                               if (urb != NULL) {
-                                       if (rx_submit (dev, urb, GFP_ATOMIC) ==
-                                           -ENOLINK)
-                                               return;
-                               }
-                       }
+
+               if (temp < RX_QLEN(dev)) {
+                       rx_alloc_submit(dev, GFP_ATOMIC);
                        if (temp != dev->rxq.qlen)
                                netif_dbg(dev, link, dev->net,
                                          "rxqlen %d --> %d\n",
                                          temp, dev->rxq.qlen);
-                       if (dev->rxq.qlen < qlen)
+                       if (dev->rxq.qlen < RX_QLEN(dev))
                                tasklet_schedule (&dev->bh);
                }
                if (dev->txq.qlen < TX_QLEN (dev))
@@ -1513,6 +1518,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
                spin_lock_irq(&dev->txq.lock);
                /* don't autosuspend while transmitting */
                if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
+                       dev->suspend_count--;
                        spin_unlock_irq(&dev->txq.lock);
                        return -EBUSY;
                } else {
@@ -1569,6 +1575,13 @@ int usbnet_resume (struct usb_interface *intf)
                spin_unlock_irq(&dev->txq.lock);
 
                if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+                       /* handle remote wakeup ASAP */
+                       if (!dev->wait &&
+                               netif_device_present(dev->net) &&
+                               !timer_pending(&dev->delay) &&
+                               !test_bit(EVENT_RX_HALT, &dev->flags))
+                                       rx_alloc_submit(dev, GFP_KERNEL);
+
                        if (!(dev->txq.qlen >= TX_QLEN(dev)))
                                netif_tx_wake_all_queues(dev->net);
                        tasklet_schedule (&dev->bh);
index 520a4b2..a747c63 100644 (file)
@@ -7233,8 +7233,8 @@ static int airo_get_aplist(struct net_device *dev,
                }
        } else {
                dwrq->flags = 1; /* Should be define'd */
-               memcpy(extra + sizeof(struct sockaddr)*i,
-                      &qual,  sizeof(struct iw_quality)*i);
+               memcpy(extra + sizeof(struct sockaddr) * i, qual,
+                      sizeof(struct iw_quality) * i);
        }
        dwrq->length = i;
 
index c54b7d3..420d69b 100644 (file)
@@ -143,6 +143,7 @@ struct ath_common {
        u32 keymax;
        DECLARE_BITMAP(keymap, ATH_KEYMAX);
        DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+       DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
        enum ath_crypt_caps crypt_caps;
 
        unsigned int clockrate;
index fbaa309..44ad6fe 100644 (file)
@@ -1045,11 +1045,11 @@ ath5k_drain_tx_buffs(struct ath5k_hw *ah)
 
                                ath5k_txbuf_free_skb(ah, bf);
 
-                               spin_lock_bh(&ah->txbuflock);
+                               spin_lock(&ah->txbuflock);
                                list_move_tail(&bf->list, &ah->txbuf);
                                ah->txbuf_len++;
                                txq->txq_len--;
-                               spin_unlock_bh(&ah->txbuflock);
+                               spin_unlock(&ah->txbuflock);
                        }
                        txq->link = NULL;
                        txq->txq_poll_mark = false;
index a277cf6..4866550 100644 (file)
@@ -214,6 +214,7 @@ struct ath_frame_info {
        enum ath9k_key_type keytype;
        u8 keyix;
        u8 retries;
+       u8 rtscts_rate;
 };
 
 struct ath_buf_state {
index 2b8f61c..abbd6ef 100644 (file)
@@ -1496,6 +1496,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
                        priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
 
                if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
+                       ath9k_htc_choose_set_bssid(priv);
                        if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
                                ath9k_htc_start_ani(priv);
                        else if (priv->num_sta_assoc_vif == 0)
@@ -1503,13 +1504,11 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
-       if (changed & BSS_CHANGED_BSSID) {
+       if (changed & BSS_CHANGED_IBSS) {
                if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
                        common->curaid = bss_conf->aid;
                        memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
                        ath9k_htc_set_bssid(priv);
-               } else if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
-                       ath9k_htc_choose_set_bssid(priv);
                }
        }
 
index 7db1890..995ca8e 100644 (file)
@@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
                if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
-                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
                     !ah->is_pciexpress)) {
                        ah->config.serialize_regmode =
                                SER_REG_MODE_ON;
@@ -784,13 +784,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
 
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
 {
+       struct ath_common *common = ath9k_hw_common(ah);
+       int i = 0;
+
        REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
        udelay(100);
        REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
 
-       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
+
                udelay(100);
 
+               if (WARN_ON_ONCE(i >= 100)) {
+                       ath_err(common, "PLL4 measurement not done\n");
+                       break;
+               }
+
+               i++;
+       }
+
        return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
 }
 EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
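
The hunk above bounds a previously unbounded register poll. A minimal userspace sketch of the same bounded-polling pattern, under the assumption of a stubbed register read (read_reg(), MAX_TRIES, DELAY_US and the bit value are illustrative, not ath9k symbols):

#include <stdio.h>
#include <unistd.h>

#define PLL4_MEAS_DONE 0x8     /* illustrative bit, not the real register layout */
#define MAX_TRIES      100
#define DELAY_US       100

/* stand-in for REG_READ(ah, PLL4); pretend the bit never comes up */
static unsigned int read_reg(void)
{
        return 0;
}

int main(void)
{
        int i = 0;

        /* poll, but never for more than MAX_TRIES * DELAY_US microseconds */
        while (!(read_reg() & PLL4_MEAS_DONE)) {
                usleep(DELAY_US);
                if (i++ >= MAX_TRIES) {
                        fprintf(stderr, "measurement not done, giving up\n");
                        break;
                }
        }
        return 0;
}
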
index 4de4473..dac1a27 100644 (file)
@@ -971,6 +971,15 @@ void ath_hw_pll_work(struct work_struct *work)
                                            hw_pll_work.work);
        u32 pll_sqsum;
 
+       /*
+        * ensure that the PLL WAR is executed only
+        * after the STA is associated, or if
+        * beaconing has started on interfaces that
+        * use beacons.
+        */
+       if (!(sc->sc_flags & SC_OP_BEACONS))
+               return;
+
        if (AR_SREV_9485(sc->sc_ah)) {
 
                ath9k_ps_wakeup(sc);
@@ -1443,15 +1452,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
                }
        }
 
-       if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
-           ((vif->type == NL80211_IFTYPE_ADHOC) &&
-            sc->nvifs > 0)) {
-               ath_err(common, "Cannot create ADHOC interface when other"
-                       " interfaces already exist.\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
        ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
 
        sc->nvifs++;
@@ -1476,15 +1476,6 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
 
-       /* See if new interface type is valid. */
-       if ((new_type == NL80211_IFTYPE_ADHOC) &&
-           (sc->nvifs > 1)) {
-               ath_err(common, "When using ADHOC, it must be the only"
-                       " interface.\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
        if (ath9k_uses_beacons(new_type) &&
            !ath9k_uses_beacons(vif->type)) {
                if (sc->nbcnvifs >= ATH_BCBUF) {
index e1fcc68..0735aeb 100644 (file)
@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
                        __skb_unlink(skb, &rx_edma->rx_fifo);
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
-               } else {
-                       bf = NULL;
                }
+
+               bf = NULL;
        }
 
        *dest = bf;
@@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
         * descriptor does contain a valid key index. This has been observed
         * mostly with CCMP encryption.
         */
-       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+           !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
                rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
        if (!rx_stats->rs_datalen) {
index d59dd01..4d57139 100644 (file)
@@ -938,6 +938,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
+       struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
        int i;
        u8 rix = 0;
 
@@ -948,18 +949,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
 
        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
-
-       /*
-        * We check if Short Preamble is needed for the CTS rate by
-        * checking the BSS's global flag.
-        * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
-        */
-       rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
-       info->rtscts_rate = rate->hw_value;
-
-       if (tx_info->control.vif &&
-           tx_info->control.vif->bss_conf.use_short_preamble)
-               info->rtscts_rate |= rate->hw_value_short;
+       info->rtscts_rate = fi->rtscts_rate;
 
        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
@@ -1001,13 +991,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                }
 
                /* legacy rates */
+               rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;
 
-               rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
@@ -1776,10 +1766,22 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
        struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       const struct ieee80211_rate *rate;
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_node *an = NULL;
        enum ath9k_key_type keytype;
+       bool short_preamble = false;
+
+       /*
+        * We check if Short Preamble is needed for the CTS rate by
+        * checking the BSS's global flag.
+        * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
+        */
+       if (tx_info->control.vif &&
+           tx_info->control.vif->bss_conf.use_short_preamble)
+               short_preamble = true;
 
+       rate = ieee80211_get_rts_cts_rate(hw, tx_info);
        keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
 
        if (sta)
@@ -1794,6 +1796,9 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
                fi->keyix = ATH9K_TXKEYIX_INVALID;
        fi->keytype = keytype;
        fi->framelen = framelen;
+       fi->rtscts_rate = rate->hw_value;
+       if (short_preamble)
+               fi->rtscts_rate |= rate->hw_value_short;
 }
 
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
index 0e81904..5c54aa4 100644 (file)
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
                return -EIO;
 
        set_bit(idx, common->keymap);
+       if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+               set_bit(idx, common->ccmp_keymap);
+
        if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                set_bit(idx + 64, common->keymap);
                set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
                return;
 
        clear_bit(key->hw_key_idx, common->keymap);
+       clear_bit(key->hw_key_idx, common->ccmp_keymap);
        if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
                return;
 
index acd03a4..1b988f2 100644 (file)
@@ -3767,7 +3767,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
        if (prev_status >= B43_STAT_STARTED) {
                err = b43_wireless_core_start(up_dev);
                if (err) {
-                       b43err(wl, "Fatal: Coult not start device for "
+                       b43err(wl, "Fatal: Could not start device for "
                               "selected %s-GHz band\n",
                               band_to_string(chan->band));
                        b43_wireless_core_exit(up_dev);
index f1f8bd0..c8baf02 100644 (file)
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
        if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-               bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+               bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
index cd9c9bc..eae691e 100644 (file)
@@ -2633,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
        if (prev_status >= B43legacy_STAT_STARTED) {
                err = b43legacy_wireless_core_start(up_dev);
                if (err) {
-                       b43legacyerr(wl, "Fatal: Coult not start device for "
+                       b43legacyerr(wl, "Fatal: Could not start device for "
                               "newly selected %s-PHY mode\n",
                               phymode_to_string(new_mode));
                        b43legacy_wireless_core_exit(up_dev);
index 509301a..ff5d689 100644 (file)
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
                return 0;
        }
 
-       if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+       if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
                IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
                        key_flags);
                spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
        memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
        il->stations[sta_id].sta.key.key_flags =
            STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-       il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+       il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
        il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
        il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
index cbf2dc1..5d4807c 100644 (file)
@@ -4767,14 +4767,12 @@ il_bg_watchdog(unsigned long data)
                return;
 
        /* monitor and check for other stuck queues */
-       if (il_is_any_associated(il)) {
-               for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
-                       /* skip as we already checked the command queue */
-                       if (cnt == il->cmd_queue)
-                               continue;
-                       if (il_check_stuck_queue(il, cnt))
-                               return;
-               }
+       for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+               /* skip as we already checked the command queue */
+               if (cnt == il->cmd_queue)
+                       continue;
+               if (il_check_stuck_queue(il, cnt))
+                       return;
        }
 
        mod_timer(&il->watchdog,
index e7c157e..7f97dec 100644 (file)
@@ -2239,6 +2239,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
        return count;
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
 static ssize_t iwl_dbgfs_log_event_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
@@ -2276,6 +2277,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
 
        return count;
 }
+#endif
 
 static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
                                         char __user *user_buf,
@@ -2345,7 +2347,9 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
 DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
 DEBUGFS_READ_FILE_OPS(reply_tx_error);
 DEBUGFS_WRITE_FILE_OPS(echo_test);
+#ifdef CONFIG_IWLWIFI_DEBUG
 DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+#endif
 DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
 
 /*
@@ -2405,7 +2409,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
        DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
+#ifdef CONFIG_IWLWIFI_DEBUG
        DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
+#endif
 
        if (iwl_advanced_bt_coexist(priv))
                DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
index 3ee2313..0136803 100644 (file)
@@ -796,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
        switch (op) {
        case ADD:
                ret = iwlagn_mac_sta_add(hw, vif, sta);
+               if (ret)
+                       break;
+               /*
+                * Clear the in-progress flag, the AP station entry was added
+                * but we'll initialize LQ only when we've associated (which
+                * would also clear the in-progress flag). This is necessary
+                * in case we never initialize LQ because association fails.
+                */
+               spin_lock_bh(&priv->sta_lock);
+               priv->stations[iwl_sta_id(sta)].used &=
+                       ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_bh(&priv->sta_lock);
                break;
        case REMOVE:
                ret = iwlagn_mac_sta_remove(hw, vif, sta);
index 9c44088..900ee12 100644 (file)
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        else
                last_seq = priv->rx_seq[tid];
 
-       if (last_seq >= new_node->start_win)
+       if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+           last_seq >= new_node->start_win)
                new_node->start_win = last_seq + 1;
 
        new_node->win_size = win_size;
@@ -596,5 +597,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
-       memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+       mwifiex_reset_11n_rx_seq_num(priv);
 }
index f1bffeb..6c9815a 100644 (file)
 
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+       memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
+
 int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
                               u16 seqNum,
                               u16 tid, u8 *ta,
index 015fec3..5c7fd18 100644 (file)
@@ -958,6 +958,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
        case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
                /* firmware doesn't support this type of hidden SSID */
        default:
+               kfree(bss_cfg);
                return -EINVAL;
        }
 
@@ -1484,7 +1485,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        struct wireless_dev *wdev;
 
        if (!adapter)
-               return NULL;
+               return ERR_PTR(-EFAULT);
 
        switch (type) {
        case NL80211_IFTYPE_UNSPECIFIED:
@@ -1494,12 +1495,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                if (priv->bss_mode) {
                        wiphy_err(wiphy,
                                  "cannot create multiple sta/adhoc ifaces\n");
-                       return NULL;
+                       return ERR_PTR(-EINVAL);
                }
 
                wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
                if (!wdev)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
                wdev->wiphy = wiphy;
                priv->wdev = wdev;
@@ -1522,12 +1523,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
                if (priv->bss_mode) {
                        wiphy_err(wiphy, "Can't create multiple AP interfaces");
-                       return NULL;
+                       return ERR_PTR(-EINVAL);
                }
 
                wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
                if (!wdev)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
                priv->wdev = wdev;
                wdev->wiphy = wiphy;
@@ -1544,14 +1545,15 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                break;
        default:
                wiphy_err(wiphy, "type not supported\n");
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
                              ether_setup, 1);
        if (!dev) {
                wiphy_err(wiphy, "no memory available for netdevice\n");
-               goto error;
+               priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+               return ERR_PTR(-ENOMEM);
        }
 
        mwifiex_init_priv_params(priv, dev);
@@ -1582,7 +1584,9 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        /* Register network device */
        if (register_netdevice(dev)) {
                wiphy_err(wiphy, "cannot register virtual network device\n");
-               goto error;
+               free_netdev(dev);
+               priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+               return ERR_PTR(-EFAULT);
        }
 
        sema_init(&priv->async_sem, 1);
@@ -1594,12 +1598,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        mwifiex_dev_debugfs_init(priv);
 #endif
        return dev;
-error:
-       if (dev && (dev->reg_state == NETREG_UNREGISTERED))
-               free_netdev(dev);
-       priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
-
-       return NULL;
 }
 EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
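
mwifiex_add_virtual_intf() now tells its caller why it failed instead of returning a bare NULL, using the kernel's ERR_PTR/IS_ERR/PTR_ERR encoding from <linux/err.h>. A toy userspace re-implementation of that idiom, for illustration only (MAX_ERRNO and add_intf() are simplified stand-ins, not the real kernel definitions):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* toy versions of the <linux/err.h> helpers */
static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long PTR_ERR(const void *p)  { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for the interface-creation path */
static void *add_intf(int fail)
{
        if (fail)
                return ERR_PTR(-EINVAL);   /* caller learns *why* it failed */
        return "netdev";                   /* stand-in for a real net_device */
}

int main(void)
{
        void *dev = add_intf(1);

        if (IS_ERR(dev))
                printf("add_intf failed: %ld\n", PTR_ERR(dev));
        return 0;
}
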
 
index ceb82cd..383820a 100644 (file)
@@ -213,6 +213,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
                /* save assoc resp ie index after auto-indexing */
                *assoc_idx = *((u16 *)pos);
 
+       kfree(ap_custom_ie);
        return ret;
 }
 
index e037747..fc8a9bf 100644 (file)
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
                adapter->event_cause = *(u32 *) skb->data;
 
-               skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
-
                if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
-                       memcpy(adapter->event_body, skb->data, skb->len);
+                       memcpy(adapter->event_body,
+                              skb->data + MWIFIEX_EVENT_HEADER_LEN,
+                              skb->len);
 
                /* event cause has been saved to adapter->event_cause */
                adapter->event_received = true;
index 4ace5a3..11e731f 100644 (file)
@@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_UAP_STA_ASSOC:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                memset(&sinfo, 0, sizeof(sinfo));
-               event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
+               event = (struct mwifiex_assoc_event *)
+                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
                        len = -1;
 
@@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                 GFP_KERNEL);
                break;
        case EVENT_UAP_STA_DEAUTH:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-               cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
-                                GFP_KERNEL);
+               cfg80211_del_sta(priv->netdev, adapter->event_body +
+                                MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
                break;
        case EVENT_UAP_BSS_IDLE:
                priv->media_connected = false;
index e2faec4..cecb272 100644 (file)
@@ -161,15 +161,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                goto done;
 
        for (i = 0; i < adapter->priv_num; i++) {
-
                tpriv = adapter->priv[i];
 
-               if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) &&
-                   (tpriv->media_connected)) {
-                       if (netif_queue_stopped(tpriv->netdev))
-                               mwifiex_wake_up_net_dev_queue(tpriv->netdev,
-                                                             adapter);
-               }
+               if (tpriv->media_connected &&
+                   netif_queue_stopped(tpriv->netdev))
+                       mwifiex_wake_up_net_dev_queue(tpriv->netdev, adapter);
        }
 done:
        dev_kfree_skb_any(skb);
index 8173ab6..89f9a2a 100644 (file)
@@ -27,6 +27,17 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
                              struct cfg80211_ap_settings *params) {
        int i;
 
+       if (!params->privacy) {
+               bss_config->protocol = PROTOCOL_NO_SECURITY;
+               bss_config->key_mgmt = KEY_MGMT_NONE;
+               bss_config->wpa_cfg.length = 0;
+               priv->sec_info.wep_enabled = 0;
+               priv->sec_info.wpa_enabled = 0;
+               priv->sec_info.wpa2_enabled = 0;
+
+               return 0;
+       }
+
        switch (params->auth_type) {
        case NL80211_AUTHTYPE_OPEN_SYSTEM:
                bss_config->auth_mode = WLAN_AUTH_OPEN;
index 49ebf20..22a5916 100644 (file)
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        struct device *dev = adapter->dev;
        u32 recv_type;
        __le32 tmp;
+       int ret;
 
        if (adapter->hs_activated)
                mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_CMD:
                        if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
                                dev_err(dev, "CMD: skb->len too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        } else if (!adapter->curr_cmd) {
                                dev_dbg(dev, "CMD: no curr_cmd\n");
                                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
                                        mwifiex_process_sleep_confirm_resp(
                                                        adapter, skb->data,
                                                        skb->len);
-                                       return 0;
+                                       ret = 0;
+                                       goto exit_restore_skb;
                                }
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
                        adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_EVENT:
                        if (skb->len < sizeof(u32)) {
                                dev_err(dev, "EVENT: skb->len too small\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
                        skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
                        adapter->event_cause = le32_to_cpu(tmp);
-                       skb_pull(skb, sizeof(u32));
                        dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
 
                        if (skb->len > MAX_EVENT_SIZE) {
                                dev_err(dev, "EVENT: event body too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
-                       skb_copy_from_linear_data(skb, adapter->event_body,
-                                                 skb->len);
+                       memcpy(adapter->event_body, skb->data +
+                              MWIFIEX_EVENT_HEADER_LEN, skb->len);
+
                        adapter->event_received = true;
                        adapter->event_skb = skb;
                        break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        }
 
        return -EINPROGRESS;
+
+exit_restore_skb:
+       /* The buffer will be reused for further cmds/events */
+       skb_push(skb, INTF_HEADER_LEN);
+
+       return ret;
 }
 
 static void mwifiex_usb_rx_complete(struct urb *urb)
index f3fc655..3fa4d41 100644 (file)
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
                priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
 
+               mwifiex_reset_11n_rx_seq_num(priv);
+
                atomic_set(&priv->wmm.tx_pkts_queued, 0);
                atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
        }
@@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
 
        if (!ptr->is_11n_enabled ||
            mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+           priv->wps.session_enable ||
            ((priv->sec_info.wpa_enabled ||
              priv->sec_info.wpa2_enabled) &&
             !priv->wpa_is_gtk_set)) {
index 2e9e6af..dfcd02a 100644 (file)
@@ -2110,7 +2110,7 @@ resize_buf:
        while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
                if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
                    matched) {
-                       if (!ether_addr_equal(bssid->mac, match_bssid))
+                       if (ether_addr_equal(bssid->mac, match_bssid))
                                *matched = true;
                }
 
index d357d1e..74ecc33 100644 (file)
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
        case QID_RX:
                if (!rt2x00queue_full(queue))
                        rt2x00queue_for_each_entry(queue,
-                                                  Q_INDEX_DONE,
                                                   Q_INDEX,
+                                                  Q_INDEX_DONE,
                                                   NULL,
                                                   rt2x00usb_kick_rx_entry);
                break;
index d228358..9970c2b 100644 (file)
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+       {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
        /* HP - Lite-On ,8188CUS Slim Combo */
        {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
        {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
        {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+       {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
        {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
        {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
index ad87a1a..db6430c 100644 (file)
@@ -869,7 +869,7 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
        }
 
        *mactime = tsf_info->current_tsf_lsb |
-               (tsf_info->current_tsf_msb << 31);
+               ((u64)tsf_info->current_tsf_msb << 32);
 
 out:
        kfree(tsf_info);
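
The wl1251 fix widens the MSB before shifting: shifting a plain 32-bit value by 31 loses the top bit, and shifting it by 32 is undefined, so neither can produce the upper half of a 64-bit TSF. A standalone sketch of the corrected assembly of mactime from the two 32-bit halves (the sample values are arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t lsb = 0x89abcdefu;     /* stands in for current_tsf_lsb */
        uint32_t msb = 0x01234567u;     /* stands in for current_tsf_msb */

        /* widen first: without the cast the shift happens in 32-bit math
         * and the MSB half is lost */
        uint64_t mactime = ((uint64_t)msb << 32) | lsb;

        printf("mactime = 0x%016" PRIx64 "\n", mactime);
        return 0;
}
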
index 9f15cca..5ec50a4 100644 (file)
@@ -76,8 +76,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
                }
        }
 
-       if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID &&
-           wl->station_mode != STATION_ACTIVE_MODE) {
+       if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
                wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
 
                /* indicate to the stack, that beacons have been lost */
index 87f6305..567660c 100644 (file)
@@ -73,6 +73,8 @@ static void wl1251_spi_reset(struct wl1251 *wl)
        spi_sync(wl_to_spi(wl), &m);
 
        wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+
+       kfree(cmd);
 }
 
 static void wl1251_spi_wake(struct wl1251 *wl)
@@ -127,6 +129,8 @@ static void wl1251_spi_wake(struct wl1251 *wl)
        spi_sync(wl_to_spi(wl), &m);
 
        wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+
+       kfree(cmd);
 }
 
 static void wl1251_spi_reset_wake(struct wl1251 *wl)
index 54156b0..d7b907e 100644 (file)
@@ -1,7 +1,6 @@
 config WLCORE
        tristate "TI wlcore support"
        depends on WL_TI && GENERIC_HARDIRQS && MAC80211
-       depends on INET
        select FW_LOADER
        ---help---
          This module contains the main code for TI WLAN chips.  It abstracts
index 2027afe..3089990 100644 (file)
@@ -1935,14 +1935,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
-       unregister_netdev(info->netdev);
-
        xennet_disconnect_backend(info);
 
-       del_timer_sync(&info->rx_refill_timer);
-
        xennet_sysfs_delif(info->netdev);
 
+       unregister_netdev(info->netdev);
+
+       del_timer_sync(&info->rx_refill_timer);
+
        free_percpu(info->stats);
 
        free_netdev(info->netdev);
index 343ad29..e44f8c2 100644 (file)
@@ -317,10 +317,9 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
        for(; lookup->compatible != NULL; lookup++) {
                if (!of_device_is_compatible(np, lookup->compatible))
                        continue;
-               if (of_address_to_resource(np, 0, &res))
-                       continue;
-               if (res.start != lookup->phys_addr)
-                       continue;
+               if (!of_address_to_resource(np, 0, &res))
+                       if (res.start != lookup->phys_addr)
+                               continue;
                pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
                return lookup;
        }
@@ -462,4 +461,5 @@ int of_platform_populate(struct device_node *root,
        of_node_put(root);
        return rc;
 }
+EXPORT_SYMBOL_GPL(of_platform_populate);
 #endif /* CONFIG_OF_ADDRESS */
index da14432..f3cfa0b 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2010 ARM Ltd.
+ * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
  *
  * Perf-events backend for OProfile.
  */
@@ -25,7 +26,7 @@ static int oprofile_perf_enabled;
 static DEFINE_MUTEX(oprofile_perf_mutex);
 
 static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static DEFINE_PER_CPU(struct perf_event **, perf_events);
 static int num_counters;
 
 /*
@@ -38,7 +39,7 @@ static void op_overflow_handler(struct perf_event *event,
        u32 cpu = smp_processor_id();
 
        for (id = 0; id < num_counters; ++id)
-               if (perf_events[cpu][id] == event)
+               if (per_cpu(perf_events, cpu)[id] == event)
                        break;
 
        if (id != num_counters)
@@ -74,7 +75,7 @@ static int op_create_counter(int cpu, int event)
 {
        struct perf_event *pevent;
 
-       if (!counter_config[event].enabled || perf_events[cpu][event])
+       if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
                return 0;
 
        pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
@@ -91,18 +92,18 @@ static int op_create_counter(int cpu, int event)
                return -EBUSY;
        }
 
-       perf_events[cpu][event] = pevent;
+       per_cpu(perf_events, cpu)[event] = pevent;
 
        return 0;
 }
 
 static void op_destroy_counter(int cpu, int event)
 {
-       struct perf_event *pevent = perf_events[cpu][event];
+       struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
 
        if (pevent) {
                perf_event_release_kernel(pevent);
-               perf_events[cpu][event] = NULL;
+               per_cpu(perf_events, cpu)[event] = NULL;
        }
 }
 
@@ -257,12 +258,12 @@ void oprofile_perf_exit(void)
 
        for_each_possible_cpu(cpu) {
                for (id = 0; id < num_counters; ++id) {
-                       event = perf_events[cpu][id];
+                       event = per_cpu(perf_events, cpu)[id];
                        if (event)
                                perf_event_release_kernel(event);
                }
 
-               kfree(perf_events[cpu]);
+               kfree(per_cpu(perf_events, cpu));
        }
 
        kfree(counter_config);
@@ -277,8 +278,6 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
        if (ret)
                return ret;
 
-       memset(&perf_events, 0, sizeof(perf_events));
-
        num_counters = perf_num_counters();
        if (num_counters <= 0) {
                pr_info("oprofile: no performance counters\n");
@@ -298,9 +297,9 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
        }
 
        for_each_possible_cpu(cpu) {
-               perf_events[cpu] = kcalloc(num_counters,
+               per_cpu(perf_events, cpu) = kcalloc(num_counters,
                                sizeof(struct perf_event *), GFP_KERNEL);
-               if (!perf_events[cpu]) {
+               if (!per_cpu(perf_events, cpu)) {
                        pr_info("oprofile: failed to allocate %d perf events "
                                        "for cpu %d\n", num_counters, cpu);
                        ret = -ENOMEM;
index bf0cee6..099f46c 100644 (file)
@@ -748,6 +748,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
        pci_pm_set_unknown_state(pci_dev);
 
+       /*
+        * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+        * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+        * hasn't been quiesced and tries to turn it off.  If the controller
+        * is already in D3, this can hang or cause memory corruption.
+        *
+        * Since the value of the COMMAND register doesn't matter once the
+        * device has been suspended, we can safely set it to 0 here.
+        */
+       if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+               pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
        return 0;
 }
 
index 77cb54a..447e834 100644 (file)
@@ -1744,11 +1744,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
        if (target_state == PCI_POWER_ERROR)
                return -EIO;
 
-       /* Some devices mustn't be in D3 during system sleep */
-       if (target_state == PCI_D3hot &&
-                       (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
-               return 0;
-
        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
 
        error = pci_set_power_state(dev, target_state);
index 194b243..2a75216 100644 (file)
@@ -2929,32 +2929,6 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
 
-/*
- * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
- * ASUS motherboards will cause memory corruption or a system crash
- * if they are in D3 while the system is put into S3 sleep.
- */
-static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
-{
-       const char *sys_info;
-       static const char good_Asus_board[] = "P8Z68-V";
-
-       if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
-               return;
-       if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
-               return;
-       sys_info = dmi_get_system_info(DMI_BOARD_NAME);
-       if (sys_info && memcmp(sys_info, good_Asus_board,
-                       sizeof(good_Asus_board) - 1) == 0)
-               return;
-
-       dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
-       dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
-       device_set_wakeup_capable(&dev->dev, false);
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
-
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
                          struct pci_fixup *end)
 {
index dd6d93a..90c837f 100644 (file)
@@ -474,7 +474,9 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
                grp->configs[j] = config & ~IMX_PAD_SION;
        }
 
+#ifdef DEBUG
        IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
+#endif
 
        return 0;
 }
index 7737d4d..e9bf71f 100644 (file)
@@ -1950,6 +1950,8 @@ static struct imx_pin_reg imx6q_pin_regs[] = {
        IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */
        IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */
        IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */
+       IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID */
+       IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID */
 };
 
 /* Pad names for the pinmux subsystem */
index afb50ee..4ba4636 100644 (file)
@@ -137,7 +137,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
 
 free_group:
        if (!purecfg)
-               free(group);
+               kfree(group);
 free:
        kfree(new_map);
        return ret;
index e8937e7..3e7e47d 100644 (file)
@@ -1438,7 +1438,27 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
 
        dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
 
-       /* Handle this special glitch on altfunction C */
+       /*
+        * If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
+        * we may pass through an undesired state. In this case we take
+        * some extra care.
+        *
+        * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+        *  - Save SLPM registers (since we have a shadow register in the
+        *    nmk_chip we're using that as backup)
+        *  - Set SLPM=0 for the IOs you want to switch and others to 1
+        *  - Configure the GPIO registers for the IOs that are being switched
+        *  - Set IOFORCE=1
+        *  - Modify the AFLSA/B registers for the IOs that are being switched
+        *  - Set IOFORCE=0
+        *  - Restore SLPM registers
+        *  - Any spurious wake up event during switch sequence to be ignored
+        *    and cleared
+        *
+        * We REALLY need to save ALL slpm registers, because the external
+        * IOFORCE will switch *all* ports to their sleepmode setting so as
+        * to avoid glitches. (Not just one port!)
+        */
        glitch = (g->altsetting == NMK_GPIO_ALT_C);
 
        if (glitch) {
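
The expanded comment above spells out the ordering that avoids the glitch when switching IOs to altfunction C. A compressed sketch of that sequence with stubbed register writes (slpm_save(), set_ioforce() and the other helpers are illustrative placeholders, not the driver's functions):

#include <stdio.h>

/* illustrative stubs; the real driver writes SLPM/IOFORCE/AFSLx registers */
static void slpm_save(void)            { puts("save SLPM registers"); }
static void slpm_mask(unsigned pins)   { printf("SLPM=0 for pins 0x%x, 1 elsewhere\n", pins); }
static void gpio_configure(unsigned p) { printf("configure GPIO regs for pins 0x%x\n", p); }
static void set_ioforce(int on)        { printf("IOFORCE=%d\n", on); }
static void set_altfunc_c(unsigned p)  { printf("AFSLA/AFSLB -> alt C for pins 0x%x\n", p); }
static void slpm_restore(void)         { puts("restore SLPM registers"); }

int main(void)
{
        unsigned pins = 0x0c;           /* pins being switched, arbitrary */

        slpm_save();                    /* 1. back up every port's SLPM    */
        slpm_mask(pins);                /* 2. SLPM=0 only for switched IOs */
        gpio_configure(pins);           /* 3. set up the GPIO side first   */
        set_ioforce(1);                 /* 4. force IOs while switching    */
        set_altfunc_c(pins);            /* 5. flip AFSLA/AFSLB             */
        set_ioforce(0);                 /* 6. release the force            */
        slpm_restore();                 /* 7. put SLPM back                */
        return 0;
}
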
index 5ae50aa..b3f6b28 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * Inspired from:
  * - U300 Pinctl drivers
index 9155783..d950eb7 100644 (file)
@@ -2,7 +2,7 @@
  * Driver header file for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index fff168b..d6cca8c 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr1310 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -2192,7 +2192,7 @@ static void __exit spear1310_pinctrl_exit(void)
 }
 module_exit(spear1310_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
index a8ab2a6..a0eb057 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr1340 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -1983,7 +1983,7 @@ static void __exit spear1340_pinctrl_exit(void)
 }
 module_exit(spear1340_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
index 9c82a35..4dfc284 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr300 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -702,7 +702,7 @@ static void __exit spear300_pinctrl_exit(void)
 }
 module_exit(spear300_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
index 1a97076..9688369 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr310 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -425,7 +425,7 @@ static void __exit spear310_pinctrl_exit(void)
 }
 module_exit(spear310_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
index de726e6..020b1e0 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr320 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -3462,7 +3462,7 @@ static void __exit spear320_pinctrl_exit(void)
 }
 module_exit(spear320_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
index 91c883b..0242378 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 5d5fdd8..31f4434 100644 (file)
@@ -2,7 +2,7 @@
  * Header file for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 4f20f8d..17f6dfd 100644 (file)
@@ -694,10 +694,10 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
 static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
 {
        int ret, i;
-       unsigned long cfg;
+       int cfg;
        struct ideapad_private *priv;
 
-       if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
+       if (read_method_int(adevice->handle, "_CFG", &cfg))
                return -ENODEV;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -721,7 +721,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
                goto input_failed;
 
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
-               if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
+               if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
                        ideapad_register_rfkill(adevice, i);
                else
                        priv->rfk[i] = NULL;
index 0ffdb3c..9af4257 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/string.h>
 #include <linux/tick.h>
 #include <linux/timer.h>
+#include <linux/dmi.h>
 #include <drm/i915_drm.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
@@ -1485,6 +1486,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
 
 MODULE_DEVICE_TABLE(pci, ips_id_table);
 
+static int ips_blacklist_callback(const struct dmi_system_id *id)
+{
+       pr_info("Blacklisted intel_ips for %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id ips_blacklist[] = {
+       {
+               .callback = ips_blacklist_callback,
+               .ident = "HP ProBook",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
+               },
+       },
+       { }     /* terminating entry */
+};
+
 static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        u64 platform_info;
@@ -1494,6 +1513,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
        u16 htshi, trc, trc_required_mask;
        u8 tse;
 
+       if (dmi_check_system(ips_blacklist))
+               return -ENODEV;
+
        ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
        if (!ips)
                return -ENOMEM;
index 210d4ae..d456ff0 100644 (file)
@@ -973,7 +973,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buffer, size_t count)
 {
-       unsigned long value = 0;
+       int value;
        int ret = 0;
        struct sony_nc_value *item =
            container_of(attr, struct sony_nc_value, devattr);
@@ -984,7 +984,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       if (kstrtoul(buffer, 10, &value))
+       if (kstrtoint(buffer, 10, &value))
                return -EINVAL;
 
        if (item->validate)
@@ -994,7 +994,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
                return value;
 
        ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
-                       (int *)&value, NULL);
+                              &value, NULL);
        if (ret < 0)
                return -EIO;
 
@@ -1010,6 +1010,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
 struct sony_backlight_props {
        struct backlight_device *dev;
        int                     handle;
+       int                     cmd_base;
        u8                      offset;
        u8                      maxlvl;
 };
@@ -1037,7 +1038,7 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
        struct sony_backlight_props *sdev =
                (struct sony_backlight_props *)bl_get_data(bd);
 
-       sony_call_snc_handle(sdev->handle, 0x0200, &result);
+       sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result);
 
        return (result & 0xff) - sdev->offset;
 }
@@ -1049,7 +1050,8 @@ static int sony_nc_update_status_ng(struct backlight_device *bd)
                (struct sony_backlight_props *)bl_get_data(bd);
 
        value = bd->props.brightness + sdev->offset;
-       if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
+       if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10),
+                               &result))
                return -EIO;
 
        return value;
@@ -1172,6 +1174,11 @@ static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
 /*
  * ACPI callbacks
  */
+enum event_types {
+       HOTKEY = 1,
+       KILLSWITCH,
+       GFX_SWITCH
+};
 static void sony_nc_notify(struct acpi_device *device, u32 event)
 {
        u32 real_ev = event;
@@ -1196,7 +1203,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
                /* hotkey event */
                case 0x0100:
                case 0x0127:
-                       ev_type = 1;
+                       ev_type = HOTKEY;
                        real_ev = sony_nc_hotkeys_decode(event, handle);
 
                        if (real_ev > 0)
@@ -1216,7 +1223,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
                         * update the rfkill device status when the
                         * switch is moved.
                         */
-                       ev_type = 2;
+                       ev_type = KILLSWITCH;
                        sony_call_snc_handle(handle, 0x0100, &result);
                        real_ev = result & 0x03;
 
@@ -1226,6 +1233,24 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
 
                        break;
 
+               case 0x0128:
+               case 0x0146:
+                       /* Hybrid GFX switching */
+                       sony_call_snc_handle(handle, 0x0000, &result);
+                       dprintk("GFX switch event received (reason: %s)\n",
+                                       (result & 0x01) ?
+                                       "switch change" : "unknown");
+
+                       /* verify the switch state
+                        * 1: discrete GFX
+                        * 0: integrated GFX
+                        */
+                       sony_call_snc_handle(handle, 0x0100, &result);
+
+                       ev_type = GFX_SWITCH;
+                       real_ev = result & 0xff;
+                       break;
+
                default:
                        dprintk("Unknown event 0x%x for handle 0x%x\n",
                                        event, handle);
@@ -1238,7 +1263,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
 
        } else {
                /* old style event */
-               ev_type = 1;
+               ev_type = HOTKEY;
                sony_laptop_report_input_event(real_ev);
        }
 
@@ -1893,32 +1918,33 @@ static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
         *  bits 4,5: store the limit into the EC
         *  bits 6,7: store the limit into the battery
         */
+       cmd = 0;
 
-       /*
-        * handle 0x0115 should allow storing on battery too;
-        * handle 0x0136 same as 0x0115 + health status;
-        * handle 0x013f, same as 0x0136 but no storing on the battery
-        *
-        * Store only inside the EC for now, regardless the handle number
-        */
-       if (value == 0)
-               /* disable limits */
-               cmd = 0x0;
+       if (value > 0) {
+               if (value <= 50)
+                       cmd = 0x20;
 
-       else if (value <= 50)
-               cmd = 0x21;
+               else if (value <= 80)
+                       cmd = 0x10;
 
-       else if (value <= 80)
-               cmd = 0x11;
+               else if (value <= 100)
+                       cmd = 0x30;
 
-       else if (value <= 100)
-               cmd = 0x31;
+               else
+                       return -EINVAL;
 
-       else
-               return -EINVAL;
+               /*
+                * handle 0x0115 should allow storing on battery too;
+                * handle 0x0136 same as 0x0115 + health status;
+                * handle 0x013f, same as 0x0136 but no storing on the battery
+                */
+               if (bcare_ctl->handle != 0x013f)
+                       cmd = cmd | (cmd << 2);
 
-       if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100,
-                               &result))
+               cmd = (cmd | 0x1) << 0x10;
+       }
+
+       if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result))
                return -EIO;
 
        return count;
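
The rewritten store path builds the SNC argument from the requested percentage: a limit nibble (0x20 for <=50, 0x10 for <=80, 0x30 for <=100), optionally duplicated into the store-in-battery bits for handles other than 0x013f, then the enable bit ORed in and the result shifted into the high half-word before 0x0100 is added. A standalone sketch of that arithmetic for a 50% limit (the handle value is an example input, and sony_call_snc_handle() is only mimicked by a printf):

#include <stdio.h>

int main(void)
{
        unsigned int handle = 0x0115;   /* example handle, supports battery storage */
        unsigned int value = 50;        /* requested charge limit, percent */
        unsigned int cmd = 0;

        if (value > 0) {
                if (value <= 50)
                        cmd = 0x20;
                else if (value <= 80)
                        cmd = 0x10;
                else if (value <= 100)
                        cmd = 0x30;

                if (handle != 0x013f)           /* also store the limit in the battery */
                        cmd |= cmd << 2;

                cmd = (cmd | 0x1) << 0x10;      /* enable bit, then shift to high half-word */
        }

        /* the driver would now issue sony_call_snc_handle(handle, cmd | 0x0100, ...) */
        printf("SNC argument: 0x%08x\n", cmd | 0x0100);
        return 0;
}
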
@@ -2113,7 +2139,7 @@ static ssize_t sony_nc_thermal_mode_show(struct device *dev,
                struct device_attribute *attr, char *buffer)
 {
        ssize_t count = 0;
-       unsigned int mode = sony_nc_thermal_mode_get();
+       int mode = sony_nc_thermal_mode_get();
 
        if (mode < 0)
                return mode;
@@ -2472,6 +2498,7 @@ static void sony_nc_backlight_ng_read_limits(int handle,
 {
        u64 offset;
        int i;
+       int lvl_table_len = 0;
        u8 min = 0xff, max = 0x00;
        unsigned char buffer[32] = { 0 };
 
@@ -2480,8 +2507,6 @@ static void sony_nc_backlight_ng_read_limits(int handle,
        props->maxlvl = 0xff;
 
        offset = sony_find_snc_handle(handle);
-       if (offset < 0)
-               return;
 
        /* try to read the boundaries from ACPI tables, if we fail the above
         * defaults should be reasonable
@@ -2491,11 +2516,21 @@ static void sony_nc_backlight_ng_read_limits(int handle,
        if (i < 0)
                return;
 
+       switch (handle) {
+       case 0x012f:
+       case 0x0137:
+               lvl_table_len = 9;
+               break;
+       case 0x143:
+               lvl_table_len = 16;
+               break;
+       }
+
        /* the buffer lists brightness levels available, brightness levels are
         * from position 0 to 8 in the array, other values are used by ALS
         * control.
         */
-       for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) {
+       for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) {
 
                dprintk("Brightness level: %d\n", buffer[i]);
 
@@ -2520,16 +2555,24 @@ static void sony_nc_backlight_setup(void)
        const struct backlight_ops *ops = NULL;
        struct backlight_properties props;
 
-       if (sony_find_snc_handle(0x12f) != -1) {
+       if (sony_find_snc_handle(0x12f) >= 0) {
                ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x0100;
                sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
                max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
-       } else if (sony_find_snc_handle(0x137) != -1) {
+       } else if (sony_find_snc_handle(0x137) >= 0) {
                ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x0100;
                sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
                max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
+       } else if (sony_find_snc_handle(0x143) >= 0) {
+               ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x3000;
+               sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
        } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
                                                &unused))) {
                ops = &sony_backlight_ops;
@@ -2597,6 +2640,12 @@ static int sony_nc_add(struct acpi_device *device)
                }
        }
 
+       result = sony_laptop_setup_input(device);
+       if (result) {
+               pr_err("Unable to create input devices\n");
+               goto outplatform;
+       }
+
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
                                         &handle))) {
                int arg = 1;
@@ -2614,12 +2663,6 @@ static int sony_nc_add(struct acpi_device *device)
        }
 
        /* setup input devices and helper fifo */
-       result = sony_laptop_setup_input(device);
-       if (result) {
-               pr_err("Unable to create input devices\n");
-               goto outsnc;
-       }
-
        if (acpi_video_backlight_support()) {
                pr_info("brightness ignored, must be controlled by ACPI video driver\n");
        } else {
@@ -2667,22 +2710,21 @@ static int sony_nc_add(struct acpi_device *device)
 
        return 0;
 
-      out_sysfs:
+out_sysfs:
        for (item = sony_nc_values; item->name; ++item) {
                device_remove_file(&sony_pf_device->dev, &item->devattr);
        }
        sony_nc_backlight_cleanup();
-
-       sony_laptop_remove_input();
-
-      outsnc:
        sony_nc_function_cleanup(sony_pf_device);
        sony_nc_handles_cleanup(sony_pf_device);
 
-      outpresent:
+outplatform:
+       sony_laptop_remove_input();
+
+outpresent:
        sony_pf_remove();
 
-      outwalk:
+outwalk:
        sony_nc_rfkill_cleanup();
        return result;
 }
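
Note: the reordered error labels above keep the unwind path a mirror image of the new setup order (input devices are now created earlier, so they are torn down later on failure). A generic sketch of that goto-unwind convention follows; the setup/cleanup names are illustrative stubs, not the driver's functions.

/* Illustrative stubs standing in for real setup/teardown steps. */
static int sketch_setup_a(void) { return 0; }
static int sketch_setup_b(void) { return 0; }
static int sketch_setup_c(void) { return 0; }
static void sketch_cleanup_a(void) { }
static void sketch_cleanup_b(void) { }

static int sketch_probe(void)
{
	int err;

	err = sketch_setup_a();
	if (err)
		goto out;
	err = sketch_setup_b();
	if (err)
		goto out_a;		/* undo A only */
	err = sketch_setup_c();
	if (err)
		goto out_b;		/* undo B, then A */

	return 0;

out_b:
	sketch_cleanup_b();
out_a:
	sketch_cleanup_a();
out:
	return err;
}
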
index e1b8c54..a739f5c 100644 (file)
@@ -794,17 +794,17 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
 }
 
 static struct of_regulator_match ab8500_regulator_matches[] = {
-       { .name = "LDO-AUX1",    .driver_data = (void *) AB8500_LDO_AUX1, },
-       { .name = "LDO-AUX2",    .driver_data = (void *) AB8500_LDO_AUX2, },
-       { .name = "LDO-AUX3",    .driver_data = (void *) AB8500_LDO_AUX3, },
-       { .name = "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, },
-       { .name = "LDO-TVOUT",   .driver_data = (void *) AB8500_LDO_TVOUT, },
-       { .name = "LDO-USB",     .driver_data = (void *) AB8500_LDO_USB, },
-       { .name = "LDO-AUDIO",   .driver_data = (void *) AB8500_LDO_AUDIO, },
-       { .name = "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
-       { .name = "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
-       { .name = "LDO-DMIC",    .driver_data = (void *) AB8500_LDO_DMIC, },
-       { .name = "LDO-ANA",     .driver_data = (void *) AB8500_LDO_ANA, },
+       { .name = "ab8500_ldo_aux1",    .driver_data = (void *) AB8500_LDO_AUX1, },
+       { .name = "ab8500_ldo_aux2",    .driver_data = (void *) AB8500_LDO_AUX2, },
+       { .name = "ab8500_ldo_aux3",    .driver_data = (void *) AB8500_LDO_AUX3, },
+       { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
+       { .name = "ab8500_ldo_tvout",   .driver_data = (void *) AB8500_LDO_TVOUT, },
+       { .name = "ab8500_ldo_usb",     .driver_data = (void *) AB8500_LDO_USB, },
+       { .name = "ab8500_ldo_audio",   .driver_data = (void *) AB8500_LDO_AUDIO, },
+       { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
+       { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
+       { .name = "ab8500_ldo_dmic",    .driver_data = (void *) AB8500_LDO_DMIC, },
+       { .name = "ab8500_ldo_ana",     .driver_data = (void *) AB8500_LDO_ANA, },
 };
 
 static __devinit int
index 09a737c..8b4b382 100644 (file)
@@ -2519,9 +2519,12 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
 {
        struct regulator_dev *rdev = regulator->rdev;
        struct regulator *consumer;
-       int ret, output_uV, input_uV, total_uA_load = 0;
+       int ret, output_uV, input_uV = 0, total_uA_load = 0;
        unsigned int mode;
 
+       if (rdev->supply)
+               input_uV = regulator_get_voltage(rdev->supply);
+
        mutex_lock(&rdev->mutex);
 
        /*
@@ -2554,10 +2557,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
                goto out;
        }
 
-       /* get input voltage */
-       input_uV = 0;
-       if (rdev->supply)
-               input_uV = regulator_get_voltage(rdev->supply);
+       /* No supply? Use constraint voltage */
        if (input_uV <= 0)
                input_uV = rdev->constraints->input_uV;
        if (input_uV <= 0) {
index 968f97f..9dbb491 100644 (file)
@@ -452,26 +452,26 @@ static __devinit int db8500_regulator_register(struct platform_device *pdev,
 }
 
 static struct of_regulator_match db8500_regulator_matches[] = {
-       { .name = "db8500-vape",          .driver_data = (void *) DB8500_REGULATOR_VAPE, },
-       { .name = "db8500-varm",          .driver_data = (void *) DB8500_REGULATOR_VARM, },
-       { .name = "db8500-vmodem",        .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
-       { .name = "db8500-vpll",          .driver_data = (void *) DB8500_REGULATOR_VPLL, },
-       { .name = "db8500-vsmps1",        .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
-       { .name = "db8500-vsmps2",        .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
-       { .name = "db8500-vsmps3",        .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
-       { .name = "db8500-vrf1",          .driver_data = (void *) DB8500_REGULATOR_VRF1, },
-       { .name = "db8500-sva-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
-       { .name = "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
-       { .name = "db8500-sva-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
-       { .name = "db8500-sia-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
-       { .name = "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
-       { .name = "db8500-sia-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
-       { .name = "db8500-sga",           .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
-       { .name = "db8500-b2r2-mcde",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
-       { .name = "db8500-esram12",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
-       { .name = "db8500-esram12-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
-       { .name = "db8500-esram34",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
-       { .name = "db8500-esram34-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
+       { .name = "db8500_vape",          .driver_data = (void *) DB8500_REGULATOR_VAPE, },
+       { .name = "db8500_varm",          .driver_data = (void *) DB8500_REGULATOR_VARM, },
+       { .name = "db8500_vmodem",        .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
+       { .name = "db8500_vpll",          .driver_data = (void *) DB8500_REGULATOR_VPLL, },
+       { .name = "db8500_vsmps1",        .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
+       { .name = "db8500_vsmps2",        .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
+       { .name = "db8500_vsmps3",        .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
+       { .name = "db8500_vrf1",          .driver_data = (void *) DB8500_REGULATOR_VRF1, },
+       { .name = "db8500_sva_mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
+       { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
+       { .name = "db8500_sva_pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
+       { .name = "db8500_sia_mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
+       { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
+       { .name = "db8500_sia_pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
+       { .name = "db8500_sga",           .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
+       { .name = "db8500_b2r2_mcde",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
+       { .name = "db8500_esram12",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
+       { .name = "db8500_esram12_ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
+       { .name = "db8500_esram34",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
+       { .name = "db8500_esram34_ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
 };
 
 static __devinit int
index 9b7ca90..795f75a 100644 (file)
@@ -673,7 +673,9 @@ static __devinit int palmas_probe(struct platform_device *pdev)
                        pmic->desc[id].ops = &palmas_ops_smps10;
                        pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
                        pmic->desc[id].vsel_mask = SMPS10_VSEL;
-                       pmic->desc[id].enable_reg = PALMAS_SMPS10_STATUS;
+                       pmic->desc[id].enable_reg =
+                                       PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+                                                       PALMAS_SMPS10_STATUS);
                        pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
                }
 
@@ -739,7 +741,8 @@ static __devinit int palmas_probe(struct platform_device *pdev)
 
                pmic->desc[id].type = REGULATOR_VOLTAGE;
                pmic->desc[id].owner = THIS_MODULE;
-               pmic->desc[id].enable_reg = palmas_regs_info[id].ctrl_addr;
+               pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+                                               palmas_regs_info[id].ctrl_addr);
                pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE;
 
                if (pdata && pdata->reg_data)
index 290d6fc..9caadb4 100644 (file)
@@ -451,7 +451,7 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
 
        desc = reg_voltage_map[reg_id];
 
-       if (old_sel < new_sel)
+       if ((old_sel < new_sel) && s5m8767->ramp_delay)
                return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
                                        s5m8767->ramp_delay * 1000);
        return 0;
index f841bd0..8f1be85 100644 (file)
@@ -71,7 +71,7 @@
 
 /* LDO_CTRL bitfields */
 #define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id)   ((ldo_id)*4)
-#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)    (0x0F << ((ldo_id)*4))
+#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)    (0x07 << ((ldo_id)*4))
 
 /* Number of step-down converters available */
 #define TPS65023_NUM_DCDC              3
index b88b3df..1b299aa 100644 (file)
@@ -482,7 +482,7 @@ static int get_voltage_sel(struct regulator_dev *rdev)
        info    = &supply_info[rdev_get_id(rdev)];
 
        if (info->flags & FIXED_VOLTAGE)
-               return info->fixed_voltage;
+               return 0;
 
        ret = read_field(hw, &info->voltage);
        if (ret < 0)
index 24d880e..f8d818a 100644 (file)
@@ -4,9 +4,11 @@ menu "Remoteproc drivers (EXPERIMENTAL)"
 config REMOTEPROC
        tristate
        depends on EXPERIMENTAL
+       select FW_CONFIG
 
 config OMAP_REMOTEPROC
        tristate "OMAP remoteproc support"
+       depends on EXPERIMENTAL
        depends on ARCH_OMAP4
        depends on OMAP_IOMMU
        select REMOTEPROC
index 69425c4..de138e3 100644 (file)
@@ -182,7 +182,7 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev)
 
        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (ret) {
-               dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+               dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
                return ret;
        }
 
index 8ea7bcc..66324ee 100644 (file)
@@ -247,7 +247,7 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
                }
 
                if (offset + filesz > len) {
-                       dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
+                       dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
                                        offset + filesz, len);
                        ret = -EINVAL;
                        break;
@@ -934,7 +934,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
                unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
                if (unmapped != entry->len) {
                        /* nothing much to do besides complaining */
-                       dev_err(dev, "failed to unmap %u/%u\n", entry->len,
+                       dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
                                                                unmapped);
                }
 
@@ -1020,7 +1020,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 
        ehdr = (struct elf32_hdr *)fw->data;
 
-       dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);
+       dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
 
        /*
         * if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1041,8 +1041,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 
        /* look for the resource table */
        table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
-       if (!table)
+       if (!table) {
+               ret = -EINVAL;
                goto clean_up;
+       }
 
        /* handle fw resources which are required to boot rproc */
        ret = rproc_handle_boot_rsc(rproc, table, tablesz);
index 75506ec..f56c8ba 100644 (file)
@@ -188,6 +188,26 @@ static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
                                        rpdev->id.name);
 }
 
+/**
+ * __ept_release() - deallocate an rpmsg endpoint
+ * @kref: the ept's reference count
+ *
+ * This function deallocates an ept, and is invoked when its @kref refcount
+ * drops to zero.
+ *
+ * Never invoke this function directly!
+ */
+static void __ept_release(struct kref *kref)
+{
+       struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+                                                 refcount);
+       /*
+        * At this point no one holds a reference to ept anymore,
+        * so we can directly free it
+        */
+       kfree(ept);
+}
+
 /* for more info, see below documentation of rpmsg_create_ept() */
 static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
@@ -206,6 +226,9 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                return NULL;
        }
 
+       kref_init(&ept->refcount);
+       mutex_init(&ept->cb_lock);
+
        ept->rpdev = rpdev;
        ept->cb = cb;
        ept->priv = priv;
@@ -238,7 +261,7 @@ rem_idr:
        idr_remove(&vrp->endpoints, request);
 free_ept:
        mutex_unlock(&vrp->endpoints_lock);
-       kfree(ept);
+       kref_put(&ept->refcount, __ept_release);
        return NULL;
 }
 
@@ -302,11 +325,17 @@ EXPORT_SYMBOL(rpmsg_create_ept);
 static void
 __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
 {
+       /* make sure new inbound messages can't find this ept anymore */
        mutex_lock(&vrp->endpoints_lock);
        idr_remove(&vrp->endpoints, ept->addr);
        mutex_unlock(&vrp->endpoints_lock);
 
-       kfree(ept);
+       /* make sure in-flight inbound messages won't invoke cb anymore */
+       mutex_lock(&ept->cb_lock);
+       ept->cb = NULL;
+       mutex_unlock(&ept->cb_lock);
+
+       kref_put(&ept->refcount, __ept_release);
 }
 
 /**
@@ -790,12 +819,28 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
 
        /* use the dst addr to fetch the callback of the appropriate user */
        mutex_lock(&vrp->endpoints_lock);
+
        ept = idr_find(&vrp->endpoints, msg->dst);
+
+       /* let's make sure no one deallocates ept while we use it */
+       if (ept)
+               kref_get(&ept->refcount);
+
        mutex_unlock(&vrp->endpoints_lock);
 
-       if (ept && ept->cb)
-               ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
-       else
+       if (ept) {
+               /* make sure ept->cb doesn't go away while we use it */
+               mutex_lock(&ept->cb_lock);
+
+               if (ept->cb)
+                       ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
+                               msg->src);
+
+               mutex_unlock(&ept->cb_lock);
+
+               /* farewell, ept, we don't need you anymore */
+               kref_put(&ept->refcount, __ept_release);
+       } else
                dev_warn(dev, "msg received with no recepient\n");
 
        /* publish the real size of the buffer */
@@ -1040,7 +1085,7 @@ static int __init rpmsg_init(void)
 
        return ret;
 }
-module_init(rpmsg_init);
+subsys_initcall(rpmsg_init);
 
 static void __exit rpmsg_fini(void)
 {
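
Note: the rpmsg hunks above close a use-after-free window on endpoint teardown: the receive path pins the endpoint with kref_get() before dropping endpoints_lock, and __rpmsg_destroy_ept() clears ->cb under cb_lock before dropping its own reference. A minimal sketch of the same pattern, using simplified illustrative names rather than the rpmsg API:

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Sketch only: simplified endpoint with the same kref + callback-lock teardown. */
struct sketch_ept {
	struct kref refcount;
	struct mutex cb_lock;
	void (*cb)(void *data);
};

static void sketch_ept_release(struct kref *kref)
{
	kfree(container_of(kref, struct sketch_ept, refcount));
}

static struct sketch_ept *sketch_ept_create(void)
{
	struct sketch_ept *ept = kzalloc(sizeof(*ept), GFP_KERNEL);

	if (!ept)
		return NULL;
	kref_init(&ept->refcount);	/* starts at 1, owned by the creator */
	mutex_init(&ept->cb_lock);
	return ept;
}

static void sketch_deliver(struct sketch_ept *ept, void *data)
{
	kref_get(&ept->refcount);	/* pin ept while it is in use */
	mutex_lock(&ept->cb_lock);
	if (ept->cb)			/* destroy may have cleared it */
		ept->cb(data);
	mutex_unlock(&ept->cb_lock);
	kref_put(&ept->refcount, sketch_ept_release);
}

static void sketch_destroy(struct sketch_ept *ept)
{
	mutex_lock(&ept->cb_lock);
	ept->cb = NULL;			/* in-flight messages see NULL, not a stale callback */
	mutex_unlock(&ept->cb_lock);
	kref_put(&ept->refcount, sketch_ept_release);	/* freed once the last user drops out */
}
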
index 4bcf9ca..370889d 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 
 #define AB8500_RTC_SOFF_STAT_REG       0x00
 #define AB8500_RTC_CC_CONF_REG         0x01
@@ -422,7 +423,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
        }
 
        err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
-               IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
+               IRQF_NO_SUSPEND | IRQF_ONESHOT, "ab8500-rtc", rtc);
        if (err < 0) {
                rtc_device_unregister(rtc);
                return err;
@@ -430,7 +431,6 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, rtc);
 
-
        err = ab8500_sysfs_rtc_register(&pdev->dev);
        if (err) {
                dev_err(&pdev->dev, "sysfs RTC failed to register\n");
@@ -454,10 +454,16 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id ab8500_rtc_match[] = {
+       { .compatible = "stericsson,ab8500-rtc", },
+       {}
+};
+
 static struct platform_driver ab8500_rtc_driver = {
        .driver = {
                .name = "ab8500-rtc",
                .owner = THIS_MODULE,
+               .of_match_table = ab8500_rtc_match,
        },
        .probe  = ab8500_rtc_probe,
        .remove = __devexit_p(ab8500_rtc_remove),
index 5e1d64e..e3e50d6 100644 (file)
@@ -202,10 +202,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
        struct platform_device *pdev = dev_id;
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
        void __iomem *ioaddr = pdata->ioaddr;
+       unsigned long flags;
        u32 status;
        u32 events = 0;
 
-       spin_lock_irq(&pdata->rtc->irq_lock);
+       spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
        status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
        /* clear interrupt sources */
        writew(status, ioaddr + RTC_RTCISR);
@@ -224,7 +225,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
                events |= (RTC_PF | RTC_IRQF);
 
        rtc_update_irq(pdata->rtc, 1, events);
-       spin_unlock_irq(&pdata->rtc->irq_lock);
+       spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
 
        return IRQ_HANDLED;
 }
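
Note: the mxc-rtc hunk above replaces spin_lock_irq()/spin_unlock_irq() with the irqsave/irqrestore pair, so the handler restores whatever interrupt state its caller had instead of unconditionally re-enabling interrupts on exit. A hedged sketch of the idiom; the device struct and handler name are illustrative:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct sketch_dev {
	spinlock_t lock;
	/* ... device registers, state ... */
};

static irqreturn_t sketch_irq_handler(int irq, void *dev_id)
{
	struct sketch_dev *dev = dev_id;	/* illustrative per-device state */
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);	/* preserves the caller's IRQ state */
	/* ... read and acknowledge the hardware status here ... */
	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}
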
index 1f76320..e278547 100644 (file)
@@ -458,12 +458,12 @@ static int __devexit spear_rtc_remove(struct platform_device *pdev)
        clk_disable(config->clk);
        clk_put(config->clk);
        iounmap(config->ioaddr);
-       kfree(config);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res)
                release_mem_region(res->start, resource_size(res));
        platform_set_drvdata(pdev, NULL);
        rtc_device_unregister(config->rtc);
+       kfree(config);
 
        return 0;
 }
index 258abea..c5d06fe 100644 (file)
@@ -510,7 +510,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        }
 
        ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
-                                  IRQF_TRIGGER_RISING,
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                   dev_name(&rtc->dev), rtc);
        if (ret < 0) {
                dev_err(&pdev->dev, "IRQ is not free.\n");
index 532d212..393e7ce 100644 (file)
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
 
                if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
                        resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
-                       memcpy(&resp->ending_fis[0], r+16, 24);
+                       memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
                        ts->buf_valid_size = sizeof(*resp);
                }
        }
index 0c53c28..7e77cf6 100644 (file)
@@ -350,6 +350,7 @@ struct bnx2i_hba {
        struct pci_dev *pcidev;
        struct net_device *netdev;
        void __iomem *regview;
+       resource_size_t reg_base;
 
        u32 age;
        unsigned long cnic_dev_type;
index ece47e5..86a12b4 100644 (file)
@@ -2724,7 +2724,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                goto arm_cq;
        }
 
-       reg_base = ep->hba->netdev->base_addr;
        if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
            (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
                config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
@@ -2740,7 +2739,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                /* 5709 device in normal node and 5706/5708 devices */
                reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
 
-       ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+       ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
                                          MB_KERNEL_CTX_SIZE);
        if (!ep->qp.ctx_base)
                return -ENOMEM;
index f8d516b..621538b 100644 (file)
@@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
        bnx2i_identify_device(hba);
        bnx2i_setup_host_queue_size(hba, shost);
 
+       hba->reg_base = pci_resource_start(hba->pcidev, 0);
        if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr,
-                                              BNX2_MQ_CONFIG2);
+               hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
                if (!hba->regview)
                        goto ioreg_map_err;
        } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+               hba->regview = pci_iomap(hba->pcidev, 0, 4096);
                if (!hba->regview)
                        goto ioreg_map_err;
        }
@@ -884,7 +884,7 @@ cid_que_err:
        bnx2i_free_mp_bdt(hba);
 mp_bdt_mem_err:
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
 ioreg_map_err:
@@ -910,7 +910,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
        pci_dev_put(hba->pcidev);
 
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
        bnx2i_free_mp_bdt(hba);
index 441d88a..d109cc3 100644 (file)
@@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task)
        if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
            ((stat->stat == SAM_STAT_CHECK_CONDITION &&
              dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
-               ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+               memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
 
                if (!link->sactive) {
-                       qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                } else {
-                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                        if (unlikely(link->eh_info.err_mask))
                                qc->flags |= ATA_QCFLAG_FAILED;
                }
@@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task)
                                qc->flags |= ATA_QCFLAG_FAILED;
                        }
 
-                       dev->sata_dev.tf.feature = 0x04; /* status err */
-                       dev->sata_dev.tf.command = ATA_ERR;
+                       dev->sata_dev.fis[3] = 0x04; /* status err */
+                       dev->sata_dev.fis[2] = ATA_ERR;
                }
        }
 
@@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
        struct domain_device *dev = qc->ap->private_data;
 
-       memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+       ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
        return true;
 }
 
index 6986552..77759c7 100644 (file)
@@ -3960,7 +3960,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
 {
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = ha->tgt.qla_tgt;
-       int reason_code;
+       int login_code;
 
        ql_dbg(ql_dbg_tgt, vha, 0xe039,
            "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
@@ -4003,9 +4003,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
        {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
                    "qla_target(%d): Async LOOP_UP occured "
-                   "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
-                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+                   "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                if (tgt->link_reinit_iocb_pending) {
                        qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
                            0, 0, 0, 0, 0, 0);
@@ -4020,23 +4020,24 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
        case MBA_RSCN_UPDATE:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
                    "qla_target(%d): Async event %#x occured "
-                   "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code,
-                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+                   "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                break;
 
        case MBA_PORT_UPDATE:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
                    "qla_target(%d): Port update async event %#x "
-                   "occured: updating the ports database (m[1]=%x, m[2]=%x, "
-                   "m[3]=%x, m[4]=%x)", vha->vp_idx, code,
-                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
-               reason_code = le16_to_cpu(mailbox[2]);
-               if (reason_code == 0x4)
+                   "occured: updating the ports database (m[0]=%x, m[1]=%x, "
+                   "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+               login_code = le16_to_cpu(mailbox[2]);
+               if (login_code == 0x4)
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
                            "Async MB 2: Got PLOGI Complete\n");
-               else if (reason_code == 0x7)
+               else if (login_code == 0x7)
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
                            "Async MB 2: Port Logged Out\n");
                break;
@@ -4044,9 +4045,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
        default:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
                    "qla_target(%d): Async event %#x occured: "
-                   "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
-                   code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+                   "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+                   code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                break;
        }
 
index ae78148..0727345 100644 (file)
@@ -22,11 +22,6 @@ static int __init wait_scan_init(void)
         * and might not yet have reached the scsi async scanning
         */
        wait_for_device_probe();
-       /*
-        * and then we wait for the actual asynchronous scsi scan
-        * to finish.
-        */
-       scsi_complete_async_scans();
        return 0;
 }
 
index 6f0a4c6..6f72b80 100644 (file)
@@ -1899,6 +1899,8 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
 {
        if (sdp->host->max_cmd_len < 16)
                return 0;
+       if (sdp->try_rc_10_first)
+               return 0;
        if (sdp->scsi_level > SCSI_SPC_2)
                return 1;
        if (scsi_device_protection(sdp))
index 46ef5fe..0c73dd4 100644 (file)
@@ -801,7 +801,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
        mcspi_dma = &mcspi->dma_channels[spi->chip_select];
 
        if (!cs) {
-               cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
+               cs = kzalloc(sizeof *cs, GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
                cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -842,6 +842,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
                cs = spi->controller_state;
                list_del(&cs->node);
 
+               kfree(cs);
        }
 
        if (spi->chip_select < spi->master->num_chipselect) {
index 1c3d638..aeac1ca 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/pci.h>
 #include <linux/usb.h>
 #include <linux/errno.h>
+#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/fcntl.h>
@@ -981,6 +982,8 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
 }
 EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
 
+#if IS_ENABLED(CONFIG_USB)
+
 static int comedi_old_usb_auto_config(struct usb_interface *intf,
                                      struct comedi_driver *driver)
 {
@@ -1043,3 +1046,5 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
        comedi_driver_unregister(comedi_driver);
 }
 EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
+
+#endif
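
Note: the comedi hunk above pulls in <linux/kconfig.h> and guards the USB auto-config helpers with IS_ENABLED(CONFIG_USB), which is true when the option is built in or modular. A small sketch of the idiom, with an illustrative symbol name:

#include <linux/kconfig.h>
#include <linux/errno.h>

#if IS_ENABLED(CONFIG_USB)		/* CONFIG_USB=y or CONFIG_USB=m */
int sketch_usb_helper(void);		/* real implementation built only when USB is available */
#else
static inline int sketch_usb_helper(void)
{
	return -ENODEV;			/* graceful fallback when USB support is absent */
}
#endif
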
index 292af0f..5166513 100644 (file)
@@ -104,7 +104,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
 
 void netlink_exit(struct sock *sock)
 {
-       sock_release(sock->sk_socket);
+       netlink_kernel_release(sock);
 }
 
 int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
index 0338c7c..f03fbd3 100644 (file)
@@ -29,8 +29,6 @@ Then fill in the following:
        * info->driver_module:
                Set to THIS_MODULE. Used to ensure correct ownership
                of various resources allocated by the core.
-       * info->num_interrupt_lines:
-               Number of event triggering hardware lines the device has.
        * info->event_attrs:
                Attributes used to enable / disable hardware events.
        * info->attrs:
index 2490dd2..8f1b3af 100644 (file)
@@ -13,6 +13,7 @@ config AD7291
 config AD7298
        tristate "Analog Devices AD7298 ADC driver"
        depends on SPI
+       select IIO_KFIFO_BUF if IIO_BUFFER
        help
          Say yes here to build support for Analog Devices AD7298
          8 Channel ADC with temperature sensor.
index 10ab6dc..a13afff 100644 (file)
@@ -235,7 +235,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
                .indexed = 1,                                   \
                .channel = num,                                 \
                .address = num,                                 \
-               .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,    \
+               .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |   \
+                               IIO_CHAN_INFO_SCALE_SHARED_BIT, \
                .scan_index = num,                              \
                .scan_type = IIO_ST('s', 16, 16, 0),            \
        }
index 3295ea6..97ef670 100644 (file)
@@ -129,6 +129,7 @@ static void send_space_homebrew(long length);
 
 static struct lirc_serial hardware[] = {
        [LIRC_HOMEBREW] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
                .signal_pin        = UART_MSR_DCD,
                .signal_pin_change = UART_MSR_DDCD,
                .on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -145,6 +146,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_IRDEO] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
                .signal_pin        = UART_MSR_DSR,
                .signal_pin_change = UART_MSR_DDSR,
                .on  = UART_MCR_OUT2,
@@ -156,6 +158,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_IRDEO_REMOTE] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
                .signal_pin        = UART_MSR_DSR,
                .signal_pin_change = UART_MSR_DDSR,
                .on  = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
@@ -167,6 +170,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_ANIMAX] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
                .signal_pin        = UART_MSR_DCD,
                .signal_pin_change = UART_MSR_DDCD,
                .on  = 0,
@@ -177,6 +181,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_IGOR] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
                .signal_pin        = UART_MSR_DSR,
                .signal_pin_change = UART_MSR_DDSR,
                .on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -201,6 +206,7 @@ static struct lirc_serial hardware[] = {
         * See also http://www.nslu2-linux.org for this device
         */
        [LIRC_NSLU2] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_NSLU2].lock),
                .signal_pin        = UART_MSR_CTS,
                .signal_pin_change = UART_MSR_DCTS,
                .on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
index 11acd4c..8c6ed3b 100644 (file)
@@ -208,7 +208,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
         */
        ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
        if (ret) {
-               dev_err(dev->dev, "could not map (paddr)!\n");
+               dev_err(dev->dev,
+                       "could not map (paddr)!  Skipping framebuffer alloc\n");
                ret = -ENOMEM;
                goto fail;
        }
@@ -388,8 +389,11 @@ void omap_fbdev_free(struct drm_device *dev)
 
        fbi = helper->fbdev;
 
-       unregister_framebuffer(fbi);
-       framebuffer_release(fbi);
+       /* only cleanup framebuffer if it is present */
+       if (fbi) {
+               unregister_framebuffer(fbi);
+               framebuffer_release(fbi);
+       }
 
        drm_fb_helper_fini(helper);
 
index 9bd18e2..69f616c 100644 (file)
@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        /* - */
        {USB_DEVICE(0x20F4, 0x646B)},
        {USB_DEVICE(0x083A, 0xC512)},
+       {USB_DEVICE(0x25D4, 0x4CA1)},
+       {USB_DEVICE(0x25D4, 0x4CAB)},
 
 /* RTL8191SU */
        /* Realtek */
index 9888693..664f6e7 100644 (file)
@@ -1095,7 +1095,7 @@ int target_emulate_write_same(struct se_cmd *cmd)
        if (num_blocks != 0)
                range = num_blocks;
        else
-               range = (dev->transport->get_blocks(dev) - lba);
+               range = (dev->transport->get_blocks(dev) - lba) + 1;
 
        pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
                 (unsigned long long)lba, (unsigned long long)range);
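
Note: the WRITE_SAME/UNMAP hunk above fixes an off-by-one in the range calculation. Assuming get_blocks() reports the last addressable LBA (which the added "+ 1" implies): with a last LBA of 999 and lba = 990, blocks 990..999 must be covered, i.e. (999 - 990) + 1 = 10 blocks; the old expression under-counted by one.
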
index 8556499..a1bcd92 100644 (file)
@@ -2031,7 +2031,7 @@ static int __core_scsi3_write_aptpl_to_file(
        if (IS_ERR(file) || !file || !file->f_dentry) {
                pr_err("filp_open(%s) for APTPL metadata"
                        " failed\n", path);
-               return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
+               return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
        }
 
        iov[0].iov_base = &buf[0];
@@ -3818,7 +3818,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
                cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = EINVAL;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3828,7 +3828,8 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
         */
        if (!cmd->se_sess) {
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        if (cmd->data_length < 24) {
index f03fb97..5b65f33 100644 (file)
@@ -230,6 +230,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
 {
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
 
+       if (cmd->aborted)
+               return ~0;
        return fc_seq_exch(cmd->seq)->rxid;
 }
 
index cb99da9..87901fa 100644 (file)
@@ -58,7 +58,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
        struct ft_tport *tport;
        int i;
 
-       tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+       tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+                                         lockdep_is_held(&ft_lport_lock));
        if (tport && tport->tpg)
                return tport;
 
index ced26c8..0d2ea0c 100644 (file)
@@ -401,7 +401,7 @@ out:
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
-void __init udbg_init_debug_opal(void)
+void __init udbg_init_debug_opal_raw(void)
 {
        u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
        hvc_opal_privs[index] = &hvc_opal_boot_priv;
index 47d061b..6e1958a 100644 (file)
@@ -3113,7 +3113,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
 
 /**
  *     serial8250_register_8250_port - register a serial port
- *     @port: serial port template
+ *     @up: serial port template
  *
  *     Configure the serial port specified by the request. If the
  *     port exists and is in use, it is hung up and unregistered
index 4ad721f..c17923e 100644 (file)
@@ -133,6 +133,10 @@ struct pl011_dmatx_data {
 struct uart_amba_port {
        struct uart_port        port;
        struct clk              *clk;
+       /* Two optional pin states - default & sleep */
+       struct pinctrl          *pinctrl;
+       struct pinctrl_state    *pins_default;
+       struct pinctrl_state    *pins_sleep;
        const struct vendor_data *vendor;
        unsigned int            dmacr;          /* dma control reg */
        unsigned int            im;             /* interrupt mask */
@@ -1312,6 +1316,14 @@ static int pl011_startup(struct uart_port *port)
        unsigned int cr;
        int retval;
 
+       /* Optionally enable pins to be muxed in and configured */
+       if (!IS_ERR(uap->pins_default)) {
+               retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+               if (retval)
+                       dev_err(port->dev,
+                               "could not set default pins\n");
+       }
+
        retval = clk_prepare(uap->clk);
        if (retval)
                goto out;
@@ -1420,6 +1432,7 @@ static void pl011_shutdown(struct uart_port *port)
 {
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int cr;
+       int retval;
 
        /*
         * disable all interrupts
@@ -1462,6 +1475,14 @@ static void pl011_shutdown(struct uart_port *port)
         */
        clk_disable(uap->clk);
        clk_unprepare(uap->clk);
+       /* Optionally let pins go into sleep states */
+       if (!IS_ERR(uap->pins_sleep)) {
+               retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
+               if (retval)
+                       dev_err(port->dev,
+                               "could not set pins to sleep state\n");
+       }
+
 
        if (uap->port.dev->platform_data) {
                struct amba_pl011_data *plat;
@@ -1792,6 +1813,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
        if (!uap)
                return -ENODEV;
 
+       /* Allow pins to be muxed in and configured */
+       if (!IS_ERR(uap->pins_default)) {
+               ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+               if (ret)
+                       dev_err(uap->port.dev,
+                               "could not set default pins\n");
+       }
+
        ret = clk_prepare(uap->clk);
        if (ret)
                return ret;
@@ -1844,7 +1873,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
 {
        struct uart_amba_port *uap;
        struct vendor_data *vendor = id->data;
-       struct pinctrl *pinctrl;
        void __iomem *base;
        int i, ret;
 
@@ -1869,11 +1897,20 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
                goto free;
        }
 
-       pinctrl = devm_pinctrl_get_select_default(&dev->dev);
-       if (IS_ERR(pinctrl)) {
-               ret = PTR_ERR(pinctrl);
+       uap->pinctrl = devm_pinctrl_get(&dev->dev);
+       if (IS_ERR(uap->pinctrl)) {
+               ret = PTR_ERR(uap->pinctrl);
                goto unmap;
        }
+       uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
+                                                PINCTRL_STATE_DEFAULT);
+       if (IS_ERR(uap->pins_default))
+               dev_err(&dev->dev, "could not get default pinstate\n");
+
+       uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
+                                              PINCTRL_STATE_SLEEP);
+       if (IS_ERR(uap->pins_sleep))
+               dev_dbg(&dev->dev, "could not get sleep pinstate\n");
 
        uap->clk = clk_get(&dev->dev, NULL);
        if (IS_ERR(uap->clk)) {
index 34bd345..6ae2a58 100644 (file)
@@ -466,7 +466,7 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
        spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL)
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 /*
  *     Wait for transmitter & holding register to empty
  */
index c691eea..f5ed3d7 100644 (file)
@@ -46,7 +46,7 @@ obj-$(CONFIG_USB_MICROTEK)    += image/
 obj-$(CONFIG_USB_SERIAL)       += serial/
 
 obj-$(CONFIG_USB)              += misc/
-obj-$(CONFIG_USB)              += phy/
+obj-$(CONFIG_USB_COMMON)       += phy/
 obj-$(CONFIG_EARLY_PRINTK_DBGP)        += early/
 
 obj-$(CONFIG_USB_ATM)          += atm/
index 8fd398d..ee46927 100644 (file)
@@ -500,6 +500,8 @@ retry:
                        goto retry;
                }
                if (!desc->reslength) { /* zero length read */
+                       dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+                       clear_bit(WDM_READ, &desc->flags);
                        spin_unlock_irq(&desc->iuspin);
                        goto retry;
                }
index 25a7422..8fb4849 100644 (file)
@@ -2324,12 +2324,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
 static int hub_port_reset(struct usb_hub *hub, int port1,
                        struct usb_device *udev, unsigned int delay, bool warm);
 
-/* Is a USB 3.0 port in the Inactive state? */
-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * Port warm reset is required to recover
+ */
+static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
 {
        return hub_is_superspeed(hub->hdev) &&
-               (portstatus & USB_PORT_STAT_LINK_STATE) ==
-               USB_SS_PORT_LS_SS_INACTIVE;
+               (((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_SS_INACTIVE) ||
+                ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_COMP_MOD));
 }
 
 static int hub_port_wait_reset(struct usb_hub *hub, int port1,
@@ -2365,7 +2369,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                         *
                         * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
                         */
-                       if (hub_port_inactive(hub, portstatus)) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                int ret;
 
                                if ((portchange & USB_PORT_STAT_C_CONNECTION))
@@ -4408,9 +4412,7 @@ static void hub_events(void)
                        /* Warm reset a USB3 protocol port if it's in
                         * SS.Inactive state.
                         */
-                       if (hub_is_superspeed(hub->hdev) &&
-                               (portstatus & USB_PORT_STAT_LINK_STATE)
-                                       == USB_SS_PORT_LS_SS_INACTIVE) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                dev_dbg(hub_dev, "warm reset port %d\n", i);
                                hub_port_reset(hub, i, NULL,
                                                HUB_BH_RESET_TIME, true);
index 3df1a19..ec70df7 100644 (file)
@@ -1091,7 +1091,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                if (r == req) {
                        /* wait until it is processed */
                        dwc3_stop_active_transfer(dwc, dep->number);
-                       goto out0;
+                       goto out1;
                }
                dev_err(dwc->dev, "request %p was not queued to %s\n",
                                request, ep->name);
@@ -1099,6 +1099,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                goto out0;
        }
 
+out1:
        /* giveback the request */
        dwc3_gadget_giveback(dep, req, -ECONNRESET);
 
index 4c07ca9..7026919 100644 (file)
@@ -153,10 +153,10 @@ struct usb_ep_para{
 #define USB_BUSMODE_DTB                0x02
 
 /* Endpoint basic handle */
-#define ep_index(EP)           ((EP)->desc->bEndpointAddress & 0xF)
+#define ep_index(EP)           ((EP)->ep.desc->bEndpointAddress & 0xF)
 #define ep_maxpacket(EP)       ((EP)->ep.maxpacket)
 #define ep_is_in(EP)   ((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
-                       USB_DIR_IN) : ((EP)->desc->bEndpointAddress \
+                       USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
                        & USB_DIR_IN) == USB_DIR_IN)
 
 /* ep0 transfer state */
index 262acfd..2ab0388 100644 (file)
@@ -61,6 +61,7 @@
 #include <mach/irqs.h>
 #include <mach/board.h>
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
+#include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #endif
 
index 17cfb8a..c304354 100644 (file)
@@ -281,14 +281,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
                }
        }
 
+       /* Hold PHYs in reset while initializing EHCI controller */
        if (pdata->phy_reset) {
                if (gpio_is_valid(pdata->reset_gpio_port[0]))
-                       gpio_request_one(pdata->reset_gpio_port[0],
-                                        GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+                       gpio_set_value_cansleep(pdata->reset_gpio_port[0], 0);
 
                if (gpio_is_valid(pdata->reset_gpio_port[1]))
-                       gpio_request_one(pdata->reset_gpio_port[1],
-                                        GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+                       gpio_set_value_cansleep(pdata->reset_gpio_port[1], 0);
 
                /* Hold the PHY in RESET for enough time till DIR is high */
                udelay(10);
@@ -330,6 +329,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
 
        ehci_reset(omap_ehci);
+       ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+       if (ret) {
+               dev_err(dev, "failed to add hcd with err %d\n", ret);
+               goto err_add_hcd;
+       }
 
        if (pdata->phy_reset) {
                /* Hold the PHY in RESET for enough time till
@@ -344,12 +348,6 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
                        gpio_set_value_cansleep(pdata->reset_gpio_port[1], 1);
        }
 
-       ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
-       if (ret) {
-               dev_err(dev, "failed to add hcd with err %d\n", ret);
-               goto err_add_hcd;
-       }
-
        /* root ports should always stay powered */
        ehci_port_power(omap_ehci, 1);
 
index 2732ef6..7b01094 100644 (file)
@@ -462,6 +462,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
        }
 }
 
+/* Updates Link Status for a SuperSpeed port */
+static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+{
+       u32 pls = status_reg & PORT_PLS_MASK;
+
+       /* resume state is an xHCI internal state.
+        * Do not report it to usb core.
+        */
+       if (pls == XDEV_RESUME)
+               return;
+
+       /* When the CAS bit is set then warm reset
+        * should be performed on port
+        */
+       if (status_reg & PORT_CAS) {
+               /* The CAS bit can be set while the port is
+                * in any link state.
+                * Only roothubs have CAS bit, so we
+                * pretend to be in compliance mode
+                * unless we're already in compliance
+                * or the inactive state.
+                */
+               if (pls != USB_SS_PORT_LS_COMP_MOD &&
+                   pls != USB_SS_PORT_LS_SS_INACTIVE) {
+                       pls = USB_SS_PORT_LS_COMP_MOD;
+               }
+               /* Return also connection bit -
+                * hub state machine resets port
+                * when this bit is set.
+                */
+               pls |= USB_PORT_STAT_CONNECTION;
+       }
+       /* update status field */
+       *status |= pls;
+}
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                u16 wIndex, char *buf, u16 wLength)
 {
@@ -606,13 +642,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        else
                                status |= USB_PORT_STAT_POWER;
                }
-               /* Port Link State */
+               /* Update Port Link State for SuperSpeed ports */
                if (hcd->speed == HCD_USB3) {
-                       /* resume state is a xHCI internal state.
-                        * Do not report it to usb core.
-                        */
-                       if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
-                               status |= (temp & PORT_PLS_MASK);
+                       xhci_hub_report_link_state(&status, temp);
                }
                if (bus_state->port_c_suspend & (1 << wIndex))
                        status |= 1 << USB_PORT_FEAT_C_SUSPEND;
index 23b4aef..8275645 100644 (file)
@@ -885,6 +885,17 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
        num_trbs_free_temp = ep_ring->num_trbs_free;
        dequeue_temp = ep_ring->dequeue;
 
+       /* If we get two back-to-back stalls, and the first stalled transfer
+        * ends just before a link TRB, the dequeue pointer will be left on
+        * the link TRB by the code in the while loop.  So we have to update
+        * the dequeue pointer one segment further, or we'll jump off
+        * the segment into la-la-land.
+        */
+       if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+               ep_ring->deq_seg = ep_ring->deq_seg->next;
+               ep_ring->dequeue = ep_ring->deq_seg->trbs;
+       }
+
        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
                /* We have more usable TRBs */
                ep_ring->num_trbs_free++;
index de3d6e3..55c0785 100644 (file)
@@ -341,7 +341,11 @@ struct xhci_op_regs {
 #define PORT_PLC       (1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC       (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be performed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS       (1 << 24)
 /* wake on connect (enable) */
 #define PORT_WKCONN_E  (1 << 25)
 /* wake on disconnect (enable) */
index ef8d744..e090c79 100644 (file)
@@ -375,11 +375,21 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
         */
        if (list_empty(&qh->hep->urb_list)) {
                struct list_head        *head;
+               struct dma_controller   *dma = musb->dma_controller;
 
-               if (is_in)
+               if (is_in) {
                        ep->rx_reinit = 1;
-               else
+                       if (ep->rx_channel) {
+                               dma->channel_release(ep->rx_channel);
+                               ep->rx_channel = NULL;
+                       }
+               } else {
                        ep->tx_reinit = 1;
+                       if (ep->tx_channel) {
+                               dma->channel_release(ep->tx_channel);
+                               ep->tx_channel = NULL;
+                       }
+               }
 
                /* Clobber old pointers to this qh */
                musb_ep_set_qh(ep, is_in, NULL);
index d2a9a8e..0eabb04 100644 (file)
@@ -305,9 +305,8 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
 
                regulator_enable(twl->usb3v3);
                twl->asleep = 1;
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1);
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
-                                                               0x10);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
                status = USB_EVENT_ID;
                otg->default_a = true;
                twl->phy.state = OTG_STATE_A_IDLE;
@@ -316,12 +315,10 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
                atomic_notifier_call_chain(&twl->phy.notifier, status,
                                                        otg->gadget);
        } else  {
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR,
-                                                               0x10);
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
-                                                               0x1);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
        }
-       twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_LATCH_CLR, status);
+       twl6030_writeb(twl, TWL_MODULE_USB, status, USB_ID_INT_LATCH_CLR);
 
        return IRQ_HANDLED;
 }
@@ -343,7 +340,7 @@ static int twl6030_enable_irq(struct usb_phy *x)
 {
        struct twl6030_usb *twl = phy_to_twl(x);
 
-       twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1);
+       twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
        twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
        twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
 
index 3cfabcb..e7cf84f 100644 (file)
@@ -2,11 +2,11 @@
 # Physical Layer USB driver configuration
 #
 comment "USB Physical Layer drivers"
-       depends on USB
+       depends on USB || USB_GADGET
 
 config USB_ISP1301
        tristate "NXP ISP1301 USB transceiver support"
-       depends on USB
+       depends on USB || USB_GADGET
        depends on I2C
        help
          Say Y here to add support for the NXP ISP1301 USB transceiver driver.
index 73d25cd..1e71079 100644 (file)
@@ -93,6 +93,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+       { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -134,7 +135,13 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
        { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+       { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+       { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
        { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+       { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
+       { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+       { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+       { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
        { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
        { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
        { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
@@ -146,7 +153,11 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+       { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+       { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
        { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+       { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+       { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
        { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
index 81423f7..d47eb06 100644 (file)
@@ -222,14 +222,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
        metro_priv->throttled = 0;
        spin_unlock_irqrestore(&metro_priv->lock, flags);
 
-       /*
-        * Force low_latency on so that our tty_push actually forces the data
-        * through, otherwise it is scheduled, and with high data rates (like
-        * with OHCI) data can get lost.
-        */
-       if (tty)
-               tty->low_latency = 1;
-
        /* Clear the urb pipe. */
        usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);
 
index e668a24..417ab1b 100644 (file)
@@ -236,6 +236,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_G1             0xA001
 #define NOVATELWIRELESS_PRODUCT_G1_M           0xA002
 #define NOVATELWIRELESS_PRODUCT_G2             0xA010
+#define NOVATELWIRELESS_PRODUCT_MC551          0xB001
 
 /* AMOI PRODUCTS */
 #define AMOI_VENDOR_ID                         0x1614
@@ -496,6 +497,19 @@ static void option_instat_callback(struct urb *urb);
 
 /* MediaTek products */
 #define MEDIATEK_VENDOR_ID                     0x0e8d
+#define MEDIATEK_PRODUCT_DC_1COM               0x00a0
+#define MEDIATEK_PRODUCT_DC_4COM               0x00a5
+#define MEDIATEK_PRODUCT_DC_5COM               0x00a4
+#define MEDIATEK_PRODUCT_7208_1COM             0x7101
+#define MEDIATEK_PRODUCT_7208_2COM             0x7102
+#define MEDIATEK_PRODUCT_FP_1COM               0x0003
+#define MEDIATEK_PRODUCT_FP_2COM               0x0023
+#define MEDIATEK_PRODUCT_FPDC_1COM             0x0043
+#define MEDIATEK_PRODUCT_FPDC_2COM             0x0033
+
+/* Cellient products */
+#define CELLIENT_VENDOR_ID                     0x2692
+#define CELLIENT_PRODUCT_MEN200                        0x9005
 
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
@@ -549,6 +563,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
        .reserved = BIT(1),
 };
 
+static const struct option_blacklist_info net_intf2_blacklist = {
+       .reserved = BIT(2),
+};
+
 static const struct option_blacklist_info net_intf3_blacklist = {
        .reserved = BIT(3),
 };
@@ -734,6 +752,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+       /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+       { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1092,6 +1112,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
          0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1233,6 +1255,18 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) },        /* MediaTek MT6276M modem & app port */
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index a324a5d..11418da 100644 (file)
@@ -202,6 +202,12 @@ static int slave_configure(struct scsi_device *sdev)
                if (us->fflags & US_FL_NO_READ_CAPACITY_16)
                        sdev->no_read_capacity_16 = 1;
 
+               /*
+                * Many devices do not respond properly to READ_CAPACITY_16.
+                * Tell the SCSI layer to try READ_CAPACITY_10 first.
+                */
+               sdev->try_rc_10_first = 1;
+
                /* assume SPC3 or later devices support sense size > 18 */
                if (sdev->scsi_level > SCSI_SPC_2)
                        us->fflags |= US_FL_SANE_SENSE;
index caf22bf..1719886 100644 (file)
@@ -1107,13 +1107,6 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
                USB_SC_RBC, USB_PR_BULK, NULL,
                0 ),
 
-/* Feiya QDI U2 DISK, reported by Hans de Goede <hdegoede@redhat.com> */
-UNUSUAL_DEV( 0x090c, 0x1000, 0x0000, 0xffff,
-               "Feiya",
-               "QDI U2 DISK",
-               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-               US_FL_NO_READ_CAPACITY_16 ),
-
 /* aeb */
 UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
                "Feiya",
index 94dbd25..112156f 100644 (file)
@@ -191,7 +191,9 @@ static int vhost_worker(void *data)
        struct vhost_dev *dev = data;
        struct vhost_work *work = NULL;
        unsigned uninitialized_var(seq);
+       mm_segment_t oldfs = get_fs();
 
+       set_fs(USER_DS);
        use_mm(dev->mm);
 
        for (;;) {
@@ -229,6 +231,7 @@ static int vhost_worker(void *data)
 
        }
        unuse_mm(dev->mm);
+       set_fs(oldfs);
        return 0;
 }
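
This hunk wraps the worker loop in a get_fs()/set_fs() pair. A minimal sketch of the pattern, assuming a kernel thread that performs user-style memory accesses on behalf of its owner process (worker_thread and its body are hypothetical; mm_segment_t, get_fs(), set_fs() and USER_DS are the real interfaces of this kernel generation):

#include <linux/uaccess.h>
#include <linux/kthread.h>

static int worker_thread(void *data)
{
        mm_segment_t oldfs = get_fs();  /* kthreads normally run with KERNEL_DS */

        set_fs(USER_DS);                /* user-copy helpers now validate user pointers */
        /* ... service requests on behalf of the owner process ... */
        set_fs(oldfs);                  /* restore the original limit before exiting */
        return 0;
}
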
 
index 5066eee..58bd9c2 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
 
 #include <video/omapdss.h>
 
@@ -201,6 +202,28 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
 #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 
 /* PLATFORM DEVICE */
+static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+{
+       DSSDBG("pm notif %lu\n", v);
+
+       switch (v) {
+       case PM_SUSPEND_PREPARE:
+               DSSDBG("suspending displays\n");
+               return dss_suspend_all_devices();
+
+       case PM_POST_SUSPEND:
+               DSSDBG("resuming displays\n");
+               return dss_resume_all_devices();
+
+       default:
+               return 0;
+       }
+}
+
+static struct notifier_block omap_dss_pm_notif_block = {
+       .notifier_call = omap_dss_pm_notif,
+};
+
 static int __init omap_dss_probe(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
@@ -224,6 +247,8 @@ static int __init omap_dss_probe(struct platform_device *pdev)
        else if (pdata->default_device)
                core.default_display_name = pdata->default_device->name;
 
+       register_pm_notifier(&omap_dss_pm_notif_block);
+
        return 0;
 
 err_debugfs:
@@ -233,6 +258,8 @@ err_debugfs:
 
 static int omap_dss_remove(struct platform_device *pdev)
 {
+       unregister_pm_notifier(&omap_dss_pm_notif_block);
+
        dss_uninitialize_debugfs();
 
        dss_uninit_overlays(pdev);
@@ -247,25 +274,9 @@ static void omap_dss_shutdown(struct platform_device *pdev)
        dss_disable_all_devices();
 }
 
-static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       DSSDBG("suspend %d\n", state.event);
-
-       return dss_suspend_all_devices();
-}
-
-static int omap_dss_resume(struct platform_device *pdev)
-{
-       DSSDBG("resume\n");
-
-       return dss_resume_all_devices();
-}
-
 static struct platform_driver omap_dss_driver = {
        .remove         = omap_dss_remove,
        .shutdown       = omap_dss_shutdown,
-       .suspend        = omap_dss_suspend,
-       .resume         = omap_dss_resume,
        .driver         = {
                .name   = "omapdss",
                .owner  = THIS_MODULE,
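
The hunks above drop the legacy platform suspend/resume callbacks in favour of a PM notifier. A minimal sketch of that pattern (the example_* names are hypothetical; the notifier API and the PM_SUSPEND_PREPARE/PM_POST_SUSPEND events come from <linux/suspend.h> and <linux/notifier.h>):

#include <linux/suspend.h>
#include <linux/notifier.h>

static int example_pm_notif(struct notifier_block *nb, unsigned long event, void *unused)
{
        switch (event) {
        case PM_SUSPEND_PREPARE:
                /* quiesce the hardware before the system suspends */
                return NOTIFY_OK;
        case PM_POST_SUSPEND:
                /* bring the hardware back after the system has resumed */
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block example_pm_nb = {
        .notifier_call = example_pm_notif,
};

/* register_pm_notifier(&example_pm_nb) in probe(),
 * unregister_pm_notifier(&example_pm_nb) in remove() */
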
index 4749ac3..397d4ee 100644 (file)
@@ -384,7 +384,7 @@ void dispc_runtime_put(void)
        DSSDBG("dispc_runtime_put\n");
 
        r = pm_runtime_put_sync(&dispc.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
index ca8382d..14ce8cc 100644 (file)
@@ -1075,7 +1075,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
        DSSDBG("dsi_runtime_put\n");
 
        r = pm_runtime_put_sync(&dsi->pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 /* source clock for DSI PLL. this could also be PCLKFREE */
index 7706323..d2b5719 100644 (file)
@@ -731,7 +731,7 @@ static void dss_runtime_put(void)
        DSSDBG("dss_runtime_put\n");
 
        r = pm_runtime_put_sync(&dss.pdev->dev);
-       WARN_ON(r < 0 && r != -EBUSY);
+       WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
 }
 
 /* DEBUGFS */
index 8195c71..26a2430 100644 (file)
@@ -138,7 +138,7 @@ static void hdmi_runtime_put(void)
        DSSDBG("hdmi_runtime_put\n");
 
        r = pm_runtime_put_sync(&hdmi.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static int __init hdmi_init_display(struct omap_dss_device *dssdev)
index 3d8c206..7985fa1 100644 (file)
@@ -141,7 +141,7 @@ static void rfbi_runtime_put(void)
        DSSDBG("rfbi_runtime_put\n");
 
        r = pm_runtime_put_sync(&rfbi.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 void rfbi_bus_lock(void)
index 2b89739..3907c8b 100644 (file)
@@ -402,7 +402,7 @@ static void venc_runtime_put(void)
        DSSDBG("venc_runtime_put\n");
 
        r = pm_runtime_put_sync(&venc.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static const struct venc_config *venc_timings_to_config(
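
The WARN_ON() change repeated across the omapdss hunks above tolerates -ENOSYS, which, as far as the stubs of this kernel generation go, is what pm_runtime_put_sync() returns when runtime PM is not built in (treat that as an assumption). Reduced to its core the pattern is:

        r = pm_runtime_put_sync(dev);
        WARN_ON(r < 0 && r != -ENOSYS); /* -ENOSYS: runtime PM compiled out, not a real failure */
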
index bfbc15c..0908e60 100644 (file)
@@ -47,7 +47,7 @@ struct virtio_balloon
        struct task_struct *thread;
 
        /* Waiting for host to ack the pages we released. */
-       struct completion acked;
+       wait_queue_head_t acked;
 
        /* Number of balloon pages we've told the Host we're not using. */
        unsigned int num_pages;
@@ -89,29 +89,25 @@ static struct page *balloon_pfn_to_page(u32 pfn)
 
 static void balloon_ack(struct virtqueue *vq)
 {
-       struct virtio_balloon *vb;
-       unsigned int len;
+       struct virtio_balloon *vb = vq->vdev->priv;
 
-       vb = virtqueue_get_buf(vq, &len);
-       if (vb)
-               complete(&vb->acked);
+       wake_up(&vb->acked);
 }
 
 static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 {
        struct scatterlist sg;
+       unsigned int len;
 
        sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
 
-       init_completion(&vb->acked);
-
        /* We should always be able to add one buffer to an empty queue. */
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
        virtqueue_kick(vq);
 
        /* When host has read buffer, this completes via balloon_ack */
-       wait_for_completion(&vb->acked);
+       wait_event(vb->acked, virtqueue_get_buf(vq, &len));
 }
 
 static void set_page_pfns(u32 pfns[], struct page *page)
@@ -231,12 +227,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
  */
 static void stats_request(struct virtqueue *vq)
 {
-       struct virtio_balloon *vb;
-       unsigned int len;
+       struct virtio_balloon *vb = vq->vdev->priv;
 
-       vb = virtqueue_get_buf(vq, &len);
-       if (!vb)
-               return;
        vb->need_stats_update = 1;
        wake_up(&vb->config_change);
 }
@@ -245,11 +237,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
 {
        struct virtqueue *vq;
        struct scatterlist sg;
+       unsigned int len;
 
        vb->need_stats_update = 0;
        update_balloon_stats(vb);
 
        vq = vb->stats_vq;
+       if (!virtqueue_get_buf(vq, &len))
+               return;
        sg_init_one(&sg, vb->stats, sizeof(vb->stats));
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
@@ -358,6 +353,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
        INIT_LIST_HEAD(&vb->pages);
        vb->num_pages = 0;
        init_waitqueue_head(&vb->config_change);
+       init_waitqueue_head(&vb->acked);
        vb->vdev = vdev;
        vb->need_stats_update = 0;
 
index 2b76381..1eff743 100644 (file)
@@ -146,7 +146,7 @@ struct cmn_registers {
 }  __attribute__((packed));
 
 static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump;
+static unsigned int allow_kdump = 1;
 static unsigned int is_icru;
 static DEFINE_SPINLOCK(rom_lock);
 static void *cru_rom_addr;
@@ -756,6 +756,8 @@ error:
 static void hpwdt_exit_nmi_decoding(void)
 {
        unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
+       unregister_nmi_handler(NMI_SERR, "hpwdt");
+       unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
        if (cru_rom_addr)
                iounmap(cru_rom_addr);
 }
index bc47e90..9c2c27c 100644 (file)
@@ -699,3 +699,4 @@ MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRV_NAME);
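
The one-line MODULE_ALIAS addition above is what lets udev autoload the driver: platform devices emit MODALIAS=platform:<name>, so the module must carry a matching alias. A hedged, generic sketch with hypothetical names:

#include <linux/module.h>
#include <linux/platform_device.h>

#define EXAMPLE_DRV_NAME "example_wdt"          /* hypothetical driver/device name */

static struct platform_driver example_driver = {
        .driver = {
                .name = EXAMPLE_DRV_NAME,
        },
};
module_platform_driver(example_driver);

/* matches the uevent MODALIAS=platform:example_wdt */
MODULE_ALIAS("platform:" EXAMPLE_DRV_NAME);
MODULE_LICENSE("GPL");
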
index afcd136..e4841c3 100644 (file)
@@ -4,7 +4,7 @@
  * Watchdog driver for ARM SP805 watchdog module
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2 or later. This program is licensed "as is" without any
@@ -331,6 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
 
 module_amba_driver(sp805_wdt_driver);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
 MODULE_LICENSE("GPL");
index 672d169..ef8edec 100644 (file)
@@ -349,7 +349,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                        sizeof(struct watchdog_info)) ? -EFAULT : 0;
        case WDIOC_GETSTATUS:
                err = watchdog_get_status(wdd, &val);
-               if (err)
+               if (err == -ENODEV)
                        return err;
                return put_user(val, p);
        case WDIOC_GETBOOTSTATUS:
index 8f7d123..a383c18 100644 (file)
@@ -179,61 +179,74 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
                                struct ulist *parents, int level,
-                               struct btrfs_key *key, u64 time_seq,
+                               struct btrfs_key *key_for_search, u64 time_seq,
                                u64 wanted_disk_byte,
                                const u64 *extent_item_pos)
 {
-       int ret;
-       int slot = path->slots[level];
-       struct extent_buffer *eb = path->nodes[level];
+       int ret = 0;
+       int slot;
+       struct extent_buffer *eb;
+       struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        struct extent_inode_elem *eie = NULL;
        u64 disk_byte;
-       u64 wanted_objectid = key->objectid;
 
-add_parent:
-       if (level == 0 && extent_item_pos) {
-               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
-               ret = check_extent_in_eb(key, eb, fi, *extent_item_pos, &eie);
+       if (level != 0) {
+               eb = path->nodes[level];
+               ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
                if (ret < 0)
                        return ret;
-       }
-       ret = ulist_add(parents, eb->start, (unsigned long)eie, GFP_NOFS);
-       if (ret < 0)
-               return ret;
-
-       if (level != 0)
                return 0;
+       }
 
        /*
-        * if the current leaf is full with EXTENT_DATA items, we must
-        * check the next one if that holds a reference as well.
-        * ref->count cannot be used to skip this check.
-        * repeat this until we don't find any additional EXTENT_DATA items.
+        * We normally enter this function with the path already pointing to
+        * the first item to check. But sometimes, we may enter it with
+        * slot==nritems. In that case, go to the next leaf before we continue.
         */
-       while (1) {
-               eie = NULL;
+       if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
                ret = btrfs_next_old_leaf(root, path, time_seq);
-               if (ret < 0)
-                       return ret;
-               if (ret)
-                       return 0;
 
+       while (!ret) {
                eb = path->nodes[0];
-               for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
-                       btrfs_item_key_to_cpu(eb, key, slot);
-                       if (key->objectid != wanted_objectid ||
-                           key->type != BTRFS_EXTENT_DATA_KEY)
-                               return 0;
-                       fi = btrfs_item_ptr(eb, slot,
-                                               struct btrfs_file_extent_item);
-                       disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-                       if (disk_byte == wanted_disk_byte)
-                               goto add_parent;
+               slot = path->slots[0];
+
+               btrfs_item_key_to_cpu(eb, &key, slot);
+
+               if (key.objectid != key_for_search->objectid ||
+                   key.type != BTRFS_EXTENT_DATA_KEY)
+                       break;
+
+               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+               disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+
+               if (disk_byte == wanted_disk_byte) {
+                       eie = NULL;
+                       if (extent_item_pos) {
+                               ret = check_extent_in_eb(&key, eb, fi,
+                                               *extent_item_pos,
+                                               &eie);
+                               if (ret < 0)
+                                       break;
+                       }
+                       if (!ret) {
+                               ret = ulist_add(parents, eb->start,
+                                               (unsigned long)eie, GFP_NOFS);
+                               if (ret < 0)
+                                       break;
+                               if (!extent_item_pos) {
+                                       ret = btrfs_next_old_leaf(root, path,
+                                                       time_seq);
+                                       continue;
+                               }
+                       }
                }
+               ret = btrfs_next_old_item(root, path, time_seq);
        }
 
-       return 0;
+       if (ret > 0)
+               ret = 0;
+       return ret;
 }
 
 /*
@@ -250,7 +263,6 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_key root_key;
-       struct btrfs_key key = {0};
        struct extent_buffer *eb;
        int ret = 0;
        int root_level;
@@ -289,17 +301,19 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                goto out;
 
        eb = path->nodes[level];
-       if (!eb) {
-               WARN_ON(1);
-               ret = 1;
-               goto out;
+       while (!eb) {
+               if (!level) {
+                       WARN_ON(1);
+                       ret = 1;
+                       goto out;
+               }
+               level--;
+               eb = path->nodes[level];
        }
 
-       if (level == 0)
-               btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
-
-       ret = add_all_parents(root, path, parents, level, &key, time_seq,
-                               ref->wanted_disk_byte, extent_item_pos);
+       ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
+                               time_seq, ref->wanted_disk_byte,
+                               extent_item_pos);
 out:
        btrfs_free_path(path);
        return ret;
@@ -825,6 +839,7 @@ again:
                        }
                        ret = __add_delayed_refs(head, delayed_ref_seq,
                                                 &prefs_delayed);
+                       mutex_unlock(&head->mutex);
                        if (ret) {
                                spin_unlock(&delayed_refs->lock);
                                goto out;
@@ -918,8 +933,6 @@ again:
        }
 
 out:
-       if (head)
-               mutex_unlock(&head->mutex);
        btrfs_free_path(path);
        while (!list_empty(&prefs)) {
                ref = list_first_entry(&prefs, struct __prelim_ref, list);
index 15cbc2b..8206b39 100644 (file)
@@ -1024,11 +1024,18 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
                if (!looped && !tm)
                        return 0;
                /*
-                * we must have key remove operations in the log before the
-                * replace operation.
+                * if there are no tree operations for the oldest root, we simply
+                * return it. this should only happen if that (old) root is at
+                * level 0.
                 */
-               BUG_ON(!tm);
+               if (!tm)
+                       break;
 
+               /*
+                * if there's an operation that's not a root replacement, we
+                * found the oldest version of our root. normally, we'll find a
+                * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
+                */
                if (tm->op != MOD_LOG_ROOT_REPLACE)
                        break;
 
@@ -1087,11 +1094,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
                                                      tm->generation);
                        break;
                case MOD_LOG_KEY_ADD:
-                       if (tm->slot != n - 1) {
-                               o_dst = btrfs_node_key_ptr_offset(tm->slot);
-                               o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
-                               memmove_extent_buffer(eb, o_dst, o_src, p_size);
-                       }
+                       /* if a move operation is needed it's in the log */
                        n--;
                        break;
                case MOD_LOG_MOVE_KEYS:
@@ -1192,16 +1195,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        }
 
        tm = tree_mod_log_search(root->fs_info, logical, time_seq);
-       /*
-        * there was an item in the log when __tree_mod_log_oldest_root
-        * returned. this one must not go away, because the time_seq passed to
-        * us must be blocking its removal.
-        */
-       BUG_ON(!tm);
-
        if (old_root)
-               eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
-                                              root->nodesize);
+               eb = alloc_dummy_extent_buffer(logical, root->nodesize);
        else
                eb = btrfs_clone_extent_buffer(root->node);
        btrfs_tree_read_unlock(root->node);
@@ -1216,7 +1211,10 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
                btrfs_set_header_level(eb, old_root->level);
                btrfs_set_header_generation(eb, old_generation);
        }
-       __tree_mod_log_rewind(eb, time_seq, tm);
+       if (tm)
+               __tree_mod_log_rewind(eb, time_seq, tm);
+       else
+               WARN_ON(btrfs_header_level(eb) != 0);
        extent_buffer_get(eb);
 
        return eb;
@@ -2995,7 +2993,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 static void insert_ptr(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *path,
                       struct btrfs_disk_key *key, u64 bytenr,
-                      int slot, int level, int tree_mod_log)
+                      int slot, int level)
 {
        struct extent_buffer *lower;
        int nritems;
@@ -3008,7 +3006,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
        BUG_ON(slot > nritems);
        BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
        if (slot != nritems) {
-               if (tree_mod_log && level)
+               if (level)
                        tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
                                             slot, nritems - slot);
                memmove_extent_buffer(lower,
@@ -3016,7 +3014,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
                              btrfs_node_key_ptr_offset(slot),
                              (nritems - slot) * sizeof(struct btrfs_key_ptr));
        }
-       if (tree_mod_log && level) {
+       if (level) {
                ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
                                              MOD_LOG_KEY_ADD);
                BUG_ON(ret < 0);
@@ -3104,7 +3102,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(split);
 
        insert_ptr(trans, root, path, &disk_key, split->start,
-                  path->slots[level + 1] + 1, level + 1, 1);
+                  path->slots[level + 1] + 1, level + 1);
 
        if (path->slots[level] >= mid) {
                path->slots[level] -= mid;
@@ -3641,7 +3639,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
        btrfs_set_header_nritems(l, mid);
        btrfs_item_key(right, &disk_key, 0);
        insert_ptr(trans, root, path, &disk_key, right->start,
-                  path->slots[1] + 1, 1, 0);
+                  path->slots[1] + 1, 1);
 
        btrfs_mark_buffer_dirty(right);
        btrfs_mark_buffer_dirty(l);
@@ -3848,7 +3846,7 @@ again:
                if (mid <= slot) {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                  path->slots[1] + 1, 1, 0);
+                                  path->slots[1] + 1, 1);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -3857,7 +3855,7 @@ again:
                } else {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                         path->slots[1], 1, 0);
+                                         path->slots[1], 1);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -5121,6 +5119,18 @@ again:
 
                if (!path->skip_locking) {
                        ret = btrfs_try_tree_read_lock(next);
+                       if (!ret && time_seq) {
+                               /*
+                                * If we don't get the lock, we may be racing
+                                * with push_leaf_left, holding that lock while
+                                * itself waiting for the leaf we've currently
+                                * locked. To solve this situation, we give up
+                                * on our lock and cycle.
+                                */
+                               btrfs_release_path(path);
+                               cond_resched();
+                               goto again;
+                       }
                        if (!ret) {
                                btrfs_set_path_blocking(path);
                                btrfs_tree_read_lock(next);
index 8b73b2d..fa5c45b 100644 (file)
@@ -2755,13 +2755,18 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
                        u64 time_seq);
-static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+static inline int btrfs_next_old_item(struct btrfs_root *root,
+                                     struct btrfs_path *p, u64 time_seq)
 {
        ++p->slots[0];
        if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
-               return btrfs_next_leaf(root, p);
+               return btrfs_next_old_leaf(root, p, time_seq);
        return 0;
 }
+static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+{
+       return btrfs_next_old_item(root, p, 0);
+}
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
 int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
index e1890b1..2936ca4 100644 (file)
@@ -2354,12 +2354,17 @@ retry_root_backup:
                                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
        if (ret)
                goto recovery_tree_root;
-
        csum_root->track_dirty = 1;
 
        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
 
+       ret = btrfs_recover_balance(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to recover balance\n");
+               goto fail_block_groups;
+       }
+
        ret = btrfs_init_dev_stats(fs_info);
        if (ret) {
                printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
@@ -2485,20 +2490,23 @@ retry_root_backup:
                goto fail_trans_kthread;
        }
 
-       if (!(sb->s_flags & MS_RDONLY)) {
-               down_read(&fs_info->cleanup_work_sem);
-               err = btrfs_orphan_cleanup(fs_info->fs_root);
-               if (!err)
-                       err = btrfs_orphan_cleanup(fs_info->tree_root);
-               up_read(&fs_info->cleanup_work_sem);
+       if (sb->s_flags & MS_RDONLY)
+               return 0;
 
-               if (!err)
-                       err = btrfs_recover_balance(fs_info->tree_root);
+       down_read(&fs_info->cleanup_work_sem);
+       if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
+           (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
+               up_read(&fs_info->cleanup_work_sem);
+               close_ctree(tree_root);
+               return ret;
+       }
+       up_read(&fs_info->cleanup_work_sem);
 
-               if (err) {
-                       close_ctree(tree_root);
-                       return err;
-               }
+       ret = btrfs_resume_balance_async(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to resume balance\n");
+               close_ctree(tree_root);
+               return ret;
        }
 
        return 0;
@@ -3426,6 +3434,7 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                mutex_unlock(&head->mutex);
                                btrfs_put_delayed_ref(ref);
 
+                               spin_lock(&delayed_refs->lock);
                                continue;
                        }
 
index 4b5a1e1..6e1d367 100644 (file)
@@ -2347,12 +2347,10 @@ next:
        return count;
 }
 
-
 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
-                       unsigned long num_refs)
+                              unsigned long num_refs,
+                              struct list_head *first_seq)
 {
-       struct list_head *first_seq = delayed_refs->seq_head.next;
-
        spin_unlock(&delayed_refs->lock);
        pr_debug("waiting for more refs (num %ld, first %p)\n",
                 num_refs, first_seq);
@@ -2381,6 +2379,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
+       struct list_head *first_seq = NULL;
        int ret;
        u64 delayed_start;
        int run_all = count == (unsigned long)-1;
@@ -2436,8 +2435,10 @@ again:
                                 */
                                consider_waiting = 1;
                                num_refs = delayed_refs->num_entries;
+                               first_seq = root->fs_info->tree_mod_seq_list.next;
                        } else {
-                               wait_for_more_refs(delayed_refs, num_refs);
+                               wait_for_more_refs(delayed_refs,
+                                                  num_refs, first_seq);
                                /*
                                 * after waiting, things have changed. we
                                 * dropped the lock and someone else might have
index aaa12c1..01c21b6 100644 (file)
@@ -3324,6 +3324,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
                             writepage_t writepage, void *data,
                             void (*flush_fn)(void *))
 {
+       struct inode *inode = mapping->host;
        int ret = 0;
        int done = 0;
        int nr_to_write_done = 0;
@@ -3334,6 +3335,18 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
        int scanned = 0;
        int tag;
 
+       /*
+        * We have to hold onto the inode so that ordered extents can do their
+        * work when the IO finishes.  The alternative to this is failing to add
+        * an ordered extent if the igrab() fails there and that is a huge pain
+        * to deal with, so instead just hold onto the inode throughout the
+        * writepages operation.  If it fails here we are freeing up the inode
+        * anyway and we'd rather not waste our time writing out stuff that is
+        * going to be truncated anyway.
+        */
+       if (!igrab(inode))
+               return 0;
+
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
@@ -3428,6 +3441,7 @@ retry:
                index = 0;
                goto retry;
        }
+       btrfs_add_delayed_iput(inode);
        return ret;
 }
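
The igrab() added above pins the inode for the whole writeback pass. Reduced to the bare pattern (a hedged sketch; igrab() and iput() are the generic VFS calls, while btrfs actually releases the reference later through btrfs_add_delayed_iput() as the hunk shows):

#include <linux/fs.h>

static int write_all_pages(struct inode *inode)
{
        if (!igrab(inode))      /* NULL: inode already being torn down, nothing to write */
                return 0;

        /* ... run the writepages loop ... */

        iput(inode);            /* or queue a delayed iput from IO completion context */
        return 0;
}
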
 
index 70dc8ca..9aa01ec 100644 (file)
@@ -1334,7 +1334,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
                                    loff_t *ppos, size_t count, size_t ocount)
 {
        struct file *file = iocb->ki_filp;
-       struct inode *inode = fdentry(file)->d_inode;
        struct iov_iter i;
        ssize_t written;
        ssize_t written_buffered;
@@ -1344,18 +1343,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
        written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
                                            count, ocount);
 
-       /*
-        * the generic O_DIRECT will update in-memory i_size after the
-        * DIOs are done.  But our endio handlers that update the on
-        * disk i_size never update past the in memory i_size.  So we
-        * need one more update here to catch any additions to the
-        * file
-        */
-       if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
-               btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
-               mark_inode_dirty(inode);
-       }
-
        if (written < 0 || written == count)
                return written;
 
index 81296c5..6c4e2ba 100644 (file)
@@ -1543,29 +1543,26 @@ again:
        end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
        /*
-        * XXX - this can go away after a few releases.
-        *
-        * since the only user of btrfs_remove_free_space is the tree logging
-        * stuff, and the only way to test that is under crash conditions, we
-        * want to have this debug stuff here just in case somethings not
-        * working.  Search the bitmap for the space we are trying to use to
-        * make sure its actually there.  If its not there then we need to stop
-        * because something has gone wrong.
+        * We need to search for bits in this bitmap.  We could only cover some
+        * of the extent in this bitmap thanks to how we add space, so we need
+        * to search for as much as it as we can and clear that amount, and then
+        * to search for as much of it as we can and clear that amount, and then
         */
        search_start = *offset;
-       search_bytes = *bytes;
+       search_bytes = ctl->unit;
        search_bytes = min(search_bytes, end - search_start + 1);
        ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
        BUG_ON(ret < 0 || search_start != *offset);
 
-       if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
-               *bytes -= end - *offset + 1;
-               *offset = end + 1;
-       } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
-               *bytes = 0;
-       }
+       /* We may have found more bits than what we need */
+       search_bytes = min(search_bytes, *bytes);
+
+       /* Cannot clear past the end of the bitmap */
+       search_bytes = min(search_bytes, end - search_start + 1);
+
+       bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
+       *offset += search_bytes;
+       *bytes -= search_bytes;
 
        if (*bytes) {
                struct rb_node *next = rb_next(&bitmap_info->offset_index);
@@ -1596,7 +1593,7 @@ again:
                 * everything over again.
                 */
                search_start = *offset;
-               search_bytes = *bytes;
+               search_bytes = ctl->unit;
                ret = search_bitmap(ctl, bitmap_info, &search_start,
                                    &search_bytes);
                if (ret < 0 || search_start != *offset)
@@ -1879,12 +1876,14 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
-       struct btrfs_free_space *next_info = NULL;
        int ret = 0;
 
        spin_lock(&ctl->tree_lock);
 
 again:
+       if (!bytes)
+               goto out_lock;
+
        info = tree_search_offset(ctl, offset, 0, 0);
        if (!info) {
                /*
@@ -1905,88 +1904,48 @@ again:
                }
        }
 
-       if (info->bytes < bytes && rb_next(&info->offset_index)) {
-               u64 end;
-               next_info = rb_entry(rb_next(&info->offset_index),
-                                            struct btrfs_free_space,
-                                            offset_index);
-
-               if (next_info->bitmap)
-                       end = next_info->offset +
-                             BITS_PER_BITMAP * ctl->unit - 1;
-               else
-                       end = next_info->offset + next_info->bytes;
-
-               if (next_info->bytes < bytes ||
-                   next_info->offset > offset || offset > end) {
-                       printk(KERN_CRIT "Found free space at %llu, size %llu,"
-                             " trying to use %llu\n",
-                             (unsigned long long)info->offset,
-                             (unsigned long long)info->bytes,
-                             (unsigned long long)bytes);
-                       WARN_ON(1);
-                       ret = -EINVAL;
-                       goto out_lock;
-               }
-
-               info = next_info;
-       }
-
-       if (info->bytes == bytes) {
+       if (!info->bitmap) {
                unlink_free_space(ctl, info);
-               if (info->bitmap) {
-                       kfree(info->bitmap);
-                       ctl->total_bitmaps--;
-               }
-               kmem_cache_free(btrfs_free_space_cachep, info);
-               ret = 0;
-               goto out_lock;
-       }
-
-       if (!info->bitmap && info->offset == offset) {
-               unlink_free_space(ctl, info);
-               info->offset += bytes;
-               info->bytes -= bytes;
-               ret = link_free_space(ctl, info);
-               WARN_ON(ret);
-               goto out_lock;
-       }
+               if (offset == info->offset) {
+                       u64 to_free = min(bytes, info->bytes);
+
+                       info->bytes -= to_free;
+                       info->offset += to_free;
+                       if (info->bytes) {
+                               ret = link_free_space(ctl, info);
+                               WARN_ON(ret);
+                       } else {
+                               kmem_cache_free(btrfs_free_space_cachep, info);
+                       }
 
-       if (!info->bitmap && info->offset <= offset &&
-           info->offset + info->bytes >= offset + bytes) {
-               u64 old_start = info->offset;
-               /*
-                * we're freeing space in the middle of the info,
-                * this can happen during tree log replay
-                *
-                * first unlink the old info and then
-                * insert it again after the hole we're creating
-                */
-               unlink_free_space(ctl, info);
-               if (offset + bytes < info->offset + info->bytes) {
-                       u64 old_end = info->offset + info->bytes;
+                       offset += to_free;
+                       bytes -= to_free;
+                       goto again;
+               } else {
+                       u64 old_end = info->bytes + info->offset;
 
-                       info->offset = offset + bytes;
-                       info->bytes = old_end - info->offset;
+                       info->bytes = offset - info->offset;
                        ret = link_free_space(ctl, info);
                        WARN_ON(ret);
                        if (ret)
                                goto out_lock;
-               } else {
-                       /* the hole we're creating ends at the end
-                        * of the info struct, just free the info
-                        */
-                       kmem_cache_free(btrfs_free_space_cachep, info);
-               }
-               spin_unlock(&ctl->tree_lock);
 
-               /* step two, insert a new info struct to cover
-                * anything before the hole
-                */
-               ret = btrfs_add_free_space(block_group, old_start,
-                                          offset - old_start);
-               WARN_ON(ret); /* -ENOMEM */
-               goto out;
+                       /* Not enough bytes in this entry to satisfy us */
+                       if (old_end < offset + bytes) {
+                               bytes -= old_end - offset;
+                               offset = old_end;
+                               goto again;
+                       } else if (old_end == offset + bytes) {
+                               /* all done */
+                               goto out_lock;
+                       }
+                       spin_unlock(&ctl->tree_lock);
+
+                       ret = btrfs_add_free_space(block_group, offset + bytes,
+                                                  old_end - (offset + bytes));
+                       WARN_ON(ret);
+                       goto out;
+               }
        }
 
        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
index a4f0250..a7d1921 100644 (file)
@@ -987,7 +987,7 @@ static noinline void async_cow_start(struct btrfs_work *work)
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0) {
-               iput(async_cow->inode);
+               btrfs_add_delayed_iput(async_cow->inode);
                async_cow->inode = NULL;
        }
 }
@@ -1023,7 +1023,7 @@ static noinline void async_cow_free(struct btrfs_work *work)
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        if (async_cow->inode)
-               iput(async_cow->inode);
+               btrfs_add_delayed_iput(async_cow->inode);
        kfree(async_cow);
 }
 
@@ -3754,7 +3754,7 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
        if (root->fs_info->log_root_recovering) {
-               BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+               BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
                                 &BTRFS_I(inode)->runtime_flags));
                goto no_delete;
        }
@@ -5876,8 +5876,17 @@ map:
        bh_result->b_size = len;
        bh_result->b_bdev = em->bdev;
        set_buffer_mapped(bh_result);
-       if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
-               set_buffer_new(bh_result);
+       if (create) {
+               if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+                       set_buffer_new(bh_result);
+
+               /*
+                * Need to update the i_size under the extent lock so buffered
+                * readers will get the updated i_size when we unlock.
+                */
+               if (start + len > i_size_read(inode))
+                       i_size_write(inode, start + len);
+       }
 
        free_extent_map(em);
 
@@ -6360,12 +6369,48 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                 */
                ordered = btrfs_lookup_ordered_range(inode, lockstart,
                                                     lockend - lockstart + 1);
-               if (!ordered)
+
+               /*
+                * We need to make sure there are no buffered pages in this
+                * range either; we could have raced between the invalidate in
+                * generic_file_direct_write and locking the extent.  The
+                * invalidate needs to happen so that reads after a write do not
+                * get stale data.
+                */
+               if (!ordered && (!writing ||
+                   !test_range_bit(&BTRFS_I(inode)->io_tree,
+                                   lockstart, lockend, EXTENT_UPTODATE, 0,
+                                   cached_state)))
                        break;
+
                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                                     &cached_state, GFP_NOFS);
-               btrfs_start_ordered_extent(inode, ordered, 1);
-               btrfs_put_ordered_extent(ordered);
+
+               if (ordered) {
+                       btrfs_start_ordered_extent(inode, ordered, 1);
+                       btrfs_put_ordered_extent(ordered);
+               } else {
+                       /* Screw you mmap */
+                       ret = filemap_write_and_wait_range(file->f_mapping,
+                                                          lockstart,
+                                                          lockend);
+                       if (ret)
+                               goto out;
+
+                       /*
+                        * If we found a page that couldn't be invalidated just
+                        * fall back to buffered.
+                        */
+                       ret = invalidate_inode_pages2_range(file->f_mapping,
+                                       lockstart >> PAGE_CACHE_SHIFT,
+                                       lockend >> PAGE_CACHE_SHIFT);
+                       if (ret) {
+                               if (ret == -EBUSY)
+                                       ret = 0;
+                               goto out;
+                       }
+               }
+
                cond_resched();
        }
 
index 497c530..e440aa6 100644 (file)
@@ -339,7 +339,7 @@ struct btrfs_ioctl_get_dev_stats {
 #define BTRFS_IOC_WAIT_SYNC  _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
 #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
                                   struct btrfs_ioctl_vol_args_v2)
-#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
+#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
 #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
 #define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
                              struct btrfs_ioctl_scrub_args)
index 0eb9a4d..e239915 100644 (file)
@@ -1187,6 +1187,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
 
+               ret = btrfs_resume_balance_async(fs_info);
+               if (ret)
+                       goto restore;
+
                sb->s_flags &= ~MS_RDONLY;
        }
 
index 2017d0f..8abeae4 100644 (file)
@@ -690,6 +690,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
        kfree(name);
 
        iput(inode);
+
+       btrfs_run_delayed_items(trans, root);
        return ret;
 }
 
@@ -895,6 +897,7 @@ again:
                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
                                                         victim_name_len);
+                               btrfs_run_delayed_items(trans, root);
                        }
                        kfree(victim_name);
                        ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
@@ -1475,6 +1478,9 @@ again:
                        ret = btrfs_unlink_inode(trans, root, dir, inode,
                                                 name, name_len);
                        BUG_ON(ret);
+
+                       btrfs_run_delayed_items(trans, root);
+
                        kfree(name);
                        iput(inode);
 
index 8a3d259..ecaad40 100644 (file)
@@ -2845,31 +2845,48 @@ out:
 
 static int balance_kthread(void *data)
 {
-       struct btrfs_balance_control *bctl =
-                       (struct btrfs_balance_control *)data;
-       struct btrfs_fs_info *fs_info = bctl->fs_info;
+       struct btrfs_fs_info *fs_info = data;
        int ret = 0;
 
        mutex_lock(&fs_info->volume_mutex);
        mutex_lock(&fs_info->balance_mutex);
 
-       set_balance_control(bctl);
-
-       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
-               printk(KERN_INFO "btrfs: force skipping balance\n");
-       } else {
+       if (fs_info->balance_ctl) {
                printk(KERN_INFO "btrfs: continuing balance\n");
-               ret = btrfs_balance(bctl, NULL);
+               ret = btrfs_balance(fs_info->balance_ctl, NULL);
        }
 
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
+
        return ret;
 }
 
-int btrfs_recover_balance(struct btrfs_root *tree_root)
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
 {
        struct task_struct *tsk;
+
+       spin_lock(&fs_info->balance_lock);
+       if (!fs_info->balance_ctl) {
+               spin_unlock(&fs_info->balance_lock);
+               return 0;
+       }
+       spin_unlock(&fs_info->balance_lock);
+
+       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+               printk(KERN_INFO "btrfs: force skipping balance\n");
+               return 0;
+       }
+
+       tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
+       if (IS_ERR(tsk))
+               return PTR_ERR(tsk);
+
+       return 0;
+}
+
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
+{
        struct btrfs_balance_control *bctl;
        struct btrfs_balance_item *item;
        struct btrfs_disk_balance_args disk_bargs;
@@ -2882,29 +2899,30 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        if (!path)
                return -ENOMEM;
 
-       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
-       if (!bctl) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
        key.objectid = BTRFS_BALANCE_OBJECTID;
        key.type = BTRFS_BALANCE_ITEM_KEY;
        key.offset = 0;
 
-       ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
        if (ret < 0)
-               goto out_bctl;
+               goto out;
        if (ret > 0) { /* ret = -ENOENT; */
                ret = 0;
-               goto out_bctl;
+               goto out;
+       }
+
+       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+       if (!bctl) {
+               ret = -ENOMEM;
+               goto out;
        }
 
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
 
-       bctl->fs_info = tree_root->fs_info;
-       bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
+       bctl->fs_info = fs_info;
+       bctl->flags = btrfs_balance_flags(leaf, item);
+       bctl->flags |= BTRFS_BALANCE_RESUME;
 
        btrfs_balance_data(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
@@ -2913,14 +2931,13 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        btrfs_balance_sys(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
 
-       tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
-       if (IS_ERR(tsk))
-               ret = PTR_ERR(tsk);
-       else
-               goto out;
+       mutex_lock(&fs_info->volume_mutex);
+       mutex_lock(&fs_info->balance_mutex);
 
-out_bctl:
-       kfree(bctl);
+       set_balance_control(bctl);
+
+       mutex_unlock(&fs_info->balance_mutex);
+       mutex_unlock(&fs_info->volume_mutex);
 out:
        btrfs_free_path(path);
        return ret;
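
Taken together, the two hunks above split balance recovery in two: btrfs_recover_balance() only reloads the saved balance item and installs it as the balance control, while btrfs_resume_balance_async() later peeks at that state under the lock, honors skip_balance, and hands the real work to a freshly spawned kernel thread. A rough userspace sketch of that fire-and-forget shape, with a pthread standing in for kthread_run() and plain ints for balance_ctl and the mount option (illustrative only, not the kernel code):

#include <pthread.h>
#include <stdio.h>

struct fs_info {
        pthread_mutex_t balance_lock;
        int has_saved_balance;          /* stand-in for fs_info->balance_ctl */
        int skip_balance;               /* stand-in for the mount option */
};

static void *balance_worker(void *data)
{
        struct fs_info *fs = data;

        (void)fs;
        printf("btrfs: continuing balance\n");
        return NULL;
}

static int resume_balance_async(struct fs_info *fs)
{
        pthread_t tsk;

        pthread_mutex_lock(&fs->balance_lock);
        if (!fs->has_saved_balance) {           /* nothing was interrupted */
                pthread_mutex_unlock(&fs->balance_lock);
                return 0;
        }
        pthread_mutex_unlock(&fs->balance_lock);

        if (fs->skip_balance) {
                printf("btrfs: force skipping balance\n");
                return 0;
        }

        /* fire and forget: thread-creation failure is the only error
         * reported back to the caller */
        return pthread_create(&tsk, NULL, balance_worker, fs) ? -1 : 0;
}

int main(void)
{
        struct fs_info fs = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

        if (resume_balance_async(&fs))
                return 1;
        pthread_exit(NULL);             /* let the worker finish before exiting */
}
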
@@ -4061,16 +4078,18 @@ static void btrfs_end_bio(struct bio *bio, int err)
 
                        BUG_ON(stripe_index >= bbio->num_stripes);
                        dev = bbio->stripes[stripe_index].dev;
-                       if (bio->bi_rw & WRITE)
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_WRITE_ERRS);
-                       else
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_READ_ERRS);
-                       if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_FLUSH_ERRS);
-                       btrfs_dev_stat_print_on_error(dev);
+                       if (dev->bdev) {
+                               if (bio->bi_rw & WRITE)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_WRITE_ERRS);
+                               else
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_READ_ERRS);
+                               if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_FLUSH_ERRS);
+                               btrfs_dev_stat_print_on_error(dev);
+                       }
                }
        }
 
index 74366f2..95f6637 100644 (file)
@@ -281,7 +281,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
 int btrfs_balance(struct btrfs_balance_control *bctl,
                  struct btrfs_ioctl_balance_args *bargs);
-int btrfs_recover_balance(struct btrfs_root *tree_root);
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
 int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
index 838a9cf..c7062c8 100644 (file)
@@ -1036,6 +1036,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
+       int ret;
+       struct buffer_head *bh;
+
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
@@ -1048,20 +1051,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
                return NULL;
        }
 
-       for (;;) {
-               struct buffer_head * bh;
-               int ret;
+retry:
+       bh = __find_get_block(bdev, block, size);
+       if (bh)
+               return bh;
 
+       ret = grow_buffers(bdev, block, size);
+       if (ret == 0) {
+               free_more_memory();
+               goto retry;
+       } else if (ret > 0) {
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
-
-               ret = grow_buffers(bdev, block, size);
-               if (ret < 0)
-                       return NULL;
-               if (ret == 0)
-                       free_more_memory();
        }
+       return NULL;
 }
 
 /*
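
The __getblk_slow() rewrite above replaces the old for (;;) with an explicit retry label: look the block up first, and only when growing the buffer list makes no progress reclaim memory and loop; a positive return simply means the lookup is tried once more. A compilable restatement of that control flow, with trivial stand-in helpers (find_block(), grow(), reclaim()) in place of the real buffer-cache calls:

#include <stddef.h>
#include <stdio.h>

struct buffer { int valid; };

static struct buffer cache;             /* one-slot "cache" for illustration */

static struct buffer *find_block(void)
{
        return cache.valid ? &cache : NULL;
}

static int grow(void)                   /* <0 error, 0 no progress, >0 grew */
{
        cache.valid = 1;
        return 1;
}

static void reclaim(void)               /* stand-in for free_more_memory() */
{
}

static struct buffer *getblk_slow(void)
{
        struct buffer *bh;
        int ret;

retry:
        bh = find_block();
        if (bh)
                return bh;

        ret = grow();
        if (ret == 0) {
                reclaim();              /* could not grow: free memory, retry */
                goto retry;
        } else if (ret > 0) {
                bh = find_block();      /* grew: the block should be there now */
                if (bh)
                        return bh;
        }
        return NULL;                    /* ret < 0, or the new buffer vanished */
}

int main(void)
{
        printf("got buffer: %s\n", getblk_slow() ? "yes" : "no");
        return 0;
}
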
index 173b1d2..8b67304 100644 (file)
        (CONGESTION_ON_THRESH(congestion_kb) -                          \
         (CONGESTION_ON_THRESH(congestion_kb) >> 2))
 
-
+static inline struct ceph_snap_context *page_snap_context(struct page *page)
+{
+       if (PagePrivate(page))
+               return (void *)page->private;
+       return NULL;
+}
 
 /*
  * Dirty a page.  Optimistically adjust accounting, on the assumption
@@ -142,10 +147,9 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset)
 {
        struct inode *inode;
        struct ceph_inode_info *ci;
-       struct ceph_snap_context *snapc = (void *)page->private;
+       struct ceph_snap_context *snapc = page_snap_context(page);
 
        BUG_ON(!PageLocked(page));
-       BUG_ON(!page->private);
        BUG_ON(!PagePrivate(page));
        BUG_ON(!page->mapping);
 
@@ -182,7 +186,6 @@ static int ceph_releasepage(struct page *page, gfp_t g)
        struct inode *inode = page->mapping ? page->mapping->host : NULL;
        dout("%p releasepage %p idx %lu\n", inode, page, page->index);
        WARN_ON(PageDirty(page));
-       WARN_ON(page->private);
        WARN_ON(PagePrivate(page));
        return 0;
 }
@@ -443,7 +446,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        osdc = &fsc->client->osdc;
 
        /* verify this is a writeable snap context */
-       snapc = (void *)page->private;
+       snapc = page_snap_context(page);
        if (snapc == NULL) {
                dout("writepage %p page %p not dirty?\n", inode, page);
                goto out;
@@ -451,7 +454,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        oldest = get_oldest_context(inode, &snap_size);
        if (snapc->seq > oldest->seq) {
                dout("writepage %p page %p snapc %p not writeable - noop\n",
-                    inode, page, (void *)page->private);
+                    inode, page, snapc);
                /* we should only noop if called by kswapd */
                WARN_ON((current->flags & PF_MEMALLOC) == 0);
                ceph_put_snap_context(oldest);
@@ -591,7 +594,7 @@ static void writepages_finish(struct ceph_osd_request *req,
                        clear_bdi_congested(&fsc->backing_dev_info,
                                            BLK_RW_ASYNC);
 
-               ceph_put_snap_context((void *)page->private);
+               ceph_put_snap_context(page_snap_context(page));
                page->private = 0;
                ClearPagePrivate(page);
                dout("unlocking %d %p\n", i, page);
@@ -795,7 +798,7 @@ get_more_pages:
                        }
 
                        /* only if matching snap context */
-                       pgsnapc = (void *)page->private;
+                       pgsnapc = page_snap_context(page);
                        if (pgsnapc->seq > snapc->seq) {
                                dout("page snapc %p %lld > oldest %p %lld\n",
                                     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
@@ -984,7 +987,7 @@ retry_locked:
        BUG_ON(!ci->i_snap_realm);
        down_read(&mdsc->snap_rwsem);
        BUG_ON(!ci->i_snap_realm->cached_context);
-       snapc = (void *)page->private;
+       snapc = page_snap_context(page);
        if (snapc && snapc != ci->i_head_snapc) {
                /*
                 * this page is already dirty in another (older) snap
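
The ceph hunks stop casting page->private directly and funnel every read through page_snap_context(), which checks PagePrivate() before handing back a typed pointer. A minimal userspace illustration of that accessor pattern, using a hypothetical fake_page in place of struct page:

#include <stdint.h>
#include <stdio.h>

#define PG_PRIVATE 0x1                  /* stand-in for the PG_private bit */

struct snap_context { unsigned long long seq; };

/* Hypothetical miniature of struct page: an opaque private word plus flags. */
struct fake_page {
        unsigned int flags;
        uintptr_t private_word;
};

/* Only hand back a typed pointer when the page really owns one. */
static struct snap_context *page_snap_context(struct fake_page *page)
{
        if (page->flags & PG_PRIVATE)
                return (struct snap_context *)page->private_word;
        return NULL;
}

int main(void)
{
        struct snap_context snapc = { .seq = 42 };
        struct fake_page page = { 0, 0 };

        printf("detached: %p\n", (void *)page_snap_context(&page));

        page.private_word = (uintptr_t)&snapc;
        page.flags |= PG_PRIVATE;
        printf("attached: seq=%llu\n", page_snap_context(&page)->seq);
        return 0;
}
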
index 5b40073..4ee522b 100644 (file)
@@ -86,7 +86,31 @@ static struct {
 #endif /* CONFIG_CIFS_WEAK_PW_HASH */
 #endif /* CIFS_POSIX */
 
-/* Forward declarations */
+#ifdef CONFIG_HIGHMEM
+/*
+ * On arches that have high memory, kmap address space is limited. By
+ * serializing the kmap operations on those arches, we ensure that we don't
+ * end up with a bunch of threads in writeback with partially mapped page
+ * arrays, stuck waiting for kmap to come back. That situation prevents
+ * progress and can deadlock.
+ */
+static DEFINE_MUTEX(cifs_kmap_mutex);
+
+static inline void
+cifs_kmap_lock(void)
+{
+       mutex_lock(&cifs_kmap_mutex);
+}
+
+static inline void
+cifs_kmap_unlock(void)
+{
+       mutex_unlock(&cifs_kmap_mutex);
+}
+#else /* !CONFIG_HIGHMEM */
+#define cifs_kmap_lock() do { ; } while(0)
+#define cifs_kmap_unlock() do { ; } while(0)
+#endif /* CONFIG_HIGHMEM */
 
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
@@ -1503,7 +1527,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        }
 
        /* marshal up the page array */
+       cifs_kmap_lock();
        len = rdata->marshal_iov(rdata, data_len);
+       cifs_kmap_unlock();
        data_len -= len;
 
        /* issue the read if we have any iovecs left to fill */
@@ -2069,7 +2095,9 @@ cifs_async_writev(struct cifs_writedata *wdata)
         * and set the iov_len properly for each one. It may also set
         * wdata->bytes too.
         */
+       cifs_kmap_lock();
        wdata->marshal_iov(iov, wdata);
+       cifs_kmap_unlock();
 
        cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
 
index 78db68a..94b7788 100644 (file)
@@ -1653,24 +1653,26 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                         * If yes, we have encountered a double deliminator
                         * reset the NULL character to the deliminator
                         */
-                       if (tmp_end < end && tmp_end[1] == delim)
+                       if (tmp_end < end && tmp_end[1] == delim) {
                                tmp_end[0] = delim;
 
-                       /* Keep iterating until we get to a single deliminator
-                        * OR the end
-                        */
-                       while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
-                              (tmp_end[1] == delim)) {
-                               tmp_end = (char *) &tmp_end[2];
-                       }
+                               /* Keep iterating until we get to a single
+                                * deliminator OR the end
+                                */
+                               while ((tmp_end = strchr(tmp_end, delim))
+                                       != NULL && (tmp_end[1] == delim)) {
+                                               tmp_end = (char *) &tmp_end[2];
+                               }
 
-                       /* Reset var options to point to next element */
-                       if (tmp_end) {
-                               tmp_end[0] = '\0';
-                               options = (char *) &tmp_end[1];
-                       } else
-                               /* Reached the end of the mount option string */
-                               options = end;
+                               /* Reset var options to point to next element */
+                               if (tmp_end) {
+                                       tmp_end[0] = '\0';
+                                       options = (char *) &tmp_end[1];
+                               } else
+                                       /* Reached the end of the mount option
+                                        * string */
+                                       options = end;
+                       }
 
                        /* Now build new password string */
                        temp_len = strlen(value);
@@ -3443,6 +3445,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
 #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
 
+/*
+ * On hosts with high memory, we can't currently support wsize/rsize that are
+ * larger than we can kmap at once. Cap the rsize/wsize at
+ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
+ * larger than that anyway.
+ */
+#ifdef CONFIG_HIGHMEM
+#define CIFS_KMAP_SIZE_LIMIT   (LAST_PKMAP * PAGE_CACHE_SIZE)
+#else /* CONFIG_HIGHMEM */
+#define CIFS_KMAP_SIZE_LIMIT   (1<<24)
+#endif /* CONFIG_HIGHMEM */
+
 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 {
@@ -3473,6 +3487,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
                wsize = min_t(unsigned int, wsize,
                                server->maxBuf - sizeof(WRITE_REQ) + 4);
 
+       /* limit to the amount that we can kmap at once */
+       wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
+
        /* hard limit of CIFS_MAX_WSIZE */
        wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
 
@@ -3493,18 +3510,15 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
         * MS-CIFS indicates that servers are only limited by the client's
         * bufsize for reads, testing against win98se shows that it throws
         * INVALID_PARAMETER errors if you try to request too large a read.
+        * OS/2 just sends back short reads.
         *
-        * If the server advertises a MaxBufferSize of less than one page,
-        * assume that it also can't satisfy reads larger than that either.
-        *
-        * FIXME: Is there a better heuristic for this?
+        * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
+        * it can't handle a read request larger than its MaxBufferSize either.
         */
        if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
                defsize = CIFS_DEFAULT_IOSIZE;
        else if (server->capabilities & CAP_LARGE_READ_X)
                defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
-       else if (server->maxBuf >= PAGE_CACHE_SIZE)
-               defsize = CIFSMaxBufSize;
        else
                defsize = server->maxBuf - sizeof(READ_RSP);
 
@@ -3517,6 +3531,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
        if (!(server->capabilities & CAP_LARGE_READ_X))
                rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
 
+       /* limit to the amount that we can kmap at once */
+       rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
+
        /* hard limit of CIFS_MAX_RSIZE */
        rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
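
Both negotiate paths above clamp the agreed I/O size through a chain of min_t() calls, and on highmem hosts the new CIFS_KMAP_SIZE_LIMIT term keeps a single request within what can be kmapped at once. A standalone sketch of that clamping order, with made-up limit values rather than the real CIFS constants:

#include <stdio.h>

#define DEFAULT_IOSIZE      (1024 * 1024)       /* hypothetical default */
#define KMAP_SIZE_LIMIT     (512 * 1024)        /* stand-in for LAST_PKMAP * PAGE_SIZE */
#define HARD_MAX_IOSIZE     (16 * 1024 * 1024)  /* hypothetical protocol ceiling */

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static unsigned int negotiate_iosize(unsigned int requested,
                                     unsigned int server_max)
{
        unsigned int size = requested ? requested : DEFAULT_IOSIZE;

        size = min_u(size, server_max);         /* what the server accepts */
        size = min_u(size, KMAP_SIZE_LIMIT);    /* what we can kmap at once */
        size = min_u(size, HARD_MAX_IOSIZE);    /* hard protocol limit */
        return size;
}

int main(void)
{
        printf("negotiated %u bytes\n", negotiate_iosize(0, 65536));
        return 0;
}
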
 
index 0a8224d..a4217f0 100644 (file)
@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
 
        dentry = d_lookup(parent, name);
        if (dentry) {
-               /* FIXME: check for inode number changes? */
-               if (dentry->d_inode != NULL)
+               inode = dentry->d_inode;
+               /* update inode in place if i_ino didn't change */
+               if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+                       cifs_fattr_to_inode(inode, fattr);
                        return dentry;
+               }
                d_drop(dentry);
                dput(dentry);
        }
index 3097ee5..f25d4ea 100644 (file)
@@ -365,16 +365,14 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
        if (mid == NULL)
                return -ENOMEM;
 
-       /* put it on the pending_mid_q */
-       spin_lock(&GlobalMid_Lock);
-       list_add_tail(&mid->qhead, &server->pending_mid_q);
-       spin_unlock(&GlobalMid_Lock);
-
        rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
-       if (rc)
-               delete_mid(mid);
+       if (rc) {
+               DeleteMidQEntry(mid);
+               return rc;
+       }
+
        *ret_mid = mid;
-       return rc;
+       return 0;
 }
 
 /*
@@ -407,17 +405,21 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
        mid->callback_data = cbdata;
        mid->mid_state = MID_REQUEST_SUBMITTED;
 
+       /* put it on the pending_mid_q */
+       spin_lock(&GlobalMid_Lock);
+       list_add_tail(&mid->qhead, &server->pending_mid_q);
+       spin_unlock(&GlobalMid_Lock);
+
+
        cifs_in_send_inc(server);
        rc = smb_sendv(server, iov, nvec);
        cifs_in_send_dec(server);
        cifs_save_when_sent(mid);
        mutex_unlock(&server->srv_mutex);
 
-       if (rc)
-               goto out_err;
+       if (rc == 0)
+               return 0;
 
-       return rc;
-out_err:
        delete_mid(mid);
        add_credits(server, 1);
        wake_up(&server->request_q);
index 69f994a..0dbe58a 100644 (file)
@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
        (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
        if (!IS_ERR(*lower_file))
                goto out;
-       if (flags & O_RDONLY) {
+       if ((flags & O_ACCMODE) == O_RDONLY) {
                rc = PTR_ERR((*lower_file));
                goto out;
        }
index 3a06f40..c0038f6 100644 (file)
@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
        mutex_unlock(&ecryptfs_daemon_hash_mux);
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
                goto out_unlock_daemon;
        }
        daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
+       file->private_data = daemon;
        atomic_inc(&ecryptfs_num_miscdev_opens);
 out_unlock_daemon:
        mutex_unlock(&daemon->mux);
@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
 
        mutex_lock(&ecryptfs_daemon_hash_mux);
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon)
+               daemon = file->private_data;
        mutex_lock(&daemon->mux);
-       BUG_ON(daemon->pid != task_pid(current));
        BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
        daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
        atomic_dec(&ecryptfs_num_miscdev_opens);
@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
                          struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
                          u16 msg_flags, struct ecryptfs_daemon *daemon)
 {
-       int rc = 0;
+       struct ecryptfs_message *msg;
 
-       mutex_lock(&msg_ctx->mux);
-       msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
-                              GFP_KERNEL);
-       if (!msg_ctx->msg) {
-               rc = -ENOMEM;
+       msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
+       if (!msg) {
                printk(KERN_ERR "%s: Out of memory whilst attempting "
                       "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
-                      (sizeof(*msg_ctx->msg) + data_size));
-               goto out_unlock;
+                      (sizeof(*msg) + data_size));
+               return -ENOMEM;
        }
+
+       mutex_lock(&msg_ctx->mux);
+       msg_ctx->msg = msg;
        msg_ctx->msg->index = msg_ctx->index;
        msg_ctx->msg->data_len = data_size;
        msg_ctx->type = msg_type;
        memcpy(msg_ctx->msg->data, data, data_size);
        msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
-       mutex_lock(&daemon->mux);
        list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
+       mutex_unlock(&msg_ctx->mux);
+
+       mutex_lock(&daemon->mux);
        daemon->num_queued_msg_ctx++;
        wake_up_interruptible(&daemon->wait);
        mutex_unlock(&daemon->mux);
-out_unlock:
-       mutex_unlock(&msg_ctx->mux);
-       return rc;
+
+       return 0;
 }
 
 /*
@@ -269,8 +274,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
+       if (task_pid(current) != daemon->pid) {
+               mutex_unlock(&daemon->mux);
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EPERM;
+       }
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
                rc = 0;
                mutex_unlock(&ecryptfs_daemon_hash_mux);
@@ -307,9 +320,6 @@ check_list:
                 * message from the queue; try again */
                goto check_list;
        }
-       BUG_ON(euid != daemon->euid);
-       BUG_ON(current_user_ns() != daemon->user_ns);
-       BUG_ON(task_pid(current) != daemon->pid);
        msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
                                   struct ecryptfs_msg_ctx, daemon_out_list);
        BUG_ON(!msg_ctx);
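
The reworked ecryptfs_send_miscdev() allocates the message before any mutex is taken, so an allocation failure returns immediately with nothing to unwind, and each mutex is then held only around the data it actually protects. A small userspace sketch of that allocate-then-lock ordering, with pthread mutexes standing in for the kernel mutexes and simplified message bookkeeping:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct msg { size_t len; char data[]; };

static pthread_mutex_t ctx_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct msg *ctx_msg;
static unsigned int queued;

static int send_msg(const char *data, size_t len)
{
        /* Allocate before taking any lock: a failure needs no unwind path. */
        struct msg *m = malloc(sizeof(*m) + len);

        if (!m)
                return -1;
        m->len = len;
        memcpy(m->data, data, len);

        pthread_mutex_lock(&ctx_lock);          /* guards ctx_msg only */
        ctx_msg = m;
        pthread_mutex_unlock(&ctx_lock);

        pthread_mutex_lock(&queue_lock);        /* guards the queue counter only */
        queued++;
        pthread_mutex_unlock(&queue_lock);
        return 0;
}

int main(void)
{
        if (send_msg("hello", 5))
                return 1;
        printf("queued=%u len=%zu\n", queued, ctx_msg->len);
        free(ctx_msg);
        return 0;
}
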
index 74598f6..1c8b556 100644 (file)
@@ -1710,7 +1710,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                goto error_tgt_fput;
 
        /* Check if EPOLLWAKEUP is allowed */
-       if ((epds.events & EPOLLWAKEUP) && !capable(CAP_EPOLLWAKEUP))
+       if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
                epds.events &= ~EPOLLWAKEUP;
 
        /*
index a79786a..da27b91 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -819,10 +819,10 @@ static int exec_mmap(struct mm_struct *mm)
        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
-       sync_mm_rss(old_mm);
        mm_release(tsk, old_mm);
 
        if (old_mm) {
+               sync_mm_rss(old_mm);
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
index 49cf230..24a49d4 100644 (file)
@@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
 out:
        ios->numdevs = devs_in_group;
        ios->pages_consumed = cur_pg;
-       if (unlikely(ret)) {
-               if (length == ios->length)
-                       return ret;
-               else
-                       ios->length -= length;
-       }
-       return 0;
+       return ret;
 }
 
 int ore_create(struct ore_io_state *ios)
index d222c77..5f376d1 100644 (file)
@@ -144,26 +144,26 @@ static void _sp2d_reset(struct __stripe_pages_2d *sp2d,
 {
        unsigned data_devs = sp2d->data_devs;
        unsigned group_width = data_devs + sp2d->parity;
-       unsigned p;
+       int p, c;
 
        if (!sp2d->needed)
                return;
 
-       for (p = 0; p < sp2d->pages_in_unit; p++) {
-               struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
-               if (_1ps->write_count < group_width) {
-                       unsigned c;
+       for (c = data_devs - 1; c >= 0; --c)
+               for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
+                       struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
 
-                       for (c = 0; c < data_devs; c++)
-                               if (_1ps->page_is_read[c]) {
-                                       struct page *page = _1ps->pages[c];
+                       if (_1ps->page_is_read[c]) {
+                               struct page *page = _1ps->pages[c];
 
-                                       r4w->put_page(priv, page);
-                                       _1ps->page_is_read[c] = false;
-                               }
+                               r4w->put_page(priv, page);
+                               _1ps->page_is_read[c] = false;
+                       }
                }
 
+       for (p = 0; p < sp2d->pages_in_unit; p++) {
+               struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
+
                memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages));
                _1ps->write_count = 0;
                _1ps->tx = NULL;
@@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
  * ios->sp2d[p][*], xor is calculated the same way. These pages are
  * allocated/freed and don't go through cache
  */
-static int _read_4_write(struct ore_io_state *ios)
+static int _read_4_write_first_stripe(struct ore_io_state *ios)
 {
-       struct ore_io_state *ios_read;
        struct ore_striping_info read_si;
        struct __stripe_pages_2d *sp2d = ios->sp2d;
        u64 offset = ios->si.first_stripe_start;
-       u64 last_stripe_end;
-       unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
-       unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
-       int ret;
+       unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
 
        if (offset == ios->offset) /* Go to start collect $200 */
                goto read_last_stripe;
@@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
        min_p = _sp2d_min_pg(sp2d);
        max_p = _sp2d_max_pg(sp2d);
 
+       ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
+                  offset, ios->offset, min_p, max_p);
+
        for (c = 0; ; c++) {
                ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
                read_si.obj_offset += min_p * PAGE_SIZE;
@@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
        }
 
 read_last_stripe:
+       return 0;
+}
+
+static int _read_4_write_last_stripe(struct ore_io_state *ios)
+{
+       struct ore_striping_info read_si;
+       struct __stripe_pages_2d *sp2d = ios->sp2d;
+       u64 offset;
+       u64 last_stripe_end;
+       unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
+       unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+
        offset = ios->offset + ios->length;
        if (offset % PAGE_SIZE)
                _add_to_r4w_last_page(ios, &offset);
@@ -527,15 +538,15 @@ read_last_stripe:
        c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
                       ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
 
-       BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
-       /* unaligned IO must be within a single stripe */
-
        if (min_p == sp2d->pages_in_unit) {
                /* Didn't do it yet */
                min_p = _sp2d_min_pg(sp2d);
                max_p = _sp2d_max_pg(sp2d);
        }
 
+       ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
+                  offset, last_stripe_end, min_p, max_p);
+
        while (offset < last_stripe_end) {
                struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
 
@@ -568,6 +579,15 @@ read_last_stripe:
        }
 
 read_it:
+       return 0;
+}
+
+static int _read_4_write_execute(struct ore_io_state *ios)
+{
+       struct ore_io_state *ios_read;
+       unsigned i;
+       int ret;
+
        ios_read = ios->ios_read_4_write;
        if (!ios_read)
                return 0;
@@ -591,6 +611,8 @@ read_it:
        }
 
        _mark_read4write_pages_uptodate(ios_read, ret);
+       ore_put_io_state(ios_read);
+       ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
        return 0;
 }
 
@@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
                        /* If first stripe, Read in all read4write pages
                         * (if needed) before we calculate the first parity.
                         */
-                       _read_4_write(ios);
+                       _read_4_write_first_stripe(ios);
                }
+               if (!cur_len) /* If last stripe r4w pages of last stripe */
+                       _read_4_write_last_stripe(ios);
+               _read_4_write_execute(ios);
 
                for (i = 0; i < num_pages; i++) {
                        pages[i] = _raid_page_alloc();
@@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
 
 int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
 {
-       struct ore_layout *layout = ios->layout;
-
        if (ios->parity_pages) {
+               struct ore_layout *layout = ios->layout;
                unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
-               unsigned stripe_size = ios->si.bytes_in_stripe;
-               u64 last_stripe, first_stripe;
 
                if (_sp2d_alloc(pages_in_unit, layout->group_width,
                                layout->parity, &ios->sp2d)) {
                        return -ENOMEM;
                }
-
-               /* Round io down to last full strip */
-               first_stripe = div_u64(ios->offset, stripe_size);
-               last_stripe = div_u64(ios->offset + ios->length, stripe_size);
-
-               /* If an IO spans more then a single stripe it must end at
-                * a stripe boundary. The reminder at the end is pushed into the
-                * next IO.
-                */
-               if (last_stripe != first_stripe) {
-                       ios->length = last_stripe * stripe_size - ios->offset;
-
-                       BUG_ON(!ios->length);
-                       ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
-                                       PAGE_SIZE;
-                       ios->si.length = ios->length; /*make it consistent */
-               }
        }
        return 0;
 }
index e34deac..6ec6f9e 100644 (file)
@@ -268,7 +268,6 @@ group_extend_out:
                err = ext4_move_extents(filp, donor_filp, me.orig_start,
                                        me.donor_start, me.len, &me.moved_len);
                mnt_drop_write_file(filp);
-               mnt_drop_write(filp->f_path.mnt);
 
                if (copy_to_user((struct move_extent __user *)arg,
                                 &me, sizeof(me)))
index a3d81eb..0038b32 100644 (file)
@@ -738,22 +738,21 @@ static int
 fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
 {
        int len = *lenp;
-       u32 ipos_h, ipos_m, ipos_l;
+       struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+       loff_t i_pos;
 
        if (len < 5) {
                *lenp = 5;
                return 255; /* no room */
        }
 
-       ipos_h = MSDOS_I(inode)->i_pos >> 8;
-       ipos_m = (MSDOS_I(inode)->i_pos & 0xf0) << 24;
-       ipos_l = (MSDOS_I(inode)->i_pos & 0x0f) << 28;
+       i_pos = fat_i_pos_read(sbi, inode);
        *lenp = 5;
        fh[0] = inode->i_ino;
        fh[1] = inode->i_generation;
-       fh[2] = ipos_h;
-       fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
-       fh[4] = ipos_l;
+       fh[2] = i_pos >> 8;
+       fh[3] = ((i_pos & 0xf0) << 24) | MSDOS_I(inode)->i_logstart;
+       fh[4] = (i_pos & 0x0f) << 28;
        if (parent)
                fh[4] |= MSDOS_I(parent)->i_logstart;
        return 3;
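
The fat_encode_fh() hunk reads i_pos once via fat_i_pos_read() and spreads it across three 32-bit handle words alongside the directory logstart values. A standalone round-trip sketch of that packing; the decode side is only an illustration consistent with the encode above, not the kernel's fh_to_dentry code, and it assumes i_pos fits in 40 bits:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a 40-bit i_pos plus two logstart values into fh[2..4], following
 * the layout used by the encode hunk above. */
static void pack_fh(uint32_t fh[5], uint64_t i_pos,
                    uint32_t logstart, uint32_t parent_logstart)
{
        fh[2] = (uint32_t)(i_pos >> 8);
        fh[3] = (uint32_t)((i_pos & 0xf0) << 24) | logstart;
        fh[4] = (uint32_t)((i_pos & 0x0f) << 28) | parent_logstart;
}

/* Reassemble i_pos from the three words; the 0xf0 mask strips the top
 * logstart bits that share fh[3] with the i_pos nibble. */
static uint64_t unpack_i_pos(const uint32_t fh[5])
{
        return ((uint64_t)fh[2] << 8) |
               ((fh[3] >> 24) & 0xf0) |
               (fh[4] >> 28);
}

int main(void)
{
        uint32_t fh[5] = { 0 };
        uint64_t i_pos = 0x123456789aULL;       /* any 40-bit position */

        pack_fh(fh, i_pos, 0x0abcdef, 0x0123456);
        assert(unpack_i_pos(fh) == i_pos);
        printf("round trip ok: 0x%llx\n", (unsigned long long)unpack_i_pos(fh));
        return 0;
}
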
index b1a524d..cf6f434 100644 (file)
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/pipe_fs_i.h>
 
-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+static int wait_for_partner(struct inode* inode, unsigned int *cnt)
 {
        int cur = *cnt; 
 
@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
                if (signal_pending(current))
                        break;
        }
+       return cur == *cnt ? -ERESTARTSYS : 0;
 }
 
 static void wake_up_partner(struct inode* inode)
@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
-                               wait_for_partner(inode, &pipe->w_counter);
-                               if(signal_pending(current))
+                               if (wait_for_partner(inode, &pipe->w_counter))
                                        goto err_rd;
                        }
                }
@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
                        wake_up_partner(inode);
 
                if (!pipe->readers) {
-                       wait_for_partner(inode, &pipe->r_counter);
-                       if (signal_pending(current))
+                       if (wait_for_partner(inode, &pipe->r_counter))
                                goto err_wr;
                }
                break;
index c640ba5..09addc8 100644 (file)
@@ -31,6 +31,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        struct hfsplus_vh *vh = sbi->s_vhdr;
        struct hfsplus_vh *bvh = sbi->s_backup_vhdr;
+       u32 cnid = (unsigned long)dentry->d_fsdata;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -41,8 +42,12 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
        vh->finder_info[0] = bvh->finder_info[0] =
                cpu_to_be32(parent_ino(dentry));
 
-       /* Bootloader */
-       vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino);
+       /*
+        * Bootloader. Just using the inode here breaks in the case of
+        * hard links - the firmware wants the ID of the hard link file,
+        * but the inode points at the indirect inode
+        */
+       vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid);
 
        /* Per spec, the OS X system folder - same as finder_info[0] here */
        vh->finder_info[5] = bvh->finder_info[5] =
index 7daf4b8..90effcc 100644 (file)
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct bio *bio;
        int ret = 0;
-       unsigned int io_size;
+       u64 io_size;
        loff_t start;
        int offset;
 
index 814c51d..fce6238 100644 (file)
@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        case F_WRLCK:
                return generic_add_lease(filp, arg, flp);
        default:
-               BUG();
+               return -EINVAL;
        }
 }
 EXPORT_SYMBOL(generic_setlease);
index 17ba6b9..f005b5b 100644 (file)
@@ -207,7 +207,6 @@ error_0:
 static void nfs4_shutdown_session(struct nfs_client *clp)
 {
        if (nfs4_has_session(clp)) {
-               nfs4_deviceid_purge_client(clp);
                nfs4_destroy_session(clp->cl_session);
                nfs4_destroy_clientid(clp);
        }
index 3168f6e..4825337 100644 (file)
@@ -484,17 +484,22 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                if (!nfs_pageio_add_request(&desc, req)) {
+                       nfs_list_remove_request(req);
                        nfs_list_add_request(req, &failed);
                        spin_lock(cinfo.lock);
                        dreq->flags = 0;
                        dreq->error = -EIO;
                        spin_unlock(cinfo.lock);
                }
+               nfs_release_request(req);
        }
        nfs_pageio_complete(&desc);
 
-       while (!list_empty(&failed))
+       while (!list_empty(&failed)) {
+               req = nfs_list_entry(failed.next);
+               nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
+       }
 
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, dreq->inode);
index b5b86a0..864c51e 100644 (file)
@@ -57,6 +57,11 @@ unsigned int nfs_idmap_cache_timeout = 600;
 static const struct cred *id_resolver_cache;
 static struct key_type key_type_id_resolver_legacy;
 
+struct idmap {
+       struct rpc_pipe         *idmap_pipe;
+       struct key_construction *idmap_key_cons;
+       struct mutex            idmap_mutex;
+};
 
 /**
  * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
@@ -310,9 +315,11 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
                                            name, namelen, type, data,
                                            data_size, NULL);
        if (ret < 0) {
+               mutex_lock(&idmap->idmap_mutex);
                ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
                                            name, namelen, type, data,
                                            data_size, idmap);
+               mutex_unlock(&idmap->idmap_mutex);
        }
        return ret;
 }
@@ -354,11 +361,6 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ
 /* idmap classic begins here */
 module_param(nfs_idmap_cache_timeout, int, 0644);
 
-struct idmap {
-       struct rpc_pipe         *idmap_pipe;
-       struct key_construction *idmap_key_cons;
-};
-
 enum {
        Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err
 };
@@ -469,6 +471,7 @@ nfs_idmap_new(struct nfs_client *clp)
                return error;
        }
        idmap->idmap_pipe = pipe;
+       mutex_init(&idmap->idmap_mutex);
 
        clp->cl_idmap = idmap;
        return 0;
index e605d69..f729698 100644 (file)
@@ -1530,7 +1530,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
        nfsi->delegation_state = 0;
        init_rwsem(&nfsi->rwsem);
        nfsi->layout = NULL;
-       atomic_set(&nfsi->commit_info.rpcs_out, 0);
 #endif
 }
 
@@ -1545,6 +1544,7 @@ static void init_once(void *foo)
        INIT_LIST_HEAD(&nfsi->commit_info.list);
        nfsi->npages = 0;
        nfsi->commit_info.ncommit = 0;
+       atomic_set(&nfsi->commit_info.rpcs_out, 0);
        atomic_set(&nfsi->silly_count, 1);
        INIT_HLIST_HEAD(&nfsi->silly_list);
        init_waitqueue_head(&nfsi->waitqueue);
index b47277b..f50d3e8 100644 (file)
@@ -454,7 +454,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
        objios->ios->done = _read_done;
        dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
                rdata->args.offset, rdata->args.count);
-       return ore_read(objios->ios);
+       ret = ore_read(objios->ios);
+       if (unlikely(ret))
+               objio_free_result(&objios->oir);
+       return ret;
 }
 
 /*
@@ -486,8 +489,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
        struct nfs_write_data *wdata = objios->oir.rpcdata;
        struct address_space *mapping = wdata->header->inode->i_mapping;
        pgoff_t index = offset / PAGE_SIZE;
-       struct page *page = find_get_page(mapping, index);
+       struct page *page;
+       loff_t i_size = i_size_read(wdata->header->inode);
+
+       if (offset >= i_size) {
+               *uptodate = true;
+               dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
+               return ZERO_PAGE(0);
+       }
 
+       page = find_get_page(mapping, index);
        if (!page) {
                page = find_or_create_page(mapping, index, GFP_NOFS);
                if (unlikely(!page)) {
@@ -507,8 +518,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
 
 static void __r4w_put_page(void *priv, struct page *page)
 {
-       dprintk("%s: index=0x%lx\n", __func__, page->index);
-       page_cache_release(page);
+       dprintk("%s: index=0x%lx\n", __func__,
+               (page == ZERO_PAGE(0)) ? -1UL : page->index);
+       if (ZERO_PAGE(0) != page)
+               page_cache_release(page);
        return;
 }
 
@@ -539,8 +552,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
        dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
                wdata->args.offset, wdata->args.count);
        ret = ore_write(objios->ios);
-       if (unlikely(ret))
+       if (unlikely(ret)) {
+               objio_free_result(&objios->oir);
                return ret;
+       }
 
        if (objios->sync)
                _write_done(objios->ios, objios);
index b8323aa..bbc49ca 100644 (file)
@@ -70,6 +70,10 @@ find_pnfs_driver(u32 id)
 
        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
+       if (local != NULL && !try_module_get(local->owner)) {
+               dprintk("%s: Could not grab reference on module\n", __func__);
+               local = NULL;
+       }
        spin_unlock(&pnfs_spinlock);
        return local;
 }
@@ -80,6 +84,9 @@ unset_pnfs_layoutdriver(struct nfs_server *nfss)
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
+               /* Decrement the MDS count. Purge the deviceid cache if zero */
+               if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
+                       nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
@@ -115,10 +122,6 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                        goto out_no_driver;
                }
        }
-       if (!try_module_get(ld_type->owner)) {
-               dprintk("%s: Could not grab reference on module\n", __func__);
-               goto out_no_driver;
-       }
        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
@@ -127,6 +130,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                module_put(ld_type->owner);
                goto out_no_driver;
        }
+       /* Bump the MDS count */
+       atomic_inc(&server->nfs_client->cl_mds_count);
 
        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;
index 906f09c..0622819 100644 (file)
@@ -2860,6 +2860,8 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
 
        dfprintk(MOUNT, "--> nfs4_try_mount()\n");
 
+       mount_info->fill_super = nfs4_fill_super;
+
        export_path = data->nfs_server.export_path;
        data->nfs_server.export_path = "/";
        root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
index 08a07a2..57ceaf3 100644 (file)
@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
        while (!list_empty(head)) {
                ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
                list_del_init(&ii->i_dirty);
+               truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
                iput(&ii->vfs_inode);
        }
 }
index 0e72ad6..88e11fb 100644 (file)
@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
                if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
                        continue;
                list_del_init(&ii->i_dirty);
+               truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
                iput(&ii->vfs_inode);
        }
 }
index 81a4cd2..4f7795f 100644 (file)
@@ -456,7 +456,7 @@ static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
        stats->ls_gets++;
        stats->ls_total += ktime_to_ns(kt);
        /* overflow */
-       if (unlikely(stats->ls_gets) == 0) {
+       if (unlikely(stats->ls_gets == 0)) {
                stats->ls_gets++;
                stats->ls_total = ktime_to_ns(kt);
        }
@@ -3932,6 +3932,8 @@ unqueue:
 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
                                        struct ocfs2_lock_res *lockres)
 {
+       unsigned long flags;
+
        assert_spin_locked(&lockres->l_lock);
 
        if (lockres->l_flags & OCFS2_LOCK_FREEING) {
@@ -3945,21 +3947,22 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
 
        lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (list_empty(&lockres->l_blocked_list)) {
                list_add_tail(&lockres->l_blocked_list,
                              &osb->blocked_lock_list);
                osb->blocked_lock_count++;
        }
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 }
 
 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
 {
        unsigned long processed;
+       unsigned long flags;
        struct ocfs2_lock_res *lockres;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        /* grab this early so we know to try again if a state change and
         * wake happens part-way through our work  */
        osb->dc_work_sequence = osb->dc_wake_sequence;
@@ -3972,38 +3975,40 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
                                     struct ocfs2_lock_res, l_blocked_list);
                list_del_init(&lockres->l_blocked_list);
                osb->blocked_lock_count--;
-               spin_unlock(&osb->dc_task_lock);
+               spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 
                BUG_ON(!processed);
                processed--;
 
                ocfs2_process_blocked_lock(osb, lockres);
 
-               spin_lock(&osb->dc_task_lock);
+               spin_lock_irqsave(&osb->dc_task_lock, flags);
        }
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 }
 
 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
 {
        int empty = 0;
+       unsigned long flags;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (list_empty(&osb->blocked_lock_list))
                empty = 1;
 
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
        return empty;
 }
 
 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
 {
        int should_wake = 0;
+       unsigned long flags;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (osb->dc_work_sequence != osb->dc_wake_sequence)
                should_wake = 1;
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 
        return should_wake;
 }
@@ -4033,10 +4038,12 @@ static int ocfs2_downconvert_thread(void *arg)
 
 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
 {
-       spin_lock(&osb->dc_task_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        /* make sure the voting thread gets a swipe at whatever changes
         * the caller may have made to the voting state */
        osb->dc_wake_sequence++;
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
        wake_up(&osb->dc_event);
 }
index 2f5b92e..70b5863 100644 (file)
@@ -923,8 +923,6 @@ out_unlock:
 
        ocfs2_inode_unlock(inode, 0);
 out:
-       if (ret && ret != -ENXIO)
-               ret = -ENXIO;
        return ret;
 }
 
index 061591a..7602783 100644 (file)
@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        if (ret < 0)
                mlog_errno(ret);
 
-       if (file->f_flags & O_SYNC)
+       if (file && (file->f_flags & O_SYNC))
                handle->h_sync = 1;
 
        ocfs2_commit_trans(osb, handle);
@@ -2422,8 +2422,10 @@ out_dio:
                unaligned_dio = 0;
        }
 
-       if (unaligned_dio)
+       if (unaligned_dio) {
+               ocfs2_iocb_clear_unaligned_aio(iocb);
                atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+       }
 
 out:
        if (rw_level != -1)
index 92fcd57..0a86e30 100644 (file)
@@ -399,8 +399,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
                              msecs_to_jiffies(oinfo->dqi_syncms));
 
 out_err:
-       if (status)
-               mlog_errno(status);
        return status;
 out_unlock:
        ocfs2_unlock_global_qf(oinfo, 0);
index d6c79a0..1540632 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -397,10 +397,10 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
 {
        struct file *file;
        struct inode *inode;
-       int error;
+       int error, fput_needed;
 
        error = -EBADF;
-       file = fget(fd);
+       file = fget_raw_light(fd, &fput_needed);
        if (!file)
                goto out;
 
@@ -414,7 +414,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
        if (!error)
                set_fs_pwd(current->fs, &file->f_path);
 out_putf:
-       fput(file);
+       fput_light(file, fput_needed);
 out:
        return error;
 }
index aeb19e6..11a2aa2 100644 (file)
@@ -258,7 +258,7 @@ fail:
        return rc;
 }
 
-int pstore_fill_super(struct super_block *sb, void *data, int silent)
+static int pstore_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct inode *inode;
 
index 82c585f..03ce7a9 100644 (file)
@@ -94,20 +94,15 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
  * as we can from the end of the buffer.
  */
 static void pstore_dump(struct kmsg_dumper *dumper,
-           enum kmsg_dump_reason reason,
-           const char *s1, unsigned long l1,
-           const char *s2, unsigned long l2)
+                       enum kmsg_dump_reason reason)
 {
-       unsigned long   s1_start, s2_start;
-       unsigned long   l1_cpy, l2_cpy;
-       unsigned long   size, total = 0;
-       char            *dst;
+       unsigned long   total = 0;
        const char      *why;
        u64             id;
-       int             hsize, ret;
        unsigned int    part = 1;
        unsigned long   flags = 0;
        int             is_locked = 0;
+       int             ret;
 
        why = get_reason_str(reason);
 
@@ -119,30 +114,25 @@ static void pstore_dump(struct kmsg_dumper *dumper,
                spin_lock_irqsave(&psinfo->buf_lock, flags);
        oopscount++;
        while (total < kmsg_bytes) {
+               char *dst;
+               unsigned long size;
+               int hsize;
+               size_t len;
+
                dst = psinfo->buf;
                hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
                size = psinfo->bufsize - hsize;
                dst += hsize;
 
-               l2_cpy = min(l2, size);
-               l1_cpy = min(l1, size - l2_cpy);
-
-               if (l1_cpy + l2_cpy == 0)
+               if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len))
                        break;
 
-               s2_start = l2 - l2_cpy;
-               s1_start = l1 - l1_cpy;
-
-               memcpy(dst, s1 + s1_start, l1_cpy);
-               memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
-
                ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
-                                  hsize + l1_cpy + l2_cpy, psinfo);
+                                   hsize + len, psinfo);
                if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
                        pstore_new_entry = 1;
-               l1 -= l1_cpy;
-               l2 -= l2_cpy;
-               total += l1_cpy + l2_cpy;
+
+               total += hsize + len;
                part++;
        }
        if (in_nmi()) {
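
With the old pair of (s1, l1)/(s2, l2) segments gone, each pass of the loop above writes a small header into the record buffer and then asks kmsg_dump_get_buffer() to fill whatever room remains, stopping when nothing comes back or the byte budget is spent. A self-contained sketch of that loop shape, draining a plain string through a hypothetical fetch_chunk() in place of the kmsg helper:

#include <stdio.h>
#include <string.h>

#define RECORD_SIZE 32                  /* stand-in for psinfo->bufsize */
#define BYTE_BUDGET 96                  /* stand-in for kmsg_bytes */

static const char *src = "0123456789abcdefghijklmnopqrstuvwxyz0123456789";
static size_t src_off;

/* Hypothetical replacement for kmsg_dump_get_buffer(): copy up to 'room'
 * bytes of whatever is still pending and report how much was copied. */
static int fetch_chunk(char *dst, size_t room, size_t *len)
{
        size_t left = strlen(src) - src_off;

        if (!left)
                return 0;
        *len = left < room ? left : room;
        memcpy(dst, src + src_off, *len);
        src_off += *len;
        return 1;
}

int main(void)
{
        char record[RECORD_SIZE + 1];
        size_t total = 0;
        unsigned int part = 1;

        while (total < BYTE_BUDGET) {
                int hsize = snprintf(record, sizeof(record), "Oops#1 Part%u\n", part);
                size_t room = RECORD_SIZE - hsize;
                size_t len;

                if (!fetch_chunk(record + hsize, room, &len))
                        break;                  /* nothing left to dump */

                record[hsize + len] = '\0';
                printf("record %u: %s\n", part, record);
                total += hsize + len;
                part++;
        }
        return 0;
}
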
index 9123cce..453030f 100644 (file)
@@ -106,6 +106,8 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
        time->tv_sec = 0;
        time->tv_nsec = 0;
 
+       /* Update old/shadowed buffer. */
+       persistent_ram_save_old(prz);
        size = persistent_ram_old_size(prz);
        *buf = kmalloc(size, GFP_KERNEL);
        if (*buf == NULL)
@@ -184,6 +186,7 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id,
                return -EINVAL;
 
        persistent_ram_free_old(cxt->przs[id]);
+       persistent_ram_zap(cxt->przs[id]);
 
        return 0;
 }
index 31f8d18..c5fbdbb 100644 (file)
@@ -250,23 +250,24 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
        persistent_ram_update_ecc(prz, start, count);
 }
 
-static void __init
-persistent_ram_save_old(struct persistent_ram_zone *prz)
+void persistent_ram_save_old(struct persistent_ram_zone *prz)
 {
        struct persistent_ram_buffer *buffer = prz->buffer;
        size_t size = buffer_size(prz);
        size_t start = buffer_start(prz);
-       char *dest;
 
-       persistent_ram_ecc_old(prz);
+       if (!size)
+               return;
 
-       dest = kmalloc(size, GFP_KERNEL);
-       if (dest == NULL) {
+       if (!prz->old_log) {
+               persistent_ram_ecc_old(prz);
+               prz->old_log = kmalloc(size, GFP_KERNEL);
+       }
+       if (!prz->old_log) {
                pr_err("persistent_ram: failed to allocate buffer\n");
                return;
        }
 
-       prz->old_log = dest;
        prz->old_log_size = size;
        memcpy(prz->old_log, &buffer->data[start], size - start);
        memcpy(prz->old_log + size - start, &buffer->data[0], start);
@@ -319,6 +320,13 @@ void persistent_ram_free_old(struct persistent_ram_zone *prz)
        prz->old_log_size = 0;
 }
 
+void persistent_ram_zap(struct persistent_ram_zone *prz)
+{
+       atomic_set(&prz->buffer->start, 0);
+       atomic_set(&prz->buffer->size, 0);
+       persistent_ram_update_header_ecc(prz);
+}
+
 static void *persistent_ram_vmap(phys_addr_t start, size_t size)
 {
        struct page **pages;
@@ -405,6 +413,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
                                " size %zu, start %zu\n",
                               buffer_size(prz), buffer_start(prz));
                        persistent_ram_save_old(prz);
+                       return 0;
                }
        } else {
                pr_info("persistent_ram: no valid data in buffer"
@@ -412,8 +421,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
        }
 
        prz->buffer->sig = PERSISTENT_RAM_SIG;
-       atomic_set(&prz->buffer->start, 0);
-       atomic_set(&prz->buffer->size, 0);
+       persistent_ram_zap(prz);
 
        return 0;
 }
@@ -448,7 +456,6 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start,
                goto err;
 
        persistent_ram_post_init(prz, ecc);
-       persistent_ram_update_header_ecc(prz);
 
        return prz;
 err:
index fbb0b47..d5378d0 100644 (file)
@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
                /* prevent the page from being discarded on memory pressure */
                SetPageDirty(page);
+               SetPageUptodate(page);
 
                unlock_page(page);
                put_page(page);
index c9f1318..7bf08fa 100644 (file)
@@ -273,13 +273,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
  * Check if we need to grow the arrays holding pages and partial page
  * descriptions.
  */
-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+
+       spd->nr_pages_max = buffers;
+       if (buffers <= PIPE_DEF_BUFFERS)
                return 0;
 
-       spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
-       spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+       spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
+       spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
 
        if (spd->pages && spd->partial)
                return 0;
@@ -289,10 +292,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
        return -ENOMEM;
 }
 
-void splice_shrink_spd(struct pipe_inode_info *pipe,
-                      struct splice_pipe_desc *spd)
+void splice_shrink_spd(struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
                return;
 
        kfree(spd->pages);
@@ -315,6 +317,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -326,7 +329,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       nr_pages = min(req_pages, pipe->buffers);
+       nr_pages = min(req_pages, spd.nr_pages_max);
 
        /*
         * Lookup the (hopefully) full range of pages we need.
@@ -497,7 +500,7 @@ fill_it:
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return error;
 }
 
@@ -598,6 +601,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &default_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -608,8 +612,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 
        res = -ENOMEM;
        vec = __vec;
-       if (pipe->buffers > PIPE_DEF_BUFFERS) {
-               vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+       if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
+               vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
                if (!vec)
                        goto shrink_ret;
        }
@@ -617,7 +621,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        offset = *ppos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-       for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
+       for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
                struct page *page;
 
                page = alloc_page(GFP_USER);
@@ -665,7 +669,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 shrink_ret:
        if (vec != __vec)
                kfree(vec);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return res;
 
 err:
@@ -1614,6 +1618,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &user_page_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1629,13 +1634,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 
        spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
                                            spd.partial, false,
-                                           pipe->buffers);
+                                           spd.nr_pages_max);
        if (spd.nr_pages <= 0)
                ret = spd.nr_pages;
        else
                ret = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 }
 
index 84a7e6f..92df3b0 100644 (file)
@@ -2918,7 +2918,7 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
        struct dentry *dent;
        struct ubifs_debug_info *d = c->dbg;
 
-       if (!IS_ENABLED(DEBUG_FS))
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;
 
        n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
@@ -3013,7 +3013,7 @@ out:
  */
 void dbg_debugfs_exit_fs(struct ubifs_info *c)
 {
-       if (IS_ENABLED(DEBUG_FS))
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
                debugfs_remove_recursive(c->dbg->dfs_dir);
 }
 
@@ -3099,7 +3099,7 @@ int dbg_debugfs_init(void)
        const char *fname;
        struct dentry *dent;
 
-       if (!IS_ENABLED(DEBUG_FS))
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;
 
        fname = "ubifs";
@@ -3166,7 +3166,7 @@ out:
  */
 void dbg_debugfs_exit(void)
 {
-       if (IS_ENABLED(DEBUG_FS))
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
                debugfs_remove_recursive(dfs_rootdir);
 }
 
index 2559d17..28ec13a 100644 (file)
@@ -939,8 +939,8 @@ static int find_dirtiest_idx_leb(struct ubifs_info *c)
        }
        dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty,
                 lp->free, lp->flags);
-       ubifs_assert(lp->flags | LPROPS_TAKEN);
-       ubifs_assert(lp->flags | LPROPS_INDEX);
+       ubifs_assert(lp->flags & LPROPS_TAKEN);
+       ubifs_assert(lp->flags & LPROPS_INDEX);
        return lnum;
 }
 
index ef3d1ba..15e2fc5 100644 (file)
@@ -718,8 +718,12 @@ static int fixup_free_space(struct ubifs_info *c)
                lnum = ubifs_next_log_lnum(c, lnum);
        }
 
-       /* Fixup the current log head */
-       err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
+       /*
+        * Fixup the log head, which contains only a CS node at the
+        * beginning.
+        */
+       err = fixup_leb(c, c->lhead_lnum,
+                       ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
        if (err)
                goto out;
 
index ac8a348..8d86a87 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/seq_file.h>
 #include <linux/bitmap.h>
 #include <linux/crc-itu-t.h>
+#include <linux/log2.h>
 #include <asm/byteorder.h>
 
 #include "udf_sb.h"
@@ -1215,16 +1216,65 @@ out_bh:
        return ret;
 }
 
+static int udf_load_sparable_map(struct super_block *sb,
+                                struct udf_part_map *map,
+                                struct sparablePartitionMap *spm)
+{
+       uint32_t loc;
+       uint16_t ident;
+       struct sparingTable *st;
+       struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
+       int i;
+       struct buffer_head *bh;
+
+       map->s_partition_type = UDF_SPARABLE_MAP15;
+       sdata->s_packet_len = le16_to_cpu(spm->packetLength);
+       if (!is_power_of_2(sdata->s_packet_len)) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Invalid packet length %u\n",
+                       (unsigned)sdata->s_packet_len);
+               return -EIO;
+       }
+       if (spm->numSparingTables > 4) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Too many sparing tables (%d)\n",
+                       (int)spm->numSparingTables);
+               return -EIO;
+       }
+
+       for (i = 0; i < spm->numSparingTables; i++) {
+               loc = le32_to_cpu(spm->locSparingTable[i]);
+               bh = udf_read_tagged(sb, loc, loc, &ident);
+               if (!bh)
+                       continue;
+
+               st = (struct sparingTable *)bh->b_data;
+               if (ident != 0 ||
+                   strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
+                           strlen(UDF_ID_SPARING)) ||
+                   sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
+                                                       sb->s_blocksize) {
+                       brelse(bh);
+                       continue;
+               }
+
+               sdata->s_spar_map[i] = bh;
+       }
+       map->s_partition_func = udf_get_pblock_spar15;
+       return 0;
+}
+
 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                               struct kernel_lb_addr *fileset)
 {
        struct logicalVolDesc *lvd;
-       int i, j, offset;
+       int i, offset;
        uint8_t type;
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct genericPartitionMap *gpm;
        uint16_t ident;
        struct buffer_head *bh;
+       unsigned int table_len;
        int ret = 0;
 
        bh = udf_read_tagged(sb, block, block, &ident);
@@ -1232,15 +1282,20 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                return 1;
        BUG_ON(ident != TAG_IDENT_LVD);
        lvd = (struct logicalVolDesc *)bh->b_data;
-
-       i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
-       if (i != 0) {
-               ret = i;
+       table_len = le32_to_cpu(lvd->mapTableLength);
+       if (sizeof(*lvd) + table_len > sb->s_blocksize) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Partition table too long (%u > %lu)\n", table_len,
+                       sb->s_blocksize - sizeof(*lvd));
                goto out_bh;
        }
 
+       ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
+       if (ret)
+               goto out_bh;
+
        for (i = 0, offset = 0;
-            i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
+            i < sbi->s_partitions && offset < table_len;
             i++, offset += gpm->partitionMapLength) {
                struct udf_part_map *map = &sbi->s_partmaps[i];
                gpm = (struct genericPartitionMap *)
@@ -1275,38 +1330,9 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_SPARABLE,
                                                strlen(UDF_ID_SPARABLE))) {
-                               uint32_t loc;
-                               struct sparingTable *st;
-                               struct sparablePartitionMap *spm =
-                                       (struct sparablePartitionMap *)gpm;
-
-                               map->s_partition_type = UDF_SPARABLE_MAP15;
-                               map->s_type_specific.s_sparing.s_packet_len =
-                                               le16_to_cpu(spm->packetLength);
-                               for (j = 0; j < spm->numSparingTables; j++) {
-                                       struct buffer_head *bh2;
-
-                                       loc = le32_to_cpu(
-                                               spm->locSparingTable[j]);
-                                       bh2 = udf_read_tagged(sb, loc, loc,
-                                                            &ident);
-                                       map->s_type_specific.s_sparing.
-                                                       s_spar_map[j] = bh2;
-
-                                       if (bh2 == NULL)
-                                               continue;
-
-                                       st = (struct sparingTable *)bh2->b_data;
-                                       if (ident != 0 || strncmp(
-                                               st->sparingIdent.ident,
-                                               UDF_ID_SPARING,
-                                               strlen(UDF_ID_SPARING))) {
-                                               brelse(bh2);
-                                               map->s_type_specific.s_sparing.
-                                                       s_spar_map[j] = NULL;
-                                       }
-                               }
-                               map->s_partition_func = udf_get_pblock_spar15;
+                               if (udf_load_sparable_map(sb, map,
+                                   (struct sparablePartitionMap *)gpm) < 0)
+                                       goto out_bh;
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_METADATA,
                                                strlen(UDF_ID_METADATA))) {
index 229641f..4f33c32 100644 (file)
@@ -1074,12 +1074,13 @@ restart:
         * If we couldn't get anything, give up.
         */
        if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
+               xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+
                if (!forced++) {
                        trace_xfs_alloc_near_busy(args);
                        xfs_log_force(args->mp, XFS_LOG_SYNC);
                        goto restart;
                }
-
                trace_xfs_alloc_size_neither(args);
                args->agbno = NULLAGBLOCK;
                return 0;
@@ -2433,15 +2434,24 @@ xfs_alloc_vextent_worker(
        current_restore_flags_nested(&pflags, PF_FSTRANS);
 }
 
-
-int                            /* error */
+/*
+ * Data allocation requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. Metadata
+ * requests, OTOH, are generally from low stack usage paths, so avoid the
+ * context switch overhead here.
+ */
+int
 xfs_alloc_vextent(
-       xfs_alloc_arg_t *args)  /* allocation argument structure */
+       struct xfs_alloc_arg    *args)
 {
        DECLARE_COMPLETION_ONSTACK(done);
 
+       if (!args->userdata)
+               return __xfs_alloc_vextent(args);
+
+
        args->done = &done;
-       INIT_WORK(&args->work, xfs_alloc_vextent_worker);
+       INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
        queue_work(xfs_alloc_wq, &args->work);
        wait_for_completion(&done);
        return args->result;
index ae31c31..8dad722 100644 (file)
@@ -981,10 +981,15 @@ xfs_vm_writepage(
                                imap_valid = 0;
                        }
                } else {
-                       if (PageUptodate(page)) {
+                       if (PageUptodate(page))
                                ASSERT(buffer_mapped(bh));
-                               imap_valid = 0;
-                       }
+                       /*
+                        * This buffer is not uptodate and will not be
+                        * written to disk.  Ensure that we will put any
+                        * subsequent writeable buffers into a new
+                        * ioend.
+                        */
+                       imap_valid = 0;
                        continue;
                }
 
index 172d3cc..269b35c 100644 (file)
@@ -201,14 +201,7 @@ xfs_buf_alloc(
        bp->b_length = numblks;
        bp->b_io_length = numblks;
        bp->b_flags = flags;
-
-       /*
-        * We do not set the block number here in the buffer because we have not
-        * finished initialising the buffer. We insert the buffer into the cache
-        * in this state, so this ensures that we are unable to do IO on a
-        * buffer that hasn't been fully initialised.
-        */
-       bp->b_bn = XFS_BUF_DADDR_NULL;
+       bp->b_bn = blkno;
        atomic_set(&bp->b_pin_count, 0);
        init_waitqueue_head(&bp->b_waiters);
 
@@ -567,11 +560,6 @@ xfs_buf_get(
        if (bp != new_bp)
                xfs_buf_free(new_bp);
 
-       /*
-        * Now we have a workable buffer, fill in the block number so
-        * that we can do IO on it.
-        */
-       bp->b_bn = blkno;
        bp->b_io_length = bp->b_length;
 
 found:
@@ -772,7 +760,7 @@ xfs_buf_get_uncached(
        int                     error, i;
        xfs_buf_t               *bp;
 
-       bp = xfs_buf_alloc(target, 0, numblks, 0);
+       bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0);
        if (unlikely(bp == NULL))
                goto fail;
 
@@ -1001,27 +989,6 @@ xfs_buf_ioerror_alert(
                (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
 }
 
-int
-xfs_bwrite(
-       struct xfs_buf          *bp)
-{
-       int                     error;
-
-       ASSERT(xfs_buf_islocked(bp));
-
-       bp->b_flags |= XBF_WRITE;
-       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
-
-       xfs_bdstrat_cb(bp);
-
-       error = xfs_buf_iowait(bp);
-       if (error) {
-               xfs_force_shutdown(bp->b_target->bt_mount,
-                                  SHUTDOWN_META_IO_ERROR);
-       }
-       return error;
-}
-
 /*
  * Called when we want to stop a buffer from getting written or read.
  * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
@@ -1091,14 +1058,7 @@ xfs_bioerror_relse(
        return EIO;
 }
 
-
-/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
+STATIC int
 xfs_bdstrat_cb(
        struct xfs_buf  *bp)
 {
@@ -1119,6 +1079,27 @@ xfs_bdstrat_cb(
        return 0;
 }
 
+int
+xfs_bwrite(
+       struct xfs_buf          *bp)
+{
+       int                     error;
+
+       ASSERT(xfs_buf_islocked(bp));
+
+       bp->b_flags |= XBF_WRITE;
+       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+
+       xfs_bdstrat_cb(bp);
+
+       error = xfs_buf_iowait(bp);
+       if (error) {
+               xfs_force_shutdown(bp->b_target->bt_mount,
+                                  SHUTDOWN_META_IO_ERROR);
+       }
+       return error;
+}
+
 /*
  * Wrapper around bdstrat so that we can stop data from going to disk in case
  * we are shutting down the filesystem.  Typically user data goes thru this
@@ -1255,7 +1236,7 @@ xfs_buf_iorequest(
         */
        atomic_set(&bp->b_io_remaining, 1);
        _xfs_buf_ioapply(bp);
-       _xfs_buf_ioend(bp, 0);
+       _xfs_buf_ioend(bp, 1);
 
        xfs_buf_rele(bp);
 }
index 7f1d139..79344c4 100644 (file)
@@ -180,7 +180,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 extern int xfs_bwrite(struct xfs_buf *bp);
 
 extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-extern int xfs_bdstrat_cb(struct xfs_buf *);
 
 extern void xfs_buf_ioend(xfs_buf_t *, int);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
index 45df2b8..d9e4511 100644 (file)
@@ -954,7 +954,7 @@ xfs_buf_iodone_callbacks(
 
                if (!XFS_BUF_ISSTALE(bp)) {
                        bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
-                       xfs_bdstrat_cb(bp);
+                       xfs_buf_iorequest(bp);
                } else {
                        xfs_buf_relse(bp);
                }
index 6cdbf90..d041d47 100644 (file)
@@ -504,6 +504,14 @@ xfs_inode_item_push(
                goto out_unlock;
        }
 
+       /*
+        * Stale inode items should force out the iclog.
+        */
+       if (ip->i_flags & XFS_ISTALE) {
+               rval = XFS_ITEM_PINNED;
+               goto out_unlock;
+       }
+
        /*
         * Someone else is already flushing the inode.  Nothing we can do
         * here but wait for the flush to finish and remove the item from
@@ -514,15 +522,6 @@ xfs_inode_item_push(
                goto out_unlock;
        }
 
-       /*
-        * Stale inode items should force out the iclog.
-        */
-       if (ip->i_flags & XFS_ISTALE) {
-               xfs_ifunlock(ip);
-               xfs_iunlock(ip, XFS_ILOCK_SHARED);
-               return XFS_ITEM_PINNED;
-       }
-
        ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
        ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
 
index f30d980..d90d4a3 100644 (file)
 kmem_zone_t    *xfs_log_ticket_zone;
 
 /* Local miscellaneous function prototypes */
-STATIC int      xlog_commit_record(struct log *log, struct xlog_ticket *ticket,
-                                   xlog_in_core_t **, xfs_lsn_t *);
+STATIC int
+xlog_commit_record(
+       struct xlog             *log,
+       struct xlog_ticket      *ticket,
+       struct xlog_in_core     **iclog,
+       xfs_lsn_t               *commitlsnp);
+
 STATIC xlog_t *  xlog_alloc_log(xfs_mount_t    *mp,
                                xfs_buftarg_t   *log_target,
                                xfs_daddr_t     blk_offset,
                                int             num_bblks);
-STATIC int      xlog_space_left(struct log *log, atomic64_t *head);
+STATIC int
+xlog_space_left(
+       struct xlog             *log,
+       atomic64_t              *head);
 STATIC int      xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
 STATIC void     xlog_dealloc_log(xlog_t *log);
 
@@ -64,8 +72,10 @@ STATIC void xlog_state_switch_iclogs(xlog_t          *log,
                                     int                eventual_size);
 STATIC void xlog_state_want_sync(xlog_t        *log, xlog_in_core_t *iclog);
 
-STATIC void xlog_grant_push_ail(struct log     *log,
-                               int             need_bytes);
+STATIC void
+xlog_grant_push_ail(
+       struct xlog     *log,
+       int             need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t       *log,
                                           xlog_ticket_t *ticket);
 STATIC void xlog_ungrant_log_space(xlog_t       *log,
@@ -73,7 +83,9 @@ STATIC void xlog_ungrant_log_space(xlog_t      *log,
 
 #if defined(DEBUG)
 STATIC void    xlog_verify_dest_ptr(xlog_t *log, char *ptr);
-STATIC void    xlog_verify_grant_tail(struct log *log);
+STATIC void
+xlog_verify_grant_tail(
+       struct xlog     *log);
 STATIC void    xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
                                  int count, boolean_t syncing);
 STATIC void    xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
@@ -89,9 +101,9 @@ STATIC int   xlog_iclogs_empty(xlog_t *log);
 
 static void
 xlog_grant_sub_space(
-       struct log      *log,
-       atomic64_t      *head,
-       int             bytes)
+       struct xlog             *log,
+       atomic64_t              *head,
+       int                     bytes)
 {
        int64_t head_val = atomic64_read(head);
        int64_t new, old;
@@ -115,9 +127,9 @@ xlog_grant_sub_space(
 
 static void
 xlog_grant_add_space(
-       struct log      *log,
-       atomic64_t      *head,
-       int             bytes)
+       struct xlog             *log,
+       atomic64_t              *head,
+       int                     bytes)
 {
        int64_t head_val = atomic64_read(head);
        int64_t new, old;
@@ -165,7 +177,7 @@ xlog_grant_head_wake_all(
 
 static inline int
 xlog_ticket_reservation(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic)
 {
@@ -182,7 +194,7 @@ xlog_ticket_reservation(
 
 STATIC bool
 xlog_grant_head_wake(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        int                     *free_bytes)
 {
@@ -204,7 +216,7 @@ xlog_grant_head_wake(
 
 STATIC int
 xlog_grant_head_wait(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     need_bytes)
@@ -256,7 +268,7 @@ shutdown:
  */
 STATIC int
 xlog_grant_head_check(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     *need_bytes)
@@ -323,7 +335,7 @@ xfs_log_regrant(
        struct xfs_mount        *mp,
        struct xlog_ticket      *tic)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        int                     need_bytes;
        int                     error = 0;
 
@@ -389,7 +401,7 @@ xfs_log_reserve(
        bool                    permanent,
        uint                    t_type)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xlog_ticket      *tic;
        int                     need_bytes;
        int                     error = 0;
@@ -465,7 +477,7 @@ xfs_log_done(
        struct xlog_in_core     **iclog,
        uint                    flags)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        xfs_lsn_t               lsn = 0;
 
        if (XLOG_FORCED_SHUTDOWN(log) ||
@@ -810,6 +822,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 void
 xfs_log_unmount(xfs_mount_t *mp)
 {
+       cancel_delayed_work_sync(&mp->m_sync_work);
        xfs_trans_ail_destroy(mp);
        xlog_dealloc_log(mp->m_log);
 }
@@ -838,7 +851,7 @@ void
 xfs_log_space_wake(
        struct xfs_mount        *mp)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        int                     free_bytes;
 
        if (XLOG_FORCED_SHUTDOWN(log))
@@ -916,7 +929,7 @@ xfs_lsn_t
 xlog_assign_tail_lsn_locked(
        struct xfs_mount        *mp)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xfs_log_item     *lip;
        xfs_lsn_t               tail_lsn;
 
@@ -965,7 +978,7 @@ xlog_assign_tail_lsn(
  */
 STATIC int
 xlog_space_left(
-       struct log      *log,
+       struct xlog     *log,
        atomic64_t      *head)
 {
        int             free_bytes;
@@ -1277,7 +1290,7 @@ out:
  */
 STATIC int
 xlog_commit_record(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_ticket      *ticket,
        struct xlog_in_core     **iclog,
        xfs_lsn_t               *commitlsnp)
@@ -1311,7 +1324,7 @@ xlog_commit_record(
  */
 STATIC void
 xlog_grant_push_ail(
-       struct log      *log,
+       struct xlog     *log,
        int             need_bytes)
 {
        xfs_lsn_t       threshold_lsn = 0;
@@ -1790,7 +1803,7 @@ xlog_write_start_rec(
 
 static xlog_op_header_t *
 xlog_write_setup_ophdr(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_op_header   *ophdr,
        struct xlog_ticket      *ticket,
        uint                    flags)
@@ -1873,7 +1886,7 @@ xlog_write_setup_copy(
 
 static int
 xlog_write_copy_finish(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_in_core     *iclog,
        uint                    flags,
        int                     *record_cnt,
@@ -1958,7 +1971,7 @@ xlog_write_copy_finish(
  */
 int
 xlog_write(
-       struct log              *log,
+       struct xlog             *log,
        struct xfs_log_vec      *log_vector,
        struct xlog_ticket      *ticket,
        xfs_lsn_t               *start_lsn,
@@ -2821,7 +2834,7 @@ _xfs_log_force(
        uint                    flags,
        int                     *log_flushed)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xlog_in_core     *iclog;
        xfs_lsn_t               lsn;
 
@@ -2969,7 +2982,7 @@ _xfs_log_force_lsn(
        uint                    flags,
        int                     *log_flushed)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xlog_in_core     *iclog;
        int                     already_slept = 0;
 
@@ -3147,7 +3160,7 @@ xfs_log_ticket_get(
  */
 xlog_ticket_t *
 xlog_ticket_alloc(
-       struct log      *log,
+       struct xlog     *log,
        int             unit_bytes,
        int             cnt,
        char            client,
@@ -3278,7 +3291,7 @@ xlog_ticket_alloc(
  */
 void
 xlog_verify_dest_ptr(
-       struct log      *log,
+       struct xlog     *log,
        char            *ptr)
 {
        int i;
@@ -3307,7 +3320,7 @@ xlog_verify_dest_ptr(
  */
 STATIC void
 xlog_verify_grant_tail(
-       struct log      *log)
+       struct xlog     *log)
 {
        int             tail_cycle, tail_blocks;
        int             cycle, space;
index 7d6197c..ddc4529 100644 (file)
@@ -44,7 +44,7 @@
  */
 static struct xlog_ticket *
 xlog_cil_ticket_alloc(
-       struct log      *log)
+       struct xlog     *log)
 {
        struct xlog_ticket *tic;
 
@@ -72,7 +72,7 @@ xlog_cil_ticket_alloc(
  */
 void
 xlog_cil_init_post_recovery(
-       struct log      *log)
+       struct xlog     *log)
 {
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
@@ -182,7 +182,7 @@ xlog_cil_prepare_log_vecs(
  */
 STATIC void
 xfs_cil_prepare_item(
-       struct log              *log,
+       struct xlog             *log,
        struct xfs_log_vec      *lv,
        int                     *len,
        int                     *diff_iovecs)
@@ -231,7 +231,7 @@ xfs_cil_prepare_item(
  */
 static void
 xlog_cil_insert_items(
-       struct log              *log,
+       struct xlog             *log,
        struct xfs_log_vec      *log_vector,
        struct xlog_ticket      *ticket)
 {
@@ -373,7 +373,7 @@ xlog_cil_committed(
  */
 STATIC int
 xlog_cil_push(
-       struct log              *log)
+       struct xlog             *log)
 {
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_vec      *lv;
@@ -601,7 +601,7 @@ xlog_cil_push_work(
  */
 static void
 xlog_cil_push_background(
-       struct log      *log)
+       struct xlog     *log)
 {
        struct xfs_cil  *cil = log->l_cilp;
 
@@ -629,7 +629,7 @@ xlog_cil_push_background(
 
 static void
 xlog_cil_push_foreground(
-       struct log      *log,
+       struct xlog     *log,
        xfs_lsn_t       push_seq)
 {
        struct xfs_cil  *cil = log->l_cilp;
@@ -683,7 +683,7 @@ xfs_log_commit_cil(
        xfs_lsn_t               *commit_lsn,
        int                     flags)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        int                     log_flags = 0;
        struct xfs_log_vec      *log_vector;
 
@@ -754,7 +754,7 @@ xfs_log_commit_cil(
  */
 xfs_lsn_t
 xlog_cil_force_lsn(
-       struct log      *log,
+       struct xlog     *log,
        xfs_lsn_t       sequence)
 {
        struct xfs_cil          *cil = log->l_cilp;
@@ -833,7 +833,7 @@ xfs_log_item_in_current_chkpt(
  */
 int
 xlog_cil_init(
-       struct log      *log)
+       struct xlog     *log)
 {
        struct xfs_cil  *cil;
        struct xfs_cil_ctx *ctx;
@@ -869,7 +869,7 @@ xlog_cil_init(
 
 void
 xlog_cil_destroy(
-       struct log      *log)
+       struct xlog     *log)
 {
        if (log->l_cilp->xc_ctx) {
                if (log->l_cilp->xc_ctx->ticket)
index 5bc3326..72eba22 100644 (file)
@@ -19,7 +19,7 @@
 #define __XFS_LOG_PRIV_H__
 
 struct xfs_buf;
-struct log;
+struct xlog;
 struct xlog_ticket;
 struct xfs_mount;
 
@@ -352,7 +352,7 @@ typedef struct xlog_in_core {
        struct xlog_in_core     *ic_next;
        struct xlog_in_core     *ic_prev;
        struct xfs_buf          *ic_bp;
-       struct log              *ic_log;
+       struct xlog             *ic_log;
        int                     ic_size;
        int                     ic_offset;
        int                     ic_bwritecnt;
@@ -409,7 +409,7 @@ struct xfs_cil_ctx {
  * operations almost as efficient as the old logging methods.
  */
 struct xfs_cil {
-       struct log              *xc_log;
+       struct xlog             *xc_log;
        struct list_head        xc_cil;
        spinlock_t              xc_cil_lock;
        struct xfs_cil_ctx      *xc_ctx;
@@ -487,7 +487,7 @@ struct xlog_grant_head {
  * overflow 31 bits worth of byte offset, so using a byte number will mean
  * that round off problems won't occur when releasing partial reservations.
  */
-typedef struct log {
+typedef struct xlog {
        /* The following fields don't need locking */
        struct xfs_mount        *l_mp;          /* mount point */
        struct xfs_ail          *l_ailp;        /* AIL log is working with */
@@ -553,9 +553,14 @@ extern int  xlog_recover_finish(xlog_t *log);
 extern void     xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
 
 extern kmem_zone_t *xfs_log_ticket_zone;
-struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes,
-                               int count, char client, bool permanent,
-                               xfs_km_flags_t alloc_flags);
+struct xlog_ticket *
+xlog_ticket_alloc(
+       struct xlog     *log,
+       int             unit_bytes,
+       int             count,
+       char            client,
+       bool            permanent,
+       xfs_km_flags_t  alloc_flags);
 
 
 static inline void
@@ -567,9 +572,14 @@ xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
 }
 
 void   xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
-int    xlog_write(struct log *log, struct xfs_log_vec *log_vector,
-                               struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
-                               xlog_in_core_t **commit_iclog, uint flags);
+int
+xlog_write(
+       struct xlog             *log,
+       struct xfs_log_vec      *log_vector,
+       struct xlog_ticket      *tic,
+       xfs_lsn_t               *start_lsn,
+       struct xlog_in_core     **commit_iclog,
+       uint                    flags);
 
 /*
  * When we crack an atomic LSN, we sample it first so that the value will not
@@ -629,17 +639,23 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
 /*
  * Committed Item List interfaces
  */
-int    xlog_cil_init(struct log *log);
-void   xlog_cil_init_post_recovery(struct log *log);
-void   xlog_cil_destroy(struct log *log);
+int
+xlog_cil_init(struct xlog *log);
+void
+xlog_cil_init_post_recovery(struct xlog *log);
+void
+xlog_cil_destroy(struct xlog *log);
 
 /*
  * CIL force routines
  */
-xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence);
+xfs_lsn_t
+xlog_cil_force_lsn(
+       struct xlog *log,
+       xfs_lsn_t sequence);
 
 static inline void
-xlog_cil_force(struct log *log)
+xlog_cil_force(struct xlog *log)
 {
        xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
 }
index ca38690..a7be98a 100644 (file)
@@ -1471,8 +1471,8 @@ xlog_recover_add_item(
 
 STATIC int
 xlog_recover_add_to_cont_trans(
-       struct log              *log,
-       xlog_recover_t          *trans,
+       struct xlog             *log,
+       struct xlog_recover     *trans,
        xfs_caddr_t             dp,
        int                     len)
 {
@@ -1517,8 +1517,8 @@ xlog_recover_add_to_cont_trans(
  */
 STATIC int
 xlog_recover_add_to_trans(
-       struct log              *log,
-       xlog_recover_t          *trans,
+       struct xlog             *log,
+       struct xlog_recover     *trans,
        xfs_caddr_t             dp,
        int                     len)
 {
@@ -1588,8 +1588,8 @@ xlog_recover_add_to_trans(
  */
 STATIC int
 xlog_recover_reorder_trans(
-       struct log              *log,
-       xlog_recover_t          *trans,
+       struct xlog             *log,
+       struct xlog_recover     *trans,
        int                     pass)
 {
        xlog_recover_item_t     *item, *n;
@@ -1642,8 +1642,8 @@ xlog_recover_reorder_trans(
  */
 STATIC int
 xlog_recover_buffer_pass1(
-       struct log              *log,
-       xlog_recover_item_t     *item)
+       struct xlog                     *log,
+       struct xlog_recover_item        *item)
 {
        xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
        struct list_head        *bucket;
@@ -1696,7 +1696,7 @@ xlog_recover_buffer_pass1(
  */
 STATIC int
 xlog_check_buffer_cancelled(
-       struct log              *log,
+       struct xlog             *log,
        xfs_daddr_t             blkno,
        uint                    len,
        ushort                  flags)
@@ -2689,9 +2689,9 @@ xlog_recover_free_trans(
 
 STATIC int
 xlog_recover_commit_pass1(
-       struct log              *log,
-       struct xlog_recover     *trans,
-       xlog_recover_item_t     *item)
+       struct xlog                     *log,
+       struct xlog_recover             *trans,
+       struct xlog_recover_item        *item)
 {
        trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
 
@@ -2716,10 +2716,10 @@ xlog_recover_commit_pass1(
 
 STATIC int
 xlog_recover_commit_pass2(
-       struct log              *log,
-       struct xlog_recover     *trans,
-       struct list_head        *buffer_list,
-       xlog_recover_item_t     *item)
+       struct xlog                     *log,
+       struct xlog_recover             *trans,
+       struct list_head                *buffer_list,
+       struct xlog_recover_item        *item)
 {
        trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
 
@@ -2753,7 +2753,7 @@ xlog_recover_commit_pass2(
  */
 STATIC int
 xlog_recover_commit_trans(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_recover     *trans,
        int                     pass)
 {
@@ -2793,8 +2793,8 @@ out:
 
 STATIC int
 xlog_recover_unmount_trans(
-       struct log              *log,
-       xlog_recover_t          *trans)
+       struct xlog             *log,
+       struct xlog_recover     *trans)
 {
        /* Do nothing now */
        xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
index 8b89c5a..90c1fc9 100644 (file)
@@ -53,7 +53,7 @@ typedef struct xfs_trans_reservations {
 
 #include "xfs_sync.h"
 
-struct log;
+struct xlog;
 struct xfs_mount_args;
 struct xfs_inode;
 struct xfs_bmbt_irec;
@@ -133,7 +133,7 @@ typedef struct xfs_mount {
        uint                    m_readio_blocks; /* min read size blocks */
        uint                    m_writeio_log;  /* min write size log bytes */
        uint                    m_writeio_blocks; /* min write size blocks */
-       struct log              *m_log;         /* log specific stuff */
+       struct xlog             *m_log;         /* log specific stuff */
        int                     m_logbufs;      /* number of log buffers */
        int                     m_logbsize;     /* size of each log buffer */
        uint                    m_rsumlevels;   /* rt summary levels */
index c9d3409..1e9ee06 100644 (file)
@@ -386,23 +386,23 @@ xfs_sync_worker(
         * We shouldn't write/force the log if we are in the mount/unmount
         * process or on a read only filesystem. The workqueue still needs to be
         * active in both cases, however, because it is used for inode reclaim
-        * during these times.  Use the s_umount semaphore to provide exclusion
-        * with unmount.
+        * during these times.  Use the MS_ACTIVE flag to avoid doing anything
+        * during mount.  Doing work during unmount is avoided by calling
+        * cancel_delayed_work_sync on this work queue before tearing down
+        * the ail and the log in xfs_log_unmount.
         */
-       if (down_read_trylock(&mp->m_super->s_umount)) {
-               if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-                       /* dgc: errors ignored here */
-                       if (mp->m_super->s_frozen == SB_UNFROZEN &&
-                           xfs_log_need_covered(mp))
-                               error = xfs_fs_log_dummy(mp);
-                       else
-                               xfs_log_force(mp, 0);
-
-                       /* start pushing all the metadata that is currently
-                        * dirty */
-                       xfs_ail_push_all(mp->m_ail);
-               }
-               up_read(&mp->m_super->s_umount);
+       if (!(mp->m_super->s_flags & MS_ACTIVE) &&
+           !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               /* dgc: errors ignored here */
+               if (mp->m_super->s_frozen == SB_UNFROZEN &&
+                   xfs_log_need_covered(mp))
+                       error = xfs_fs_log_dummy(mp);
+               else
+                       xfs_log_force(mp, 0);
+
+               /* start pushing all the metadata that is currently
+                * dirty */
+               xfs_ail_push_all(mp->m_ail);
        }
 
        /* queue us up again */
index 7cf9d35..caf5dab 100644 (file)
@@ -32,7 +32,7 @@ struct xfs_da_node_entry;
 struct xfs_dquot;
 struct xfs_log_item;
 struct xlog_ticket;
-struct log;
+struct xlog;
 struct xlog_recover;
 struct xlog_recover_item;
 struct xfs_buf_log_format;
@@ -762,7 +762,7 @@ DEFINE_DQUOT_EVENT(xfs_dqflush_force);
 DEFINE_DQUOT_EVENT(xfs_dqflush_done);
 
 DECLARE_EVENT_CLASS(xfs_loggrant_class,
-       TP_PROTO(struct log *log, struct xlog_ticket *tic),
+       TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
        TP_ARGS(log, tic),
        TP_STRUCT__entry(
                __field(dev_t, dev)
@@ -830,7 +830,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
 
 #define DEFINE_LOGGRANT_EVENT(name) \
 DEFINE_EVENT(xfs_loggrant_class, name, \
-       TP_PROTO(struct log *log, struct xlog_ticket *tic), \
+       TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
        TP_ARGS(log, tic))
 DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
 DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
@@ -1664,7 +1664,7 @@ DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
 DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
-       TP_PROTO(struct log *log, struct xlog_recover *trans,
+       TP_PROTO(struct xlog *log, struct xlog_recover *trans,
                struct xlog_recover_item *item, int pass),
        TP_ARGS(log, trans, item, pass),
        TP_STRUCT__entry(
@@ -1698,7 +1698,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
 
 #define DEFINE_LOG_RECOVER_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_item_class, name, \
-       TP_PROTO(struct log *log, struct xlog_recover *trans, \
+       TP_PROTO(struct xlog *log, struct xlog_recover *trans, \
                struct xlog_recover_item *item, int pass), \
        TP_ARGS(log, trans, item, pass))
 
@@ -1709,7 +1709,7 @@ DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
 DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
-       TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
+       TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f),
        TP_ARGS(log, buf_f),
        TP_STRUCT__entry(
                __field(dev_t, dev)
@@ -1739,7 +1739,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
 
 #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
-       TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
+       TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \
        TP_ARGS(log, buf_f))
 
 DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
@@ -1752,7 +1752,7 @@ DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
 DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
-       TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
+       TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f),
        TP_ARGS(log, in_f),
        TP_STRUCT__entry(
                __field(dev_t, dev)
@@ -1790,7 +1790,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
 )
 #define DEFINE_LOG_RECOVER_INO_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
-       TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
+       TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \
        TP_ARGS(log, in_f))
 
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
index 9f02005..7d10f96 100644 (file)
@@ -2,12 +2,19 @@
 #define _ASM_GENERIC_BUG_H
 
 #include <linux/compiler.h>
+
+#ifdef CONFIG_GENERIC_BUG
+#define BUGFLAG_WARNING                (1 << 0)
+#define BUGFLAG_TAINT(taint)   (BUGFLAG_WARNING | ((taint) << 8))
+#define BUG_GET_TAINT(bug)     ((bug)->flags >> 8)
+#endif
+
+#ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 
 #ifdef CONFIG_BUG
 
 #ifdef CONFIG_GENERIC_BUG
-#ifndef __ASSEMBLY__
 struct bug_entry {
 #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
        unsigned long   bug_addr;
@@ -24,12 +31,6 @@ struct bug_entry {
 #endif
        unsigned short  flags;
 };
-#endif         /* __ASSEMBLY__ */
-
-#define BUGFLAG_WARNING                (1 << 0)
-#define BUGFLAG_TAINT(taint)   (BUGFLAG_WARNING | ((taint) << 8))
-#define BUG_GET_TAINT(bug)     ((bug)->flags >> 8)
-
 #endif /* CONFIG_GENERIC_BUG */
 
 /*
@@ -61,7 +62,6 @@ struct bug_entry {
  * to provide better diagnostics.
  */
 #ifndef __WARN_TAINT
-#ifndef __ASSEMBLY__
 extern __printf(3, 4)
 void warn_slowpath_fmt(const char *file, const int line,
                       const char *fmt, ...);
@@ -70,7 +70,6 @@ void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
                             const char *fmt, ...);
 extern void warn_slowpath_null(const char *file, const int line);
 #define WANT_WARN_ON_SLOWPATH
-#endif
 #define __WARN()               warn_slowpath_null(__FILE__, __LINE__)
 #define __WARN_printf(arg...)  warn_slowpath_fmt(__FILE__, __LINE__, arg)
 #define __WARN_printf_taint(taint, arg...)                             \
@@ -203,4 +202,6 @@ extern void warn_slowpath_null(const char *file, const int line);
 # define WARN_ON_SMP(x)                        ({0;})
 #endif
 
+#endif /* __ASSEMBLY__ */
+
 #endif
index c544356..294b1e7 100644 (file)
@@ -18,7 +18,7 @@ static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
 {
        if (dev)
                dev->cma_area = cma;
-       if (!dev || !dma_contiguous_default_area)
+       if (!dev && !dma_contiguous_default_area)
                dma_contiguous_default_area = cma;
 }
 
index 6f2b45a..ff4947b 100644 (file)
@@ -484,6 +484,16 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
        /*
         * The barrier will stabilize the pmdval in a register or on
         * the stack so that it will stop changing under the code.
+        *
+        * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+        * pmd_read_atomic is allowed to return a non-atomic pmdval
+        * (for example pointing to a hugepage that has never been
+        * mapped in the pmd). The checks below only care about
+        * the low part of the pmd with 32bit PAE x86 anyway, with the
+        * exception of pmd_none(). So the important thing is that if
+        * the low part of the pmd is found null, the high part will
+        * also be null, or the pmd_none() check below would be
+        * confused.
         */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        barrier();
index 81368ab..a7aec39 100644 (file)
@@ -1,7 +1,3 @@
-/*
-   This file is auto-generated from the drm_pciids.txt in the DRM CVS
-   Please contact dri-devel@lists.sf.net to add new cards to this list
-*/
 #define radeon_PCI_IDS \
        {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index 2314ad8..b1a520e 100644 (file)
@@ -140,6 +140,7 @@ struct kiocb {
                (x)->ki_dtor = NULL;                    \
                (x)->ki_obj.tsk = tsk;                  \
                (x)->ki_user_data = 0;                  \
+               (x)->private = NULL;                    \
        } while (0)
 
 #define AIO_RING_MAGIC                 0xa10a10a1
index ba43f40..07954b0 100644 (file)
@@ -827,7 +827,6 @@ extern bool __blk_end_request_err(struct request *rq, int error);
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_abort_queue(struct request_queue *);
 extern void blk_unprep_request(struct request *);
 
 /*
index 324fe08..6d6795d 100644 (file)
@@ -91,6 +91,11 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                  unsigned long size,
                                  unsigned long align,
                                  unsigned long goal);
+void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+                                 unsigned long size,
+                                 unsigned long align,
+                                 unsigned long goal,
+                                 unsigned long limit);
 extern void *__alloc_bootmem_low(unsigned long size,
                                 unsigned long align,
                                 unsigned long goal);
index 68d56ef..d10b7ed 100644 (file)
@@ -360,11 +360,11 @@ struct cpu_vfs_cap_data {
 
 #define CAP_WAKE_ALARM            35
 
-/* Allow preventing system suspends while epoll events are pending */
+/* Allow preventing system suspends */
 
-#define CAP_EPOLLWAKEUP      36
+#define CAP_BLOCK_SUSPEND    36
 
-#define CAP_LAST_CAP         CAP_EPOLLWAKEUP
+#define CAP_LAST_CAP         CAP_BLOCK_SUSPEND
 
 #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
 
index 2521a95..44c87e7 100644 (file)
@@ -163,16 +163,8 @@ struct ceph_connection {
 
        /* connection negotiation temps */
        char in_banner[CEPH_BANNER_MAX_LEN];
-       union {
-               struct {  /* outgoing connection */
-                       struct ceph_msg_connect out_connect;
-                       struct ceph_msg_connect_reply in_reply;
-               };
-               struct {  /* incoming */
-                       struct ceph_msg_connect in_connect;
-                       struct ceph_msg_connect_reply out_reply;
-               };
-       };
+       struct ceph_msg_connect out_connect;
+       struct ceph_msg_connect_reply in_reply;
        struct ceph_entity_addr actual_peer_addr;
 
        /* message out temps */
index e5834aa..6a6d7ae 100644 (file)
@@ -47,9 +47,9 @@
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-# define inline                inline          __attribute__((always_inline))
-# define __inline__    __inline__      __attribute__((always_inline))
-# define __inline      __inline        __attribute__((always_inline))
+# define inline                inline          __attribute__((always_inline)) notrace
+# define __inline__    __inline__      __attribute__((always_inline)) notrace
+# define __inline      __inline        __attribute__((always_inline)) notrace
 #else
 /* A lot of inline functions can cause havoc with function tracing */
 # define inline                inline          notrace
index 161d962..6de9415 100644 (file)
@@ -865,8 +865,6 @@ extern int (*platform_notify_remove)(struct device *dev);
 extern struct device *get_device(struct device *dev);
 extern void put_device(struct device *dev);
 
-extern void wait_for_device_probe(void);
-
 #ifdef CONFIG_DEVTMPFS
 extern int devtmpfs_create_node(struct device *dev);
 extern int devtmpfs_delete_node(struct device *dev);
index 6f8be32..f4bb378 100644 (file)
@@ -34,7 +34,7 @@
  * re-allowed until epoll_wait is called again after consuming the wakeup
  * event(s).
  *
- * Requires CAP_EPOLLWAKEUP
+ * Requires CAP_BLOCK_SUSPEND
  */
 #define EPOLLWAKEUP (1 << 29)
 
index 176a939..af961d6 100644 (file)
@@ -65,7 +65,7 @@ struct trace_iterator {
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
-       struct ring_buffer_iter *buffer_iter[NR_CPUS];
+       struct ring_buffer_iter **buffer_iter;
        unsigned long           iter_flags;
 
        /* trace_seq for __print_flags() and __print_symbolic() etc. */
@@ -207,6 +207,9 @@ struct ftrace_event_call {
         *   bit 1:             enabled
         *   bit 2:             filter_active
         *   bit 3:             enabled cmd record
+        *   bit 4:             allow trace by non root (cap any)
+        *   bit 5:             failed to apply filter
+        *   bit 6:             ftrace internal event (do not enable)
         *
         * Changes to flags must hold the event_mutex.
         *
index f07fc2d..2e31e8b 100644 (file)
@@ -22,8 +22,8 @@
 /* Gpio pin is open source */
 #define GPIOF_OPEN_SOURCE      (1 << 3)
 
-#define GPIOF_EXPORT           (1 << 2)
-#define GPIOF_EXPORT_CHANGEABLE        (1 << 3)
+#define GPIOF_EXPORT           (1 << 4)
+#define GPIOF_EXPORT_CHANGEABLE        (1 << 5)
 #define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
 #define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
 
index fd0dc30..cc07d27 100644 (file)
@@ -165,6 +165,7 @@ enum  hrtimer_base_type {
  * @lock:              lock protecting the base and associated clock bases
  *                     and timers
  * @active_bases:      Bitfield to mark bases with active timers
+ * @clock_was_set:     Indicates that clock was set from irq context.
  * @expires_next:      absolute time of the next event which was scheduled
  *                     via clock_set_next_event()
  * @hres_active:       State of high resolution mode
@@ -177,7 +178,8 @@ enum  hrtimer_base_type {
  */
 struct hrtimer_cpu_base {
        raw_spinlock_t                  lock;
-       unsigned long                   active_bases;
+       unsigned int                    active_bases;
+       unsigned int                    clock_was_set;
 #ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t                         expires_next;
        int                             hres_active;
@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
 # define MONOTONIC_RES_NSEC    HIGH_RES_NSEC
 # define KTIME_MONOTONIC_RES   KTIME_HIGH_RES
 
+extern void clock_was_set_delayed(void);
+
 #else
 
 # define MONOTONIC_RES_NSEC    LOW_RES_NSEC
@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
        return 0;
 }
+
+static inline void clock_was_set_delayed(void) { }
+
 #endif
 
 extern void clock_was_set(void);
@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
 extern ktime_t ktime_get_boottime(void);
 extern ktime_t ktime_get_monotonic_offset(void);
+extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
index 9e65eff..8a74761 100644 (file)
@@ -168,8 +168,8 @@ extern struct cred init_cred;
        .children       = LIST_HEAD_INIT(tsk.children),                 \
        .sibling        = LIST_HEAD_INIT(tsk.sibling),                  \
        .group_leader   = &tsk,                                         \
-       RCU_INIT_POINTER(.real_cred, &init_cred),                       \
-       RCU_INIT_POINTER(.cred, &init_cred),                            \
+       RCU_POINTER_INITIALIZER(real_cred, &init_cred),                 \
+       RCU_POINTER_INITIALIZER(cred, &init_cred),                      \
        .comm           = INIT_TASK_COMM,                               \
        .thread         = INIT_THREAD,                                  \
        .fs             = &init_fs,                                     \
index a816714..2740d08 100644 (file)
@@ -116,6 +116,7 @@ struct input_keymap_entry {
 
 /**
  * EVIOCGMTSLOTS(len) - get MT slot values
+ * @len: size of the data buffer in bytes
  *
  * The ioctl buffer argument should be binary equivalent to
  *
index e6ca56d..78e2ada 100644 (file)
@@ -308,6 +308,8 @@ enum {
 
 struct intel_iommu {
        void __iomem    *reg; /* Pointer to hardware regs, virtual addr */
+       u64             reg_phys; /* physical address of hw register set */
+       u64             reg_size; /* size of hw register set */
        u64             cap;
        u64             ecap;
        u32             gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
index 61f5cec..a5261e3 100644 (file)
@@ -301,8 +301,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  * @irq_pm_shutdown:   function called from core code on shutdown once per chip
  * @irq_print_chip:    optional to print special chip info in show_interrupts
  * @flags:             chip specific flags
- *
- * @release:           release function solely used by UML
  */
 struct irq_chip {
        const char      *name;
index c513a40..0976fc4 100644 (file)
@@ -42,8 +42,7 @@
  * allowed.
  *
  * Not initializing the key (static data is initialized to 0s anyway) is the
- * same as using STATIC_KEY_INIT_FALSE and static_key_false() is
- * equivalent with static_branch().
+ * same as using STATIC_KEY_INIT_FALSE.
  *
 */
 
@@ -107,12 +106,6 @@ static __always_inline bool static_key_true(struct static_key *key)
        return !static_key_false(key);
 }
 
-/* Deprecated. Please use 'static_key_false() instead. */
-static __always_inline bool static_branch(struct static_key *key)
-{
-       return arch_static_branch(key);
-}
-
 extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
@@ -166,14 +159,6 @@ static __always_inline bool static_key_true(struct static_key *key)
        return false;
 }
 
-/* Deprecated. Please use 'static_key_false() instead. */
-static __always_inline bool static_branch(struct static_key *key)
-{
-       if (unlikely(atomic_read(&key->enabled)) > 0)
-               return true;
-       return false;
-}
-
 static inline void static_key_slow_inc(struct static_key *key)
 {
        atomic_inc(&key->enabled);
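
With static_branch() removed above, callers are expected to test a struct static_key with static_key_false() instead. A stand-alone sketch (not from this merge; the example_* names are made up):

#include <linux/jump_label.h>

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void example_hot_path(void)
{
        /* Compiles to a patched-out branch while the key is disabled. */
        if (static_key_false(&example_key)) {
                /* rarely-enabled extra work */
        }
}

static void example_enable(void)
{
        static_key_slow_inc(&example_key);      /* flips the branch on */
}

static void example_disable(void)
{
        static_key_slow_dec(&example_key);
}
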
index e07f5e0..6043821 100644 (file)
@@ -377,7 +377,6 @@ extern enum system_states {
        SYSTEM_HALT,
        SYSTEM_POWER_OFF,
        SYSTEM_RESTART,
-       SYSTEM_SUSPEND_DISK,
 } system_state;
 
 #define TAINT_PROPRIETARY_MODULE       0
index 4cd22ed..cef3b31 100644 (file)
@@ -303,7 +303,9 @@ static inline bool key_is_instantiated(const struct key *key)
                                   rwsem_is_locked(&((struct key *)(KEY))->sem)))
 
 #define rcu_assign_keypointer(KEY, PAYLOAD)                            \
-       (rcu_assign_pointer((KEY)->payload.rcudata, PAYLOAD))
+do {                                                                   \
+       rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD));          \
+} while (0)
 
 #ifdef CONFIG_SYSCTL
 extern ctl_table key_sysctls[];
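
The do { ... } while (0) wrapper above makes rcu_assign_keypointer() behave as a single statement. A stand-alone illustration (placeholder macro and names, not from this merge) of why that matters when such a macro is used in an unbraced if/else:

#include <stddef.h>

/* A bare braced block would leave a stray ';' before the 'else' below. */
#define SET_PTR(p, v)                           \
do {                                            \
        (p) = (v);                              \
} while (0)

static void example(int have_value, int **slot, int *value)
{
        if (have_value)
                SET_PTR(*slot, value);  /* expands to exactly one statement */
        else
                *slot = NULL;
}
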
index 35f7237..2e7a1e0 100644 (file)
@@ -21,6 +21,7 @@
  * is passed to the kernel.
  */
 enum kmsg_dump_reason {
+       KMSG_DUMP_UNDEF,
        KMSG_DUMP_PANIC,
        KMSG_DUMP_OOPS,
        KMSG_DUMP_EMERG,
@@ -31,23 +32,42 @@ enum kmsg_dump_reason {
 
 /**
  * struct kmsg_dumper - kernel crash message dumper structure
- * @dump:      The callback which gets called on crashes. The buffer is passed
- *             as two sections, where s1 (length l1) contains the older
- *             messages and s2 (length l2) contains the newer.
  * @list:      Entry in the dumper list (private)
+ * @dump:      Call into dumping code which will retrieve the data
+ *             through the record iterator
+ * @max_reason:        filter for highest reason number that should be dumped
  * @registered:        Flag that specifies if this is already registered
  */
 struct kmsg_dumper {
-       void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
-                       const char *s1, unsigned long l1,
-                       const char *s2, unsigned long l2);
        struct list_head list;
-       int registered;
+       void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+       enum kmsg_dump_reason max_reason;
+       bool active;
+       bool registered;
+
+       /* private state of the kmsg iterator */
+       u32 cur_idx;
+       u32 next_idx;
+       u64 cur_seq;
+       u64 next_seq;
 };
 
 #ifdef CONFIG_PRINTK
 void kmsg_dump(enum kmsg_dump_reason reason);
 
+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+                              char *line, size_t size, size_t *len);
+
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+                       char *line, size_t size, size_t *len);
+
+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+                         char *buf, size_t size, size_t *len);
+
+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+
+void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+
 int kmsg_dump_register(struct kmsg_dumper *dumper);
 
 int kmsg_dump_unregister(struct kmsg_dumper *dumper);
@@ -56,6 +76,33 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason)
 {
 }
 
+static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
+                                            bool syslog, const char *line,
+                                            size_t size, size_t *len)
+{
+       return false;
+}
+
+static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+                               const char *line, size_t size, size_t *len)
+{
+       return false;
+}
+
+static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+                                       char *buf, size_t size, size_t *len)
+{
+       return false;
+}
+
+static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+{
+}
+
+static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+{
+}
+
 static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
 {
        return -EINVAL;
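
A minimal sketch of a dumper written against the record-iterator API declared above (not part of this merge; the example_* names and the 4 KiB buffer are placeholders):

#include <linux/init.h>
#include <linux/kmsg_dump.h>

static char example_log[4096];

static void example_do_dump(struct kmsg_dumper *dumper,
                            enum kmsg_dump_reason reason)
{
        size_t len;

        /* Grab as much of the tail of the kernel log as fits in one call. */
        if (!kmsg_dump_get_buffer(dumper, false, example_log,
                                  sizeof(example_log), &len))
                return;

        /* ... hand example_log[0..len) to some persistent store ... */
}

static struct kmsg_dumper example_dumper = {
        .dump           = example_do_dump,
        .max_reason     = KMSG_DUMP_OOPS,       /* only panics and oopses */
};

static int __init example_dumper_init(void)
{
        return kmsg_dump_register(&example_dumper);
}
device_initcall(example_dumper_init);
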
index c446435..96c158a 100644 (file)
@@ -815,7 +815,7 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 
 void kvm_eventfd_init(struct kvm *kvm);
-int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
 void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
@@ -824,7 +824,7 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
 
-static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
        return -EINVAL;
 }
index a6bb102..19dc455 100644 (file)
@@ -50,9 +50,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
-int memblock_free_reserved_regions(void);
-int memblock_reserve_reserved_regions(void);
-
+phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
index dad95bd..704a626 100644 (file)
@@ -57,8 +57,18 @@ struct page {
                };
 
                union {
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+       defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
                        /* Used for cmpxchg_double in slub */
                        unsigned long counters;
+#else
+                       /*
+                        * Keep _count separate from slub cmpxchg_double data.
+                        * The rest of the double word is protected by
+                        * slab_lock, but _count is not.
+                        */
+                       unsigned counters;
+#endif
 
                        struct {
 
index 5cdc96d..e78c0e2 100644 (file)
@@ -4,7 +4,7 @@
  * SDHCI declarations specific to ST SPEAr platform
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index c9fe66c..17446d3 100644 (file)
@@ -98,7 +98,9 @@
 
 #define SDIO_CCCR_IF           0x07    /* bus interface controls */
 
+#define  SDIO_BUS_WIDTH_MASK   0x03    /* data bus width setting */
 #define  SDIO_BUS_WIDTH_1BIT   0x00
+#define  SDIO_BUS_WIDTH_RESERVED 0x01
 #define  SDIO_BUS_WIDTH_4BIT   0x02
 #define  SDIO_BUS_ECSI         0x20    /* Enable continuous SPI interrupt */
 #define  SDIO_BUS_SCSI         0x40    /* Support continuous SPI interrupt */
index 2427706..68c569f 100644 (file)
@@ -694,7 +694,7 @@ typedef struct pglist_data {
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
-       struct task_struct *kswapd;
+       struct task_struct *kswapd;     /* Protected by lock_memory_hotplug() */
        int kswapd_max_order;
        enum zone_type classzone_idx;
 } pg_data_t;
index fbb78fb..f58325a 100644 (file)
@@ -25,6 +25,7 @@ struct nfs41_impl_id;
  */
 struct nfs_client {
        atomic_t                cl_count;
+       atomic_t                cl_mds_count;
        int                     cl_cons_state;  /* current construction state (-ve: init error) */
 #define NFS_CS_READY           0               /* ready to be used */
 #define NFS_CS_INITING         1               /* busy initialising */
index a6ee9aa..a7b4fc3 100644 (file)
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller platform data header file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index fefb4e1..d8c379d 100644 (file)
@@ -176,8 +176,6 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
        /* Provide indication device is assigned by a Virtual Machine Manager */
        PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
-       /* Device causes system crash if in D3 during S3 sleep */
-       PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
 };
 
 enum pci_irq_reroute_variant {
index ab741b0..5f18702 100644 (file)
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB7  0x3c27
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB8  0x3c2e
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB9  0x3c2f
+#define PCI_DEVICE_ID_INTEL_UNC_HA     0x3c46
+#define PCI_DEVICE_ID_INTEL_UNC_IMC0   0x3cb0
+#define PCI_DEVICE_ID_INTEL_UNC_IMC1   0x3cb1
+#define PCI_DEVICE_ID_INTEL_UNC_IMC2   0x3cb4
+#define PCI_DEVICE_ID_INTEL_UNC_IMC3   0x3cb5
+#define PCI_DEVICE_ID_INTEL_UNC_QPI0   0x3c41
+#define PCI_DEVICE_ID_INTEL_UNC_QPI1   0x3c42
+#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
+#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
+#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX      0x3ce0
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB   0x402f
 #define PCI_DEVICE_ID_INTEL_5100_16    0x65f0
 #define PCI_DEVICE_ID_INTEL_5100_21    0x65f5
index 45db49f..76c5c8b 100644 (file)
@@ -677,6 +677,7 @@ struct hw_perf_event {
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
+                       int             event_base_rdpmc;
                        int             idx;
                        int             last_cpu;
 
@@ -1106,6 +1107,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                struct task_struct *task,
                                perf_overflow_handler_t callback,
                                void *context);
+extern void perf_pmu_migrate_context(struct pmu *pmu,
+                               int src_cpu, int dst_cpu);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
 
index 3988012..289760f 100644 (file)
  * Changing LSM security domain is considered a new privilege.  So, for example,
  * asking selinux for a specific new context (e.g. with runcon) will result
  * in execve returning -EPERM.
+ *
+ * See Documentation/prctl/no_new_privs.txt for more details.
  */
 #define PR_SET_NO_NEW_PRIVS    38
 #define PR_GET_NO_NEW_PRIVS    39
index 7ed7fd4..3b823d4 100644 (file)
@@ -69,12 +69,14 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start,
                                                       size_t size,
                                                       bool ecc);
 void persistent_ram_free(struct persistent_ram_zone *prz);
+void persistent_ram_zap(struct persistent_ram_zone *prz);
 struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
                bool ecc);
 
 int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
        unsigned int count);
 
+void persistent_ram_save_old(struct persistent_ram_zone *prz);
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
 void *persistent_ram_old(struct persistent_ram_zone *prz);
 void persistent_ram_free_old(struct persistent_ram_zone *prz);
index 44835fb..f366320 100644 (file)
@@ -160,7 +160,9 @@ enum pxa_ssp_type {
        PXA25x_SSP,  /* pxa 210, 250, 255, 26x */
        PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
        PXA27x_SSP,
+       PXA3xx_SSP,
        PXA168_SSP,
+       PXA910_SSP,
        CE4100_SSP,
 };
 
index 26d1a47..115ead2 100644 (file)
@@ -147,6 +147,7 @@ extern void synchronize_sched(void);
 
 extern void __rcu_read_lock(void);
 extern void __rcu_read_unlock(void);
+extern void rcu_read_unlock_special(struct task_struct *t);
 void synchronize_rcu(void);
 
 /*
@@ -184,7 +185,6 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
-extern void rcu_preempt_note_context_switch(void);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 extern void rcu_idle_enter(void);
@@ -256,6 +256,10 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
+extern int rcu_is_cpu_idle(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
+
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
 bool rcu_lockdep_current_cpu_online(void);
 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -267,15 +271,6 @@ static inline bool rcu_lockdep_current_cpu_online(void)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-#ifdef CONFIG_PROVE_RCU
-extern int rcu_is_cpu_idle(void);
-#else /* !CONFIG_PROVE_RCU */
-static inline int rcu_is_cpu_idle(void)
-{
-       return 0;
-}
-#endif /* else !CONFIG_PROVE_RCU */
-
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
        lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
@@ -432,8 +427,7 @@ extern int rcu_my_thread_group_empty(void);
 static inline void rcu_preempt_sleep_check(void)
 {
        rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
-                          "Illegal context switch in RCU read-side "
-                          "critical section");
+                          "Illegal context switch in RCU read-side critical section");
 }
 #else /* #ifdef CONFIG_PROVE_RCU */
 static inline void rcu_preempt_sleep_check(void)
@@ -514,10 +508,10 @@ static inline void rcu_preempt_sleep_check(void)
                (_________p1); \
        })
 #define __rcu_assign_pointer(p, v, space) \
-       ({ \
+       do { \
                smp_wmb(); \
                (p) = (typeof(*v) __force space *)(v); \
-       })
+       } while (0)
 
 
 /**
@@ -852,7 +846,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  *
  * Assigns the specified value to the specified RCU-protected
  * pointer, ensuring that any concurrent RCU readers will see
- * any prior initialization.  Returns the value assigned.
+ * any prior initialization.
  *
  * Inserts memory barriers on architectures that require them
  * (which is most of them), and also prevents the compiler from
@@ -904,25 +898,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * the reader-accessible portions of the linked structure.
  */
 #define RCU_INIT_POINTER(p, v) \
-               p = (typeof(*v) __force __rcu *)(v)
-
-static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
-{
-       return offset < 4096;
-}
-
-static __always_inline
-void __kfree_rcu(struct rcu_head *head, unsigned long offset)
-{
-       typedef void (*rcu_callback)(struct rcu_head *);
-
-       BUILD_BUG_ON(!__builtin_constant_p(offset));
-
-       /* See the kfree_rcu() header comment. */
-       BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
+       do { \
+               p = (typeof(*v) __force __rcu *)(v); \
+       } while (0)
 
-       kfree_call_rcu(head, (rcu_callback)offset);
-}
+/**
+ * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ *
+ * GCC-style initialization for an RCU-protected pointer in a structure field.
+ */
+#define RCU_POINTER_INITIALIZER(p, v) \
+               .p = (typeof(*v) __force __rcu *)(v)
 
 /*
  * Does the specified offset indicate that the corresponding rcu_head
@@ -936,7 +922,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
 #define __kfree_rcu(head, offset) \
        do { \
                BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
-               call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
+               kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
        } while (0)
 
 /**
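
A stand-alone sketch of the new RCU_POINTER_INITIALIZER() in a static structure initializer, mirroring the init_task change earlier in this merge (the example_* names are placeholders, not from the patch):

#include <linux/rcupdate.h>

struct example_data {
        int value;
};

struct example_cfg {
        int version;
        struct example_data __rcu *data;
};

static struct example_data example_boot_data = { .value = 1 };

/* Static init: no rcu_assign_pointer() needed before readers can run. */
static struct example_cfg example_cfg = {
        .version = 1,
        RCU_POINTER_INITIALIZER(data, &example_boot_data),
};

static int example_read_value(void)
{
        struct example_data *d;
        int v;

        rcu_read_lock();
        d = rcu_dereference(example_cfg.data);
        v = d ? d->value : 0;
        rcu_read_unlock();

        return v;
}
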
index 854dc4c..4e56a9c 100644 (file)
@@ -87,6 +87,10 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 #ifdef CONFIG_TINY_RCU
 
+static inline void rcu_preempt_note_context_switch(void)
+{
+}
+
 static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
        *delta_jiffies = ULONG_MAX;
@@ -95,6 +99,7 @@ static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
+void rcu_preempt_note_context_switch(void);
 int rcu_preempt_needs_cpu(void);
 
 static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
@@ -108,6 +113,7 @@ static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 static inline void rcu_note_context_switch(int cpu)
 {
        rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch();
 }
 
 /*
index a8e50e4..82a6739 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
 
 /* The feature bitmap for virtio rpmsg */
 #define VIRTIO_RPMSG_F_NS      0 /* RP supports name service notifications */
@@ -120,7 +122,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
 /**
  * struct rpmsg_endpoint - binds a local rpmsg address to its user
  * @rpdev: rpmsg channel device
+ * @refcount: when this drops to zero, the ept is deallocated
  * @cb: rx callback handler
+ * @cb_lock: must be taken before accessing/changing @cb
  * @addr: local rpmsg address
  * @priv: private data for the driver's use
  *
@@ -140,7 +144,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
  */
 struct rpmsg_endpoint {
        struct rpmsg_channel *rpdev;
+       struct kref refcount;
        rpmsg_rx_cb_t cb;
+       struct mutex cb_lock;
        u32 addr;
        void *priv;
 };
index 4059c0f..64d9df5 100644 (file)
@@ -1581,7 +1581,6 @@ struct task_struct {
 #endif
 #ifdef CONFIG_UPROBES
        struct uprobe_task *utask;
-       int uprobe_srcu_id;
 #endif
 };
 
@@ -1871,22 +1870,12 @@ static inline void rcu_copy_process(struct task_struct *p)
        INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-       if (prev->rcu_read_lock_nesting != 0)
-               rcu_preempt_note_context_switch();
-}
-
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-}
-
 #endif
 
 #ifdef CONFIG_SMP
@@ -1909,6 +1898,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
+
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
index b534a1b..642cb73 100644 (file)
@@ -225,14 +225,11 @@ enum {
        /* device driver is going to provide hardware time stamp */
        SKBTX_IN_PROGRESS = 1 << 2,
 
-       /* ensure the originating sk reference is available on driver level */
-       SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
-
        /* device driver supports TX zero-copy buffers */
-       SKBTX_DEV_ZEROCOPY = 1 << 4,
+       SKBTX_DEV_ZEROCOPY = 1 << 3,
 
        /* generate wifi status information (where possible) */
-       SKBTX_WIFI_STATUS = 1 << 5,
+       SKBTX_WIFI_STATUS = 1 << 4,
 };
 
 /*
index 717fb74..dd6f06b 100644 (file)
@@ -90,10 +90,6 @@ void kick_all_cpus_sync(void);
 void __init call_function_init(void);
 void generic_smp_call_function_single_interrupt(void);
 void generic_smp_call_function_interrupt(void);
-void ipi_call_lock(void);
-void ipi_call_unlock(void);
-void ipi_call_lock_irq(void);
-void ipi_call_unlock_irq(void);
 #else
 static inline void call_function_init(void) { }
 #endif
@@ -181,7 +177,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
        } while (0)
 
 static inline void smp_send_reschedule(int cpu) { }
-#define num_booting_cpus()                     1
 #define smp_prepare_boot_cpu()                 do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
index d3e1075..c73d144 100644 (file)
@@ -43,7 +43,7 @@ struct pxa2xx_spi_chip {
        void (*cs_control)(u32 command);
 };
 
-#ifdef CONFIG_ARCH_PXA
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
 
 #include <linux/clk.h>
 #include <mach/dma.h>
index 26e5b61..09a545a 100644 (file)
@@ -51,7 +51,8 @@ struct partial_page {
 struct splice_pipe_desc {
        struct page **pages;            /* page map */
        struct partial_page *partial;   /* pages[] may not be contig */
-       int nr_pages;                   /* number of pages in map */
+       int nr_pages;                   /* number of populated pages in map */
+       unsigned int nr_pages_max;      /* pages[] & partial[] arrays size */
        unsigned int flags;             /* splice flags */
        const struct pipe_buf_operations *ops;/* ops associated with output pipe */
        void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 /*
  * for dynamic pipe sizing
  */
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
-                               struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
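
A sketch of the grow/shrink pairing with the reworked helpers above; note that splice_shrink_spd() no longer takes the pipe. Everything named example_* is a placeholder and the page filling is elided.

#include <linux/pipe_fs_i.h>
#include <linux/splice.h>

static ssize_t example_fill_pipe(struct pipe_inode_info *pipe)
{
        struct page *pages[PIPE_DEF_BUFFERS];
        struct partial_page partial[PIPE_DEF_BUFFERS];
        struct splice_pipe_desc spd = {
                .pages          = pages,
                .partial        = partial,
                .nr_pages_max   = PIPE_DEF_BUFFERS,
                .ops            = &page_cache_pipe_buf_ops,
                .spd_release    = spd_release_page,
        };
        ssize_t ret;

        if (splice_grow_spd(pipe, &spd))        /* may enlarge the arrays to pipe->buffers */
                return -ENOMEM;

        /* ... fill spd.pages[]/spd.partial[] and set spd.nr_pages ... */

        ret = splice_to_pipe(pipe, &spd);

        splice_shrink_spd(&spd);                /* frees any grown arrays */
        return ret;
}
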
index ab8be90..f37fceb 100644 (file)
@@ -31,10 +31,10 @@ enum tick_nohz_mode {
  * struct tick_sched - sched tick emulation and no idle tick control/stats
  * @sched_timer:       hrtimer to schedule the periodic tick in high
  *                     resolution mode
- * @idle_tick:         Store the last idle tick expiry time when the tick
- *                     timer is modified for idle sleeps. This is necessary
+ * @last_tick:         Store the last tick expiry time when the tick
+ *                     timer is modified for nohz sleeps. This is necessary
  *                     to resume the tick timer operation in the timeline
- *                     when the CPU returns from idle
+ *                     when the CPU returns from nohz sleep.
  * @tick_stopped:      Indicator that the idle tick has been stopped
  * @idle_jiffies:      jiffies at the entry to idle for idle time accounting
  * @idle_calls:                Total number of idle calls
@@ -51,7 +51,7 @@ struct tick_sched {
        struct hrtimer                  sched_timer;
        unsigned long                   check_clocks;
        enum tick_nohz_mode             nohz_mode;
-       ktime_t                         idle_tick;
+       ktime_t                         last_tick;
        int                             inidle;
        int                             tick_stopped;
        unsigned long                   idle_jiffies;
index bd96ecd..802de56 100644 (file)
@@ -153,7 +153,7 @@ static inline void tracepoint_synchronize_unregister(void)
        }                                                               \
        static inline void trace_##name##_rcuidle(proto)                \
        {                                                               \
-               if (static_branch(&__tracepoint_##name.key))            \
+               if (static_key_false(&__tracepoint_##name.key))         \
                        __DO_TRACE(&__tracepoint_##name,                \
                                TP_PROTO(data_proto),                   \
                                TP_ARGS(data_args),                     \
index 60da41f..ddb419c 100644 (file)
@@ -7,8 +7,13 @@
  * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
  */
 
+#ifndef _LINUX_VGA_SWITCHEROO_H_
+#define _LINUX_VGA_SWITCHEROO_H_
+
 #include <linux/fb.h>
 
+struct pci_dev;
+
 enum vga_switcheroo_state {
        VGA_SWITCHEROO_OFF,
        VGA_SWITCHEROO_ON,
@@ -71,3 +76,4 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
 
 
 #endif
+#endif /* _LINUX_VGA_SWITCHEROO_H_ */
index 370d111..2039c5d 100644 (file)
@@ -2640,9 +2640,9 @@ struct v4l2_create_buffers {
 
 /* Experimental, these three ioctls may change over the next couple of kernel
    versions. */
-#define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 96, struct v4l2_enum_dv_timings)
-#define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 97, struct v4l2_dv_timings)
-#define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 98, struct v4l2_dv_timings_cap)
+#define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 98, struct v4l2_enum_dv_timings)
+#define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 99, struct v4l2_dv_timings)
+#define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 100, struct v4l2_dv_timings_cap)
 
 /* Reminder: when adding new ioctls please add support for them to
    drivers/media/video/v4l2-compat-ioctl32.c as well! */
index 66a7b57..3def64b 100644 (file)
@@ -1144,6 +1144,12 @@ struct extended_inquiry_info {
        __u8     data[240];
 } __packed;
 
+#define HCI_EV_KEY_REFRESH_COMPLETE    0x30
+struct hci_ev_key_refresh_complete {
+       __u8    status;
+       __le16  handle;
+} __packed;
+
 #define HCI_EV_IO_CAPA_REQUEST         0x31
 struct hci_ev_io_capa_request {
        bdaddr_t bdaddr;
index d6146b4..95374d1 100644 (file)
@@ -1425,7 +1425,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
        if (!ct || !nf_ct_is_untracked(ct)) {
-               nf_reset(skb);
+               nf_conntrack_put(skb->nfct);
                skb->nfct = &nf_ct_untracked_get()->ct_general;
                skb->nfctinfo = IP_CT_NEW;
                nf_conntrack_get(skb->nfct);
index 1937c7d..95e39b6 100644 (file)
@@ -1940,6 +1940,11 @@ enum ieee80211_rate_control_changed {
  *     to also unregister the device. If it returns 1, then mac80211
  *     will also go through the regular complete restart on resume.
  *
+ * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is
+ *     modified. The reason is that device_set_wakeup_enable() is
+ *     supposed to be called when the configuration changes, not only
+ *     in suspend().
+ *
  * @add_interface: Called when a netdevice attached to the hardware is
  *     enabled. Because it is not called for monitor mode devices, @start
  *     and @stop must be implemented.
@@ -2966,6 +2971,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
  * ieee80211_generic_frame_duration - Calculate the duration field for a frame
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @band: the band to calculate the frame duration on
  * @frame_len: the length of the frame.
  * @rate: the rate at which the frame is going to be transmitted.
  *
index a88fb69..e1ce104 100644 (file)
@@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *e;
 
-       if (net->ct.nf_conntrack_event_cb == NULL)
+       if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
                return;
 
        e = nf_ct_ecache_find(ct);
index 928daf5..bcd525e 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index e4652fe..fecdf31 100644 (file)
@@ -912,6 +912,9 @@ struct sctp_transport {
                /* Is this structure kfree()able? */
                malloced:1;
 
+       /* Has this transport moved the ctsn since we last sacked */
+       __u32 sack_generation;
+
        struct flowi fl;
 
        /* This is the peer's IP address and port. */
@@ -1584,6 +1587,7 @@ struct sctp_association {
                 */
                __u8    sack_needed;     /* Do we need to sack the peer? */
                __u32   sack_cnt;
+               __u32   sack_generation;
 
                /* These are capabilities which our peer advertised.  */
                __u8    ecn_capable:1,      /* Can peer do ECN? */
index e7728bc..2c5d2b4 100644 (file)
@@ -117,7 +117,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
 int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+                    struct sctp_transport *trans);
 
 /* Mark this TSN and all lower as seen. */
 void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
index f4f1c96..10ce74f 100644 (file)
@@ -163,6 +163,8 @@ enum ata_command_set {
         ATAPI_COMMAND_SET = 1,
 };
 
+#define ATA_RESP_FIS_SIZE 24
+
 struct sata_device {
         enum   ata_command_set command_set;
         struct smp_resp        rps_resp; /* report_phy_sata_resp */
@@ -171,7 +173,7 @@ struct sata_device {
 
        struct ata_port *ap;
        struct ata_host ata_host;
-       struct ata_taskfile tf;
+       u8     fis[ATA_RESP_FIS_SIZE];
 };
 
 enum {
@@ -537,7 +539,7 @@ enum exec_status {
  */
 struct ata_task_resp {
        u16  frame_len;
-       u8   ending_fis[24];      /* dev to host or data-in */
+       u8   ending_fis[ATA_RESP_FIS_SIZE];       /* dev to host or data-in */
 };
 
 #define SAS_STATUS_BUF_SIZE 96
index 1e11985..ac06cc5 100644 (file)
@@ -134,10 +134,16 @@ struct scsi_cmnd {
 
 static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
+       struct scsi_driver **sdp;
+
        if (!cmd->request->rq_disk)
                return NULL;
 
-       return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
+       sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
+       if (!sdp)
+               return NULL;
+
+       return *sdp;
 }
 
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
index 6efb2e1..ba96988 100644 (file)
@@ -151,6 +151,7 @@ struct scsi_device {
                                           SD_LAST_BUGGY_SECTORS */
        unsigned no_read_disc_info:1;   /* Avoid READ_DISC_INFO cmds */
        unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
+       unsigned try_rc_10_first:1;     /* Try READ_CAPACITY_10 first */
        unsigned is_visible:1;  /* is the device visible in sysfs */
 
        DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
index ec3f910..0c3c2fb 100644 (file)
@@ -44,6 +44,7 @@ struct snd_tea575x_ops {
 
 struct snd_tea575x {
        struct v4l2_device *v4l2_dev;
+       struct v4l2_file_operations fops;
        struct video_device vd;         /* video device */
        int radio_nr;                   /* radio_nr */
        bool tea5759;                   /* 5759 chip is present */
@@ -62,7 +63,7 @@ struct snd_tea575x {
        int (*ext_init)(struct snd_tea575x *tea);
 };
 
-int snd_tea575x_init(struct snd_tea575x *tea);
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner);
 void snd_tea575x_exit(struct snd_tea575x *tea);
 
 #endif /* __SOUND_TEA575X_TUNER_H */
index d274734..5bde94d 100644 (file)
@@ -541,6 +541,50 @@ TRACE_EVENT(rcu_torture_read,
                  __entry->rcutorturename, __entry->rhp)
 );
 
+/*
+ * Tracepoint for _rcu_barrier() execution.  The string "s" describes
+ * the _rcu_barrier phase:
+ *     "Begin": rcu_barrier_callback() started.
+ *     "Check": rcu_barrier_callback() checking for piggybacking.
+ *     "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
+ *     "Inc1": rcu_barrier_callback() piggyback check counter incremented.
+ *     "Offline": rcu_barrier_callback() found offline CPU
+ *     "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
+ *     "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
+ *     "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
+ *     "CB": An rcu_barrier_callback() invoked a callback, not the last.
+ *     "LastCB": An rcu_barrier_callback() invoked the last callback.
+ *     "Inc2": rcu_barrier_callback() piggyback check counter incremented.
+ * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
+ * is the count of remaining callbacks, and "done" is the piggybacking count.
+ */
+TRACE_EVENT(rcu_barrier,
+
+       TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
+
+       TP_ARGS(rcuname, s, cpu, cnt, done),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(char *, s)
+               __field(int, cpu)
+               __field(int, cnt)
+               __field(unsigned long, done)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->s = s;
+               __entry->cpu = cpu;
+               __entry->cnt = cnt;
+               __entry->done = done;
+       ),
+
+       TP_printk("%s %s cpu %d remaining %d # %lu",
+                 __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
+                 __entry->done)
+);
+
 #else /* #ifdef CONFIG_RCU_TRACE */
 
 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
@@ -564,6 +608,7 @@ TRACE_EVENT(rcu_torture_read,
 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
        do { } while (0)
 #define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
index 7697249..c6bc2fa 100644 (file)
@@ -571,6 +571,7 @@ static inline void ftrace_test_probe_##call(void)                   \
 
 #undef __print_flags
 #undef __print_symbolic
+#undef __print_hex
 #undef __get_dynamic_array
 #undef __get_str
 
index 72fcd30..b303dfc 100644 (file)
@@ -255,12 +255,17 @@ int cgroup_lock_is_held(void)
 
 EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
 
+static int css_unbias_refcnt(int refcnt)
+{
+       return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS;
+}
+
 /* the current nr of refs, always >= 0 whether @css is deactivated or not */
 static int css_refcnt(struct cgroup_subsys_state *css)
 {
        int v = atomic_read(&css->refcnt);
 
-       return v >= 0 ? v : v - CSS_DEACT_BIAS;
+       return css_unbias_refcnt(v);
 }
 
 /* convenient tests for these bits */
@@ -896,13 +901,10 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
                mutex_unlock(&cgroup_mutex);
 
                /*
-                * We want to drop the active superblock reference from the
-                * cgroup creation after all the dentry refs are gone -
-                * kill_sb gets mighty unhappy otherwise.  Mark
-                * dentry->d_fsdata with cgroup_diput() to tell
-                * cgroup_d_release() to call deactivate_super().
+                * Drop the active superblock reference that we took when we
+                * created the cgroup
                 */
-               dentry->d_fsdata = cgroup_diput;
+               deactivate_super(cgrp->root->sb);
 
                /*
                 * if we're getting rid of the cgroup, refcount should ensure
@@ -928,13 +930,6 @@ static int cgroup_delete(const struct dentry *d)
        return 1;
 }
 
-static void cgroup_d_release(struct dentry *dentry)
-{
-       /* did cgroup_diput() tell me to deactivate super? */
-       if (dentry->d_fsdata == cgroup_diput)
-               deactivate_super(dentry->d_sb);
-}
-
 static void remove_dir(struct dentry *d)
 {
        struct dentry *parent = dget(d->d_parent);
@@ -1542,7 +1537,6 @@ static int cgroup_get_rootdir(struct super_block *sb)
        static const struct dentry_operations cgroup_dops = {
                .d_iput = cgroup_diput,
                .d_delete = cgroup_delete,
-               .d_release = cgroup_d_release,
        };
 
        struct inode *inode =
@@ -3889,8 +3883,12 @@ static void css_dput_fn(struct work_struct *work)
 {
        struct cgroup_subsys_state *css =
                container_of(work, struct cgroup_subsys_state, dput_work);
+       struct dentry *dentry = css->cgroup->dentry;
+       struct super_block *sb = dentry->d_sb;
 
-       dput(css->cgroup->dentry);
+       atomic_inc(&sb->s_active);
+       dput(dentry);
+       deactivate_super(sb);
 }
 
 static void init_cgroup_css(struct cgroup_subsys_state *css,
@@ -4982,10 +4980,12 @@ EXPORT_SYMBOL_GPL(__css_tryget);
 void __css_put(struct cgroup_subsys_state *css)
 {
        struct cgroup *cgrp = css->cgroup;
+       int v;
 
        rcu_read_lock();
-       atomic_dec(&css->refcnt);
-       switch (css_refcnt(css)) {
+       v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
+
+       switch (v) {
        case 1:
                if (notify_on_release(cgrp)) {
                        set_bit(CGRP_RELEASABLE, &cgrp->flags);
index 67b847d..1f91413 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/kmsg_dump.h>
 #include <linux/reboot.h>
 #include <linux/sched.h>
 #include <linux/sysrq.h>
@@ -2040,8 +2041,15 @@ static int kdb_env(int argc, const char **argv)
  */
 static int kdb_dmesg(int argc, const char **argv)
 {
-       char *syslog_data[4], *start, *end, c = '\0', *p;
-       int diag, logging, logsize, lines = 0, adjust = 0, n;
+       int diag;
+       int logging;
+       int lines = 0;
+       int adjust = 0;
+       int n = 0;
+       int skip = 0;
+       struct kmsg_dumper dumper = { .active = 1 };
+       size_t len;
+       char buf[201];
 
        if (argc > 2)
                return KDB_ARGCOUNT;
@@ -2064,22 +2072,10 @@ static int kdb_dmesg(int argc, const char **argv)
                kdb_set(2, setargs);
        }
 
-       /* syslog_data[0,1] physical start, end+1.  syslog_data[2,3]
-        * logical start, end+1. */
-       kdb_syslog_data(syslog_data);
-       if (syslog_data[2] == syslog_data[3])
-               return 0;
-       logsize = syslog_data[1] - syslog_data[0];
-       start = syslog_data[2];
-       end = syslog_data[3];
-#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
-       for (n = 0, p = start; p < end; ++p) {
-               c = *KDB_WRAP(p);
-               if (c == '\n')
-                       ++n;
-       }
-       if (c != '\n')
-               ++n;
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, 1, NULL, 0, NULL))
+               n++;
+
        if (lines < 0) {
                if (adjust >= n)
                        kdb_printf("buffer only contains %d lines, nothing "
@@ -2087,21 +2083,11 @@ static int kdb_dmesg(int argc, const char **argv)
                else if (adjust - lines >= n)
                        kdb_printf("buffer only contains %d lines, last %d "
                                   "lines printed\n", n, n - adjust);
-               if (adjust) {
-                       for (; start < end && adjust; ++start) {
-                               if (*KDB_WRAP(start) == '\n')
-                                       --adjust;
-                       }
-                       if (start < end)
-                               ++start;
-               }
-               for (p = start; p < end && lines; ++p) {
-                       if (*KDB_WRAP(p) == '\n')
-                               ++lines;
-               }
-               end = p;
+               skip = adjust;
+               lines = abs(lines);
        } else if (lines > 0) {
-               int skip = n - (adjust + lines);
+               skip = n - lines - adjust;
+               lines = abs(lines);
                if (adjust >= n) {
                        kdb_printf("buffer only contains %d lines, "
                                   "nothing printed\n", n);
@@ -2112,35 +2098,24 @@ static int kdb_dmesg(int argc, const char **argv)
                        kdb_printf("buffer only contains %d lines, first "
                                   "%d lines printed\n", n, lines);
                }
-               for (; start < end && skip; ++start) {
-                       if (*KDB_WRAP(start) == '\n')
-                               --skip;
-               }
-               for (p = start; p < end && lines; ++p) {
-                       if (*KDB_WRAP(p) == '\n')
-                               --lines;
-               }
-               end = p;
+       } else {
+               lines = n;
        }
-       /* Do a line at a time (max 200 chars) to reduce protocol overhead */
-       c = '\n';
-       while (start != end) {
-               char buf[201];
-               p = buf;
-               if (KDB_FLAG(CMD_INTERRUPT))
-                       return 0;
-               while (start < end && (c = *KDB_WRAP(start)) &&
-                      (p - buf) < sizeof(buf)-1) {
-                       ++start;
-                       *p++ = c;
-                       if (c == '\n')
-                               break;
+
+       if (skip >= n || skip < 0)
+               return 0;
+
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, 1, buf, sizeof(buf), &len)) {
+               if (skip) {
+                       skip--;
+                       continue;
                }
-               *p = '\0';
-               kdb_printf("%s", buf);
+               if (!lines--)
+                       break;
+
+               kdb_printf("%.*s\n", (int)len - 1, buf);
        }
-       if (c != '\n')
-               kdb_printf("\n");
 
        return 0;
 }
index 47c4e56..392ec6a 100644 (file)
@@ -205,7 +205,6 @@ extern char kdb_grep_string[];
 extern int kdb_grep_leading;
 extern int kdb_grep_trailing;
 extern char *kdb_cmds[];
-extern void kdb_syslog_data(char *syslog_data[]);
 extern unsigned long kdb_task_state_string(const char *);
 extern char kdb_task_state_char (const struct task_struct *);
 extern unsigned long kdb_task_state(const struct task_struct *p,
index f85c015..f1cf0ed 100644 (file)
@@ -253,9 +253,9 @@ perf_cgroup_match(struct perf_event *event)
        return !event->cgrp || event->cgrp == cpuctx->cgrp;
 }
 
-static inline void perf_get_cgroup(struct perf_event *event)
+static inline bool perf_tryget_cgroup(struct perf_event *event)
 {
-       css_get(&event->cgrp->css);
+       return css_tryget(&event->cgrp->css);
 }
 
 static inline void perf_put_cgroup(struct perf_event *event)
@@ -484,7 +484,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
        event->cgrp = cgrp;
 
        /* must be done before we fput() the file */
-       perf_get_cgroup(event);
+       if (!perf_tryget_cgroup(event)) {
+               event->cgrp = NULL;
+               ret = -ENOENT;
+               goto out;
+       }
 
        /*
         * all events in a group must monitor
@@ -1641,6 +1645,8 @@ perf_install_in_context(struct perf_event_context *ctx,
        lockdep_assert_held(&ctx->mutex);
 
        event->ctx = ctx;
+       if (event->cpu != -1)
+               event->cpu = cpu;
 
        if (!task) {
                /*
@@ -6248,6 +6254,8 @@ SYSCALL_DEFINE5(perf_event_open,
                }
        }
 
+       get_online_cpus();
+
        event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
                                 NULL, NULL);
        if (IS_ERR(event)) {
@@ -6300,7 +6308,7 @@ SYSCALL_DEFINE5(perf_event_open,
        /*
         * Get the target context (task or percpu):
         */
-       ctx = find_get_context(pmu, task, cpu);
+       ctx = find_get_context(pmu, task, event->cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto err_alloc;
@@ -6373,20 +6381,23 @@ SYSCALL_DEFINE5(perf_event_open,
        mutex_lock(&ctx->mutex);
 
        if (move_group) {
-               perf_install_in_context(ctx, group_leader, cpu);
+               synchronize_rcu();
+               perf_install_in_context(ctx, group_leader, event->cpu);
                get_ctx(ctx);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_install_in_context(ctx, sibling, cpu);
+                       perf_install_in_context(ctx, sibling, event->cpu);
                        get_ctx(ctx);
                }
        }
 
-       perf_install_in_context(ctx, event, cpu);
+       perf_install_in_context(ctx, event, event->cpu);
        ++ctx->generation;
        perf_unpin_context(ctx);
        mutex_unlock(&ctx->mutex);
 
+       put_online_cpus();
+
        event->owner = current;
 
        mutex_lock(&current->perf_event_mutex);
@@ -6415,6 +6426,7 @@ err_context:
 err_alloc:
        free_event(event);
 err_task:
+       put_online_cpus();
        if (task)
                put_task_struct(task);
 err_group_fd:
@@ -6475,6 +6487,39 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
+void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+{
+       struct perf_event_context *src_ctx;
+       struct perf_event_context *dst_ctx;
+       struct perf_event *event, *tmp;
+       LIST_HEAD(events);
+
+       src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
+       dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
+
+       mutex_lock(&src_ctx->mutex);
+       list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
+                                event_entry) {
+               perf_remove_from_context(event);
+               put_ctx(src_ctx);
+               list_add(&event->event_entry, &events);
+       }
+       mutex_unlock(&src_ctx->mutex);
+
+       synchronize_rcu();
+
+       mutex_lock(&dst_ctx->mutex);
+       list_for_each_entry_safe(event, tmp, &events, event_entry) {
+               list_del(&event->event_entry);
+               if (event->state >= PERF_EVENT_STATE_OFF)
+                       event->state = PERF_EVENT_STATE_INACTIVE;
+               perf_install_in_context(dst_ctx, event, dst_cpu);
+               get_ctx(dst_ctx);
+       }
+       mutex_unlock(&dst_ctx->mutex);
+}
+EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
+
 static void sync_child_event(struct perf_event *child_event,
                               struct task_struct *child)
 {
index 985be4d..f935327 100644 (file)
 #define UINSNS_PER_PAGE                        (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
 #define MAX_UPROBE_XOL_SLOTS           UINSNS_PER_PAGE
 
-static struct srcu_struct uprobes_srcu;
 static struct rb_root uprobes_tree = RB_ROOT;
 
 static DEFINE_SPINLOCK(uprobes_treelock);      /* serialize rbtree access */
 
 #define UPROBES_HASH_SZ        13
 
+/*
+ * We need separate register/unregister and mmap/munmap lock hashes because
+ * of mmap_sem nesting.
+ *
+ * uprobe_register() needs to install probes on (potentially) all processes
+ * and thus needs to acquire multiple mmap_sems (consecutively, not
+ * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
+ * for the particular process doing the mmap.
+ *
+ * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
+ * because of lock order against i_mmap_mutex. This means there's a hole in
+ * the register vma iteration where a mmap() can happen.
+ *
+ * Thus uprobe_register() can race with uprobe_mmap() and we can try and
+ * install a probe where one is already installed.
+ */
+
 /* serialize (un)register */
 static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
 
@@ -61,17 +77,6 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
  */
 static atomic_t uprobe_events = ATOMIC_INIT(0);
 
-/*
- * Maintain a temporary per vma info that can be used to search if a vma
- * has already been handled. This structure is introduced since extending
- * vm_area_struct wasnt recommended.
- */
-struct vma_info {
-       struct list_head        probe_list;
-       struct mm_struct        *mm;
-       loff_t                  vaddr;
-};
-
 struct uprobe {
        struct rb_node          rb_node;        /* node in the rb tree */
        atomic_t                ref;
@@ -100,7 +105,8 @@ static bool valid_vma(struct vm_area_struct *vma, bool is_register)
        if (!is_register)
                return true;
 
-       if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
+       if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
+                               == (VM_READ|VM_EXEC))
                return true;
 
        return false;
@@ -129,33 +135,17 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
 static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
 {
        struct mm_struct *mm = vma->vm_mm;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ptep;
-       spinlock_t *ptl;
        unsigned long addr;
-       int err = -EFAULT;
+       spinlock_t *ptl;
+       pte_t *ptep;
 
        addr = page_address_in_vma(page, vma);
        if (addr == -EFAULT)
-               goto out;
-
-       pgd = pgd_offset(mm, addr);
-       if (!pgd_present(*pgd))
-               goto out;
-
-       pud = pud_offset(pgd, addr);
-       if (!pud_present(*pud))
-               goto out;
-
-       pmd = pmd_offset(pud, addr);
-       if (!pmd_present(*pmd))
-               goto out;
+               return -EFAULT;
 
-       ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       ptep = page_check_address(page, mm, addr, &ptl, 0);
        if (!ptep)
-               goto out;
+               return -EAGAIN;
 
        get_page(kpage);
        page_add_new_anon_rmap(kpage, vma, addr);
@@ -174,10 +164,8 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page, struct
                try_to_free_swap(page);
        put_page(page);
        pte_unmap_unlock(ptep, ptl);
-       err = 0;
 
-out:
-       return err;
+       return 0;
 }
 
 /**
@@ -222,9 +210,8 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
        void *vaddr_old, *vaddr_new;
        struct vm_area_struct *vma;
        struct uprobe *uprobe;
-       loff_t addr;
        int ret;
-
+retry:
        /* Read the page with vaddr into memory */
        ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
        if (ret <= 0)
@@ -246,10 +233,6 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
        if (mapping != vma->vm_file->f_mapping)
                goto put_out;
 
-       addr = vma_address(vma, uprobe->offset);
-       if (vaddr != (unsigned long)addr)
-               goto put_out;
-
        ret = -ENOMEM;
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
        if (!new_page)
@@ -267,11 +250,7 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
        vaddr_new = kmap_atomic(new_page);
 
        memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
-
-       /* poke the new insn in, ASSUMES we don't cross page boundary */
-       vaddr &= ~PAGE_MASK;
-       BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
-       memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+       memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);
 
        kunmap_atomic(vaddr_new);
        kunmap_atomic(vaddr_old);
@@ -291,6 +270,8 @@ unlock_out:
 put_out:
        put_page(old_page);
 
+       if (unlikely(ret == -EAGAIN))
+               goto retry;
        return ret;
 }
 
@@ -312,7 +293,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
        void *vaddr_new;
        int ret;
 
-       ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
+       ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
        if (ret <= 0)
                return ret;
 
@@ -333,10 +314,20 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
        uprobe_opcode_t opcode;
        int result;
 
+       if (current->mm == mm) {
+               pagefault_disable();
+               result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
+                                                               sizeof(opcode));
+               pagefault_enable();
+
+               if (likely(result == 0))
+                       goto out;
+       }
+
        result = read_opcode(mm, vaddr, &opcode);
        if (result)
                return result;
-
+out:
        if (is_swbp_insn(&opcode))
                return 1;
 
@@ -355,7 +346,9 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
        int result;
-
+       /*
+        * See the comment near uprobes_hash().
+        */
        result = is_swbp_at_addr(mm, vaddr);
        if (result == 1)
                return -EEXIST;
@@ -520,7 +513,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
        uprobe->inode = igrab(inode);
        uprobe->offset = offset;
        init_rwsem(&uprobe->consumer_rwsem);
-       INIT_LIST_HEAD(&uprobe->pending_list);
 
        /* add to uprobes_tree, sorted on inode:offset */
        cur_uprobe = insert_uprobe(uprobe);
@@ -588,20 +580,22 @@ static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 }
 
 static int
-__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
-                       unsigned long nbytes, unsigned long offset)
+__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
+                       unsigned long nbytes, loff_t offset)
 {
-       struct file *filp = vma->vm_file;
        struct page *page;
        void *vaddr;
-       unsigned long off1;
-       unsigned long idx;
+       unsigned long off;
+       pgoff_t idx;
 
        if (!filp)
                return -EINVAL;
 
-       idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
-       off1 = offset &= ~PAGE_MASK;
+       if (!mapping->a_ops->readpage)
+               return -EIO;
+
+       idx = offset >> PAGE_CACHE_SHIFT;
+       off = offset & ~PAGE_MASK;
 
        /*
         * Ensure that the page that has the original instruction is
@@ -612,22 +606,20 @@ __copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *ins
                return PTR_ERR(page);
 
        vaddr = kmap_atomic(page);
-       memcpy(insn, vaddr + off1, nbytes);
+       memcpy(insn, vaddr + off, nbytes);
        kunmap_atomic(vaddr);
        page_cache_release(page);
 
        return 0;
 }
 
-static int
-copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
+static int copy_insn(struct uprobe *uprobe, struct file *filp)
 {
        struct address_space *mapping;
        unsigned long nbytes;
        int bytes;
 
-       addr &= ~PAGE_MASK;
-       nbytes = PAGE_SIZE - addr;
+       nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
        mapping = uprobe->inode->i_mapping;
 
        /* Instruction at end of binary; copy only available bytes */
@@ -638,13 +630,13 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
 
        /* Instruction at the page-boundary; copy bytes in second page */
        if (nbytes < bytes) {
-               if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
-                               bytes - nbytes, uprobe->offset + nbytes))
-                       return -ENOMEM;
-
+               int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
+                               bytes - nbytes, uprobe->offset + nbytes);
+               if (err)
+                       return err;
                bytes = nbytes;
        }
-       return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
+       return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
 }
 
 /*
@@ -672,9 +664,8 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
  */
 static int
 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
-                       struct vm_area_struct *vma, loff_t vaddr)
+                       struct vm_area_struct *vma, unsigned long vaddr)
 {
-       unsigned long addr;
        int ret;
 
        /*
@@ -687,20 +678,22 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
        if (!uprobe->consumers)
                return -EEXIST;
 
-       addr = (unsigned long)vaddr;
-
        if (!(uprobe->flags & UPROBE_COPY_INSN)) {
-               ret = copy_insn(uprobe, vma, addr);
+               ret = copy_insn(uprobe, vma->vm_file);
                if (ret)
                        return ret;
 
                if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
-                       return -EEXIST;
+                       return -ENOTSUPP;
 
-               ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
+               ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
                if (ret)
                        return ret;
 
+               /* write_opcode() assumes we don't cross a page boundary */
+               BUG_ON((uprobe->offset & ~PAGE_MASK) +
+                               UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+
                uprobe->flags |= UPROBE_COPY_INSN;
        }
 
@@ -713,7 +706,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
         * Hence increment before and decrement on failure.
         */
        atomic_inc(&mm->uprobes_state.count);
-       ret = set_swbp(&uprobe->arch, mm, addr);
+       ret = set_swbp(&uprobe->arch, mm, vaddr);
        if (ret)
                atomic_dec(&mm->uprobes_state.count);
 
@@ -721,27 +714,21 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 }
 
 static void
-remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
+remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-       if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
+       if (!set_orig_insn(&uprobe->arch, mm, vaddr, true))
                atomic_dec(&mm->uprobes_state.count);
 }
 
 /*
- * There could be threads that have hit the breakpoint and are entering the
- * notifier code and trying to acquire the uprobes_treelock. The thread
- * calling delete_uprobe() that is removing the uprobe from the rb_tree can
- * race with these threads and might acquire the uprobes_treelock compared
- * to some of the breakpoint hit threads. In such a case, the breakpoint
- * hit threads will not find the uprobe. The current unregistering thread
- * waits till all other threads have hit a breakpoint, to acquire the
- * uprobes_treelock before the uprobe is removed from the rbtree.
+ * There could be threads that have already hit the breakpoint. They
+ * will recheck the current insn and restart if find_uprobe() fails.
+ * See find_active_uprobe().
  */
 static void delete_uprobe(struct uprobe *uprobe)
 {
        unsigned long flags;
 
-       synchronize_srcu(&uprobes_srcu);
        spin_lock_irqsave(&uprobes_treelock, flags);
        rb_erase(&uprobe->rb_node, &uprobes_tree);
        spin_unlock_irqrestore(&uprobes_treelock, flags);
@@ -750,139 +737,135 @@ static void delete_uprobe(struct uprobe *uprobe)
        atomic_dec(&uprobe_events);
 }
 
-static struct vma_info *
-__find_next_vma_info(struct address_space *mapping, struct list_head *head,
-                       struct vma_info *vi, loff_t offset, bool is_register)
+struct map_info {
+       struct map_info *next;
+       struct mm_struct *mm;
+       unsigned long vaddr;
+};
+
+static inline struct map_info *free_map_info(struct map_info *info)
+{
+       struct map_info *next = info->next;
+       kfree(info);
+       return next;
+}
+
+static struct map_info *
+build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 {
+       unsigned long pgoff = offset >> PAGE_SHIFT;
        struct prio_tree_iter iter;
        struct vm_area_struct *vma;
-       struct vma_info *tmpvi;
-       unsigned long pgoff;
-       int existing_vma;
-       loff_t vaddr;
-
-       pgoff = offset >> PAGE_SHIFT;
+       struct map_info *curr = NULL;
+       struct map_info *prev = NULL;
+       struct map_info *info;
+       int more = 0;
 
+ again:
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (!valid_vma(vma, is_register))
                        continue;
 
-               existing_vma = 0;
-               vaddr = vma_address(vma, offset);
-
-               list_for_each_entry(tmpvi, head, probe_list) {
-                       if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
-                               existing_vma = 1;
-                               break;
-                       }
+               if (!prev && !more) {
+                       /*
+                        * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
+                        * reclaim. This is optimistic, no harm done if it fails.
+                        */
+                       prev = kmalloc(sizeof(struct map_info),
+                                       GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
+                       if (prev)
+                               prev->next = NULL;
                }
-
-               /*
-                * Another vma needs a probe to be installed. However skip
-                * installing the probe if the vma is about to be unlinked.
-                */
-               if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
-                       vi->mm = vma->vm_mm;
-                       vi->vaddr = vaddr;
-                       list_add(&vi->probe_list, head);
-
-                       return vi;
+               if (!prev) {
+                       more++;
+                       continue;
                }
-       }
 
-       return NULL;
-}
-
-/*
- * Iterate in the rmap prio tree  and find a vma where a probe has not
- * yet been inserted.
- */
-static struct vma_info *
-find_next_vma_info(struct address_space *mapping, struct list_head *head,
-               loff_t offset, bool is_register)
-{
-       struct vma_info *vi, *retvi;
+               if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
+                       continue;
 
-       vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
-       if (!vi)
-               return ERR_PTR(-ENOMEM);
+               info = prev;
+               prev = prev->next;
+               info->next = curr;
+               curr = info;
 
-       mutex_lock(&mapping->i_mmap_mutex);
-       retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
+               info->mm = vma->vm_mm;
+               info->vaddr = vma_address(vma, offset);
+       }
        mutex_unlock(&mapping->i_mmap_mutex);
 
-       if (!retvi)
-               kfree(vi);
+       if (!more)
+               goto out;
+
+       prev = curr;
+       while (curr) {
+               mmput(curr->mm);
+               curr = curr->next;
+       }
 
-       return retvi;
+       do {
+               info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
+               if (!info) {
+                       curr = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
+               info->next = prev;
+               prev = info;
+       } while (--more);
+
+       goto again;
+ out:
+       while (prev)
+               prev = free_map_info(prev);
+       return curr;
 }
 
 static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 {
-       struct list_head try_list;
-       struct vm_area_struct *vma;
-       struct address_space *mapping;
-       struct vma_info *vi, *tmpvi;
-       struct mm_struct *mm;
-       loff_t vaddr;
-       int ret;
+       struct map_info *info;
+       int err = 0;
 
-       mapping = uprobe->inode->i_mapping;
-       INIT_LIST_HEAD(&try_list);
+       info = build_map_info(uprobe->inode->i_mapping,
+                                       uprobe->offset, is_register);
+       if (IS_ERR(info))
+               return PTR_ERR(info);
 
-       ret = 0;
+       while (info) {
+               struct mm_struct *mm = info->mm;
+               struct vm_area_struct *vma;
 
-       for (;;) {
-               vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
-               if (!vi)
-                       break;
+               if (err)
+                       goto free;
 
-               if (IS_ERR(vi)) {
-                       ret = PTR_ERR(vi);
-                       break;
-               }
+               down_write(&mm->mmap_sem);
+               vma = find_vma(mm, (unsigned long)info->vaddr);
+               if (!vma || !valid_vma(vma, is_register))
+                       goto unlock;
 
-               mm = vi->mm;
-               down_read(&mm->mmap_sem);
-               vma = find_vma(mm, (unsigned long)vi->vaddr);
-               if (!vma || !valid_vma(vma, is_register)) {
-                       list_del(&vi->probe_list);
-                       kfree(vi);
-                       up_read(&mm->mmap_sem);
-                       mmput(mm);
-                       continue;
-               }
-               vaddr = vma_address(vma, uprobe->offset);
                if (vma->vm_file->f_mapping->host != uprobe->inode ||
-                                               vaddr != vi->vaddr) {
-                       list_del(&vi->probe_list);
-                       kfree(vi);
-                       up_read(&mm->mmap_sem);
-                       mmput(mm);
-                       continue;
-               }
-
-               if (is_register)
-                       ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
-               else
-                       remove_breakpoint(uprobe, mm, vi->vaddr);
+                   vma_address(vma, uprobe->offset) != info->vaddr)
+                       goto unlock;
 
-               up_read(&mm->mmap_sem);
-               mmput(mm);
                if (is_register) {
-                       if (ret && ret == -EEXIST)
-                               ret = 0;
-                       if (ret)
-                               break;
+                       err = install_breakpoint(uprobe, mm, vma, info->vaddr);
+                       /*
+                        * We can race against uprobe_mmap(), see the
+                        * comment near uprobes_hash().
+                        */
+                       if (err == -EEXIST)
+                               err = 0;
+               } else {
+                       remove_breakpoint(uprobe, mm, info->vaddr);
                }
+ unlock:
+               up_write(&mm->mmap_sem);
+ free:
+               mmput(mm);
+               info = free_map_info(info);
        }
 
-       list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
-               list_del(&vi->probe_list);
-               kfree(vi);
-       }
-
-       return ret;
+       return err;
 }
 
 static int __uprobe_register(struct uprobe *uprobe)
@@ -1048,7 +1031,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
 int uprobe_mmap(struct vm_area_struct *vma)
 {
        struct list_head tmp_list;
-       struct uprobe *uprobe, *u;
+       struct uprobe *uprobe;
        struct inode *inode;
        int ret, count;
 
@@ -1066,12 +1049,9 @@ int uprobe_mmap(struct vm_area_struct *vma)
        ret = 0;
        count = 0;
 
-       list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-               loff_t vaddr;
-
-               list_del(&uprobe->pending_list);
+       list_for_each_entry(uprobe, &tmp_list, pending_list) {
                if (!ret) {
-                       vaddr = vma_address(vma, uprobe->offset);
+                       loff_t vaddr = vma_address(vma, uprobe->offset);
 
                        if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
                                put_uprobe(uprobe);
@@ -1079,8 +1059,10 @@ int uprobe_mmap(struct vm_area_struct *vma)
                        }
 
                        ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
-
-                       /* Ignore double add: */
+                       /*
+                        * We can race against uprobe_register(), see the
+                        * comment near uprobes_hash().
+                        */
                        if (ret == -EEXIST) {
                                ret = 0;
 
@@ -1115,7 +1097,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
        struct list_head tmp_list;
-       struct uprobe *uprobe, *u;
+       struct uprobe *uprobe;
        struct inode *inode;
 
        if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
@@ -1132,11 +1114,8 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
        mutex_lock(uprobes_mmap_hash(inode));
        build_probe_list(inode, &tmp_list);
 
-       list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-               loff_t vaddr;
-
-               list_del(&uprobe->pending_list);
-               vaddr = vma_address(vma, uprobe->offset);
+       list_for_each_entry(uprobe, &tmp_list, pending_list) {
+               loff_t vaddr = vma_address(vma, uprobe->offset);
 
                if (vaddr >= start && vaddr < end) {
                        /*
@@ -1378,9 +1357,6 @@ void uprobe_free_utask(struct task_struct *t)
 {
        struct uprobe_task *utask = t->utask;
 
-       if (t->uprobe_srcu_id != -1)
-               srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);
-
        if (!utask)
                return;
 
@@ -1398,7 +1374,6 @@ void uprobe_free_utask(struct task_struct *t)
 void uprobe_copy_process(struct task_struct *t)
 {
        t->utask = NULL;
-       t->uprobe_srcu_id = -1;
 }
 
 /*
@@ -1417,7 +1392,6 @@ static struct uprobe_task *add_utask(void)
        if (unlikely(!utask))
                return NULL;
 
-       utask->active_uprobe = NULL;
        current->utask = utask;
        return utask;
 }
@@ -1479,41 +1453,64 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
        return false;
 }
 
+static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
+{
+       struct mm_struct *mm = current->mm;
+       struct uprobe *uprobe = NULL;
+       struct vm_area_struct *vma;
+
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, bp_vaddr);
+       if (vma && vma->vm_start <= bp_vaddr) {
+               if (valid_vma(vma, false)) {
+                       struct inode *inode;
+                       loff_t offset;
+
+                       inode = vma->vm_file->f_mapping->host;
+                       offset = bp_vaddr - vma->vm_start;
+                       offset += (vma->vm_pgoff << PAGE_SHIFT);
+                       uprobe = find_uprobe(inode, offset);
+               }
+
+               if (!uprobe)
+                       *is_swbp = is_swbp_at_addr(mm, bp_vaddr);
+       } else {
+               *is_swbp = -EFAULT;
+       }
+       up_read(&mm->mmap_sem);
+
+       return uprobe;
+}
+
 /*
  * Run handler and ask thread to singlestep.
  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
  */
 static void handle_swbp(struct pt_regs *regs)
 {
-       struct vm_area_struct *vma;
        struct uprobe_task *utask;
        struct uprobe *uprobe;
-       struct mm_struct *mm;
        unsigned long bp_vaddr;
+       int uninitialized_var(is_swbp);
 
-       uprobe = NULL;
        bp_vaddr = uprobe_get_swbp_addr(regs);
-       mm = current->mm;
-       down_read(&mm->mmap_sem);
-       vma = find_vma(mm, bp_vaddr);
-
-       if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
-               struct inode *inode;
-               loff_t offset;
-
-               inode = vma->vm_file->f_mapping->host;
-               offset = bp_vaddr - vma->vm_start;
-               offset += (vma->vm_pgoff << PAGE_SHIFT);
-               uprobe = find_uprobe(inode, offset);
-       }
-
-       srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
-       current->uprobe_srcu_id = -1;
-       up_read(&mm->mmap_sem);
+       uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
 
        if (!uprobe) {
-               /* No matching uprobe; signal SIGTRAP. */
-               send_sig(SIGTRAP, current, 0);
+               if (is_swbp > 0) {
+                       /* No matching uprobe; signal SIGTRAP. */
+                       send_sig(SIGTRAP, current, 0);
+               } else {
+                       /*
+                        * Either we raced with uprobe_unregister() or we can't
+                        * access this memory. The latter is only possible if
+                        * another thread plays with our ->mm. In both cases
+                        * we can simply restart. If this vma was unmapped we
+                        * can pretend this insn was not executed yet and get
+                        * the (correct) SIGSEGV after restart.
+                        */
+                       instruction_pointer_set(regs, bp_vaddr);
+               }
                return;
        }
 
@@ -1620,7 +1617,6 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
                utask->state = UTASK_BP_HIT;
 
        set_thread_flag(TIF_UPROBE);
-       current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);
 
        return 1;
 }
@@ -1655,7 +1651,6 @@ static int __init init_uprobes(void)
                mutex_init(&uprobes_mutex[i]);
                mutex_init(&uprobes_mmap_mutex[i]);
        }
-       init_srcu_struct(&uprobes_srcu);
 
        return register_die_notifier(&uprobe_exception_nb);
 }
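
The reworked build_map_info() above follows a two-pass allocation idiom: while i_mmap_mutex is held, map_info nodes are only allocated opportunistically (GFP_NOWAIT), the failures are counted, the lock is dropped, the shortfall is allocated with GFP_KERNEL, and the walk is redone. The following is a rough userspace sketch of that idiom only; the pthread mutex, the walk_mappings() helper and the main() driver are illustrative stand-ins, not the kernel API.

/* Userspace sketch of the two-pass allocation in build_map_info():
 * opportunistic allocation under the lock, blocking allocation for the
 * shortfall outside it, then retry the whole walk. Not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct map_info {
	struct map_info *next;
	unsigned long vaddr;
};

static pthread_mutex_t i_mmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the vma walk: consume pre-allocated nodes from *pool and
 * return how many entries could not be recorded for lack of nodes. */
static int walk_mappings(int nr_vmas, struct map_info **pool, struct map_info **out)
{
	int more = 0;

	for (int i = 0; i < nr_vmas; i++) {
		struct map_info *info = *pool;

		if (!info) {			/* pool exhausted: note it, retry later */
			more++;
			continue;
		}
		*pool = info->next;
		info->vaddr = 0x1000UL * i;	/* record this "mapping" */
		info->next = *out;
		*out = info;
	}
	return more;
}

static struct map_info *build_map_info(int nr_vmas)
{
	struct map_info *pool = NULL, *out = NULL;
	int more;

again:
	pthread_mutex_lock(&i_mmap_lock);	/* must not block on allocations here */
	more = walk_mappings(nr_vmas, &pool, &out);
	pthread_mutex_unlock(&i_mmap_lock);

	if (more) {
		/* recycle what was built, top the pool up outside the lock, retry */
		while (out) {
			struct map_info *next = out->next;

			out->next = pool;
			pool = out;
			out = next;
		}
		do {
			struct map_info *info = malloc(sizeof(*info));

			if (!info)
				goto out;	/* the kernel returns ERR_PTR(-ENOMEM) */
			info->next = pool;
			pool = info;
		} while (--more);
		goto again;
	}
out:
	while (pool) {				/* drop unused pre-allocated nodes */
		struct map_info *next = pool->next;

		free(pool);
		pool = next;
	}
	return out;
}

int main(void)
{
	for (struct map_info *info = build_map_info(3); info; ) {
		struct map_info *next = info->next;

		printf("vaddr = %#lx\n", info->vaddr);
		free(info);
		info = next;
	}
	return 0;
}
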
index 34867cc..2f59cc3 100644 (file)
@@ -72,6 +72,18 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
+               /*
+                * If we are the last child process in a pid namespace to be
+                * reaped, notify the reaper sleeping in zap_pid_ns_processes().
+                */
+               if (IS_ENABLED(CONFIG_PID_NS)) {
+                       struct task_struct *parent = p->real_parent;
+
+                       if ((task_active_pid_ns(parent)->child_reaper == parent) &&
+                           list_empty(&parent->children) &&
+                           (parent->flags & PF_EXITING))
+                               wake_up_process(parent);
+               }
        }
        list_del_rcu(&p->thread_group);
 }
@@ -643,6 +655,7 @@ static void exit_mm(struct task_struct * tsk)
        mm_release(tsk, mm);
        if (!mm)
                return;
+       sync_mm_rss(mm);
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
@@ -719,12 +732,6 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
 
                zap_pid_ns_processes(pid_ns);
                write_lock_irq(&tasklist_lock);
-               /*
-                * We can not clear ->child_reaper or leave it alone.
-                * There may by stealth EXIT_DEAD tasks on ->children,
-                * forget_original_parent() must move them somewhere.
-                */
-               pid_ns->child_reaper = init_pid_ns.child_reaper;
        } else if (father->signal->has_child_subreaper) {
                struct task_struct *reaper;
 
index ab5211b..f00e319 100644 (file)
@@ -304,12 +304,17 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        }
 
        err = arch_dup_task_struct(tsk, orig);
-       if (err)
-               goto out;
 
+       /*
+        * We defer looking at err, because we will need this setup
+        * for the cleanup path to work correctly.
+        */
        tsk->stack = ti;
-
        setup_thread_stack(tsk, orig);
+
+       if (err)
+               goto out;
+
        clear_user_return_notifier(tsk);
        clear_tsk_need_resched(tsk);
        stackend = end_of_stack(tsk);
index ae34bf5..6db7a5e 100644 (file)
@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
        return 0;
 }
 
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+
+       return ktime_get_update_offsets(offs_real, offs_boot);
+}
+
 /*
  * Retrigger next event is called after clock was set
  *
@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 static void retrigger_next_event(void *arg)
 {
        struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
-       struct timespec realtime_offset, xtim, wtm, sleep;
 
        if (!hrtimer_hres_active())
                return;
 
-       /* Optimized out for !HIGH_RES */
-       get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
-       set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-       /* Adjust CLOCK_REALTIME offset */
        raw_spin_lock(&base->lock);
-       base->clock_base[HRTIMER_BASE_REALTIME].offset =
-               timespec_to_ktime(realtime_offset);
-       base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-               timespec_to_ktime(sleep);
-
+       hrtimer_update_base(base);
        hrtimer_force_reprogram(base, 0);
        raw_spin_unlock(&base->lock);
 }
@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
                base->clock_base[i].resolution = KTIME_HIGH_RES;
 
        tick_setup_sched_timer();
-
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
        local_irq_restore(flags);
        return 1;
 }
 
+/*
+ * Called from timekeeping code to reprogram the hrtimer interrupt
+ * device. If called from the timer interrupt context we defer it to
+ * softirq context.
+ */
+void clock_was_set_delayed(void)
+{
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       cpu_base->clock_was_set = 1;
+       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+}
+
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        cpu_base->nr_events++;
        dev->next_event.tv64 = KTIME_MAX;
 
-       entry_time = now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       entry_time = now = hrtimer_update_base(cpu_base);
 retry:
        expires_next.tv64 = KTIME_MAX;
-
-       raw_spin_lock(&cpu_base->lock);
        /*
         * We set expires_next to KTIME_MAX here with cpu_base->lock
         * held to prevent that a timer is enqueued in our queue via
@@ -1330,8 +1339,12 @@ retry:
         * We need to prevent that we loop forever in the hrtimer
         * interrupt routine. We give it 3 attempts to avoid
         * overreacting on some spurious event.
+        *
+        * Acquire base lock for updating the offsets and retrieving
+        * the current time.
         */
-       now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       now = hrtimer_update_base(cpu_base);
        cpu_base->nr_retries++;
        if (++retries < 3)
                goto retry;
@@ -1343,6 +1356,7 @@ retry:
         */
        cpu_base->nr_hangs++;
        cpu_base->hang_detected = 1;
+       raw_spin_unlock(&cpu_base->lock);
        delta = ktime_sub(now, entry_time);
        if (delta.tv64 > cpu_base->max_hang_time.tv64)
                cpu_base->max_hang_time = delta;
@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
 
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       if (cpu_base->clock_was_set) {
+               cpu_base->clock_was_set = 0;
+               clock_was_set();
+       }
+
        hrtimer_peek_ahead_timers();
 }
 
index 16b20e3..b3c7fd5 100644 (file)
@@ -184,11 +184,31 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        }
        read_unlock(&tasklist_lock);
 
+       /* Firstly reap the EXIT_ZOMBIE children we may have. */
        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = sys_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);
 
+       /*
+        * sys_wait4() above can't reap the TASK_DEAD children.
+        * Make sure they all go away, see __unhash_process().
+        */
+       for (;;) {
+               bool need_wait = false;
+
+               read_lock(&tasklist_lock);
+               if (!list_empty(&current->children)) {
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       need_wait = true;
+               }
+               read_unlock(&tasklist_lock);
+
+               if (!need_wait)
+                       break;
+               schedule();
+       }
+
        if (pid_ns->reboot)
                current->signal->group_exit_code = pid_ns->reboot;
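
The wait loop added to zap_pid_ns_processes() pairs with the wake_up_process() call added to __unhash_process() in kernel/exit.c further up: the namespace reaper sets itself TASK_UNINTERRUPTIBLE while it still sees entries on ->children under tasklist_lock, schedules, and is woken when the last child is unhashed. A rough userspace analogue of that handshake, written with a condition variable (the function names mirror the kernel ones, but this is illustrative code only):

/* Userspace analogue of the reaper handshake: the last child to be
 * unhashed wakes the pid-namespace init sleeping in zap_pid_ns_processes().
 * Illustrative only; the kernel open-codes this with task states. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t children_gone = PTHREAD_COND_INITIALIZER;
static int nr_children = 3;

/* called as each child is reaped; wakes the reaper on the last one */
static void unhash_child(void)
{
	pthread_mutex_lock(&tasklist_lock);
	if (--nr_children == 0)
		pthread_cond_signal(&children_gone);	/* ~ wake_up_process(parent) */
	pthread_mutex_unlock(&tasklist_lock);
}

/* the namespace init: wait until ->children is really empty */
static void zap_pid_ns_processes(void)
{
	pthread_mutex_lock(&tasklist_lock);
	while (nr_children != 0)			/* ~ the re-check + schedule() loop */
		pthread_cond_wait(&children_gone, &tasklist_lock);
	pthread_mutex_unlock(&tasklist_lock);
	printf("all children reaped, namespace init can exit\n");
}

static void *reaper_thread(void *arg)
{
	(void)arg;
	zap_pid_ns_processes();
	return NULL;
}

int main(void)
{
	pthread_t reaper;

	pthread_create(&reaper, NULL, reaper_thread, NULL);
	for (int i = 0; i < 3; i++)
		unhash_child();
	pthread_join(reaper, NULL);
	return 0;
}
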
 
index 8b53db3..238025f 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
-#include <scsi/scsi_scan.h>
 
 #include "power.h"
 
@@ -748,13 +747,6 @@ static int software_resume(void)
                        async_synchronize_full();
                }
 
-               /*
-                * We can't depend on SCSI devices being available after loading
-                * one of their modules until scsi_complete_async_scans() is
-                * called and the resume device usually is a SCSI one.
-                */
-               scsi_complete_async_scans();
-
                swsusp_resume_device = name_to_dev_t(resume_file);
                if (!swsusp_resume_device) {
                        error = -ENODEV;
index 91b0fd0..4ed81e7 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
-#include <scsi/scsi_scan.h>
 
 #include <asm/uaccess.h>
 
@@ -84,7 +83,6 @@ static int snapshot_open(struct inode *inode, struct file *filp)
                 * appear.
                 */
                wait_for_device_probe();
-               scsi_complete_async_scans();
 
                data->swap = -1;
                data->mode = O_WRONLY;
index 32462d2..ac4bc9e 100644 (file)
@@ -193,12 +193,21 @@ static int console_may_schedule;
  * separated by ',', and find the message after the ';' character.
  */
 
+enum log_flags {
+       LOG_NOCONS      = 1,    /* already flushed, do not print to console */
+       LOG_NEWLINE     = 2,    /* text ended with a newline */
+       LOG_PREFIX      = 4,    /* text started with a prefix */
+       LOG_CONT        = 8,    /* text is a fragment of a continuation line */
+};
+
 struct log {
        u64 ts_nsec;            /* timestamp in nanoseconds */
        u16 len;                /* length of entire record */
        u16 text_len;           /* length of text buffer */
        u16 dict_len;           /* length of dictionary buffer */
-       u16 level;              /* syslog level + facility */
+       u8 facility;            /* syslog facility */
+       u8 flags:5;             /* internal record flags */
+       u8 level:3;             /* syslog level */
 };
 
 /*
@@ -210,6 +219,8 @@ static DEFINE_RAW_SPINLOCK(logbuf_lock);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
 static u32 syslog_idx;
+static enum log_flags syslog_prev;
+static size_t syslog_partial;
 
 /* index and sequence number of the first record stored in the buffer */
 static u64 log_first_seq;
@@ -227,10 +238,10 @@ static u32 clear_idx;
 #define LOG_LINE_MAX 1024
 
 /* record buffer */
-#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 #define LOG_ALIGN 4
 #else
-#define LOG_ALIGN 8
+#define LOG_ALIGN __alignof__(struct log)
 #endif
 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -286,6 +297,7 @@ static u32 log_next(u32 idx)
 
 /* insert record into the buffer, discard old ones, update heads */
 static void log_store(int facility, int level,
+                     enum log_flags flags, u64 ts_nsec,
                      const char *dict, u16 dict_len,
                      const char *text, u16 text_len)
 {
@@ -329,8 +341,13 @@ static void log_store(int facility, int level,
        msg->text_len = text_len;
        memcpy(log_dict(msg), dict, dict_len);
        msg->dict_len = dict_len;
-       msg->level = (facility << 3) | (level & 7);
-       msg->ts_nsec = local_clock();
+       msg->facility = facility;
+       msg->level = level & 7;
+       msg->flags = flags & 0x1f;
+       if (ts_nsec > 0)
+               msg->ts_nsec = ts_nsec;
+       else
+               msg->ts_nsec = local_clock();
        memset(log_dict(msg) + dict_len, 0, pad_len);
        msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
 
@@ -414,21 +431,23 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
        if (!user)
                return -EBADF;
 
-       mutex_lock(&user->lock);
-       raw_spin_lock(&logbuf_lock);
+       ret = mutex_lock_interruptible(&user->lock);
+       if (ret)
+               return ret;
+       raw_spin_lock_irq(&logbuf_lock);
        while (user->seq == log_next_seq) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
-                       raw_spin_unlock(&logbuf_lock);
+                       raw_spin_unlock_irq(&logbuf_lock);
                        goto out;
                }
 
-               raw_spin_unlock(&logbuf_lock);
+               raw_spin_unlock_irq(&logbuf_lock);
                ret = wait_event_interruptible(log_wait,
                                               user->seq != log_next_seq);
                if (ret)
                        goto out;
-               raw_spin_lock(&logbuf_lock);
+               raw_spin_lock_irq(&logbuf_lock);
        }
 
        if (user->seq < log_first_seq) {
@@ -436,7 +455,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
                user->idx = log_first_idx;
                user->seq = log_first_seq;
                ret = -EPIPE;
-               raw_spin_unlock(&logbuf_lock);
+               raw_spin_unlock_irq(&logbuf_lock);
                goto out;
        }
 
@@ -444,13 +463,13 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
        ts_usec = msg->ts_nsec;
        do_div(ts_usec, 1000);
        len = sprintf(user->buf, "%u,%llu,%llu;",
-                     msg->level, user->seq, ts_usec);
+                     (msg->facility << 3) | msg->level, user->seq, ts_usec);
 
        /* escape non-printable characters */
        for (i = 0; i < msg->text_len; i++) {
                unsigned char c = log_text(msg)[i];
 
-               if (c < ' ' || c >= 128)
+               if (c < ' ' || c >= 127 || c == '\\')
                        len += sprintf(user->buf + len, "\\x%02x", c);
                else
                        user->buf[len++] = c;
@@ -474,7 +493,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
                                continue;
                        }
 
-                       if (c < ' ' || c >= 128) {
+                       if (c < ' ' || c >= 127 || c == '\\') {
                                len += sprintf(user->buf + len, "\\x%02x", c);
                                continue;
                        }
@@ -486,7 +505,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 
        user->idx = log_next(user->idx);
        user->seq++;
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        if (len > count) {
                ret = -EINVAL;
@@ -513,7 +532,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
        if (offset)
                return -ESPIPE;
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        switch (whence) {
        case SEEK_SET:
                /* the first record */
@@ -537,7 +556,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
        default:
                ret = -EINVAL;
        }
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
        return ret;
 }
 
@@ -551,14 +570,14 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &log_wait, wait);
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        if (user->seq < log_next_seq) {
                /* return error when data has vanished underneath us */
                if (user->seq < log_first_seq)
                        ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
                ret = POLLIN|POLLRDNORM;
        }
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        return ret;
 }
@@ -582,10 +601,10 @@ static int devkmsg_open(struct inode *inode, struct file *file)
 
        mutex_init(&user->lock);
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        user->idx = log_first_idx;
        user->seq = log_first_seq;
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        file->private_data = user;
        return 0;
@@ -785,44 +804,64 @@ static bool printk_time;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static size_t print_time(u64 ts, char *buf)
+{
+       unsigned long rem_nsec;
+
+       if (!printk_time)
+               return 0;
+
+       if (!buf)
+               return 15;
+
+       rem_nsec = do_div(ts, 1000000000);
+       return sprintf(buf, "[%5lu.%06lu] ",
+                      (unsigned long)ts, rem_nsec / 1000);
+}
+
 static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
 {
        size_t len = 0;
+       unsigned int prefix = (msg->facility << 3) | msg->level;
 
        if (syslog) {
                if (buf) {
-                       len += sprintf(buf, "<%u>", msg->level);
+                       len += sprintf(buf, "<%u>", prefix);
                } else {
                        len += 3;
-                       if (msg->level > 9)
-                               len++;
-                       if (msg->level > 99)
+                       if (prefix > 999)
+                               len += 3;
+                       else if (prefix > 99)
+                               len += 2;
+                       else if (prefix > 9)
                                len++;
                }
        }
 
-       if (printk_time) {
-               if (buf) {
-                       unsigned long long ts = msg->ts_nsec;
-                       unsigned long rem_nsec = do_div(ts, 1000000000);
-
-                       len += sprintf(buf + len, "[%5lu.%06lu] ",
-                                        (unsigned long) ts, rem_nsec / 1000);
-               } else {
-                       len += 15;
-               }
-       }
-
+       len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
        return len;
 }
 
-static size_t msg_print_text(const struct log *msg, bool syslog,
-                            char *buf, size_t size)
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+                            bool syslog, char *buf, size_t size)
 {
        const char *text = log_text(msg);
        size_t text_size = msg->text_len;
+       bool prefix = true;
+       bool newline = true;
        size_t len = 0;
 
+       if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
+               prefix = false;
+
+       if (msg->flags & LOG_CONT) {
+               if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
+                       prefix = false;
+
+               if (!(msg->flags & LOG_NEWLINE))
+                       newline = false;
+       }
+
        do {
                const char *next = memchr(text, '\n', text_size);
                size_t text_len;
@@ -840,16 +879,22 @@ static size_t msg_print_text(const struct log *msg, bool syslog,
                            text_len + 1>= size - len)
                                break;
 
-                       len += print_prefix(msg, syslog, buf + len);
+                       if (prefix)
+                               len += print_prefix(msg, syslog, buf + len);
                        memcpy(buf + len, text, text_len);
                        len += text_len;
-                       buf[len++] = '\n';
+                       if (next || newline)
+                               buf[len++] = '\n';
                } else {
                        /* SYSLOG_ACTION_* buffer size only calculation */
-                       len += print_prefix(msg, syslog, NULL);
-                       len += text_len + 1;
+                       if (prefix)
+                               len += print_prefix(msg, syslog, NULL);
+                       len += text_len;
+                       if (next || newline)
+                               len++;
                }
 
+               prefix = true;
                text = next;
        } while (text);
 
@@ -860,26 +905,60 @@ static int syslog_print(char __user *buf, int size)
 {
        char *text;
        struct log *msg;
-       int len;
+       int len = 0;
 
        text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
        if (!text)
                return -ENOMEM;
 
-       raw_spin_lock_irq(&logbuf_lock);
-       if (syslog_seq < log_first_seq) {
-               /* messages are gone, move to first one */
-               syslog_seq = log_first_seq;
-               syslog_idx = log_first_idx;
-       }
-       msg = log_from_idx(syslog_idx);
-       len = msg_print_text(msg, true, text, LOG_LINE_MAX);
-       syslog_idx = log_next(syslog_idx);
-       syslog_seq++;
-       raw_spin_unlock_irq(&logbuf_lock);
+       while (size > 0) {
+               size_t n;
+               size_t skip;
 
-       if (len > 0 && copy_to_user(buf, text, len))
-               len = -EFAULT;
+               raw_spin_lock_irq(&logbuf_lock);
+               if (syslog_seq < log_first_seq) {
+                       /* messages are gone, move to first one */
+                       syslog_seq = log_first_seq;
+                       syslog_idx = log_first_idx;
+                       syslog_prev = 0;
+                       syslog_partial = 0;
+               }
+               if (syslog_seq == log_next_seq) {
+                       raw_spin_unlock_irq(&logbuf_lock);
+                       break;
+               }
+
+               skip = syslog_partial;
+               msg = log_from_idx(syslog_idx);
+               n = msg_print_text(msg, syslog_prev, true, text, LOG_LINE_MAX);
+               if (n - syslog_partial <= size) {
+                       /* message fits into buffer, move forward */
+                       syslog_idx = log_next(syslog_idx);
+                       syslog_seq++;
+                       syslog_prev = msg->flags;
+                       n -= syslog_partial;
+                       syslog_partial = 0;
+               } else if (!len) {
+                       /* partial read(), remember position */
+                       n = size;
+                       syslog_partial += n;
+               } else
+                       n = 0;
+               raw_spin_unlock_irq(&logbuf_lock);
+
+               if (!n)
+                       break;
+
+               if (copy_to_user(buf, text + skip, n)) {
+                       if (!len)
+                               len = -EFAULT;
+                       break;
+               }
+
+               len += n;
+               size -= n;
+               buf += n;
+       }
 
        kfree(text);
        return len;
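
The loop above introduces syslog_partial so that a read() smaller than one formatted record hands the record out in pieces and resumes from the remembered offset on the next call. A simplified userspace sketch of that bookkeeping follows; records[], rec_seq and rec_partial are stand-ins for the kernel's log buffer state, not its API.

/* Userspace sketch of the syslog_partial bookkeeping introduced above:
 * a read() smaller than one formatted record hands the record out in
 * pieces and resumes from the remembered offset. Not kernel code. */
#include <stdio.h>
#include <string.h>

static const char *records[] = { "first record", "second, longer record", NULL };
static unsigned int rec_seq;		/* next record to hand out */
static size_t rec_partial;		/* bytes of records[rec_seq] already read */

static size_t syslog_read(char *buf, size_t size)
{
	size_t len = 0;

	while (size > 0 && records[rec_seq]) {
		const char *text = records[rec_seq];
		size_t skip = rec_partial;
		size_t n = strlen(text) - skip;

		if (n <= size) {
			rec_seq++;		/* whole remainder fits, move on */
			rec_partial = 0;
		} else if (!len) {
			n = size;		/* partial read, remember position */
			rec_partial += n;
		} else {
			break;			/* never split a second record */
		}

		memcpy(buf + len, text + skip, n);
		len += n;
		size -= n;
	}
	return len;
}

int main(void)
{
	char buf[8];
	size_t n;

	while ((n = syslog_read(buf, sizeof(buf))) > 0)
		printf("chunk: \"%.*s\"\n", (int)n, buf);
	return 0;
}
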
@@ -899,6 +978,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                u64 next_seq;
                u64 seq;
                u32 idx;
+               enum log_flags prev;
 
                if (clear_seq < log_first_seq) {
                        /* messages are gone, move to first available one */
@@ -909,41 +989,47 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                /*
                 * Find first record that fits, including all following records,
                 * into the user-provided buffer for this dump.
-               */
+                */
                seq = clear_seq;
                idx = clear_idx;
+               prev = 0;
                while (seq < log_next_seq) {
                        struct log *msg = log_from_idx(idx);
 
-                       len += msg_print_text(msg, true, NULL, 0);
+                       len += msg_print_text(msg, prev, true, NULL, 0);
                        idx = log_next(idx);
                        seq++;
                }
+
+               /* move first record forward until length fits into the buffer */
                seq = clear_seq;
                idx = clear_idx;
+               prev = 0;
                while (len > size && seq < log_next_seq) {
                        struct log *msg = log_from_idx(idx);
 
-                       len -= msg_print_text(msg, true, NULL, 0);
+                       len -= msg_print_text(msg, prev, true, NULL, 0);
                        idx = log_next(idx);
                        seq++;
                }
 
-               /* last message in this dump */
+               /* last message fitting into this dump */
                next_seq = log_next_seq;
 
                len = 0;
+               prev = 0;
                while (len >= 0 && seq < next_seq) {
                        struct log *msg = log_from_idx(idx);
                        int textlen;
 
-                       textlen = msg_print_text(msg, true, text, LOG_LINE_MAX);
+                       textlen = msg_print_text(msg, prev, true, text, LOG_LINE_MAX);
                        if (textlen < 0) {
                                len = textlen;
                                break;
                        }
                        idx = log_next(idx);
                        seq++;
+                       prev = msg->flags;
 
                        raw_spin_unlock_irq(&logbuf_lock);
                        if (copy_to_user(buf + len, text, textlen))
@@ -956,6 +1042,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                                /* messages are gone, move to next one */
                                seq = log_first_seq;
                                idx = log_first_idx;
+                               prev = 0;
                        }
                }
        }
@@ -1027,6 +1114,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
        /* Clear ring buffer */
        case SYSLOG_ACTION_CLEAR:
                syslog_print_all(NULL, 0, true);
+               break;
        /* Disable logging to console */
        case SYSLOG_ACTION_CONSOLE_OFF:
                if (saved_console_loglevel == -1)
@@ -1059,6 +1147,8 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
                        /* messages are gone, move to first one */
                        syslog_seq = log_first_seq;
                        syslog_idx = log_first_idx;
+                       syslog_prev = 0;
+                       syslog_partial = 0;
                }
                if (from_file) {
                        /*
@@ -1068,19 +1158,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
                         */
                        error = log_next_idx - syslog_idx;
                } else {
-                       u64 seq;
-                       u32 idx;
+                       u64 seq = syslog_seq;
+                       u32 idx = syslog_idx;
+                       enum log_flags prev = syslog_prev;
 
                        error = 0;
-                       seq = syslog_seq;
-                       idx = syslog_idx;
                        while (seq < log_next_seq) {
                                struct log *msg = log_from_idx(idx);
 
-                               error += msg_print_text(msg, true, NULL, 0);
+                               error += msg_print_text(msg, prev, true, NULL, 0);
                                idx = log_next(idx);
                                seq++;
+                               prev = msg->flags;
                        }
+                       error -= syslog_partial;
                }
                raw_spin_unlock_irq(&logbuf_lock);
                break;
@@ -1101,21 +1192,6 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
        return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
 }
 
-#ifdef CONFIG_KGDB_KDB
-/* kdb dmesg command needs access to the syslog buffer.  do_syslog()
- * uses locks so it cannot be used during debugging.  Just tell kdb
- * where the start and end of the physical and logical logs are.  This
- * is equivalent to do_syslog(3).
- */
-void kdb_syslog_data(char *syslog_data[4])
-{
-       syslog_data[0] = log_buf;
-       syslog_data[1] = log_buf + log_buf_len;
-       syslog_data[2] = log_buf + log_first_idx;
-       syslog_data[3] = log_buf + log_next_idx;
-}
-#endif /* CONFIG_KGDB_KDB */
-
 static bool __read_mostly ignore_loglevel;
 
 static int __init ignore_loglevel_setup(char *str)
@@ -1259,22 +1335,98 @@ static inline void printk_delay(void)
        }
 }
 
+/*
+ * Continuation lines are buffered, and not committed to the record buffer
+ * until the line is complete, or a race forces it. The line fragments,
+ * though, are printed immediately to the consoles to ensure everything has
+ * reached the console in case of a kernel crash.
+ */
+static struct cont {
+       char buf[LOG_LINE_MAX];
+       size_t len;                     /* length == 0 means unused buffer */
+       size_t cons;                    /* bytes written to console */
+       struct task_struct *owner;      /* task of first print */
+       u64 ts_nsec;                    /* time of first print */
+       u8 level;                       /* log level of first message */
+       u8 facility;                    /* log facility of first message */
+       bool flushed:1;                 /* buffer sealed and committed */
+} cont;
+
+static void cont_flush(void)
+{
+       if (cont.flushed)
+               return;
+       if (cont.len == 0)
+               return;
+
+       log_store(cont.facility, cont.level, LOG_NOCONS, cont.ts_nsec,
+                 NULL, 0, cont.buf, cont.len);
+
+       cont.flushed = true;
+}
+
+static bool cont_add(int facility, int level, const char *text, size_t len)
+{
+       if (cont.len && cont.flushed)
+               return false;
+
+       if (cont.len + len > sizeof(cont.buf)) {
+               cont_flush();
+               return false;
+       }
+
+       if (!cont.len) {
+               cont.facility = facility;
+               cont.level = level;
+               cont.owner = current;
+               cont.ts_nsec = local_clock();
+               cont.cons = 0;
+               cont.flushed = false;
+       }
+
+       memcpy(cont.buf + cont.len, text, len);
+       cont.len += len;
+       return true;
+}
+
+static size_t cont_print_text(char *text, size_t size)
+{
+       size_t textlen = 0;
+       size_t len;
+
+       if (cont.cons == 0) {
+               textlen += print_time(cont.ts_nsec, text);
+               size -= textlen;
+       }
+
+       len = cont.len - cont.cons;
+       if (len > 0) {
+               if (len + 1 > size)
+                       len = size - 1;
+               memcpy(text + textlen, cont.buf + cont.cons, len);
+               textlen += len;
+               cont.cons = cont.len;
+       }
+
+       if (cont.flushed) {
+               text[textlen++] = '\n';
+               /* got everything, release buffer */
+               cont.len = 0;
+       }
+       return textlen;
+}
+
 asmlinkage int vprintk_emit(int facility, int level,
                            const char *dict, size_t dictlen,
                            const char *fmt, va_list args)
 {
        static int recursion_bug;
-       static char cont_buf[LOG_LINE_MAX];
-       static size_t cont_len;
-       static int cont_level;
-       static struct task_struct *cont_task;
        static char textbuf[LOG_LINE_MAX];
        char *text = textbuf;
        size_t text_len;
+       enum log_flags lflags = 0;
        unsigned long flags;
        int this_cpu;
-       bool newline = false;
-       bool prefix = false;
        int printed_len = 0;
 
        boot_delay_msec();
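
The struct cont machinery above buffers printk() fragments that lack a trailing newline and only commits them to the record buffer once the line completes, or when a conflicting writer (different task, new prefix, overflow) forces a flush. A condensed userspace sketch of that merging policy follows; the emit() driver and the owner ids are illustrative, not the kernel interface.

/* Userspace sketch (not the kernel code) of the continuation-line buffer:
 * fragments without a newline are merged per owner and emitted as one
 * record once the line completes or a conflicting writer forces a flush. */
#include <stdio.h>
#include <string.h>

static struct {
	char buf[1024];
	size_t len;		/* length == 0 means unused buffer */
	int owner;		/* stand-in for the owning task */
} cont;

static void cont_flush(void)
{
	if (!cont.len)
		return;
	printf("record(owner=%d): \"%.*s\"\n", cont.owner, (int)cont.len, cont.buf);
	cont.len = 0;
}

static void emit(int owner, const char *text)
{
	size_t len = strlen(text);
	int newline = len && text[len - 1] == '\n';

	if (newline)
		len--;			/* strip it, like vprintk_emit() does */

	/* a different owner or an overflowing fragment seals the pending line */
	if (cont.len && (cont.owner != owner || cont.len + len > sizeof(cont.buf)))
		cont_flush();
	if (len > sizeof(cont.buf) - cont.len)
		len = sizeof(cont.buf) - cont.len;	/* sketch-only truncation */

	if (!cont.len)
		cont.owner = owner;
	memcpy(cont.buf + cont.len, text, len);
	cont.len += len;

	if (newline)
		cont_flush();
}

int main(void)
{
	emit(1, "device ");		/* fragment, stays buffered          */
	emit(1, "probe ok\n");		/* completes the line -> one record  */
	emit(1, "half a line ");	/* fragment from task 1 ...          */
	emit(2, "unrelated message\n");	/* ... flushed early by task 2       */
	return 0;
}
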
@@ -1313,7 +1465,8 @@ asmlinkage int vprintk_emit(int facility, int level,
                recursion_bug = 0;
                printed_len += strlen(recursion_msg);
                /* emit KERN_CRIT message */
-               log_store(0, 2, NULL, 0, recursion_msg, printed_len);
+               log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
+                         NULL, 0, recursion_msg, printed_len);
        }
 
        /*
@@ -1325,7 +1478,7 @@ asmlinkage int vprintk_emit(int facility, int level,
        /* mark and strip a trailing newline */
        if (text_len && text[text_len-1] == '\n') {
                text_len--;
-               newline = true;
+               lflags |= LOG_NEWLINE;
        }
 
        /* strip syslog prefix and extract log level or control flags */
@@ -1335,7 +1488,7 @@ asmlinkage int vprintk_emit(int facility, int level,
                        if (level == -1)
                                level = text[1] - '0';
                case 'd':       /* KERN_DEFAULT */
-                       prefix = true;
+                       lflags |= LOG_PREFIX;
                case 'c':       /* KERN_CONT */
                        text += 3;
                        text_len -= 3;
@@ -1345,61 +1498,41 @@ asmlinkage int vprintk_emit(int facility, int level,
        if (level == -1)
                level = default_message_loglevel;
 
-       if (dict) {
-               prefix = true;
-               newline = true;
-       }
-
-       if (!newline) {
-               if (cont_len && (prefix || cont_task != current)) {
-                       /*
-                        * Flush earlier buffer, which is either from a
-                        * different thread, or when we got a new prefix.
-                        */
-                       log_store(facility, cont_level, NULL, 0, cont_buf, cont_len);
-                       cont_len = 0;
-               }
+       if (dict)
+               lflags |= LOG_PREFIX|LOG_NEWLINE;
 
-               if (!cont_len) {
-                       cont_level = level;
-                       cont_task = current;
-               }
+       if (!(lflags & LOG_NEWLINE)) {
+               /*
+                * Flush the conflicting buffer. An earlier newline was missing,
+                * or another task also prints continuation lines.
+                */
+               if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
+                       cont_flush();
 
-               /* buffer or append to earlier buffer from the same thread */
-               if (cont_len + text_len > sizeof(cont_buf))
-                       text_len = sizeof(cont_buf) - cont_len;
-               memcpy(cont_buf + cont_len, text, text_len);
-               cont_len += text_len;
+               /* buffer line if possible, otherwise store it right away */
+               if (!cont_add(facility, level, text, text_len))
+                       log_store(facility, level, lflags | LOG_CONT, 0,
+                                 dict, dictlen, text, text_len);
        } else {
-               if (cont_len && cont_task == current) {
-                       if (prefix) {
-                               /*
-                                * New prefix from the same thread; flush. We
-                                * either got no earlier newline, or we race
-                                * with an interrupt.
-                                */
-                               log_store(facility, cont_level,
-                                         NULL, 0, cont_buf, cont_len);
-                               cont_len = 0;
-                       }
+               bool stored = false;
 
-                       /* append to the earlier buffer and flush */
-                       if (cont_len + text_len > sizeof(cont_buf))
-                               text_len = sizeof(cont_buf) - cont_len;
-                       memcpy(cont_buf + cont_len, text, text_len);
-                       cont_len += text_len;
-                       log_store(facility, cont_level,
-                                 NULL, 0, cont_buf, cont_len);
-                       cont_len = 0;
-                       cont_task = NULL;
-                       printed_len = cont_len;
-               } else {
-                       /* ordinary single and terminated line */
-                       log_store(facility, level,
-                                 dict, dictlen, text, text_len);
-                       printed_len = text_len;
+               /*
+                * If an earlier newline was missing and it was the same task,
+                * either merge it with the current buffer and flush, or if
+                * there was a race with interrupts (prefix == true) then just
+                * flush it out and store this line separately.
+                */
+               if (cont.len && cont.owner == current) {
+                       if (!(lflags & LOG_PREFIX))
+                               stored = cont_add(facility, level, text, text_len);
+                       cont_flush();
                }
+
+               if (!stored)
+                       log_store(facility, level, lflags, 0,
+                                 dict, dictlen, text, text_len);
        }
+       printed_len += text_len;
 
        /*
         * Try to acquire and then immediately release the console semaphore.
@@ -1486,11 +1619,18 @@ EXPORT_SYMBOL(printk);
 #else
 
 #define LOG_LINE_MAX 0
+static struct cont {
+       size_t len;
+       size_t cons;
+       u8 level;
+       bool flushed:1;
+} cont;
 static struct log *log_from_idx(u32 idx) { return NULL; }
 static u32 log_next(u32 idx) { return 0; }
 static void call_console_drivers(int level, const char *text, size_t len) {}
-static size_t msg_print_text(const struct log *msg, bool syslog,
-                            char *buf, size_t size) { return 0; }
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+                            bool syslog, char *buf, size_t size) { return 0; }
+static size_t cont_print_text(char *text, size_t size) { return 0; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -1765,6 +1905,7 @@ void wake_up_klogd(void)
 /* the next printk record to write to the console */
 static u64 console_seq;
 static u32 console_idx;
+static enum log_flags console_prev;
 
 /**
  * console_unlock - unlock the console system
@@ -1782,6 +1923,7 @@ static u32 console_idx;
  */
 void console_unlock(void)
 {
+       static char text[LOG_LINE_MAX];
        static u64 seen_seq;
        unsigned long flags;
        bool wake_klogd = false;
@@ -1794,10 +1936,23 @@ void console_unlock(void)
 
        console_may_schedule = 0;
 
+       /* flush buffered message fragment immediately to console */
+       raw_spin_lock_irqsave(&logbuf_lock, flags);
+       if (cont.len && (cont.cons < cont.len || cont.flushed)) {
+               size_t len;
+
+               len = cont_print_text(text, sizeof(text));
+               raw_spin_unlock(&logbuf_lock);
+               stop_critical_timings();
+               call_console_drivers(cont.level, text, len);
+               start_critical_timings();
+               local_irq_restore(flags);
+       } else
+               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
 again:
        for (;;) {
                struct log *msg;
-               static char text[LOG_LINE_MAX];
                size_t len;
                int level;
 
@@ -1811,18 +1966,35 @@ again:
                        /* messages are gone, move to first one */
                        console_seq = log_first_seq;
                        console_idx = log_first_idx;
+                       console_prev = 0;
                }
-
+skip:
                if (console_seq == log_next_seq)
                        break;
 
                msg = log_from_idx(console_idx);
-               level = msg->level & 7;
-
-               len = msg_print_text(msg, false, text, sizeof(text));
+               if (msg->flags & LOG_NOCONS) {
+                       /*
+                        * Skip record we have buffered and already printed
+                        * directly to the console when we received it.
+                        */
+                       console_idx = log_next(console_idx);
+                       console_seq++;
+                       /*
+                        * We will get here again when we register a new
+                        * CON_PRINTBUFFER console. Clear the flag so we
+                        * will properly dump everything later.
+                        */
+                       msg->flags &= ~LOG_NOCONS;
+                       goto skip;
+               }
 
+               level = msg->level;
+               len = msg_print_text(msg, console_prev, false,
+                                    text, sizeof(text));
                console_idx = log_next(console_idx);
                console_seq++;
+               console_prev = msg->flags;
                raw_spin_unlock(&logbuf_lock);
 
                stop_critical_timings();        /* don't trace print latency */
@@ -2085,6 +2257,7 @@ void register_console(struct console *newcon)
                raw_spin_lock_irqsave(&logbuf_lock, flags);
                console_seq = syslog_seq;
                console_idx = syslog_idx;
+               console_prev = syslog_prev;
                raw_spin_unlock_irqrestore(&logbuf_lock, flags);
                /*
                 * We're about to replay the log buffer.  Only do this to the
@@ -2300,48 +2473,256 @@ module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
  * kmsg_dump - dump kernel log to kernel message dumpers.
  * @reason: the reason (oops, panic etc) for dumping
  *
- * Iterate through each of the dump devices and call the oops/panic
- * callbacks with the log buffer.
+ * Call each of the registered dumper's dump() callback, which can
+ * retrieve the kmsg records with kmsg_dump_get_line() or
+ * kmsg_dump_get_buffer().
  */
 void kmsg_dump(enum kmsg_dump_reason reason)
 {
-       u64 idx;
        struct kmsg_dumper *dumper;
-       const char *s1, *s2;
-       unsigned long l1, l2;
        unsigned long flags;
 
        if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
                return;
 
-       /* Theoretically, the log could move on after we do this, but
-          there's not a lot we can do about that. The new messages
-          will overwrite the start of what we dump. */
+       rcu_read_lock();
+       list_for_each_entry_rcu(dumper, &dump_list, list) {
+               if (dumper->max_reason && reason > dumper->max_reason)
+                       continue;
+
+               /* initialize iterator with data about the stored records */
+               dumper->active = true;
+
+               raw_spin_lock_irqsave(&logbuf_lock, flags);
+               dumper->cur_seq = clear_seq;
+               dumper->cur_idx = clear_idx;
+               dumper->next_seq = log_next_seq;
+               dumper->next_idx = log_next_idx;
+               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
+               /* invoke dumper which will iterate over records */
+               dumper->dump(dumper, reason);
+
+               /* reset iterator */
+               dumper->active = false;
+       }
+       rcu_read_unlock();
+}
+
+/**
+ * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
+ * record, and copy one record into the provided buffer.
+ *
+ * Consecutive calls will return the next available record moving
+ * towards the end of the buffer with the youngest messages.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ *
+ * The function is similar to kmsg_dump_get_line(), but grabs no locks.
+ */
+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+                              char *line, size_t size, size_t *len)
+{
+       struct log *msg;
+       size_t l = 0;
+       bool ret = false;
+
+       if (!dumper->active)
+               goto out;
+
+       if (dumper->cur_seq < log_first_seq) {
+               /* messages are gone, move to first available one */
+               dumper->cur_seq = log_first_seq;
+               dumper->cur_idx = log_first_idx;
+       }
+
+       /* last entry */
+       if (dumper->cur_seq >= log_next_seq)
+               goto out;
+
+       msg = log_from_idx(dumper->cur_idx);
+       l = msg_print_text(msg, 0, syslog, line, size);
+
+       dumper->cur_idx = log_next(dumper->cur_idx);
+       dumper->cur_seq++;
+       ret = true;
+out:
+       if (len)
+               *len = l;
+       return ret;
+}
+
+/**
+ * kmsg_dump_get_line - retrieve one kmsg log line
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
+ * record, and copy one record into the provided buffer.
+ *
+ * Consecutive calls will return the next available record moving
+ * towards the end of the buffer with the youngest messages.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+                       char *line, size_t size, size_t *len)
+{
+       unsigned long flags;
+       bool ret;
+
+       raw_spin_lock_irqsave(&logbuf_lock, flags);
+       ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
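
As a hedged illustration (not part of this patch), a dumper's dump() callback could walk the records line by line with this helper; the example_ names and the my_emit_line() sink are hypothetical, and <linux/kmsg_dump.h> is assumed:

#include <linux/kmsg_dump.h>

/* Hypothetical sink for one formatted record. */
static void my_emit_line(const char *line, size_t len);

static void example_dump(struct kmsg_dumper *dumper,
                         enum kmsg_dump_reason reason)
{
        static char line[1024];
        size_t len;

        /* Oldest record first; one record is copied per call. */
        while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
                my_emit_line(line, len);
}
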
+
+/**
+ * kmsg_dump_get_buffer - copy kmsg log lines
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @buf: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the end of the kmsg buffer and fill the provided buffer
+ * with as many of the *youngest* kmsg records as fit into it.
+ * If the buffer is large enough, all available kmsg records will be
+ * copied with a single call.
+ *
+ * Consecutive calls will fill the buffer with the next block of
+ * available older records, not including the earlier retrieved ones.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+                         char *buf, size_t size, size_t *len)
+{
+       unsigned long flags;
+       u64 seq;
+       u32 idx;
+       u64 next_seq;
+       u32 next_idx;
+       enum log_flags prev;
+       size_t l = 0;
+       bool ret = false;
+
+       if (!dumper->active)
+               goto out;
 
        raw_spin_lock_irqsave(&logbuf_lock, flags);
-       if (syslog_seq < log_first_seq)
-               idx = syslog_idx;
-       else
-               idx = log_first_idx;
+       if (dumper->cur_seq < log_first_seq) {
+               /* messages are gone, move to first available one */
+               dumper->cur_seq = log_first_seq;
+               dumper->cur_idx = log_first_idx;
+       }
+
+       /* last entry */
+       if (dumper->cur_seq >= dumper->next_seq) {
+               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+               goto out;
+       }
 
-       if (idx > log_next_idx) {
-               s1 = log_buf;
-               l1 = log_next_idx;
+       /* calculate length of entire buffer */
+       seq = dumper->cur_seq;
+       idx = dumper->cur_idx;
+       prev = 0;
+       while (seq < dumper->next_seq) {
+               struct log *msg = log_from_idx(idx);
+
+               l += msg_print_text(msg, prev, true, NULL, 0);
+               idx = log_next(idx);
+               seq++;
+               prev = msg->flags;
+       }
 
-               s2 = log_buf + idx;
-               l2 = log_buf_len - idx;
-       } else {
-               s1 = "";
-               l1 = 0;
+       /* move first record forward until length fits into the buffer */
+       seq = dumper->cur_seq;
+       idx = dumper->cur_idx;
+       prev = 0;
+       while (l > size && seq < dumper->next_seq) {
+               struct log *msg = log_from_idx(idx);
+
+               l -= msg_print_text(msg, prev, true, NULL, 0);
+               idx = log_next(idx);
+               seq++;
+               prev = msg->flags;
+       }
+
+       /* last message in next iteration */
+       next_seq = seq;
+       next_idx = idx;
+
+       l = 0;
+       prev = 0;
+       while (seq < dumper->next_seq) {
+               struct log *msg = log_from_idx(idx);
 
-               s2 = log_buf + idx;
-               l2 = log_next_idx - idx;
+               l += msg_print_text(msg, prev, syslog, buf + l, size - l);
+               idx = log_next(idx);
+               seq++;
+               prev = msg->flags;
        }
+
+       dumper->next_seq = next_seq;
+       dumper->next_idx = next_idx;
+       ret = true;
        raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+out:
+       if (len)
+               *len = l;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
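
A hedged sketch of the buffer-oriented variant, e.g. for a crash-log style backend that wants the youngest records in one block and then older blocks on subsequent calls; my_write_block() is a hypothetical sink:

static void my_write_block(const char *buf, size_t len);   /* hypothetical */

static void example_buffer_dump(struct kmsg_dumper *dumper,
                                enum kmsg_dump_reason reason)
{
        static char buf[4096];
        size_t len;

        /* Youngest records that fit into buf first, then older blocks. */
        while (kmsg_dump_get_buffer(dumper, false, buf, sizeof(buf), &len))
                my_write_block(buf, len);
}
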
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(dumper, &dump_list, list)
-               dumper->dump(dumper, reason, s1, l1, s2, l2);
-       rcu_read_unlock();
+/**
+ * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
+ * @dumper: registered kmsg dumper
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+ *
+ * The function is similar to kmsg_dump_rewind(), but grabs no locks.
+ */
+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+{
+       dumper->cur_seq = clear_seq;
+       dumper->cur_idx = clear_idx;
+       dumper->next_seq = log_next_seq;
+       dumper->next_idx = log_next_idx;
+}
+
+/**
+ * kmsg_dump_rewind - reset the iterator
+ * @dumper: registered kmsg dumper
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+ */
+void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&logbuf_lock, flags);
+       kmsg_dump_rewind_nolock(dumper);
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 }
+EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 #endif
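
Tying the pieces together, a hedged sketch of a two-pass dump() callback that uses kmsg_dump_rewind() to walk the same records twice, plus registration; the example_ names and my_emit_line() are hypothetical, while kmsg_dump_register() and KMSG_DUMP_OOPS are existing kernel interfaces:

static void my_emit_line(const char *line, size_t len);    /* hypothetical */

static void example_two_pass(struct kmsg_dumper *dumper,
                             enum kmsg_dump_reason reason)
{
        static char line[1024];
        size_t len, total = 0;

        /* First pass: see whether there is anything to write out at all. */
        while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
                total += len;
        if (!total)
                return;

        /* Rewind, then walk the very same records again and emit them. */
        kmsg_dump_rewind(dumper);
        while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
                my_emit_line(line, len);
}

static struct kmsg_dumper example_dumper = {
        .dump       = example_two_pass,
        .max_reason = KMSG_DUMP_OOPS,   /* skip reasons beyond an oops */
};

static int __init example_dumper_init(void)
{
        return kmsg_dump_register(&example_dumper);
}
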
index 95cba41..4e6a61b 100644 (file)
 
 #ifdef CONFIG_PREEMPT_RCU
 
+/*
+ * Preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+       current->rcu_read_lock_nesting++;
+       barrier();  /* critical section after entry code. */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+/*
+ * Preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+       struct task_struct *t = current;
+
+       if (t->rcu_read_lock_nesting != 1) {
+               --t->rcu_read_lock_nesting;
+       } else {
+               barrier();  /* critical section before exit code. */
+               t->rcu_read_lock_nesting = INT_MIN;
+               barrier();  /* assign before ->rcu_read_unlock_special load */
+               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+                       rcu_read_unlock_special(t);
+               barrier();  /* ->rcu_read_unlock_special load before assign */
+               t->rcu_read_lock_nesting = 0;
+       }
+#ifdef CONFIG_PROVE_LOCKING
+       {
+               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+               WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+       }
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
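
For reference, a hedged sketch of the reader-side pattern these primitives back under CONFIG_PREEMPT_RCU, where rcu_read_lock()/rcu_read_unlock() map onto them; example_data, example_ptr and use_value() are hypothetical:

#include <linux/rcupdate.h>

struct example_data {
        int value;
};
static struct example_data __rcu *example_ptr;
static void use_value(int v);                   /* hypothetical consumer */

static void example_reader(void)
{
        struct example_data *p;

        rcu_read_lock();                        /* -> __rcu_read_lock() */
        p = rcu_dereference(example_ptr);       /* valid only inside the section */
        if (p)
                use_value(p->value);
        rcu_read_unlock();                      /* -> __rcu_read_unlock() */
}
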
+
 /*
  * Check for a task exiting while in a preemptible-RCU read-side
  * critical section, clean up if so.  No need to issue warnings,
index 37a5444..547b1fe 100644 (file)
@@ -172,7 +172,7 @@ void rcu_irq_enter(void)
        local_irq_restore(flags);
 }
 
-#ifdef CONFIG_PROVE_RCU
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 /*
  * Test whether RCU thinks that the current CPU is idle.
@@ -183,7 +183,7 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
-#endif /* #ifdef CONFIG_PROVE_RCU */
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 /*
  * Test whether the current CPU was interrupted from idle.  Nested
index fc31a2d..918fd1e 100644 (file)
@@ -132,7 +132,6 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
        RCU_TRACE(.rcb.name = "rcu_preempt")
 };
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(void);
 static void rcu_report_exp_done(void);
 
@@ -351,8 +350,9 @@ static int rcu_initiate_boost(void)
                        rcu_preempt_ctrlblk.boost_tasks =
                                rcu_preempt_ctrlblk.gp_tasks;
                invoke_rcu_callbacks();
-       } else
+       } else {
                RCU_TRACE(rcu_initiate_boost_trace());
+       }
        return 1;
 }
 
@@ -526,24 +526,12 @@ void rcu_preempt_note_context_switch(void)
        local_irq_restore(flags);
 }
 
-/*
- * Tiny-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-       current->rcu_read_lock_nesting++;
-       barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
 /*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
        int empty;
        int empty_exp;
@@ -626,38 +614,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
        local_irq_restore(flags);
 }
 
-/*
- * Tiny-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-       struct task_struct *t = current;
-
-       barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
-       if (t->rcu_read_lock_nesting != 1)
-               --t->rcu_read_lock_nesting;
-       else {
-               t->rcu_read_lock_nesting = INT_MIN;
-               barrier();  /* assign before ->rcu_read_unlock_special load */
-               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-                       rcu_read_unlock_special(t);
-               barrier();  /* ->rcu_read_unlock_special load before assign */
-               t->rcu_read_lock_nesting = 0;
-       }
-#ifdef CONFIG_PROVE_LOCKING
-       {
-               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-               WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-       }
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 /*
  * Check for a quiescent state from the current CPU.  When a task blocks,
  * the task is recorded in the rcu_preempt_ctrlblk structure, which is
@@ -823,9 +779,9 @@ void synchronize_rcu_expedited(void)
                rpcp->exp_tasks = NULL;
 
        /* Wait for tail of ->blkd_tasks list to drain. */
-       if (!rcu_preempted_readers_exp())
+       if (!rcu_preempted_readers_exp()) {
                local_irq_restore(flags);
-       else {
+       } else {
                rcu_initiate_boost();
                local_irq_restore(flags);
                wait_event(sync_rcu_preempt_exp_wq,
@@ -846,8 +802,6 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  */
 int rcu_preempt_needs_cpu(void)
 {
-       if (!rcu_preempt_running_reader())
-               rcu_preempt_cpu_qs();
        return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
 }
 
index e66b34a..25b1503 100644 (file)
@@ -49,8 +49,7 @@
 #include <asm/byteorder.h>
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
-             "Josh Triplett <josh@freedesktop.org>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
 
 static int nreaders = -1;      /* # reader threads, defaults to 2*ncpus */
 static int nfakewriters = 4;   /* # fake writer threads */
@@ -206,6 +205,7 @@ static unsigned long boost_starttime;       /* jiffies of next boost test start. */
 DEFINE_MUTEX(boost_mutex);             /* protect setting boost_starttime */
                                        /*  and boost task create/destroy. */
 static atomic_t barrier_cbs_count;     /* Barrier callbacks registered. */
+static bool barrier_phase;             /* Test phase. */
 static atomic_t barrier_cbs_invoked;   /* Barrier callbacks invoked. */
 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
@@ -407,8 +407,9 @@ rcu_torture_cb(struct rcu_head *p)
        if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                rcu_torture_free(rp);
-       } else
+       } else {
                cur_ops->deferred_free(rp);
+       }
 }
 
 static int rcu_no_completed(void)
@@ -635,6 +636,17 @@ static void srcu_torture_synchronize(void)
        synchronize_srcu(&srcu_ctl);
 }
 
+static void srcu_torture_call(struct rcu_head *head,
+                             void (*func)(struct rcu_head *head))
+{
+       call_srcu(&srcu_ctl, head, func);
+}
+
+static void srcu_torture_barrier(void)
+{
+       srcu_barrier(&srcu_ctl);
+}
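
A hedged sketch of the call_srcu()/srcu_barrier() pairing these new torture ops exercise; the example_ names are hypothetical and error handling is omitted:

#include <linux/srcu.h>

static struct srcu_struct example_srcu;
static struct rcu_head example_head;

static void example_cb(struct rcu_head *head)
{
        /* Runs once a full SRCU grace period has elapsed. */
}

static void example_srcu_user(void)
{
        init_srcu_struct(&example_srcu);
        call_srcu(&example_srcu, &example_head, example_cb);
        srcu_barrier(&example_srcu);    /* wait for example_cb() to run */
        cleanup_srcu_struct(&example_srcu);
}
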
+
 static int srcu_torture_stats(char *page)
 {
        int cnt = 0;
@@ -661,8 +673,8 @@ static struct rcu_torture_ops srcu_ops = {
        .completed      = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
-       .call           = NULL,
-       .cb_barrier     = NULL,
+       .call           = srcu_torture_call,
+       .cb_barrier     = srcu_torture_barrier,
        .stats          = srcu_torture_stats,
        .name           = "srcu"
 };
@@ -1013,7 +1025,11 @@ rcu_torture_fakewriter(void *arg)
        do {
                schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
                udelay(rcu_random(&rand) & 0x3ff);
-               cur_ops->sync();
+               if (cur_ops->cb_barrier != NULL &&
+                   rcu_random(&rand) % (nfakewriters * 8) == 0)
+                       cur_ops->cb_barrier();
+               else
+                       cur_ops->sync();
                rcu_stutter_wait("rcu_torture_fakewriter");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 
@@ -1183,27 +1199,27 @@ rcu_torture_printk(char *page)
        }
        cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt],
-                      "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
-                      "rtmbe: %d rtbke: %ld rtbre: %ld "
-                      "rtbf: %ld rtb: %ld nt: %ld "
-                      "onoff: %ld/%ld:%ld/%ld "
-                      "barrier: %ld/%ld:%ld",
+                      "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
                       rcu_torture_current,
                       rcu_torture_current_version,
                       list_empty(&rcu_torture_freelist),
                       atomic_read(&n_rcu_torture_alloc),
                       atomic_read(&n_rcu_torture_alloc_fail),
-                      atomic_read(&n_rcu_torture_free),
+                      atomic_read(&n_rcu_torture_free));
+       cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
                       atomic_read(&n_rcu_torture_mberror),
                       n_rcu_torture_boost_ktrerror,
-                      n_rcu_torture_boost_rterror,
+                      n_rcu_torture_boost_rterror);
+       cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
                       n_rcu_torture_boost_failure,
                       n_rcu_torture_boosts,
-                      n_rcu_torture_timers,
+                      n_rcu_torture_timers);
+       cnt += sprintf(&page[cnt], "onoff: %ld/%ld:%ld/%ld ",
                       n_online_successes,
                       n_online_attempts,
                       n_offline_successes,
-                      n_offline_attempts,
+                      n_offline_attempts);
+       cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld",
                       n_barrier_successes,
                       n_barrier_attempts,
                       n_rcu_torture_barrier_error);
@@ -1445,8 +1461,7 @@ rcu_torture_shutdown(void *arg)
                delta = shutdown_time - jiffies_snap;
                if (verbose)
                        printk(KERN_ALERT "%s" TORTURE_FLAG
-                              "rcu_torture_shutdown task: %lu "
-                              "jiffies remaining\n",
+                              "rcu_torture_shutdown task: %lu jiffies remaining\n",
                               torture_type, delta);
                schedule_timeout_interruptible(delta);
                jiffies_snap = ACCESS_ONCE(jiffies);
@@ -1498,8 +1513,7 @@ rcu_torture_onoff(void *arg)
                        if (cpu_down(cpu) == 0) {
                                if (verbose)
                                        printk(KERN_ALERT "%s" TORTURE_FLAG
-                                              "rcu_torture_onoff task: "
-                                              "offlined %d\n",
+                                              "rcu_torture_onoff task: offlined %d\n",
                                               torture_type, cpu);
                                n_offline_successes++;
                        }
@@ -1512,8 +1526,7 @@ rcu_torture_onoff(void *arg)
                        if (cpu_up(cpu) == 0) {
                                if (verbose)
                                        printk(KERN_ALERT "%s" TORTURE_FLAG
-                                              "rcu_torture_onoff task: "
-                                              "onlined %d\n",
+                                              "rcu_torture_onoff task: onlined %d\n",
                                               torture_type, cpu);
                                n_online_successes++;
                        }
@@ -1631,6 +1644,7 @@ void rcu_torture_barrier_cbf(struct rcu_head *rcu)
 static int rcu_torture_barrier_cbs(void *arg)
 {
        long myid = (long)arg;
+       bool lastphase = 0;
        struct rcu_head rcu;
 
        init_rcu_head_on_stack(&rcu);
@@ -1638,9 +1652,11 @@ static int rcu_torture_barrier_cbs(void *arg)
        set_user_nice(current, 19);
        do {
                wait_event(barrier_cbs_wq[myid],
-                          atomic_read(&barrier_cbs_count) == n_barrier_cbs ||
+                          barrier_phase != lastphase ||
                           kthread_should_stop() ||
                           fullstop != FULLSTOP_DONTSTOP);
+               lastphase = barrier_phase;
+               smp_mb(); /* ensure barrier_phase load before ->call(). */
                if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
                        break;
                cur_ops->call(&rcu, rcu_torture_barrier_cbf);
@@ -1665,7 +1681,8 @@ static int rcu_torture_barrier(void *arg)
        do {
                atomic_set(&barrier_cbs_invoked, 0);
                atomic_set(&barrier_cbs_count, n_barrier_cbs);
-               /* wake_up() path contains the required barriers. */
+               smp_mb(); /* Ensure barrier_phase after prior assignments. */
+               barrier_phase = !barrier_phase;
                for (i = 0; i < n_barrier_cbs; i++)
                        wake_up(&barrier_cbs_wq[i]);
                wait_event(barrier_wq,
@@ -1684,7 +1701,7 @@ static int rcu_torture_barrier(void *arg)
                schedule_timeout_interruptible(HZ / 10);
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
        VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
-       rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
+       rcutorture_shutdown_absorb("rcu_torture_barrier");
        while (!kthread_should_stop())
                schedule_timeout_interruptible(1);
        return 0;
@@ -1908,8 +1925,8 @@ rcu_torture_init(void)
        static struct rcu_torture_ops *torture_ops[] =
                { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
                  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
-                 &srcu_ops, &srcu_sync_ops, &srcu_raw_ops,
-                 &srcu_raw_sync_ops, &srcu_expedited_ops,
+                 &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
+                 &srcu_raw_ops, &srcu_raw_sync_ops,
                  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
 
        mutex_lock(&fullstop_mutex);
@@ -1931,8 +1948,7 @@ rcu_torture_init(void)
                return -EINVAL;
        }
        if (cur_ops->fqs == NULL && fqs_duration != 0) {
-               printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
-                                 "fqs_duration, fqs disabled.\n");
+               printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
                fqs_duration = 0;
        }
        if (cur_ops->init)
index 3b0f133..f280e54 100644 (file)
 
 /* Data structures. */
 
-static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
-
-#define RCU_STATE_INITIALIZER(structname) { \
-       .level = { &structname##_state.node[0] }, \
-       .levelcnt = { \
-               NUM_RCU_LVL_0,  /* root of hierarchy. */ \
-               NUM_RCU_LVL_1, \
-               NUM_RCU_LVL_2, \
-               NUM_RCU_LVL_3, \
-               NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
-       }, \
+static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
+
+#define RCU_STATE_INITIALIZER(sname, cr) { \
+       .level = { &sname##_state.node[0] }, \
+       .call = cr, \
        .fqs_state = RCU_GP_IDLE, \
        .gpnum = -300, \
        .completed = -300, \
-       .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
-       .orphan_nxttail = &structname##_state.orphan_nxtlist, \
-       .orphan_donetail = &structname##_state.orphan_donelist, \
-       .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
-       .n_force_qs = 0, \
-       .n_force_qs_ngp = 0, \
-       .name = #structname, \
+       .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
+       .orphan_nxttail = &sname##_state.orphan_nxtlist, \
+       .orphan_donetail = &sname##_state.orphan_donelist, \
+       .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
+       .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \
+       .name = #sname, \
 }
 
-struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
+struct rcu_state rcu_sched_state =
+       RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched);
 DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
-struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
+LIST_HEAD(rcu_struct_flavors);
+
+/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
+static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
+module_param(rcu_fanout_leaf, int, 0);
+int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
+static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
+       NUM_RCU_LVL_0,
+       NUM_RCU_LVL_1,
+       NUM_RCU_LVL_2,
+       NUM_RCU_LVL_3,
+       NUM_RCU_LVL_4,
+};
+int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 
 /*
  * The rcu_scheduler_active variable transitions from zero to one just
@@ -147,13 +155,6 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 unsigned long rcutorture_testseq;
 unsigned long rcutorture_vernum;
 
-/* State information for rcu_barrier() and friends. */
-
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
-
 /*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
@@ -201,6 +202,7 @@ void rcu_note_context_switch(int cpu)
 {
        trace_rcu_utilization("Start context switch");
        rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch(cpu);
        trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -357,7 +359,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
                struct task_struct *idle = idle_task(smp_processor_id());
 
                trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
-               ftrace_dump(DUMP_ALL);
+               ftrace_dump(DUMP_ORIG);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
@@ -467,7 +469,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 
                trace_rcu_dyntick("Error on exit: not idle task",
                                  oldval, rdtp->dynticks_nesting);
-               ftrace_dump(DUMP_ALL);
+               ftrace_dump(DUMP_ORIG);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
@@ -584,8 +586,6 @@ void rcu_nmi_exit(void)
        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
-#ifdef CONFIG_PROVE_RCU
-
 /**
  * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
  *
@@ -603,7 +603,7 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
  * Is the current CPU online?  Disable preemption to avoid false positives
@@ -644,9 +644,7 @@ bool rcu_lockdep_current_cpu_online(void)
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
 
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
-#endif /* #ifdef CONFIG_PROVE_RCU */
+#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
 
 /**
  * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
@@ -732,7 +730,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
        int cpu;
        long delta;
        unsigned long flags;
-       int ndetected;
+       int ndetected = 0;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        /* Only let one CPU complain about others per time interval. */
@@ -773,7 +771,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
         */
        rnp = rcu_get_root(rsp);
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       ndetected = rcu_print_task_stall(rnp);
+       ndetected += rcu_print_task_stall(rnp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        print_cpu_stall_info_end();
@@ -859,9 +857,10 @@ static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
  */
 void rcu_cpu_stall_reset(void)
 {
-       rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-       rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-       rcu_preempt_stall_reset();
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
 }
 
 static struct notifier_block rcu_panic_block = {
@@ -893,8 +892,9 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
                if (rnp->qsmask & rdp->grpmask) {
                        rdp->qs_pending = 1;
                        rdp->passed_quiesce = 0;
-               } else
+               } else {
                        rdp->qs_pending = 0;
+               }
                zero_cpu_stall_ticks(rdp);
        }
 }
@@ -935,6 +935,18 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
        return ret;
 }
 
+/*
+ * Initialize the specified rcu_data structure's callback list to empty.
+ */
+static void init_callback_list(struct rcu_data *rdp)
+{
+       int i;
+
+       rdp->nxtlist = NULL;
+       for (i = 0; i < RCU_NEXT_SIZE; i++)
+               rdp->nxttail[i] = &rdp->nxtlist;
+}
+
 /*
  * Advance this CPU's callbacks, but only if the current grace period
  * has ended.  This may be called only from the CPU to whom the rdp
@@ -1327,8 +1339,6 @@ static void
 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                          struct rcu_node *rnp, struct rcu_data *rdp)
 {
-       int i;
-
        /*
         * Orphan the callbacks.  First adjust the counts.  This is safe
         * because ->onofflock excludes _rcu_barrier()'s adoption of
@@ -1339,7 +1349,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                rsp->qlen += rdp->qlen;
                rdp->n_cbs_orphaned += rdp->qlen;
                rdp->qlen_lazy = 0;
-               rdp->qlen = 0;
+               ACCESS_ONCE(rdp->qlen) = 0;
        }
 
        /*
@@ -1368,9 +1378,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
        }
 
        /* Finally, initialize the rcu_data structure's list to empty.  */
-       rdp->nxtlist = NULL;
-       for (i = 0; i < RCU_NEXT_SIZE; i++)
-               rdp->nxttail[i] = &rdp->nxtlist;
+       init_callback_list(rdp);
 }
 
 /*
@@ -1504,6 +1512,9 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        if (need_report & RCU_OFL_TASKS_EXP_GP)
                rcu_report_exp_rnp(rsp, rnp, true);
+       WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
+                 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
+                 cpu, rdp->qlen, rdp->nxtlist);
 }
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -1530,7 +1541,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
        unsigned long flags;
        struct rcu_head *next, *list, **tail;
-       int bl, count, count_lazy;
+       int bl, count, count_lazy, i;
 
        /* If no callbacks are ready, just return.*/
        if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
@@ -1553,9 +1564,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
        *rdp->nxttail[RCU_DONE_TAIL] = NULL;
        tail = rdp->nxttail[RCU_DONE_TAIL];
-       for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
-               if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
-                       rdp->nxttail[count] = &rdp->nxtlist;
+       for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
+               if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
+                       rdp->nxttail[i] = &rdp->nxtlist;
        local_irq_restore(flags);
 
        /* Invoke callbacks. */
@@ -1583,15 +1594,15 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        if (list != NULL) {
                *tail = rdp->nxtlist;
                rdp->nxtlist = list;
-               for (count = 0; count < RCU_NEXT_SIZE; count++)
-                       if (&rdp->nxtlist == rdp->nxttail[count])
-                               rdp->nxttail[count] = tail;
+               for (i = 0; i < RCU_NEXT_SIZE; i++)
+                       if (&rdp->nxtlist == rdp->nxttail[i])
+                               rdp->nxttail[i] = tail;
                        else
                                break;
        }
        smp_mb(); /* List handling before counting for rcu_barrier(). */
        rdp->qlen_lazy -= count_lazy;
-       rdp->qlen -= count;
+       ACCESS_ONCE(rdp->qlen) -= count;
        rdp->n_cbs_invoked += count;
 
        /* Reinstate batch limit if we have worked down the excess. */
@@ -1604,6 +1615,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
                rdp->n_force_qs_snap = rsp->n_force_qs;
        } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
                rdp->qlen_last_fqs_check = rdp->qlen;
+       WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
 
        local_irq_restore(flags);
 
@@ -1744,8 +1756,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
                break; /* grace period idle or initializing, ignore. */
 
        case RCU_SAVE_DYNTICK:
-               if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
-                       break; /* So gcc recognizes the dead code. */
 
                raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
 
@@ -1787,9 +1797,10 @@ unlock_fqs_ret:
  * whom the rdp belongs.
  */
 static void
-__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+__rcu_process_callbacks(struct rcu_state *rsp)
 {
        unsigned long flags;
+       struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
        WARN_ON_ONCE(rdp->beenonline == 0);
 
@@ -1825,11 +1836,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
+       struct rcu_state *rsp;
+
        trace_rcu_utilization("Start RCU core");
-       __rcu_process_callbacks(&rcu_sched_state,
-                               &__get_cpu_var(rcu_sched_data));
-       __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
-       rcu_preempt_process_callbacks();
+       for_each_rcu_flavor(rsp)
+               __rcu_process_callbacks(rsp);
        trace_rcu_utilization("End RCU core");
 }
 
@@ -1856,6 +1867,56 @@ static void invoke_rcu_core(void)
        raise_softirq(RCU_SOFTIRQ);
 }
 
+/*
+ * Handle any core-RCU processing required by a call_rcu() invocation.
+ */
+static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
+                           struct rcu_head *head, unsigned long flags)
+{
+       /*
+        * If called from an extended quiescent state, invoke the RCU
+        * core in order to force a re-evaluation of RCU's idleness.
+        */
+       if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+               invoke_rcu_core();
+
+       /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
+       if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
+               return;
+
+       /*
+        * Force the grace period if too many callbacks or too long waiting.
+        * Enforce hysteresis, and don't invoke force_quiescent_state()
+        * if some other CPU has recently done so.  Also, don't bother
+        * invoking force_quiescent_state() if the newly enqueued callback
+        * is the only one waiting for a grace period to complete.
+        */
+       if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+
+               /* Are we ignoring a completed grace period? */
+               rcu_process_gp_end(rsp, rdp);
+               check_for_new_grace_period(rsp, rdp);
+
+               /* Start a new grace period if one not already started. */
+               if (!rcu_gp_in_progress(rsp)) {
+                       unsigned long nestflag;
+                       struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+                       raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
+                       rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock */
+               } else {
+                       /* Give the grace period a kick. */
+                       rdp->blimit = LONG_MAX;
+                       if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+                           *rdp->nxttail[RCU_DONE_TAIL] != head)
+                               force_quiescent_state(rsp, 0);
+                       rdp->n_force_qs_snap = rsp->n_force_qs;
+                       rdp->qlen_last_fqs_check = rdp->qlen;
+               }
+       } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
+               force_quiescent_state(rsp, 1);
+}
+
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
           struct rcu_state *rsp, bool lazy)
@@ -1880,7 +1941,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
        rdp = this_cpu_ptr(rsp->rda);
 
        /* Add the callback to our list. */
-       rdp->qlen++;
+       ACCESS_ONCE(rdp->qlen)++;
        if (lazy)
                rdp->qlen_lazy++;
        else
@@ -1895,43 +1956,8 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
        else
                trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
 
-       /* If interrupts were disabled, don't dive into RCU core. */
-       if (irqs_disabled_flags(flags)) {
-               local_irq_restore(flags);
-               return;
-       }
-
-       /*
-        * Force the grace period if too many callbacks or too long waiting.
-        * Enforce hysteresis, and don't invoke force_quiescent_state()
-        * if some other CPU has recently done so.  Also, don't bother
-        * invoking force_quiescent_state() if the newly enqueued callback
-        * is the only one waiting for a grace period to complete.
-        */
-       if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
-
-               /* Are we ignoring a completed grace period? */
-               rcu_process_gp_end(rsp, rdp);
-               check_for_new_grace_period(rsp, rdp);
-
-               /* Start a new grace period if one not already started. */
-               if (!rcu_gp_in_progress(rsp)) {
-                       unsigned long nestflag;
-                       struct rcu_node *rnp_root = rcu_get_root(rsp);
-
-                       raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
-                       rcu_start_gp(rsp, nestflag);  /* rlses rnp_root->lock */
-               } else {
-                       /* Give the grace period a kick. */
-                       rdp->blimit = LONG_MAX;
-                       if (rsp->n_force_qs == rdp->n_force_qs_snap &&
-                           *rdp->nxttail[RCU_DONE_TAIL] != head)
-                               force_quiescent_state(rsp, 0);
-                       rdp->n_force_qs_snap = rsp->n_force_qs;
-                       rdp->qlen_last_fqs_check = rdp->qlen;
-               }
-       } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
-               force_quiescent_state(rsp, 1);
+       /* Go handle any RCU core processing required. */
+       __call_rcu_core(rsp, rdp, head, flags);
        local_irq_restore(flags);
 }
 
@@ -1961,28 +1987,16 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
  * occasionally incorrectly indicate that there are multiple CPUs online
  * when there was in fact only one the whole time, as this just adds
  * some overhead: RCU still operates correctly.
- *
- * Of course, sampling num_online_cpus() with preemption enabled can
- * give erroneous results if there are concurrent CPU-hotplug operations.
- * For example, given a demonic sequence of preemptions in num_online_cpus()
- * and CPU-hotplug operations, there could be two or more CPUs online at
- * all times, but num_online_cpus() might well return one (or even zero).
- *
- * However, all such demonic sequences require at least one CPU-offline
- * operation.  Furthermore, rcu_blocking_is_gp() giving the wrong answer
- * is only a problem if there is an RCU read-side critical section executing
- * throughout.  But RCU-sched and RCU-bh read-side critical sections
- * disable either preemption or bh, which prevents a CPU from going offline.
- * Therefore, the only way that rcu_blocking_is_gp() can incorrectly return
- * that there is only one CPU when in fact there was more than one throughout
- * is when there were no RCU readers in the system.  If there are no
- * RCU readers, the grace period by definition can be of zero length,
- * regardless of the number of online CPUs.
  */
 static inline int rcu_blocking_is_gp(void)
 {
+       int ret;
+
        might_sleep();  /* Check for RCU read-side critical section. */
-       return num_online_cpus() <= 1;
+       preempt_disable();
+       ret = num_online_cpus() <= 1;
+       preempt_enable();
+       return ret;
 }
 
 /**
@@ -2117,9 +2131,9 @@ void synchronize_sched_expedited(void)
                put_online_cpus();
 
                /* No joy, try again later.  Or just synchronize_sched(). */
-               if (trycount++ < 10)
+               if (trycount++ < 10) {
                        udelay(trycount * num_online_cpus());
-               else {
+               } else {
                        synchronize_sched();
                        return;
                }
@@ -2240,9 +2254,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static int rcu_pending(int cpu)
 {
-       return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
-              __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
-              rcu_preempt_pending(cpu);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
+                       return 1;
+       return 0;
 }
 
 /*
@@ -2252,20 +2269,41 @@ static int rcu_pending(int cpu)
  */
 static int rcu_cpu_has_callbacks(int cpu)
 {
+       struct rcu_state *rsp;
+
        /* RCU callbacks either ready or pending? */
-       return per_cpu(rcu_sched_data, cpu).nxtlist ||
-              per_cpu(rcu_bh_data, cpu).nxtlist ||
-              rcu_preempt_cpu_has_callbacks(cpu);
+       for_each_rcu_flavor(rsp)
+               if (per_cpu_ptr(rsp->rda, cpu)->nxtlist)
+                       return 1;
+       return 0;
+}
+
+/*
+ * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
+ * the compiler is expected to optimize this away.
+ */
+static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
+                              int cpu, unsigned long done)
+{
+       trace_rcu_barrier(rsp->name, s, cpu,
+                         atomic_read(&rsp->barrier_cpu_count), done);
 }
 
 /*
  * RCU callback function for _rcu_barrier().  If we are last, wake
  * up the task executing _rcu_barrier().
  */
-static void rcu_barrier_callback(struct rcu_head *notused)
+static void rcu_barrier_callback(struct rcu_head *rhp)
 {
-       if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-               complete(&rcu_barrier_completion);
+       struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
+       struct rcu_state *rsp = rdp->rsp;
+
+       if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
+               _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
+               complete(&rsp->barrier_completion);
+       } else {
+               _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
+       }
 }
 
 /*
@@ -2273,35 +2311,63 @@ static void rcu_barrier_callback(struct rcu_head *notused)
  */
 static void rcu_barrier_func(void *type)
 {
-       int cpu = smp_processor_id();
-       struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
-       void (*call_rcu_func)(struct rcu_head *head,
-                             void (*func)(struct rcu_head *head));
+       struct rcu_state *rsp = type;
+       struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
-       atomic_inc(&rcu_barrier_cpu_count);
-       call_rcu_func = type;
-       call_rcu_func(head, rcu_barrier_callback);
+       _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
+       atomic_inc(&rsp->barrier_cpu_count);
+       rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 }
 
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
  * RCU callbacks of the specified type to complete.
  */
-static void _rcu_barrier(struct rcu_state *rsp,
-                        void (*call_rcu_func)(struct rcu_head *head,
-                                              void (*func)(struct rcu_head *head)))
+static void _rcu_barrier(struct rcu_state *rsp)
 {
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp;
-       struct rcu_head rh;
+       struct rcu_data rd;
+       unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
+       unsigned long snap_done;
 
-       init_rcu_head_on_stack(&rh);
+       init_rcu_head_on_stack(&rd.barrier_head);
+       _rcu_barrier_trace(rsp, "Begin", -1, snap);
 
        /* Take mutex to serialize concurrent rcu_barrier() requests. */
-       mutex_lock(&rcu_barrier_mutex);
+       mutex_lock(&rsp->barrier_mutex);
+
+       /*
+        * Ensure that all prior references, including to ->n_barrier_done,
+        * are ordered before the _rcu_barrier() machinery.
+        */
+       smp_mb();  /* See above block comment. */
+
+       /*
+        * Recheck ->n_barrier_done to see if others did our work for us.
+        * This means checking ->n_barrier_done for an even-to-odd-to-even
+        * transition.  The "if" expression below therefore rounds the old
+        * value up to the next even number and adds two before comparing.
+        */
+       snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+       _rcu_barrier_trace(rsp, "Check", -1, snap_done);
+       if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+               _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
+               smp_mb(); /* caller's subsequent code after above check. */
+               mutex_unlock(&rsp->barrier_mutex);
+               return;
+       }
 
-       smp_mb();  /* Prevent any prior operations from leaking in. */
+       /*
+        * Increment ->n_barrier_done to avoid duplicate work.  Use
+        * ACCESS_ONCE() to prevent the compiler from speculating
+        * the increment to precede the early-exit check.
+        */
+       ACCESS_ONCE(rsp->n_barrier_done)++;
+       WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
+       _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
+       smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
 
        /*
         * Initialize the count to one rather than to zero in order to
@@ -2320,8 +2386,8 @@ static void _rcu_barrier(struct rcu_state *rsp,
         * 6.   Both rcu_barrier_callback() callbacks are invoked, awakening
         *      us -- but before CPU 1's orphaned callbacks are invoked!!!
         */
-       init_completion(&rcu_barrier_completion);
-       atomic_set(&rcu_barrier_cpu_count, 1);
+       init_completion(&rsp->barrier_completion);
+       atomic_set(&rsp->barrier_cpu_count, 1);
        raw_spin_lock_irqsave(&rsp->onofflock, flags);
        rsp->rcu_barrier_in_progress = current;
        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
@@ -2337,14 +2403,19 @@ static void _rcu_barrier(struct rcu_state *rsp,
                preempt_disable();
                rdp = per_cpu_ptr(rsp->rda, cpu);
                if (cpu_is_offline(cpu)) {
+                       _rcu_barrier_trace(rsp, "Offline", cpu,
+                                          rsp->n_barrier_done);
                        preempt_enable();
                        while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
                                schedule_timeout_interruptible(1);
                } else if (ACCESS_ONCE(rdp->qlen)) {
-                       smp_call_function_single(cpu, rcu_barrier_func,
-                                                (void *)call_rcu_func, 1);
+                       _rcu_barrier_trace(rsp, "OnlineQ", cpu,
+                                          rsp->n_barrier_done);
+                       smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
                        preempt_enable();
                } else {
+                       _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
+                                          rsp->n_barrier_done);
                        preempt_enable();
                }
        }
@@ -2361,24 +2432,32 @@ static void _rcu_barrier(struct rcu_state *rsp,
        rcu_adopt_orphan_cbs(rsp);
        rsp->rcu_barrier_in_progress = NULL;
        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
-       atomic_inc(&rcu_barrier_cpu_count);
+       atomic_inc(&rsp->barrier_cpu_count);
        smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
-       call_rcu_func(&rh, rcu_barrier_callback);
+       rd.rsp = rsp;
+       rsp->call(&rd.barrier_head, rcu_barrier_callback);
 
        /*
         * Now that we have an rcu_barrier_callback() callback on each
         * CPU, and thus each counted, remove the initial count.
         */
-       if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-               complete(&rcu_barrier_completion);
+       if (atomic_dec_and_test(&rsp->barrier_cpu_count))
+               complete(&rsp->barrier_completion);
+
+       /* Increment ->n_barrier_done to prevent duplicate work. */
+       smp_mb(); /* Keep increment after above mechanism. */
+       ACCESS_ONCE(rsp->n_barrier_done)++;
+       WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
+       _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
+       smp_mb(); /* Keep increment before caller's subsequent code. */
 
        /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
-       wait_for_completion(&rcu_barrier_completion);
+       wait_for_completion(&rsp->barrier_completion);
 
        /* Other rcu_barrier() invocations can now safely proceed. */
-       mutex_unlock(&rcu_barrier_mutex);
+       mutex_unlock(&rsp->barrier_mutex);
 
-       destroy_rcu_head_on_stack(&rh);
+       destroy_rcu_head_on_stack(&rd.barrier_head);
 }
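
To make the even/odd bookkeeping above concrete, a hedged standalone sketch of the early-exit test (the kernel uses ULONG_CMP_GE() so the comparison stays correct across counter wraparound); the helper name is hypothetical:

/* ->n_barrier_done is even while idle and odd while a barrier is running. */
static bool example_barrier_already_done(unsigned long snap,
                                         unsigned long snap_done)
{
        /*
         * Round the snapshot up to the next even value, then demand one
         * full even->odd->even transition beyond it: a whole barrier began
         * and completed after our snapshot, so it covers our callbacks too.
         */
        unsigned long needed = ((snap + 1) & ~0x1UL) + 2;

        return snap_done >= needed;     /* e.g. snap 4 -> 6, snap 5 -> 8 */
}
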
 
 /**
@@ -2386,7 +2465,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
  */
 void rcu_barrier_bh(void)
 {
-       _rcu_barrier(&rcu_bh_state, call_rcu_bh);
+       _rcu_barrier(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
 
@@ -2395,7 +2474,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  */
 void rcu_barrier_sched(void)
 {
-       _rcu_barrier(&rcu_sched_state, call_rcu_sched);
+       _rcu_barrier(&rcu_sched_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
@@ -2406,18 +2485,15 @@ static void __init
 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
        unsigned long flags;
-       int i;
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        /* Set up local state, ensuring consistent view of global state. */
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
-       rdp->nxtlist = NULL;
-       for (i = 0; i < RCU_NEXT_SIZE; i++)
-               rdp->nxttail[i] = &rdp->nxtlist;
+       init_callback_list(rdp);
        rdp->qlen_lazy = 0;
-       rdp->qlen = 0;
+       ACCESS_ONCE(rdp->qlen) = 0;
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
        WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -2491,9 +2567,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 
 static void __cpuinit rcu_prepare_cpu(int cpu)
 {
-       rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
-       rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
-       rcu_preempt_init_percpu_data(cpu);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               rcu_init_percpu_data(cpu, rsp,
+                                    strcmp(rsp->name, "rcu_preempt") == 0);
 }
 
 /*
@@ -2505,6 +2583,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
        long cpu = (long)hcpu;
        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;
+       struct rcu_state *rsp;
 
        trace_rcu_utilization("Start CPU hotplug");
        switch (action) {
@@ -2529,18 +2608,16 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                 * touch any data without introducing corruption. We send the
                 * dying CPU's callbacks to an arbitrarily chosen online CPU.
                 */
-               rcu_cleanup_dying_cpu(&rcu_bh_state);
-               rcu_cleanup_dying_cpu(&rcu_sched_state);
-               rcu_preempt_cleanup_dying_cpu();
+               for_each_rcu_flavor(rsp)
+                       rcu_cleanup_dying_cpu(rsp);
                rcu_cleanup_after_idle(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
-               rcu_cleanup_dead_cpu(cpu, &rcu_bh_state);
-               rcu_cleanup_dead_cpu(cpu, &rcu_sched_state);
-               rcu_preempt_cleanup_dead_cpu(cpu);
+               for_each_rcu_flavor(rsp)
+                       rcu_cleanup_dead_cpu(cpu, rsp);
                break;
        default:
                break;
@@ -2573,9 +2650,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 {
        int i;
 
-       for (i = NUM_RCU_LVLS - 1; i > 0; i--)
+       for (i = rcu_num_lvls - 1; i > 0; i--)
                rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-       rsp->levelspread[0] = CONFIG_RCU_FANOUT_LEAF;
+       rsp->levelspread[0] = rcu_fanout_leaf;
 }
 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
 static void __init rcu_init_levelspread(struct rcu_state *rsp)
@@ -2585,7 +2662,7 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
        int i;
 
        cprv = NR_CPUS;
-       for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+       for (i = rcu_num_lvls - 1; i >= 0; i--) {
                ccur = rsp->levelcnt[i];
                rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
                cprv = ccur;
@@ -2612,13 +2689,15 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 
        /* Initialize the level-tracking arrays. */
 
-       for (i = 1; i < NUM_RCU_LVLS; i++)
+       for (i = 0; i < rcu_num_lvls; i++)
+               rsp->levelcnt[i] = num_rcu_lvl[i];
+       for (i = 1; i < rcu_num_lvls; i++)
                rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
        rcu_init_levelspread(rsp);
 
        /* Initialize the elements themselves, starting from the leaves. */
 
-       for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+       for (i = rcu_num_lvls - 1; i >= 0; i--) {
                cpustride *= rsp->levelspread[i];
                rnp = rsp->level[i];
                for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
@@ -2648,13 +2727,74 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        }
 
        rsp->rda = rda;
-       rnp = rsp->level[NUM_RCU_LVLS - 1];
+       rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)
                        rnp++;
                per_cpu_ptr(rsp->rda, i)->mynode = rnp;
                rcu_boot_init_percpu_data(i, rsp);
        }
+       list_add(&rsp->flavors, &rcu_struct_flavors);
+}
+
+/*
+ * Compute the rcu_node tree geometry from kernel parameters.  This cannot
+ * replace the definitions in rcutree.h because those are needed to size
+ * the ->node array in the rcu_state structure.
+ */
+static void __init rcu_init_geometry(void)
+{
+       int i;
+       int j;
+       int n = nr_cpu_ids;
+       int rcu_capacity[MAX_RCU_LVLS + 1];
+
+       /* If the compile-time values are accurate, just leave. */
+       if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF)
+               return;
+
+       /*
+        * Compute number of nodes that can be handled an rcu_node tree
+        * with the given number of levels.  Setting rcu_capacity[0] makes
+        * some of the arithmetic easier.
+        */
+       rcu_capacity[0] = 1;
+       rcu_capacity[1] = rcu_fanout_leaf;
+       for (i = 2; i <= MAX_RCU_LVLS; i++)
+               rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
+
+       /*
+        * The boot-time rcu_fanout_leaf parameter is only permitted
+        * to increase the leaf-level fanout, not decrease it.  Of course,
+        * the leaf-level fanout cannot exceed the number of bits in
+        * the rcu_node masks.  Finally, the tree must be able to accommodate
+        * the configured number of CPUs.  Complain and fall back to the
+        * compile-time values if these limits are exceeded.
+        */
+       if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
+           rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
+           n > rcu_capacity[MAX_RCU_LVLS]) {
+               WARN_ON(1);
+               return;
+       }
+
+       /* Calculate the number of rcu_nodes at each level of the tree. */
+       for (i = 1; i <= MAX_RCU_LVLS; i++)
+               if (n <= rcu_capacity[i]) {
+                       for (j = 0; j <= i; j++)
+                               num_rcu_lvl[j] =
+                                       DIV_ROUND_UP(n, rcu_capacity[i - j]);
+                       rcu_num_lvls = i;
+                       for (j = i + 1; j <= MAX_RCU_LVLS; j++)
+                               num_rcu_lvl[j] = 0;
+                       break;
+               }
+
+       /* Calculate the total number of rcu_node structures. */
+       rcu_num_nodes = 0;
+       for (i = 0; i <= MAX_RCU_LVLS; i++)
+               rcu_num_nodes += num_rcu_lvl[i];
+       rcu_num_nodes -= n;
 }
 
 void __init rcu_init(void)
@@ -2662,6 +2802,7 @@ void __init rcu_init(void)
        int cpu;
 
        rcu_bootup_announce();
+       rcu_init_geometry();
        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
        __rcu_init_preempt();
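
To make the rcu_init_geometry() computation above concrete, the sketch below runs the same capacity and level arithmetic in user space for illustrative values nr_cpu_ids = 96, a boot-adjusted rcu_fanout_leaf of 32 and a fanout of 64 (none of these come from a real configuration). It produces a two-level tree: one root rcu_node, three leaf rcu_nodes, and rcu_num_nodes = 4 once the per-CPU leaf slots are subtracted.

#include <stdio.h>

#define MAX_RCU_LVLS    4
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        int rcu_fanout = 64;            /* stands in for CONFIG_RCU_FANOUT */
        int rcu_fanout_leaf = 32;       /* illustrative boot-time value */
        int n = 96;                     /* stands in for nr_cpu_ids */
        int rcu_capacity[MAX_RCU_LVLS + 1];
        int num_rcu_lvl[MAX_RCU_LVLS + 1];
        int rcu_num_lvls = 0, rcu_num_nodes = 0;
        int i, j;

        /* Capacity of an i-level tree, as in rcu_init_geometry(). */
        rcu_capacity[0] = 1;
        rcu_capacity[1] = rcu_fanout_leaf;
        for (i = 2; i <= MAX_RCU_LVLS; i++)
                rcu_capacity[i] = rcu_capacity[i - 1] * rcu_fanout;

        /* Pick the shallowest tree that holds n CPUs. */
        for (i = 1; i <= MAX_RCU_LVLS; i++)
                if (n <= rcu_capacity[i]) {
                        for (j = 0; j <= i; j++)
                                num_rcu_lvl[j] = DIV_ROUND_UP(n, rcu_capacity[i - j]);
                        rcu_num_lvls = i;
                        for (j = i + 1; j <= MAX_RCU_LVLS; j++)
                                num_rcu_lvl[j] = 0;
                        break;
                }

        for (i = 0; i <= MAX_RCU_LVLS; i++)
                rcu_num_nodes += num_rcu_lvl[i];
        rcu_num_nodes -= n;     /* the leaf level counts CPUs, not rcu_nodes */

        /* Prints: levels=2 lvl0=1 lvl1=3 lvl2=96 nodes=4 */
        printf("levels=%d lvl0=%d lvl1=%d lvl2=%d nodes=%d\n",
               rcu_num_lvls, num_rcu_lvl[0], num_rcu_lvl[1],
               num_rcu_lvl[2], rcu_num_nodes);
        return 0;
}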
index ea05649..4d29169 100644 (file)
 #define RCU_FANOUT_4         (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)
 
 #if NR_CPUS <= RCU_FANOUT_1
-#  define NUM_RCU_LVLS       1
+#  define RCU_NUM_LVLS       1
 #  define NUM_RCU_LVL_0              1
 #  define NUM_RCU_LVL_1              (NR_CPUS)
 #  define NUM_RCU_LVL_2              0
 #  define NUM_RCU_LVL_3              0
 #  define NUM_RCU_LVL_4              0
 #elif NR_CPUS <= RCU_FANOUT_2
-#  define NUM_RCU_LVLS       2
+#  define RCU_NUM_LVLS       2
 #  define NUM_RCU_LVL_0              1
 #  define NUM_RCU_LVL_1              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 #  define NUM_RCU_LVL_2              (NR_CPUS)
 #  define NUM_RCU_LVL_3              0
 #  define NUM_RCU_LVL_4              0
 #elif NR_CPUS <= RCU_FANOUT_3
-#  define NUM_RCU_LVLS       3
+#  define RCU_NUM_LVLS       3
 #  define NUM_RCU_LVL_0              1
 #  define NUM_RCU_LVL_1              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
 #  define NUM_RCU_LVL_2              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 #  define NUM_RCU_LVL_3              (NR_CPUS)
 #  define NUM_RCU_LVL_4              0
 #elif NR_CPUS <= RCU_FANOUT_4
-#  define NUM_RCU_LVLS       4
+#  define RCU_NUM_LVLS       4
 #  define NUM_RCU_LVL_0              1
 #  define NUM_RCU_LVL_1              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
 #  define NUM_RCU_LVL_2              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
@@ -76,6 +76,9 @@
 #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
 #define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
 
+extern int rcu_num_lvls;
+extern int rcu_num_nodes;
+
 /*
  * Dynticks per-CPU state.
  */
@@ -97,6 +100,7 @@ struct rcu_dynticks {
                                    /* # times non-lazy CBs posted to CPU. */
        unsigned long nonlazy_posted_snap;
                                    /* idle-period nonlazy_posted snapshot. */
+       int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };
 
@@ -206,7 +210,7 @@ struct rcu_node {
  */
 #define rcu_for_each_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
-            (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+            (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
 
 /*
  * Do a breadth-first scan of the non-leaf rcu_node structures for the
@@ -215,7 +219,7 @@ struct rcu_node {
  */
 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
-            (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
+            (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
 
 /*
  * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
@@ -224,8 +228,8 @@ struct rcu_node {
  * It is still a leaf node, even if it is also the root node.
  */
 #define rcu_for_each_leaf_node(rsp, rnp) \
-       for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
-            (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+       for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
+            (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
 
 /* Index values for nxttail array in struct rcu_data. */
 #define RCU_DONE_TAIL          0       /* Also RCU_WAIT head. */
@@ -311,6 +315,9 @@ struct rcu_data {
        unsigned long n_rp_need_fqs;
        unsigned long n_rp_need_nothing;
 
+       /* 6) _rcu_barrier() callback. */
+       struct rcu_head barrier_head;
+
        int cpu;
        struct rcu_state *rsp;
 };
@@ -357,10 +364,12 @@ do {                                                                      \
  */
 struct rcu_state {
        struct rcu_node node[NUM_RCU_NODES];    /* Hierarchy. */
-       struct rcu_node *level[NUM_RCU_LVLS];   /* Hierarchy levels. */
+       struct rcu_node *level[RCU_NUM_LVLS];   /* Hierarchy levels. */
        u32 levelcnt[MAX_RCU_LVLS + 1];         /* # nodes in each level. */
-       u8 levelspread[NUM_RCU_LVLS];           /* kids/node in each level. */
+       u8 levelspread[RCU_NUM_LVLS];           /* kids/node in each level. */
        struct rcu_data __percpu *rda;          /* pointer of percu rcu_data. */
+       void (*call)(struct rcu_head *head,     /* call_rcu() flavor. */
+                    void (*func)(struct rcu_head *head));
 
        /* The following fields are guarded by the root rcu_node's lock. */
 
@@ -392,6 +401,11 @@ struct rcu_state {
        struct task_struct *rcu_barrier_in_progress;
                                                /* Task doing rcu_barrier(), */
                                                /*  or NULL if no barrier. */
+       struct mutex barrier_mutex;             /* Guards barrier fields. */
+       atomic_t barrier_cpu_count;             /* # CPUs waiting on. */
+       struct completion barrier_completion;   /* Wake at barrier end. */
+       unsigned long n_barrier_done;           /* ++ at start and end of */
+                                               /*  _rcu_barrier(). */
        raw_spinlock_t fqslock;                 /* Only one task forcing */
                                                /*  quiescent states. */
        unsigned long jiffies_force_qs;         /* Time at which to invoke */
@@ -409,8 +423,13 @@ struct rcu_state {
        unsigned long gp_max;                   /* Maximum GP duration in */
                                                /*  jiffies. */
        char *name;                             /* Name of structure. */
+       struct list_head flavors;               /* List of RCU flavors. */
 };
 
+extern struct list_head rcu_struct_flavors;
+#define for_each_rcu_flavor(rsp) \
+       list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
+
 /* Return values for rcu_preempt_offline_tasks(). */
 
 #define RCU_OFL_TASKS_NORM_GP  0x1             /* Tasks blocking normal */
@@ -444,6 +463,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
+static void rcu_preempt_note_context_switch(int cpu);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
@@ -452,25 +472,18 @@ static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
-static void rcu_preempt_stall_reset(void);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_preempt_cleanup_dead_cpu(int cpu);
 static void rcu_preempt_check_callbacks(int cpu);
-static void rcu_preempt_process_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake);
 #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
-static int rcu_preempt_pending(int cpu);
-static int rcu_preempt_cpu_has_callbacks(int cpu);
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
-static void rcu_preempt_cleanup_dying_cpu(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
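
The new ->call pointer and the for_each_rcu_flavor() iterator above are what let the rcutree.c changes in this merge replace the old rcu_sched/rcu_bh/rcu_preempt triplication with a single loop. The fragment below is a hedged, self-contained sketch of that pattern; struct rcu_state_sketch, for_each_rcu_flavor_sketch() and fake_call_rcu() are invented names for illustration and deliberately omit locking, per-CPU data and grace-period deferral.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins; the real definitions live in rcupdate.h/rcutree.h. */
struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *head);
};

struct rcu_state_sketch {
        const char *name;
        void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *));
        struct rcu_state_sketch *next;          /* plays the role of ->flavors */
};

#define for_each_rcu_flavor_sketch(rsp, list) \
        for ((rsp) = (list); (rsp) != NULL; (rsp) = (rsp)->next)

/* Dummy "call_rcu()": invokes the callback at once instead of after a GP. */
static void fake_call_rcu(struct rcu_head *head,
                          void (*func)(struct rcu_head *head))
{
        head->func = func;
        func(head);
}

static void barrier_cb(struct rcu_head *head)
{
        printf("callback invoked for head %p\n", (void *)head);
}

int main(void)
{
        struct rcu_state_sketch bh = { "rcu_bh_sketch", fake_call_rcu, NULL };
        struct rcu_state_sketch sched = { "rcu_sched_sketch", fake_call_rcu, &bh };
        struct rcu_head heads[2];
        struct rcu_state_sketch *rsp;
        int i = 0;

        /* One callback per flavor, as _rcu_barrier() now does via rsp->call. */
        for_each_rcu_flavor_sketch(rsp, &sched) {
                printf("posting on %s\n", rsp->name);
                rsp->call(&heads[i++], barrier_cb);
        }
        return 0;
}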
index 5271a02..7f3244c 100644 (file)
@@ -68,17 +68,21 @@ static void __init rcu_bootup_announce_oddness(void)
        printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
 #endif
 #if NUM_RCU_LVL_4 != 0
-       printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
+       printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
 #endif
+       if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
+               printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+       if (nr_cpu_ids != NR_CPUS)
+               printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
 }
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
+struct rcu_state rcu_preempt_state =
+       RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -153,7 +157,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(int cpu)
 {
        struct task_struct *t = current;
        unsigned long flags;
@@ -164,7 +168,7 @@ void rcu_preempt_note_context_switch(void)
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
-               rdp = __this_cpu_ptr(rcu_preempt_state.rda);
+               rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,22 +232,10 @@ void rcu_preempt_note_context_switch(void)
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
-       rcu_preempt_qs(smp_processor_id());
+       rcu_preempt_qs(cpu);
        local_irq_restore(flags);
 }
 
-/*
- * Tree-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-       current->rcu_read_lock_nesting++;
-       barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
 /*
  * Check for preempted RCU readers blocking the current grace period
  * for the specified rcu_node structure.  If the caller needs a reliable
@@ -310,7 +302,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
        int empty;
        int empty_exp;
@@ -398,8 +390,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
                                                         rnp->grphi,
                                                         !!rnp->gp_tasks);
                        rcu_report_unblock_qs_rnp(rnp, flags);
-               } else
+               } else {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               }
 
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
@@ -418,38 +411,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
        }
 }
 
-/*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-       struct task_struct *t = current;
-
-       if (t->rcu_read_lock_nesting != 1)
-               --t->rcu_read_lock_nesting;
-       else {
-               barrier();  /* critical section before exit code. */
-               t->rcu_read_lock_nesting = INT_MIN;
-               barrier();  /* assign before ->rcu_read_unlock_special load */
-               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-                       rcu_read_unlock_special(t);
-               barrier();  /* ->rcu_read_unlock_special load before assign */
-               t->rcu_read_lock_nesting = 0;
-       }
-#ifdef CONFIG_PROVE_LOCKING
-       {
-               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-               WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-       }
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
 
 /*
@@ -539,16 +500,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
        return ndetected;
 }
 
-/*
- * Suppress preemptible RCU's CPU stall warnings by pushing the
- * time of the next stall-warning message comfortably far into the
- * future.
- */
-static void rcu_preempt_stall_reset(void)
-{
-       rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-}
-
 /*
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
@@ -649,14 +600,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-/*
- * Do CPU-offline processing for preemptible RCU.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-       rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
-}
-
 /*
  * Check for a quiescent state from the current CPU.  When a task blocks,
  * the task is recorded in the corresponding CPU's rcu_node structure,
@@ -677,15 +620,6 @@ static void rcu_preempt_check_callbacks(int cpu)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
-/*
- * Process callbacks for preemptible RCU.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-       __rcu_process_callbacks(&rcu_preempt_state,
-                               &__get_cpu_var(rcu_preempt_data));
-}
-
 #ifdef CONFIG_RCU_BOOST
 
 static void rcu_preempt_do_callbacks(void)
@@ -824,9 +758,9 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
        int must_wait = 0;
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       if (list_empty(&rnp->blkd_tasks))
+       if (list_empty(&rnp->blkd_tasks)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       else {
+       } else {
                rnp->exp_tasks = rnp->blkd_tasks.next;
                rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
                must_wait = 1;
@@ -870,9 +804,9 @@ void synchronize_rcu_expedited(void)
         * expedited grace period for us, just leave.
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
-               if (trycount++ < 10)
+               if (trycount++ < 10) {
                        udelay(trycount * num_online_cpus());
-               else {
+               } else {
                        synchronize_rcu();
                        return;
                }
@@ -917,50 +851,15 @@ mb_ret:
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
-/*
- * Check to see if there is any immediate preemptible-RCU-related work
- * to be done.
- */
-static int rcu_preempt_pending(int cpu)
-{
-       return __rcu_pending(&rcu_preempt_state,
-                            &per_cpu(rcu_preempt_data, cpu));
-}
-
-/*
- * Does preemptible RCU have callbacks on this CPU?
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-       return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
-}
-
 /**
  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
  */
 void rcu_barrier(void)
 {
-       _rcu_barrier(&rcu_preempt_state, call_rcu);
+       _rcu_barrier(&rcu_preempt_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
-/*
- * Initialize preemptible RCU's per-CPU data.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-       rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
-}
-
-/*
- * Move preemptible RCU's callbacks from dying CPU to other online CPU
- * and record a quiescent state.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-       rcu_cleanup_dying_cpu(&rcu_preempt_state);
-}
-
 /*
  * Initialize preemptible RCU's state structures.
  */
@@ -1001,6 +900,14 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_note_context_switch(int cpu)
+{
+}
+
 /*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
@@ -1037,14 +944,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
        return 0;
 }
 
-/*
- * Because preemptible RCU does not exist, there is no need to suppress
- * its CPU stall warnings.
- */
-static void rcu_preempt_stall_reset(void)
-{
-}
-
 /*
  * Because there is no preemptible RCU, there can be no readers blocked,
  * so there is no need to check for blocked tasks.  So check only for
@@ -1072,14 +971,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-/*
- * Because preemptible RCU does not exist, it never needs CPU-offline
- * processing.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-}
-
 /*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
@@ -1088,14 +979,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 {
 }
 
-/*
- * Because preemptible RCU does not exist, it never has any callbacks
- * to process.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-}
-
 /*
  * Queue an RCU callback for lazy invocation after a grace period.
  * This will likely be later named something like "call_rcu_lazy()",
@@ -1136,22 +1019,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-/*
- * Because preemptible RCU does not exist, it never has any work to do.
- */
-static int rcu_preempt_pending(int cpu)
-{
-       return 0;
-}
-
-/*
- * Because preemptible RCU does not exist, it never has callbacks
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-       return 0;
-}
-
 /*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
@@ -1162,21 +1029,6 @@ void rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
-/*
- * Because preemptible RCU does not exist, there is no per-CPU
- * data to initialize.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-}
-
-/*
- * Because there is no preemptible RCU, there is no cleanup to do.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-}
-
 /*
  * Because preemptible RCU does not exist, it need not be initialized.
  */
@@ -1960,9 +1812,11 @@ static void rcu_idle_count_callbacks_posted(void)
  */
 #define RCU_IDLE_FLUSHES 5             /* Number of dyntick-idle tries. */
 #define RCU_IDLE_OPT_FLUSHES 3         /* Optional dyntick-idle tries. */
-#define RCU_IDLE_GP_DELAY 6            /* Roughly one grace period. */
+#define RCU_IDLE_GP_DELAY 4            /* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)        /* Roughly six seconds. */
 
+extern int tick_nohz_enabled;
+
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
@@ -2039,10 +1893,13 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
                return 1;
        }
        /* Set up for the possibility that RCU will post a timer. */
-       if (rcu_cpu_has_nonlazy_callbacks(cpu))
-               *delta_jiffies = RCU_IDLE_GP_DELAY;
-       else
-               *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+       if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+               *delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
+                                         RCU_IDLE_GP_DELAY) - jiffies;
+       } else {
+               *delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
+               *delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
+       }
        return 0;
 }
 
@@ -2101,6 +1958,7 @@ static void rcu_cleanup_after_idle(int cpu)
 
        del_timer(&rdtp->idle_gp_timer);
        trace_rcu_prep_idle("Cleanup after idle");
+       rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
 }
 
 /*
@@ -2126,6 +1984,18 @@ static void rcu_prepare_for_idle(int cpu)
 {
        struct timer_list *tp;
        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+       int tne;
+
+       /* Handle nohz enablement switches conservatively. */
+       tne = ACCESS_ONCE(tick_nohz_enabled);
+       if (tne != rdtp->tick_nohz_enabled_snap) {
+               if (rcu_cpu_has_callbacks(cpu))
+                       invoke_rcu_core(); /* force nohz to see update. */
+               rdtp->tick_nohz_enabled_snap = tne;
+               return;
+       }
+       if (!tne)
+               return;
 
        /*
         * If this is an idle re-entry, for example, due to use of
@@ -2179,10 +2049,11 @@ static void rcu_prepare_for_idle(int cpu)
                if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
                        trace_rcu_prep_idle("Dyntick with callbacks");
                        rdtp->idle_gp_timer_expires =
-                                          jiffies + RCU_IDLE_GP_DELAY;
+                               round_up(jiffies + RCU_IDLE_GP_DELAY,
+                                        RCU_IDLE_GP_DELAY);
                } else {
                        rdtp->idle_gp_timer_expires =
-                                          jiffies + RCU_IDLE_LAZY_GP_DELAY;
+                               round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
                        trace_rcu_prep_idle("Dyntick with lazy callbacks");
                }
                tp = &rdtp->idle_gp_timer;
@@ -2223,8 +2094,9 @@ static void rcu_prepare_for_idle(int cpu)
        if (rcu_cpu_has_callbacks(cpu)) {
                trace_rcu_prep_idle("More callbacks");
                invoke_rcu_core();
-       } else
+       } else {
                trace_rcu_prep_idle("Callbacks drained");
+       }
 }
 
 /*
@@ -2261,6 +2133,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
+       *cp = '\0';
 }
 
 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
index d4bc16d..abffb48 100644 (file)
 #define RCU_TREE_NONCORE
 #include "rcutree.h"
 
+static int show_rcubarrier(struct seq_file *m, void *unused)
+{
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               seq_printf(m, "%s: %c bcc: %d nbd: %lu\n",
+                          rsp->name, rsp->rcu_barrier_in_progress ? 'B' : '.',
+                          atomic_read(&rsp->barrier_cpu_count),
+                          rsp->n_barrier_done);
+       return 0;
+}
+
+static int rcubarrier_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, show_rcubarrier, NULL);
+}
+
+static const struct file_operations rcubarrier_fops = {
+       .owner = THIS_MODULE,
+       .open = rcubarrier_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 #ifdef CONFIG_RCU_BOOST
 
 static char convert_kthread_status(unsigned int kthread_status)
@@ -95,24 +120,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
 
-#define PRINT_RCU_DATA(name, func, m) \
-       do { \
-               int _p_r_d_i; \
-               \
-               for_each_possible_cpu(_p_r_d_i) \
-                       func(m, &per_cpu(name, _p_r_d_i)); \
-       } while (0)
-
 static int show_rcudata(struct seq_file *m, void *unused)
 {
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       seq_puts(m, "rcu_preempt:\n");
-       PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data, m);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       seq_puts(m, "rcu_sched:\n");
-       PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m);
-       seq_puts(m, "rcu_bh:\n");
-       PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
+       int cpu;
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp) {
+               seq_printf(m, "%s:\n", rsp->name);
+               for_each_possible_cpu(cpu)
+                       print_one_rcu_data(m, per_cpu_ptr(rsp->rda, cpu));
+       }
        return 0;
 }
 
@@ -166,6 +183,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 
 static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
+       int cpu;
+       struct rcu_state *rsp;
+
        seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
        seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
        seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\"");
@@ -173,14 +193,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
        seq_puts(m, "\"kt\",\"ktl\"");
 #endif /* #ifdef CONFIG_RCU_BOOST */
        seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       seq_puts(m, "\"rcu_preempt:\"\n");
-       PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       seq_puts(m, "\"rcu_sched:\"\n");
-       PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m);
-       seq_puts(m, "\"rcu_bh:\"\n");
-       PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
+       for_each_rcu_flavor(rsp) {
+               seq_printf(m, "\"%s:\"\n", rsp->name);
+               for_each_possible_cpu(cpu)
+                       print_one_rcu_data_csv(m, per_cpu_ptr(rsp->rda, cpu));
+       }
        return 0;
 }
 
@@ -201,8 +218,7 @@ static const struct file_operations rcudata_csv_fops = {
 
 static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp)
 {
-       seq_printf(m,  "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu "
-                  "j=%04x bt=%04x\n",
+       seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu ",
                   rnp->grplo, rnp->grphi,
                   "T."[list_empty(&rnp->blkd_tasks)],
                   "N."[!rnp->gp_tasks],
@@ -210,11 +226,11 @@ static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp)
                   "B."[!rnp->boost_tasks],
                   convert_kthread_status(rnp->boost_kthread_status),
                   rnp->n_tasks_boosted, rnp->n_exp_boosts,
-                  rnp->n_normal_boosts,
+                  rnp->n_normal_boosts);
+       seq_printf(m, "j=%04x bt=%04x\n",
                   (int)(jiffies & 0xffff),
                   (int)(rnp->boost_time & 0xffff));
-       seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n",
-                  "     balk",
+       seq_printf(m, "    balk: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n",
                   rnp->n_balk_blkd_tasks,
                   rnp->n_balk_exp_gp_tasks,
                   rnp->n_balk_boost_tasks,
@@ -270,15 +286,15 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
        struct rcu_node *rnp;
 
        gpnum = rsp->gpnum;
-       seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x "
-                     "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
-                  rsp->completed, gpnum, rsp->fqs_state,
+       seq_printf(m, "%s: c=%lu g=%lu s=%d jfq=%ld j=%x ",
+                  rsp->name, rsp->completed, gpnum, rsp->fqs_state,
                   (long)(rsp->jiffies_force_qs - jiffies),
-                  (int)(jiffies & 0xffff),
+                  (int)(jiffies & 0xffff));
+       seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
                   rsp->n_force_qs, rsp->n_force_qs_ngp,
                   rsp->n_force_qs - rsp->n_force_qs_ngp,
                   rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
-       for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
+       for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
                if (rnp->level != level) {
                        seq_puts(m, "\n");
                        level = rnp->level;
@@ -295,14 +311,10 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
 
 static int show_rcuhier(struct seq_file *m, void *unused)
 {
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       seq_puts(m, "rcu_preempt:\n");
-       print_one_rcu_state(m, &rcu_preempt_state);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       seq_puts(m, "rcu_sched:\n");
-       print_one_rcu_state(m, &rcu_sched_state);
-       seq_puts(m, "rcu_bh:\n");
-       print_one_rcu_state(m, &rcu_bh_state);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               print_one_rcu_state(m, rsp);
        return 0;
 }
 
@@ -343,11 +355,10 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
 
 static int show_rcugp(struct seq_file *m, void *unused)
 {
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       show_one_rcugp(m, &rcu_preempt_state);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       show_one_rcugp(m, &rcu_sched_state);
-       show_one_rcugp(m, &rcu_bh_state);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               show_one_rcugp(m, rsp);
        return 0;
 }
 
@@ -366,44 +377,36 @@ static const struct file_operations rcugp_fops = {
 
 static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
 {
-       seq_printf(m, "%3d%cnp=%ld "
-                  "qsp=%ld rpq=%ld cbr=%ld cng=%ld "
-                  "gpc=%ld gps=%ld nf=%ld nn=%ld\n",
+       seq_printf(m, "%3d%cnp=%ld ",
                   rdp->cpu,
                   cpu_is_offline(rdp->cpu) ? '!' : ' ',
-                  rdp->n_rcu_pending,
+                  rdp->n_rcu_pending);
+       seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ",
                   rdp->n_rp_qs_pending,
                   rdp->n_rp_report_qs,
                   rdp->n_rp_cb_ready,
-                  rdp->n_rp_cpu_needs_gp,
+                  rdp->n_rp_cpu_needs_gp);
+       seq_printf(m, "gpc=%ld gps=%ld nf=%ld nn=%ld\n",
                   rdp->n_rp_gp_completed,
                   rdp->n_rp_gp_started,
                   rdp->n_rp_need_fqs,
                   rdp->n_rp_need_nothing);
 }
 
-static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
+static int show_rcu_pending(struct seq_file *m, void *unused)
 {
        int cpu;
        struct rcu_data *rdp;
-
-       for_each_possible_cpu(cpu) {
-               rdp = per_cpu_ptr(rsp->rda, cpu);
-               if (rdp->beenonline)
-                       print_one_rcu_pending(m, rdp);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp) {
+               seq_printf(m, "%s:\n", rsp->name);
+               for_each_possible_cpu(cpu) {
+                       rdp = per_cpu_ptr(rsp->rda, cpu);
+                       if (rdp->beenonline)
+                               print_one_rcu_pending(m, rdp);
+               }
        }
-}
-
-static int show_rcu_pending(struct seq_file *m, void *unused)
-{
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       seq_puts(m, "rcu_preempt:\n");
-       print_rcu_pendings(m, &rcu_preempt_state);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       seq_puts(m, "rcu_sched:\n");
-       print_rcu_pendings(m, &rcu_sched_state);
-       seq_puts(m, "rcu_bh:\n");
-       print_rcu_pendings(m, &rcu_bh_state);
        return 0;
 }
 
@@ -453,6 +456,11 @@ static int __init rcutree_trace_init(void)
        if (!rcudir)
                goto free_out;
 
+       retval = debugfs_create_file("rcubarrier", 0444, rcudir,
+                                               NULL, &rcubarrier_fops);
+       if (!retval)
+               goto free_out;
+
        retval = debugfs_create_file("rcudata", 0444, rcudir,
                                                NULL, &rcudata_fops);
        if (!retval)
index ab56a17..e8cd202 100644 (file)
@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .nr_pages = 0,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .partial = partial,
                .flags = flags,
                .ops = &relay_pipe_buf_ops,
@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
                 ret += padding;
 
 out:
-       splice_shrink_spd(pipe, &spd);
-        return ret;
+       splice_shrink_spd(&spd);
+       return ret;
 }
 
 static ssize_t relay_file_splice_read(struct file *in,
index d5594a4..468bdd4 100644 (file)
@@ -2081,7 +2081,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
        /* Here we just switch the register state and the stack. */
-       rcu_switch_from(prev);
        switch_to(prev, next, prev);
 
        barrier();
@@ -2161,11 +2160,73 @@ unsigned long this_cpu_load(void)
 }
 
 
+/*
+ * Global load-average calculations
+ *
+ * We take a distributed and async approach to calculating the global load-avg
+ * in order to minimize overhead.
+ *
+ * The global load average is an exponentially decaying average of nr_running +
+ * nr_uninterruptible.
+ *
+ * Once every LOAD_FREQ:
+ *
+ *   nr_active = 0;
+ *   for_each_possible_cpu(cpu)
+ *     nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
+ *
+ *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
+ *
+ * Due to a number of reasons the above turns into the mess below:
+ *
+ *  - for_each_possible_cpu() is prohibitively expensive on machines with
+ *    serious number of cpus, therefore we need to take a distributed approach
+ *    to calculating nr_active.
+ *
+ *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
+ *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
+ *
+ *    So assuming nr_active := 0 when we start out -- true per definition, we
+ *    can simply take per-cpu deltas and fold those into a global accumulate
+ *    to obtain the same result. See calc_load_fold_active().
+ *
+ *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ *    across the machine, we assume 10 ticks is sufficient time for every
+ *    cpu to have completed this task.
+ *
+ *    This places an upper-bound on the IRQ-off latency of the machine. Then
+ *    again, being late doesn't lose the delta, just wrecks the sample.
+ *
+ *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
+ *    this would add another cross-cpu cacheline miss and atomic operation
+ *    to the wakeup path. Instead we increment on whatever cpu the task ran
+ *    when it went into uninterruptible state and decrement on whatever cpu
+ *    did the wakeup. This means that only the sum of nr_uninterruptible over
+ *    all cpus yields the correct result.
+ *
+ *  This covers the NO_HZ=n code, for extra head-aches, see the comment below.
+ */
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
 unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun);
+EXPORT_SYMBOL(avenrun); /* should be removed */
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:     pointer to dest load array
+ * @offset:    offset to add
+ * @shift:     shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+       loads[0] = (avenrun[0] + offset) << shift;
+       loads[1] = (avenrun[1] + offset) << shift;
+       loads[2] = (avenrun[2] + offset) << shift;
+}
 
 static long calc_load_fold_active(struct rq *this_rq)
 {
@@ -2182,6 +2243,9 @@ static long calc_load_fold_active(struct rq *this_rq)
        return delta;
 }
 
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
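
The a1 = a0*e + a*(1 - e) update implemented by calc_load(), whose header appears just above, is easiest to see with numbers. The sketch below reuses the same arithmetic with the usual fixed-point constants (FSHIFT = 11, FIXED_1 = 2048, EXP_1 = 1884 for the one-minute average at 5-second samples, taken as assumptions here): starting from an idle system, one sample with two runnable tasks lifts the one-minute figure to 328/2048, roughly 0.16.

#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1 << FSHIFT)           /* 2048: 1.0 in fixed point */
#define EXP_1   1884                    /* ~exp(-5s/1min) in fixed point */

/* Same arithmetic as calc_load() above: a1 = a0*e + a*(1 - e). */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
}

int main(void)
{
        unsigned long avenrun0 = 0;             /* idle system */
        unsigned long active = 2 * FIXED_1;     /* two runnable tasks */

        avenrun0 = calc_load(avenrun0, EXP_1, active);
        /* Prints 328, i.e. 328/2048 ~= 0.16 after one LOAD_FREQ sample. */
        printf("%lu (%.2f)\n", avenrun0, (double)avenrun0 / FIXED_1);
        return 0;
}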
@@ -2193,30 +2257,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 
 #ifdef CONFIG_NO_HZ
 /*
- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ * Handle NO_HZ for the global load-average.
+ *
+ * Since the above described distributed algorithm to compute the global
+ * load-average relies on per-cpu sampling from the tick, it is affected by
+ * NO_HZ.
+ *
+ * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * when we read the global state.
+ *
+ * Obviously reality has to ruin such a delightfully simple scheme:
+ *
+ *  - When we go NO_HZ idle during the window, we can negate our sample
+ *    contribution, causing under-accounting.
+ *
+ *    We avoid this by keeping two idle-delta counters and flipping them
+ *    when the window starts, thus separating old and new NO_HZ load.
+ *
+ *    The only trick is the slight shift in index flip for read vs write.
+ *
+ *        0s            5s            10s           15s
+ *          +10           +10           +10           +10
+ *        |-|-----------|-|-----------|-|-----------|-|
+ *    r:0 0 1           1 0           0 1           1 0
+ *    w:0 1 1           0 0           1 1           0 0
+ *
+ *    This ensures we'll fold the old idle contribution in this window while
+ *    accumulating the new one.
+ *
+ *  - When we wake up from NO_HZ idle during the window, we push up our
+ *    contribution, since we effectively move our sample point to a known
+ *    busy state.
+ *
+ *    This is solved by pushing the window forward, and thus skipping the
+ *    sample, for this cpu (effectively using the idle-delta for this cpu which
+ *    was in effect at the time the window opened). This also solves the issue
+ *    of having to deal with a cpu having been in NOHZ idle for multiple
+ *    LOAD_FREQ intervals.
  *
  * When making the ILB scale, we should try to pull this in as well.
  */
-static atomic_long_t calc_load_tasks_idle;
+static atomic_long_t calc_load_idle[2];
+static int calc_load_idx;
 
-void calc_load_account_idle(struct rq *this_rq)
+static inline int calc_load_write_idx(void)
 {
+       int idx = calc_load_idx;
+
+       /*
+        * See calc_global_nohz(), if we observe the new index, we also
+        * need to observe the new update time.
+        */
+       smp_rmb();
+
+       /*
+        * If the folding window started, make sure we start writing in the
+        * next idle-delta.
+        */
+       if (!time_before(jiffies, calc_load_update))
+               idx++;
+
+       return idx & 1;
+}
+
+static inline int calc_load_read_idx(void)
+{
+       return calc_load_idx & 1;
+}
+
+void calc_load_enter_idle(void)
+{
+       struct rq *this_rq = this_rq();
        long delta;
 
+       /*
+        * We're going into NOHZ mode, if there's any pending delta, fold it
+        * into the pending idle delta.
+        */
        delta = calc_load_fold_active(this_rq);
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks_idle);
+       if (delta) {
+               int idx = calc_load_write_idx();
+               atomic_long_add(delta, &calc_load_idle[idx]);
+       }
 }
 
-static long calc_load_fold_idle(void)
+void calc_load_exit_idle(void)
 {
-       long delta = 0;
+       struct rq *this_rq = this_rq();
+
+       /*
+        * If we're still before the sample window, we're done.
+        */
+       if (time_before(jiffies, this_rq->calc_load_update))
+               return;
 
        /*
-        * Its got a race, we don't care...
+        * We woke inside or after the sample window, this means we're already
+        * accounted through the nohz accounting, so skip the entire deal and
+        * sync up for the next window.
         */
-       if (atomic_long_read(&calc_load_tasks_idle))
-               delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+       this_rq->calc_load_update = calc_load_update;
+       if (time_before(jiffies, this_rq->calc_load_update + 10))
+               this_rq->calc_load_update += LOAD_FREQ;
+}
+
+static long calc_load_fold_idle(void)
+{
+       int idx = calc_load_read_idx();
+       long delta = 0;
+
+       if (atomic_long_read(&calc_load_idle[idx]))
+               delta = atomic_long_xchg(&calc_load_idle[idx], 0);
 
        return delta;
 }
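
The read/write index diagram in the comment block above can be checked with a tiny sketch. The one below strips out the kernel details (no wrap-safe time_before(), no smp_wmb()/smp_rmb() pairing, plain variables instead of atomics) and only shows that once the sample window opens, writers move to the other idle-delta slot until the fold-time index flip catches the readers up.

#include <stdio.h>

static unsigned long jiffies;
static unsigned long calc_load_update;
static int calc_load_idx;

static int write_idx(void)
{
        int idx = calc_load_idx;

        /* Window already open: write into the *next* idle delta. */
        if (jiffies >= calc_load_update)
                idx++;
        return idx & 1;
}

static int read_idx(void)
{
        return calc_load_idx & 1;
}

int main(void)
{
        calc_load_update = 100;

        jiffies = 90;   /* before the window: both sides use slot 0 */
        printf("before window: read=%d write=%d\n", read_idx(), write_idx());

        jiffies = 105;  /* window open, not yet folded: writers move on */
        printf("window open:   read=%d write=%d\n", read_idx(), write_idx());

        /* The fold advances the window and then flips the index. */
        calc_load_update += 501;        /* illustrative LOAD_FREQ */
        calc_load_idx++;
        printf("after fold:    read=%d write=%d\n", read_idx(), write_idx());
        return 0;
}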
@@ -2302,66 +2454,39 @@ static void calc_global_nohz(void)
 {
        long delta, active, n;
 
-       /*
-        * If we crossed a calc_load_update boundary, make sure to fold
-        * any pending idle changes, the respective CPUs might have
-        * missed the tick driven calc_load_account_active() update
-        * due to NO_HZ.
-        */
-       delta = calc_load_fold_idle();
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks);
-
-       /*
-        * It could be the one fold was all it took, we done!
-        */
-       if (time_before(jiffies, calc_load_update + 10))
-               return;
-
-       /*
-        * Catch-up, fold however many we are behind still
-        */
-       delta = jiffies - calc_load_update - 10;
-       n = 1 + (delta / LOAD_FREQ);
+       if (!time_before(jiffies, calc_load_update + 10)) {
+               /*
+                * Catch-up, fold however many we are behind still
+                */
+               delta = jiffies - calc_load_update - 10;
+               n = 1 + (delta / LOAD_FREQ);
 
-       active = atomic_long_read(&calc_load_tasks);
-       active = active > 0 ? active * FIXED_1 : 0;
+               active = atomic_long_read(&calc_load_tasks);
+               active = active > 0 ? active * FIXED_1 : 0;
 
-       avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-       avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-       avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+               avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+               avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+               avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-       calc_load_update += n * LOAD_FREQ;
-}
-#else
-void calc_load_account_idle(struct rq *this_rq)
-{
-}
+               calc_load_update += n * LOAD_FREQ;
+       }
 
-static inline long calc_load_fold_idle(void)
-{
-       return 0;
+       /*
+        * Flip the idle index...
+        *
+        * Make sure we first write the new time then flip the index, so that
+        * calc_load_write_idx() will see the new time when it reads the new
+        * index, this avoids a double flip messing things up.
+        */
+       smp_wmb();
+       calc_load_idx++;
 }
+#else /* !CONFIG_NO_HZ */
 
-static void calc_global_nohz(void)
-{
-}
-#endif
+static inline long calc_load_fold_idle(void) { return 0; }
+static inline void calc_global_nohz(void) { }
 
-/**
- * get_avenrun - get the load average array
- * @loads:     pointer to dest load array
- * @offset:    offset to add
- * @shift:     shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-       loads[0] = (avenrun[0] + offset) << shift;
-       loads[1] = (avenrun[1] + offset) << shift;
-       loads[2] = (avenrun[2] + offset) << shift;
-}
+#endif /* CONFIG_NO_HZ */
 
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
@@ -2369,11 +2494,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  */
 void calc_global_load(unsigned long ticks)
 {
-       long active;
+       long active, delta;
 
        if (time_before(jiffies, calc_load_update + 10))
                return;
 
+       /*
+        * Fold the 'old' idle-delta to include all NO_HZ cpus.
+        */
+       delta = calc_load_fold_idle();
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks);
+
        active = atomic_long_read(&calc_load_tasks);
        active = active > 0 ? active * FIXED_1 : 0;
 
@@ -2384,12 +2516,7 @@ void calc_global_load(unsigned long ticks)
        calc_load_update += LOAD_FREQ;
 
        /*
-        * Account one period with whatever state we found before
-        * folding in the nohz state and ageing the entire idle period.
-        *
-        * This avoids loosing a sample when we go idle between 
-        * calc_load_account_active() (10 ticks ago) and now and thus
-        * under-accounting.
+        * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
         */
        calc_global_nohz();
 }
@@ -2406,13 +2533,16 @@ static void calc_load_account_active(struct rq *this_rq)
                return;
 
        delta  = calc_load_fold_active(this_rq);
-       delta += calc_load_fold_idle();
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 
        this_rq->calc_load_update += LOAD_FREQ;
 }
 
+/*
+ * End of global load-average stuff
+ */
+
 /*
  * The exact cpuload at various idx values, calculated at every tick would be
  * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
index b44d604..b6baf37 100644 (file)
@@ -25,7 +25,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
        schedstat_inc(rq, sched_goidle);
-       calc_load_account_idle(rq);
        return rq->idle;
 }
 
index 6d52cea..55844f2 100644 (file)
@@ -942,8 +942,6 @@ static inline u64 sched_avg_period(void)
        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
 }
 
-void calc_load_account_idle(struct rq *this_rq);
-
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
index d0ae5b2..29dd40a 100644 (file)
@@ -581,26 +581,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
        return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
-
-void ipi_call_lock(void)
-{
-       raw_spin_lock(&call_function.lock);
-}
-
-void ipi_call_unlock(void)
-{
-       raw_spin_unlock(&call_function.lock);
-}
-
-void ipi_call_lock_irq(void)
-{
-       raw_spin_lock_irq(&call_function.lock);
-}
-
-void ipi_call_unlock_irq(void)
-{
-       raw_spin_unlock_irq(&call_function.lock);
-}
 #endif /* USE_GENERIC_SMP_HELPERS */
 
 /* Setup configured maximum number of CPUs to activate */
index 80c0acf..6ef9433 100644 (file)
@@ -3,8 +3,6 @@
 
 struct task_struct;
 
-int smpboot_prepare(unsigned int cpu);
-
 #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
 struct task_struct *idle_thread_get(unsigned int cpu);
 void idle_thread_set_boot_cpu(void);
index f0ec44d..2d39a84 100644 (file)
@@ -1788,7 +1788,6 @@ SYSCALL_DEFINE1(umask, int, mask)
 #ifdef CONFIG_CHECKPOINT_RESTORE
 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 {
-       struct vm_area_struct *vma;
        struct file *exe_file;
        struct dentry *dentry;
        int err;
@@ -1816,13 +1815,17 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
        down_write(&mm->mmap_sem);
 
        /*
-        * Forbid mm->exe_file change if there are mapped other files.
+        * Forbid mm->exe_file change if old file still mapped.
         */
        err = -EBUSY;
-       for (vma = mm->mmap; vma; vma = vma->vm_next) {
-               if (vma->vm_file && !path_equal(&vma->vm_file->f_path,
-                                               &exe_file->f_path))
-                       goto exit_unlock;
+       if (mm->exe_file) {
+               struct vm_area_struct *vma;
+
+               for (vma = mm->mmap; vma; vma = vma->vm_next)
+                       if (vma->vm_file &&
+                           path_equal(&vma->vm_file->f_path,
+                                      &mm->exe_file->f_path))
+                               goto exit_unlock;
        }
 
        /*
@@ -1835,6 +1838,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
        if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
                goto exit_unlock;
 
+       err = 0;
        set_mm_exe_file(mm, exe_file);
 exit_unlock:
        up_write(&mm->mmap_sem);
@@ -2127,9 +2131,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                                else
                                        return -EINVAL;
                                break;
-               case PR_GET_TID_ADDRESS:
-                       error = prctl_get_tid_address(me, (int __user **)arg2);
-                       break;
                        default:
                                return -EINVAL;
                        }
@@ -2147,6 +2148,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                case PR_SET_MM:
                        error = prctl_set_mm(arg2, arg3, arg4, arg5);
                        break;
+               case PR_GET_TID_ADDRESS:
+                       error = prctl_get_tid_address(me, (int __user **)arg2);
+                       break;
                case PR_SET_CHILD_SUBREAPER:
                        me->signal->is_child_subreaper = !!arg2;
                        error = 0;
index 70b33ab..b7fbadc 100644 (file)
@@ -409,7 +409,9 @@ int second_overflow(unsigned long secs)
                        time_state = TIME_DEL;
                break;
        case TIME_INS:
-               if (secs % 86400 == 0) {
+               if (!(time_status & STA_INS))
+                       time_state = TIME_OK;
+               else if (secs % 86400 == 0) {
                        leap = -1;
                        time_state = TIME_OOP;
                        time_tai++;
@@ -418,7 +420,9 @@ int second_overflow(unsigned long secs)
                }
                break;
        case TIME_DEL:
-               if ((secs + 1) % 86400 == 0) {
+               if (!(time_status & STA_DEL))
+                       time_state = TIME_OK;
+               else if ((secs + 1) % 86400 == 0) {
                        leap = 1;
                        time_tai--;
                        time_state = TIME_WAIT;
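
The new STA_INS/STA_DEL guards matter because NTP daemons arm and cancel leap seconds through adjtimex(); a rough user-space sketch of arming an insertion (illustrative, needs CAP_SYS_TIME):

    #include <stdio.h>
    #include <sys/timex.h>

    int main(void)
    {
            struct timex tx = { 0 };

            tx.modes  = ADJ_STATUS;
            tx.status = STA_INS;            /* insert a leap second at the next UTC midnight */
            if (adjtimex(&tx) < 0)
                    perror("adjtimex");

            /* If the daemon later clears STA_INS, second_overflow() now drops back to
             * TIME_OK instead of keeping a stale leap second pending. */
            return 0;
    }
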
index 8699978..024540f 100644 (file)
@@ -105,7 +105,7 @@ static ktime_t tick_init_jiffy_update(void)
 /*
  * NO HZ enabled ?
  */
-static int tick_nohz_enabled __read_mostly  = 1;
+int tick_nohz_enabled __read_mostly  = 1;
 
 /*
  * Enable / Disable tickless mode
@@ -271,50 +271,15 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
+static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+                                        ktime_t now, int cpu)
 {
        unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
+       ktime_t last_update, expires, ret = { .tv64 = 0 };
        unsigned long rcu_delta_jiffies;
-       ktime_t last_update, expires, now;
        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
        u64 time_delta;
-       int cpu;
-
-       cpu = smp_processor_id();
-       ts = &per_cpu(tick_cpu_sched, cpu);
-
-       now = tick_nohz_start_idle(cpu, ts);
-
-       /*
-        * If this cpu is offline and it is the one which updates
-        * jiffies, then give up the assignment and let it be taken by
-        * the cpu which runs the tick timer next. If we don't drop
-        * this here the jiffies might be stale and do_timer() never
-        * invoked.
-        */
-       if (unlikely(!cpu_online(cpu))) {
-               if (cpu == tick_do_timer_cpu)
-                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-       }
-
-       if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
-               return;
 
-       if (need_resched())
-               return;
-
-       if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-               static int ratelimit;
-
-               if (ratelimit < 10) {
-                       printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-                              (unsigned int) local_softirq_pending());
-                       ratelimit++;
-               }
-               return;
-       }
-
-       ts->idle_calls++;
        /* Read jiffies and the time when jiffies were updated last */
        do {
                seq = read_seqbegin(&xtime_lock);
@@ -397,6 +362,8 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
                        goto out;
 
+               ret = expires;
+
                /*
                 * nohz_stop_sched_tick can be called several times before
                 * the nohz_restart_sched_tick is called. This happens when
@@ -406,17 +373,12 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                 */
                if (!ts->tick_stopped) {
                        select_nohz_load_balancer(1);
+                       calc_load_enter_idle();
 
-                       ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
+                       ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
                        ts->tick_stopped = 1;
-                       ts->idle_jiffies = last_jiffies;
                }
 
-               ts->idle_sleeps++;
-
-               /* Mark expires */
-               ts->idle_expires = expires;
-
                /*
                 * If the expiration time == KTIME_MAX, then
                 * in this case we simply stop the tick timer.
@@ -447,6 +409,65 @@ out:
        ts->next_jiffies = next_jiffies;
        ts->last_jiffies = last_jiffies;
        ts->sleep_length = ktime_sub(dev->next_event, now);
+
+       return ret;
+}
+
+static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+{
+       /*
+        * If this cpu is offline and it is the one which updates
+        * jiffies, then give up the assignment and let it be taken by
+        * the cpu which runs the tick timer next. If we don't drop
+        * this here the jiffies might be stale and do_timer() never
+        * invoked.
+        */
+       if (unlikely(!cpu_online(cpu))) {
+               if (cpu == tick_do_timer_cpu)
+                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+       }
+
+       if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+               return false;
+
+       if (need_resched())
+               return false;
+
+       if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+               static int ratelimit;
+
+               if (ratelimit < 10) {
+                       printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+                              (unsigned int) local_softirq_pending());
+                       ratelimit++;
+               }
+               return false;
+       }
+
+       return true;
+}
+
+static void __tick_nohz_idle_enter(struct tick_sched *ts)
+{
+       ktime_t now, expires;
+       int cpu = smp_processor_id();
+
+       now = tick_nohz_start_idle(cpu, ts);
+
+       if (can_stop_idle_tick(cpu, ts)) {
+               int was_stopped = ts->tick_stopped;
+
+               ts->idle_calls++;
+
+               expires = tick_nohz_stop_sched_tick(ts, now, cpu);
+               if (expires.tv64 > 0LL) {
+                       ts->idle_sleeps++;
+                       ts->idle_expires = expires;
+               }
+
+               if (!was_stopped && ts->tick_stopped)
+                       ts->idle_jiffies = ts->last_jiffies;
+       }
 }
 
 /**
@@ -484,7 +505,7 @@ void tick_nohz_idle_enter(void)
         * update of the idle time accounting in tick_nohz_start_idle().
         */
        ts->inidle = 1;
-       tick_nohz_stop_sched_tick(ts);
+       __tick_nohz_idle_enter(ts);
 
        local_irq_enable();
 }
@@ -504,7 +525,7 @@ void tick_nohz_irq_exit(void)
        if (!ts->inidle)
                return;
 
-       tick_nohz_stop_sched_tick(ts);
+       __tick_nohz_idle_enter(ts);
 }
 
 /**
@@ -522,7 +543,7 @@ ktime_t tick_nohz_get_sleep_length(void)
 static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 {
        hrtimer_cancel(&ts->sched_timer);
-       hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
+       hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 
        while (1) {
                /* Forward the time to expire in the future */
@@ -545,6 +566,41 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
        }
 }
 
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
+{
+       /* Update jiffies first */
+       select_nohz_load_balancer(0);
+       tick_do_update_jiffies64(now);
+       update_cpu_load_nohz();
+
+       touch_softlockup_watchdog();
+       /*
+        * Cancel the scheduled timer and restore the tick
+        */
+       ts->tick_stopped  = 0;
+       ts->idle_exittime = now;
+
+       tick_nohz_restart(ts, now);
+}
+
+static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+       unsigned long ticks;
+       /*
+        * We stopped the tick in idle. Update process times would miss the
+        * time we slept as update_process_times does only a 1 tick
+        * accounting. Enforce that this is accounted to idle !
+        */
+       ticks = jiffies - ts->idle_jiffies;
+       /*
+        * We might be one off. Do not randomly account a huge number of ticks!
+        */
+       if (ticks && ticks < LONG_MAX)
+               account_idle_ticks(ticks);
+#endif
+}
+
 /**
  * tick_nohz_idle_exit - restart the idle tick from the idle task
  *
@@ -556,9 +612,6 @@ void tick_nohz_idle_exit(void)
 {
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-       unsigned long ticks;
-#endif
        ktime_t now;
 
        local_irq_disable();
@@ -573,39 +626,11 @@ void tick_nohz_idle_exit(void)
        if (ts->idle_active)
                tick_nohz_stop_idle(cpu, now);
 
-       if (!ts->tick_stopped) {
-               local_irq_enable();
-               return;
+       if (ts->tick_stopped) {
+               tick_nohz_restart_sched_tick(ts, now);
+               tick_nohz_account_idle_ticks(ts);
        }
 
-       /* Update jiffies first */
-       select_nohz_load_balancer(0);
-       tick_do_update_jiffies64(now);
-       update_cpu_load_nohz();
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-       /*
-        * We stopped the tick in idle. Update process times would miss the
-        * time we slept as update_process_times does only a 1 tick
-        * accounting. Enforce that this is accounted to idle !
-        */
-       ticks = jiffies - ts->idle_jiffies;
-       /*
-        * We might be one off. Do not randomly account a huge number of ticks!
-        */
-       if (ticks && ticks < LONG_MAX)
-               account_idle_ticks(ticks);
-#endif
-
-       touch_softlockup_watchdog();
-       /*
-        * Cancel the scheduled timer and restore the tick
-        */
-       ts->tick_stopped  = 0;
-       ts->idle_exittime = now;
-
-       tick_nohz_restart(ts, now);
-
        local_irq_enable();
 }
 
@@ -809,7 +834,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
                 */
                if (ts->tick_stopped) {
                        touch_softlockup_watchdog();
-                       ts->idle_jiffies++;
+                       if (idle_cpu(cpu))
+                               ts->idle_jiffies++;
                }
                update_process_times(user_mode(regs));
                profile_tick(CPU_PROFILING);
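
As a rough mental model (loosely following the generic cpu_idle() loop shape, not copied from any arch), the refactored pieces compose like this on the idle path:

    /* Sketch only: loop shape assumed, cpu_idle_sleep() is a hypothetical arch halt. */
    while (1) {
            tick_nohz_idle_enter();         /* sets ts->inidle, then __tick_nohz_idle_enter() */
            while (!need_resched())
                    cpu_idle_sleep();       /* interrupts that end in irq_exit() re-run
                                             * __tick_nohz_idle_enter() via tick_nohz_irq_exit() */
            tick_nohz_idle_exit();          /* restart + account idle ticks only if stopped */
            schedule_preempt_disabled();
    }
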
index 6f46a00..f045cc5 100644 (file)
 /* Structure holding internal timekeeping values. */
 struct timekeeper {
        /* Current clocksource used for timekeeping. */
-       struct clocksource *clock;
+       struct clocksource      *clock;
        /* NTP adjusted clock multiplier */
-       u32     mult;
+       u32                     mult;
        /* The shift value of the current clocksource. */
-       int     shift;
-
+       u32                     shift;
        /* Number of clock cycles in one NTP interval. */
-       cycle_t cycle_interval;
+       cycle_t                 cycle_interval;
        /* Number of clock shifted nano seconds in one NTP interval. */
-       u64     xtime_interval;
+       u64                     xtime_interval;
        /* shifted nano seconds left over when rounding cycle_interval */
-       s64     xtime_remainder;
+       s64                     xtime_remainder;
        /* Raw nano seconds accumulated per NTP interval. */
-       u32     raw_interval;
+       u32                     raw_interval;
+
+       /* Current CLOCK_REALTIME time in seconds */
+       u64                     xtime_sec;
+       /* Clock shifted nano seconds */
+       u64                     xtime_nsec;
 
-       /* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
-       u64     xtime_nsec;
        /* Difference between accumulated time and NTP time in ntp
         * shifted nano seconds. */
-       s64     ntp_error;
+       s64                     ntp_error;
        /* Shift conversion between clock shifted nano seconds and
         * ntp shifted nano seconds. */
-       int     ntp_error_shift;
+       u32                     ntp_error_shift;
 
-       /* The current time */
-       struct timespec xtime;
        /*
         * wall_to_monotonic is what we need to add to xtime (or xtime corrected
         * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
@@ -64,14 +64,17 @@ struct timekeeper {
         * - wall_to_monotonic is no longer the boot time, getboottime must be
         * used instead.
         */
-       struct timespec wall_to_monotonic;
+       struct timespec         wall_to_monotonic;
        /* time spent in suspend */
-       struct timespec total_sleep_time;
+       struct timespec         total_sleep_time;
        /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
-       struct timespec raw_time;
-
+       struct timespec         raw_time;
+       /* Offset clock monotonic -> clock realtime */
+       ktime_t                 offs_real;
+       /* Offset clock monotonic -> clock boottime */
+       ktime_t                 offs_boot;
        /* Seqlock for all timekeeper values */
-       seqlock_t lock;
+       seqlock_t               lock;
 };
 
 static struct timekeeper timekeeper;
@@ -82,11 +85,37 @@ static struct timekeeper timekeeper;
  */
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
 
-
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
+static inline void tk_normalize_xtime(struct timekeeper *tk)
+{
+       while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
+               tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
+               tk->xtime_sec++;
+       }
+}
+
+static struct timespec tk_xtime(struct timekeeper *tk)
+{
+       struct timespec ts;
+
+       ts.tv_sec = tk->xtime_sec;
+       ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
+       return ts;
+}
+
+static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
+{
+       tk->xtime_sec = ts->tv_sec;
+       tk->xtime_nsec = ts->tv_nsec << tk->shift;
+}
 
+static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
+{
+       tk->xtime_sec += ts->tv_sec;
+       tk->xtime_nsec += ts->tv_nsec << tk->shift;
+}
 
 /**
  * timekeeper_setup_internals - Set up internals to use clocksource clock.
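
To make the shifted-nanosecond bookkeeping introduced above concrete, a small worked fragment (made-up shift value; tk stands for a struct timekeeper *):

    /* Suppose tk->shift == 8, i.e. xtime_nsec carries nanoseconds << 8. */
    struct timespec ts = { .tv_sec = 100, .tv_nsec = 500000 };

    tk_set_xtime(tk, &ts);      /* xtime_sec = 100, xtime_nsec = 500000 << 8 = 128000000 */

    tk->xtime_nsec += (u64)NSEC_PER_SEC << 8;   /* accumulate one whole second, shifted  */
    tk_normalize_xtime(tk);     /* xtime_sec = 101, xtime_nsec back to 128000000         */

    ts = tk_xtime(tk);          /* tv_sec = 101, tv_nsec = 128000000 >> 8 = 500000 again */
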
@@ -98,12 +127,14 @@ int __read_mostly timekeeping_suspended;
  *
  * Unless you're the timekeeping code, you should not be using this!
  */
-static void timekeeper_setup_internals(struct clocksource *clock)
+static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 {
        cycle_t interval;
        u64 tmp, ntpinterval;
+       struct clocksource *old_clock;
 
-       timekeeper.clock = clock;
+       old_clock = tk->clock;
+       tk->clock = clock;
        clock->cycle_last = clock->read(clock);
 
        /* Do the ns -> cycle conversion first, using original mult */
@@ -116,71 +147,96 @@ static void timekeeper_setup_internals(struct clocksource *clock)
                tmp = 1;
 
        interval = (cycle_t) tmp;
-       timekeeper.cycle_interval = interval;
+       tk->cycle_interval = interval;
 
        /* Go back from cycles -> shifted ns */
-       timekeeper.xtime_interval = (u64) interval * clock->mult;
-       timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
-       timekeeper.raw_interval =
+       tk->xtime_interval = (u64) interval * clock->mult;
+       tk->xtime_remainder = ntpinterval - tk->xtime_interval;
+       tk->raw_interval =
                ((u64) interval * clock->mult) >> clock->shift;
 
-       timekeeper.xtime_nsec = 0;
-       timekeeper.shift = clock->shift;
+        /* if changing clocks, convert xtime_nsec shift units */
+       if (old_clock) {
+               int shift_change = clock->shift - old_clock->shift;
+               if (shift_change < 0)
+                       tk->xtime_nsec >>= -shift_change;
+               else
+                       tk->xtime_nsec <<= shift_change;
+       }
+       tk->shift = clock->shift;
 
-       timekeeper.ntp_error = 0;
-       timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
+       tk->ntp_error = 0;
+       tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
 
        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These value will be adjusted via NTP
         * to counteract clock drifting.
         */
-       timekeeper.mult = clock->mult;
+       tk->mult = clock->mult;
 }
 
 /* Timekeeper helper functions. */
-static inline s64 timekeeping_get_ns(void)
+static inline s64 timekeeping_get_ns(struct timekeeper *tk)
 {
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
+       s64 nsec;
 
        /* read clocksource: */
-       clock = timekeeper.clock;
+       clock = tk->clock;
        cycle_now = clock->read(clock);
 
        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
-       /* return delta convert to nanoseconds using ntp adjusted mult. */
-       return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
-                                 timekeeper.shift);
+       nsec = cycle_delta * tk->mult + tk->xtime_nsec;
+       nsec >>= tk->shift;
+
+       /* If arch requires, add in gettimeoffset() */
+       return nsec + arch_gettimeoffset();
 }
 
-static inline s64 timekeeping_get_ns_raw(void)
+static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
+       s64 nsec;
 
        /* read clocksource: */
-       clock = timekeeper.clock;
+       clock = tk->clock;
        cycle_now = clock->read(clock);
 
        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
-       /* return delta convert to nanoseconds. */
-       return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
+       /* convert delta to nanoseconds. */
+       nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
+
+       /* If arch requires, add in gettimeoffset() */
+       return nsec + arch_gettimeoffset();
+}
+
+static void update_rt_offset(struct timekeeper *tk)
+{
+       struct timespec tmp, *wtm = &tk->wall_to_monotonic;
+
+       set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
+       tk->offs_real = timespec_to_ktime(tmp);
 }
 
 /* must hold write on timekeeper.lock */
-static void timekeeping_update(bool clearntp)
+static void timekeeping_update(struct timekeeper *tk, bool clearntp)
 {
+       struct timespec xt;
+
        if (clearntp) {
-               timekeeper.ntp_error = 0;
+               tk->ntp_error = 0;
                ntp_clear();
        }
-       update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
-                        timekeeper.clock, timekeeper.mult);
+       update_rt_offset(tk);
+       xt = tk_xtime(tk);
+       update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
 }
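
The reworked timekeeping_get_ns() folds the stored sub-second remainder into the read path; conceptually (a sketch of the arithmetic, not a drop-in replacement):

    /* shifted nanoseconds since the last accumulation, plus the stored remainder */
    u64 shifted = (u64)cycle_delta * tk->mult + tk->xtime_nsec;
    s64 nsec    = shifted >> tk->shift;     /* whole nanoseconds handed to the caller */

    /* old path: clocksource_cyc2ns(cycle_delta, mult, shift) + xtime.tv_nsec,
     * which had already rounded the remainder down to whole nanoseconds */
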
 
 
@@ -191,27 +247,26 @@ static void timekeeping_update(bool clearntp)
  * update_wall_time(). This is useful before significant clock changes,
  * as it avoids having to deal with this time offset explicitly.
  */
-static void timekeeping_forward_now(void)
+static void timekeeping_forward_now(struct timekeeper *tk)
 {
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;
 
-       clock = timekeeper.clock;
+       clock = tk->clock;
        cycle_now = clock->read(clock);
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        clock->cycle_last = cycle_now;
 
-       nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
-                                 timekeeper.shift);
+       tk->xtime_nsec += cycle_delta * tk->mult;
 
        /* If arch requires, add in gettimeoffset() */
-       nsec += arch_gettimeoffset();
+       tk->xtime_nsec += arch_gettimeoffset() << tk->shift;
 
-       timespec_add_ns(&timekeeper.xtime, nsec);
+       tk_normalize_xtime(tk);
 
        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
-       timespec_add_ns(&timekeeper.raw_time, nsec);
+       timespec_add_ns(&tk->raw_time, nsec);
 }
 
 /**
@@ -223,18 +278,15 @@ static void timekeeping_forward_now(void)
 void getnstimeofday(struct timespec *ts)
 {
        unsigned long seq;
-       s64 nsecs;
+       s64 nsecs = 0;
 
        WARN_ON(timekeeping_suspended);
 
        do {
                seq = read_seqbegin(&timekeeper.lock);
 
-               *ts = timekeeper.xtime;
-               nsecs = timekeeping_get_ns();
-
-               /* If arch requires, add in gettimeoffset() */
-               nsecs += arch_gettimeoffset();
+               ts->tv_sec = timekeeper.xtime_sec;
+               ts->tv_nsec = timekeeping_get_ns(&timekeeper);
 
        } while (read_seqretry(&timekeeper.lock, seq));
 
@@ -251,13 +303,10 @@ ktime_t ktime_get(void)
 
        do {
                seq = read_seqbegin(&timekeeper.lock);
-               secs = timekeeper.xtime.tv_sec +
+               secs = timekeeper.xtime_sec +
                                timekeeper.wall_to_monotonic.tv_sec;
-               nsecs = timekeeper.xtime.tv_nsec +
+               nsecs = timekeeping_get_ns(&timekeeper) +
                                timekeeper.wall_to_monotonic.tv_nsec;
-               nsecs += timekeeping_get_ns();
-               /* If arch requires, add in gettimeoffset() */
-               nsecs += arch_gettimeoffset();
 
        } while (read_seqretry(&timekeeper.lock, seq));
        /*
@@ -280,22 +329,19 @@ void ktime_get_ts(struct timespec *ts)
 {
        struct timespec tomono;
        unsigned int seq;
-       s64 nsecs;
 
        WARN_ON(timekeeping_suspended);
 
        do {
                seq = read_seqbegin(&timekeeper.lock);
-               *ts = timekeeper.xtime;
+               ts->tv_sec = timekeeper.xtime_sec;
+               ts->tv_nsec = timekeeping_get_ns(&timekeeper);
                tomono = timekeeper.wall_to_monotonic;
-               nsecs = timekeeping_get_ns();
-               /* If arch requires, add in gettimeoffset() */
-               nsecs += arch_gettimeoffset();
 
        } while (read_seqretry(&timekeeper.lock, seq));
 
        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
-                               ts->tv_nsec + tomono.tv_nsec + nsecs);
+                               ts->tv_nsec + tomono.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
 
@@ -318,20 +364,14 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
        WARN_ON_ONCE(timekeeping_suspended);
 
        do {
-               u32 arch_offset;
-
                seq = read_seqbegin(&timekeeper.lock);
 
                *ts_raw = timekeeper.raw_time;
-               *ts_real = timekeeper.xtime;
-
-               nsecs_raw = timekeeping_get_ns_raw();
-               nsecs_real = timekeeping_get_ns();
+               ts_real->tv_sec = timekeeper.xtime_sec;
+               ts_real->tv_nsec = 0;
 
-               /* If arch requires, add in gettimeoffset() */
-               arch_offset = arch_gettimeoffset();
-               nsecs_raw += arch_offset;
-               nsecs_real += arch_offset;
+               nsecs_raw = timekeeping_get_ns_raw(&timekeeper);
+               nsecs_real = timekeeping_get_ns(&timekeeper);
 
        } while (read_seqretry(&timekeeper.lock, seq));
 
@@ -366,7 +406,7 @@ EXPORT_SYMBOL(do_gettimeofday);
  */
 int do_settimeofday(const struct timespec *tv)
 {
-       struct timespec ts_delta;
+       struct timespec ts_delta, xt;
        unsigned long flags;
 
        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
@@ -374,15 +414,18 @@ int do_settimeofday(const struct timespec *tv)
 
        write_seqlock_irqsave(&timekeeper.lock, flags);
 
-       timekeeping_forward_now();
+       timekeeping_forward_now(&timekeeper);
+
+       xt = tk_xtime(&timekeeper);
+       ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
+       ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
 
-       ts_delta.tv_sec = tv->tv_sec - timekeeper.xtime.tv_sec;
-       ts_delta.tv_nsec = tv->tv_nsec - timekeeper.xtime.tv_nsec;
        timekeeper.wall_to_monotonic =
                        timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
 
-       timekeeper.xtime = *tv;
-       timekeeping_update(true);
+       tk_set_xtime(&timekeeper, tv);
+
+       timekeeping_update(&timekeeper, true);
 
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
@@ -409,13 +452,14 @@ int timekeeping_inject_offset(struct timespec *ts)
 
        write_seqlock_irqsave(&timekeeper.lock, flags);
 
-       timekeeping_forward_now();
+       timekeeping_forward_now(&timekeeper);
 
-       timekeeper.xtime = timespec_add(timekeeper.xtime, *ts);
+
+       tk_xtime_add(&timekeeper, ts);
        timekeeper.wall_to_monotonic =
                                timespec_sub(timekeeper.wall_to_monotonic, *ts);
 
-       timekeeping_update(true);
+       timekeeping_update(&timekeeper, true);
 
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
@@ -440,14 +484,14 @@ static int change_clocksource(void *data)
 
        write_seqlock_irqsave(&timekeeper.lock, flags);
 
-       timekeeping_forward_now();
+       timekeeping_forward_now(&timekeeper);
        if (!new->enable || new->enable(new) == 0) {
                old = timekeeper.clock;
-               timekeeper_setup_internals(new);
+               tk_setup_internals(&timekeeper, new);
                if (old->disable)
                        old->disable(old);
        }
-       timekeeping_update(true);
+       timekeeping_update(&timekeeper, true);
 
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
@@ -497,7 +541,7 @@ void getrawmonotonic(struct timespec *ts)
 
        do {
                seq = read_seqbegin(&timekeeper.lock);
-               nsecs = timekeeping_get_ns_raw();
+               nsecs = timekeeping_get_ns_raw(&timekeeper);
                *ts = timekeeper.raw_time;
 
        } while (read_seqretry(&timekeeper.lock, seq));
@@ -532,6 +576,7 @@ u64 timekeeping_max_deferment(void)
 {
        unsigned long seq;
        u64 ret;
+
        do {
                seq = read_seqbegin(&timekeeper.lock);
 
@@ -592,18 +637,17 @@ void __init timekeeping_init(void)
        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
-       timekeeper_setup_internals(clock);
+       tk_setup_internals(&timekeeper, clock);
 
-       timekeeper.xtime.tv_sec = now.tv_sec;
-       timekeeper.xtime.tv_nsec = now.tv_nsec;
+       tk_set_xtime(&timekeeper, &now);
        timekeeper.raw_time.tv_sec = 0;
        timekeeper.raw_time.tv_nsec = 0;
-       if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
-               boot.tv_sec = timekeeper.xtime.tv_sec;
-               boot.tv_nsec = timekeeper.xtime.tv_nsec;
-       }
+       if (boot.tv_sec == 0 && boot.tv_nsec == 0)
+               boot = tk_xtime(&timekeeper);
+
        set_normalized_timespec(&timekeeper.wall_to_monotonic,
                                -boot.tv_sec, -boot.tv_nsec);
+       update_rt_offset(&timekeeper);
        timekeeper.total_sleep_time.tv_sec = 0;
        timekeeper.total_sleep_time.tv_nsec = 0;
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -612,6 +656,12 @@ void __init timekeeping_init(void)
 /* time in seconds when suspend began */
 static struct timespec timekeeping_suspend_time;
 
+static void update_sleep_time(struct timespec t)
+{
+       timekeeper.total_sleep_time = t;
+       timekeeper.offs_boot = timespec_to_ktime(t);
+}
+
 /**
  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
  * @delta: pointer to a timespec delta value
@@ -619,7 +669,8 @@ static struct timespec timekeeping_suspend_time;
  * Takes a timespec offset measuring a suspend interval and properly
  * adds the sleep offset to the timekeeping variables.
  */
-static void __timekeeping_inject_sleeptime(struct timespec *delta)
+static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
+                                                       struct timespec *delta)
 {
        if (!timespec_valid(delta)) {
                printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
@@ -627,11 +678,9 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
                return;
        }
 
-       timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
-       timekeeper.wall_to_monotonic =
-                       timespec_sub(timekeeper.wall_to_monotonic, *delta);
-       timekeeper.total_sleep_time = timespec_add(
-                                       timekeeper.total_sleep_time, *delta);
+       tk_xtime_add(tk, delta);
+       tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta);
+       update_sleep_time(timespec_add(tk->total_sleep_time, *delta));
 }
 
 
@@ -657,11 +706,11 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 
        write_seqlock_irqsave(&timekeeper.lock, flags);
 
-       timekeeping_forward_now();
+       timekeeping_forward_now(&timekeeper);
 
-       __timekeeping_inject_sleeptime(delta);
+       __timekeeping_inject_sleeptime(&timekeeper, delta);
 
-       timekeeping_update(true);
+       timekeeping_update(&timekeeper, true);
 
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
@@ -690,12 +739,13 @@ static void timekeeping_resume(void)
 
        if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
                ts = timespec_sub(ts, timekeeping_suspend_time);
-               __timekeeping_inject_sleeptime(&ts);
+               __timekeeping_inject_sleeptime(&timekeeper, &ts);
        }
        /* re-base the last cycle value */
        timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
        timekeeper.ntp_error = 0;
        timekeeping_suspended = 0;
+       timekeeping_update(&timekeeper, false);
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
        touch_softlockup_watchdog();
@@ -715,7 +765,7 @@ static int timekeeping_suspend(void)
        read_persistent_clock(&timekeeping_suspend_time);
 
        write_seqlock_irqsave(&timekeeper.lock, flags);
-       timekeeping_forward_now();
+       timekeeping_forward_now(&timekeeper);
        timekeeping_suspended = 1;
 
        /*
@@ -724,7 +774,7 @@ static int timekeeping_suspend(void)
         * try to compensate so the difference in system time
         * and persistent_clock time stays close to constant.
         */
-       delta = timespec_sub(timekeeper.xtime, timekeeping_suspend_time);
+       delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
        delta_delta = timespec_sub(delta, old_delta);
        if (abs(delta_delta.tv_sec)  >= 2) {
                /*
@@ -763,7 +813,8 @@ device_initcall(timekeeping_init_ops);
  * If the error is already larger, we look ahead even further
  * to compensate for late or lost adjustments.
  */
-static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
+static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
+                                                s64 error, s64 *interval,
                                                 s64 *offset)
 {
        s64 tick_error, i;
@@ -779,7 +830,7 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
         * here.  This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
-       error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
+       error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;
@@ -788,8 +839,8 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
-       tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
-       tick_error -= timekeeper.xtime_interval >> 1;
+       tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
+       tick_error -= tk->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;
 
        /* Finally calculate the adjustment shift value.  */
@@ -814,9 +865,9 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
  * this is optimized for the most common adjustments of -1,0,1,
  * for other values we can do a bit more work.
  */
-static void timekeeping_adjust(s64 offset)
+static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 {
-       s64 error, interval = timekeeper.cycle_interval;
+       s64 error, interval = tk->cycle_interval;
        int adj;
 
        /*
@@ -832,7 +883,7 @@ static void timekeeping_adjust(s64 offset)
         *
         * Note: It does not "save" on aggravation when reading the code.
         */
-       error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
+       error = tk->ntp_error >> (tk->ntp_error_shift - 1);
        if (error > interval) {
                /*
                 * We now divide error by 4(via shift), which checks if
@@ -854,7 +905,8 @@ static void timekeeping_adjust(s64 offset)
                if (likely(error <= interval))
                        adj = 1;
                else
-                       adj = timekeeping_bigadjust(error, &interval, &offset);
+                       adj = timekeeping_bigadjust(tk, error, &interval,
+                                                       &offset);
        } else if (error < -interval) {
                /* See comment above, this is just switched for the negative */
                error >>= 2;
@@ -863,18 +915,17 @@ static void timekeeping_adjust(s64 offset)
                        interval = -interval;
                        offset = -offset;
                } else
-                       adj = timekeeping_bigadjust(error, &interval, &offset);
-       } else /* No adjustment needed */
+                       adj = timekeeping_bigadjust(tk, error, &interval,
+                                                       &offset);
+       } else
                return;
 
-       if (unlikely(timekeeper.clock->maxadj &&
-                       (timekeeper.mult + adj >
-                       timekeeper.clock->mult + timekeeper.clock->maxadj))) {
+       if (unlikely(tk->clock->maxadj &&
+               (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
                printk_once(KERN_WARNING
                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
-                       timekeeper.clock->name, (long)timekeeper.mult + adj,
-                       (long)timekeeper.clock->mult +
-                               timekeeper.clock->maxadj);
+                       tk->clock->name, (long)tk->mult + adj,
+                       (long)tk->clock->mult + tk->clock->maxadj);
        }
        /*
         * So the following can be confusing.
@@ -925,11 +976,60 @@ static void timekeeping_adjust(s64 offset)
         *
         * XXX - TODO: Doc ntp_error calculation.
         */
-       timekeeper.mult += adj;
-       timekeeper.xtime_interval += interval;
-       timekeeper.xtime_nsec -= offset;
-       timekeeper.ntp_error -= (interval - offset) <<
-                               timekeeper.ntp_error_shift;
+       tk->mult += adj;
+       tk->xtime_interval += interval;
+       tk->xtime_nsec -= offset;
+       tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
+
+       /*
+        * It may be possible that when we entered this function, xtime_nsec
+        * was very small.  Further, if we're slightly speeding the clocksource
+        * in the code above, its possible the required corrective factor to
+        * xtime_nsec could cause it to underflow.
+        *
+        * Now, since we already accumulated the second, cannot simply roll
+        * the accumulated second back, since the NTP subsystem has been
+        * notified via second_overflow. So instead we push xtime_nsec forward
+        * by the amount we underflowed, and add that amount into the error.
+        *
+        * We'll correct this error next time through this function, when
+        * xtime_nsec is not as small.
+        */
+       if (unlikely((s64)tk->xtime_nsec < 0)) {
+               s64 neg = -(s64)tk->xtime_nsec;
+               tk->xtime_nsec = 0;
+               tk->ntp_error += neg << tk->ntp_error_shift;
+       }
+
+}
+
+
+/**
+ * accumulate_nsecs_to_secs - Accumulates nsecs into secs
+ *
+ * Helper function that accumulates the nsecs greater than a second
+ * from the xtime_nsec field to the xtime_sec field.
+ * It also calls into the NTP code to handle leapsecond processing.
+ *
+ */
+static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
+{
+       u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+
+       while (tk->xtime_nsec >= nsecps) {
+               int leap;
+
+               tk->xtime_nsec -= nsecps;
+               tk->xtime_sec++;
+
+               /* Figure out if its a leap sec and apply if needed */
+               leap = second_overflow(tk->xtime_sec);
+               tk->xtime_sec += leap;
+               tk->wall_to_monotonic.tv_sec -= leap;
+               if (leap)
+                       clock_was_set_delayed();
+
+       }
 }
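
With a shift of 8 the helper's threshold is NSEC_PER_SEC << 8; a quick trace with illustrative values shows the leap handling:

    /* nsecps = 1000000000 << 8 = 256000000000 shifted ns per second              */
    /* start:  xtime_sec = 1341100799 (2012-06-30 23:59:59 UTC),
     *         xtime_nsec = 300000000000  ->  one loop iteration runs             */
    /* body:   xtime_nsec = 44000000000, xtime_sec = 1341100800                   */
    /* second_overflow(1341100800) reports the 2012-06-30 leap second: leap = -1, */
    /* so xtime_sec steps back to 1341100799, wall_to_monotonic.tv_sec += 1,
     * and clock_was_set_delayed() kicks the hrtimer bases                        */
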
 
 
@@ -942,44 +1042,36 @@ static void timekeeping_adjust(s64 offset)
  *
  * Returns the unconsumed cycles.
  */
-static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
+                                               u32 shift)
 {
-       u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
        u64 raw_nsecs;
 
-       /* If the offset is smaller than a shifted interval, do nothing */
-       if (offset < timekeeper.cycle_interval<<shift)
+       /* If the offset is smaller then a shifted interval, do nothing */
+       if (offset < tk->cycle_interval<<shift)
                return offset;
 
        /* Accumulate one shifted interval */
-       offset -= timekeeper.cycle_interval << shift;
-       timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
+       offset -= tk->cycle_interval << shift;
+       tk->clock->cycle_last += tk->cycle_interval << shift;
 
-       timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
-       while (timekeeper.xtime_nsec >= nsecps) {
-               int leap;
-               timekeeper.xtime_nsec -= nsecps;
-               timekeeper.xtime.tv_sec++;
-               leap = second_overflow(timekeeper.xtime.tv_sec);
-               timekeeper.xtime.tv_sec += leap;
-               timekeeper.wall_to_monotonic.tv_sec -= leap;
-       }
+       tk->xtime_nsec += tk->xtime_interval << shift;
+       accumulate_nsecs_to_secs(tk);
 
        /* Accumulate raw time */
-       raw_nsecs = timekeeper.raw_interval << shift;
-       raw_nsecs += timekeeper.raw_time.tv_nsec;
+       raw_nsecs = tk->raw_interval << shift;
+       raw_nsecs += tk->raw_time.tv_nsec;
        if (raw_nsecs >= NSEC_PER_SEC) {
                u64 raw_secs = raw_nsecs;
                raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-               timekeeper.raw_time.tv_sec += raw_secs;
+               tk->raw_time.tv_sec += raw_secs;
        }
-       timekeeper.raw_time.tv_nsec = raw_nsecs;
+       tk->raw_time.tv_nsec = raw_nsecs;
 
        /* Accumulate error between NTP and clock interval */
-       timekeeper.ntp_error += ntp_tick_length() << shift;
-       timekeeper.ntp_error -=
-           (timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
-                               (timekeeper.ntp_error_shift + shift);
+       tk->ntp_error += ntp_tick_length() << shift;
+       tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
+                                               (tk->ntp_error_shift + shift);
 
        return offset;
 }
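
The shift parameter is what keeps update_wall_time() cheap after a long NOHZ sleep; roughly, with made-up numbers:

    /* Suppose 37 cycle_intervals are pending; ilog2(37) = 5, so the loop starts
     * around shift = 5 (bounded by maxshift).  Successive passes then consume
     * 32, 4 and 1 intervals (with a few no-op passes while shift steps down),
     * i.e. O(log n) iterations instead of 37 single-interval ones. */
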
@@ -995,6 +1087,7 @@ static void update_wall_time(void)
        cycle_t offset;
        int shift = 0, maxshift;
        unsigned long flags;
+       s64 remainder;
 
        write_seqlock_irqsave(&timekeeper.lock, flags);
 
@@ -1009,8 +1102,6 @@ static void update_wall_time(void)
 #else
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
-       timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec <<
-                                               timekeeper.shift;
 
        /*
         * With NO_HZ we may have to accumulate many cycle_intervals
@@ -1026,62 +1117,36 @@ static void update_wall_time(void)
        maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
        shift = min(shift, maxshift);
        while (offset >= timekeeper.cycle_interval) {
-               offset = logarithmic_accumulation(offset, shift);
+               offset = logarithmic_accumulation(&timekeeper, offset, shift);
                if(offset < timekeeper.cycle_interval<<shift)
                        shift--;
        }
 
        /* correct the clock when NTP error is too big */
-       timekeeping_adjust(offset);
-
-       /*
-        * Since in the loop above, we accumulate any amount of time
-        * in xtime_nsec over a second into xtime.tv_sec, its possible for
-        * xtime_nsec to be fairly small after the loop. Further, if we're
-        * slightly speeding the clocksource up in timekeeping_adjust(),
-        * its possible the required corrective factor to xtime_nsec could
-        * cause it to underflow.
-        *
-        * Now, we cannot simply roll the accumulated second back, since
-        * the NTP subsystem has been notified via second_overflow. So
-        * instead we push xtime_nsec forward by the amount we underflowed,
-        * and add that amount into the error.
-        *
-        * We'll correct this error next time through this function, when
-        * xtime_nsec is not as small.
-        */
-       if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
-               s64 neg = -(s64)timekeeper.xtime_nsec;
-               timekeeper.xtime_nsec = 0;
-               timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
-       }
+       timekeeping_adjust(&timekeeper, offset);
 
 
        /*
-        * Store full nanoseconds into xtime after rounding it up and
-        * add the remainder to the error difference.
-        */
-       timekeeper.xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >>
-                                               timekeeper.shift) + 1;
-       timekeeper.xtime_nsec -= (s64)timekeeper.xtime.tv_nsec <<
-                                               timekeeper.shift;
-       timekeeper.ntp_error += timekeeper.xtime_nsec <<
-                               timekeeper.ntp_error_shift;
+       * Store only full nanoseconds into xtime_nsec after rounding
+       * it up and add the remainder to the error difference.
+       * XXX - This is necessary to avoid small 1ns inconsistencies caused
+       * by truncating the remainder in vsyscalls. However, it causes
+       * additional work to be done in timekeeping_adjust(). Once
+       * the vsyscall implementations are converted to use xtime_nsec
+       * (shifted nanoseconds), this can be killed.
+       */
+       remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1);
+       timekeeper.xtime_nsec -= remainder;
+       timekeeper.xtime_nsec += 1 << timekeeper.shift;
+       timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift;
 
        /*
         * Finally, make sure that after the rounding
-        * xtime.tv_nsec isn't larger than NSEC_PER_SEC
+        * xtime_nsec isn't larger than NSEC_PER_SEC
         */
-       if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
-               int leap;
-               timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
-               timekeeper.xtime.tv_sec++;
-               leap = second_overflow(timekeeper.xtime.tv_sec);
-               timekeeper.xtime.tv_sec += leap;
-               timekeeper.wall_to_monotonic.tv_sec -= leap;
-       }
+       accumulate_nsecs_to_secs(&timekeeper);
 
-       timekeeping_update(false);
+       timekeeping_update(&timekeeper, false);
 
 out:
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -1126,21 +1191,20 @@ void get_monotonic_boottime(struct timespec *ts)
 {
        struct timespec tomono, sleep;
        unsigned int seq;
-       s64 nsecs;
 
        WARN_ON(timekeeping_suspended);
 
        do {
                seq = read_seqbegin(&timekeeper.lock);
-               *ts = timekeeper.xtime;
+               ts->tv_sec = timekeeper.xtime_sec;
+               ts->tv_nsec = timekeeping_get_ns(&timekeeper);
                tomono = timekeeper.wall_to_monotonic;
                sleep = timekeeper.total_sleep_time;
-               nsecs = timekeeping_get_ns();
 
        } while (read_seqretry(&timekeeper.lock, seq));
 
        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
-                       ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
+                       ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(get_monotonic_boottime);
 
@@ -1173,13 +1237,13 @@ EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-       return timekeeper.xtime.tv_sec;
+       return timekeeper.xtime_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-       return timekeeper.xtime;
+       return tk_xtime(&timekeeper);
 }
 
 struct timespec current_kernel_time(void)
@@ -1190,7 +1254,7 @@ struct timespec current_kernel_time(void)
        do {
                seq = read_seqbegin(&timekeeper.lock);
 
-               now = timekeeper.xtime;
+               now = tk_xtime(&timekeeper);
        } while (read_seqretry(&timekeeper.lock, seq));
 
        return now;
@@ -1205,7 +1269,7 @@ struct timespec get_monotonic_coarse(void)
        do {
                seq = read_seqbegin(&timekeeper.lock);
 
-               now = timekeeper.xtime;
+               now = tk_xtime(&timekeeper);
                mono = timekeeper.wall_to_monotonic;
        } while (read_seqretry(&timekeeper.lock, seq));
 
@@ -1240,12 +1304,43 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
 
        do {
                seq = read_seqbegin(&timekeeper.lock);
-               *xtim = timekeeper.xtime;
+               *xtim = tk_xtime(&timekeeper);
                *wtom = timekeeper.wall_to_monotonic;
                *sleep = timekeeper.total_sleep_time;
        } while (read_seqretry(&timekeeper.lock, seq));
 }
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+/**
+ * ktime_get_update_offsets - hrtimer helper
+ * @offs_real: pointer to storage for monotonic -> realtime offset
+ * @offs_boot: pointer to storage for monotonic -> boottime offset
+ *
+ * Returns current monotonic time and updates the offsets
+ * Called from hrtimer_interupt() or retrigger_next_event()
+ */
+ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
+{
+       ktime_t now;
+       unsigned int seq;
+       u64 secs, nsecs;
+
+       do {
+               seq = read_seqbegin(&timekeeper.lock);
+
+               secs = timekeeper.xtime_sec;
+               nsecs = timekeeping_get_ns(&timekeeper);
+
+               *offs_real = timekeeper.offs_real;
+               *offs_boot = timekeeper.offs_boot;
+       } while (read_seqretry(&timekeeper.lock, seq));
+
+       now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+       now = ktime_sub(now, *offs_real);
+       return now;
+}
+#endif
+
 /**
  * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
  */
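
ktime_get_update_offsets() exists so that the hrtimer interrupt can fetch a consistent (now, offs_real, offs_boot) triple under a single seqlock read; roughly how a caller uses it (a sketch modeled on hrtimer_interrupt(), not copied from it):

    ktime_t offs_real, offs_boot;
    ktime_t now = ktime_get_update_offsets(&offs_real, &offs_boot);

    /* per-clock base times derived without re-reading the timekeeper */
    ktime_t realtime_now = ktime_add(now, offs_real);   /* CLOCK_REALTIME */
    ktime_t boottime_now = ktime_add(now, offs_boot);   /* CLOCK_BOOTTIME */
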
index 3258455..af5a7e9 100644 (file)
@@ -167,7 +167,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
        {
                struct tick_sched *ts = tick_get_tick_sched(cpu);
                P(nohz_mode);
-               P_ns(idle_tick);
+               P_ns(last_tick);
                P(tick_stopped);
                P(idle_jiffies);
                P(idle_calls);
@@ -259,7 +259,7 @@ static int timer_list_show(struct seq_file *m, void *v)
        u64 now = ktime_to_ns(ktime_get());
        int cpu;
 
-       SEQ_printf(m, "Timer List Version: v0.6\n");
+       SEQ_printf(m, "Timer List Version: v0.7\n");
        SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
        SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
 
index 6ec7e7e..a61c093 100644 (file)
@@ -77,6 +77,7 @@ struct tvec_base {
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        unsigned long next_timer;
+       unsigned long active_timers;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
@@ -330,7 +331,8 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 }
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static void
+__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
@@ -372,6 +374,19 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
        list_add_tail(&timer->entry, vec);
 }
 
+static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+{
+       __internal_add_timer(base, timer);
+       /*
+        * Update base->active_timers and base->next_timer
+        */
+       if (!tbase_get_deferrable(timer->base)) {
+               if (time_before(timer->expires, base->next_timer))
+                       base->next_timer = timer->expires;
+               base->active_timers++;
+       }
+}
+
 #ifdef CONFIG_TIMER_STATS
 void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
 {
@@ -654,8 +669,7 @@ void init_timer_deferrable_key(struct timer_list *timer,
 }
 EXPORT_SYMBOL(init_timer_deferrable_key);
 
-static inline void detach_timer(struct timer_list *timer,
-                               int clear_pending)
+static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 {
        struct list_head *entry = &timer->entry;
 
@@ -667,6 +681,29 @@ static inline void detach_timer(struct timer_list *timer,
        entry->prev = LIST_POISON2;
 }
 
+static inline void
+detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
+{
+       detach_timer(timer, true);
+       if (!tbase_get_deferrable(timer->base))
+               timer->base->active_timers--;
+}
+
+static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+                            bool clear_pending)
+{
+       if (!timer_pending(timer))
+               return 0;
+
+       detach_timer(timer, clear_pending);
+       if (!tbase_get_deferrable(timer->base)) {
+               timer->base->active_timers--;
+               if (timer->expires == base->next_timer)
+                       base->next_timer = base->timer_jiffies;
+       }
+       return 1;
+}
+
 /*
  * We are using hashed locking: holding per_cpu(tvec_bases).lock
  * means that all timers which are tied to this base via timer->base are
@@ -712,16 +749,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
        base = lock_timer_base(timer, &flags);
 
-       if (timer_pending(timer)) {
-               detach_timer(timer, 0);
-               if (timer->expires == base->next_timer &&
-                   !tbase_get_deferrable(timer->base))
-                       base->next_timer = base->timer_jiffies;
-               ret = 1;
-       } else {
-               if (pending_only)
-                       goto out_unlock;
-       }
+       ret = detach_if_pending(timer, base, false);
+       if (!ret && pending_only)
+               goto out_unlock;
 
        debug_activate(timer, expires);
 
@@ -752,9 +782,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
        }
 
        timer->expires = expires;
-       if (time_before(timer->expires, base->next_timer) &&
-           !tbase_get_deferrable(timer->base))
-               base->next_timer = timer->expires;
        internal_add_timer(base, timer);
 
 out_unlock:
@@ -920,9 +947,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        debug_activate(timer, timer->expires);
-       if (time_before(timer->expires, base->next_timer) &&
-           !tbase_get_deferrable(timer->base))
-               base->next_timer = timer->expires;
        internal_add_timer(base, timer);
        /*
         * Check whether the other CPU is idle and needs to be
@@ -959,13 +983,7 @@ int del_timer(struct timer_list *timer)
        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
-               if (timer_pending(timer)) {
-                       detach_timer(timer, 1);
-                       if (timer->expires == base->next_timer &&
-                           !tbase_get_deferrable(timer->base))
-                               base->next_timer = base->timer_jiffies;
-                       ret = 1;
-               }
+               ret = detach_if_pending(timer, base, true);
                spin_unlock_irqrestore(&base->lock, flags);
        }
 
@@ -990,19 +1008,10 @@ int try_to_del_timer_sync(struct timer_list *timer)
 
        base = lock_timer_base(timer, &flags);
 
-       if (base->running_timer == timer)
-               goto out;
-
-       timer_stats_timer_clear_start_info(timer);
-       ret = 0;
-       if (timer_pending(timer)) {
-               detach_timer(timer, 1);
-               if (timer->expires == base->next_timer &&
-                   !tbase_get_deferrable(timer->base))
-                       base->next_timer = base->timer_jiffies;
-               ret = 1;
+       if (base->running_timer != timer) {
+               timer_stats_timer_clear_start_info(timer);
+               ret = detach_if_pending(timer, base, true);
        }
-out:
        spin_unlock_irqrestore(&base->lock, flags);
 
        return ret;
@@ -1089,7 +1098,8 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index)
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
-               internal_add_timer(base, timer);
+               /* No accounting, while moving them */
+               __internal_add_timer(base, timer);
        }
 
        return index;
@@ -1178,7 +1188,7 @@ static inline void __run_timers(struct tvec_base *base)
                        timer_stats_account_timer(timer);
 
                        base->running_timer = timer;
-                       detach_timer(timer, 1);
+                       detach_expired_timer(timer, base);
 
                        spin_unlock_irq(&base->lock);
                        call_timer_fn(timer, fn, data);
@@ -1316,18 +1326,21 @@ static unsigned long cmp_next_hrtimer_event(unsigned long now,
 unsigned long get_next_timer_interrupt(unsigned long now)
 {
        struct tvec_base *base = __this_cpu_read(tvec_bases);
-       unsigned long expires;
+       unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
 
        /*
         * Pretend that there is no timer pending if the cpu is offline.
         * Possible pending timers will be migrated later to an active cpu.
         */
        if (cpu_is_offline(smp_processor_id()))
-               return now + NEXT_TIMER_MAX_DELTA;
+               return expires;
+
        spin_lock(&base->lock);
-       if (time_before_eq(base->next_timer, base->timer_jiffies))
-               base->next_timer = __next_timer_interrupt(base);
-       expires = base->next_timer;
+       if (base->active_timers) {
+               if (time_before_eq(base->next_timer, base->timer_jiffies))
+                       base->next_timer = __next_timer_interrupt(base);
+               expires = base->next_timer;
+       }
        spin_unlock(&base->lock);
 
        if (time_before_eq(expires, now))
@@ -1704,6 +1717,7 @@ static int __cpuinit init_timers_cpu(int cpu)
 
        base->timer_jiffies = jiffies;
        base->next_timer = base->timer_jiffies;
+       base->active_timers = 0;
        return 0;
 }
 
@@ -1714,11 +1728,9 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea
 
        while (!list_empty(head)) {
                timer = list_first_entry(head, struct timer_list, entry);
-               detach_timer(timer, 0);
+               /* We ignore the accounting on the dying cpu */
+               detach_timer(timer, false);
                timer_set_base(timer, new_base);
-               if (time_before(timer->expires, new_base->next_timer) &&
-                   !tbase_get_deferrable(timer->base))
-                       new_base->next_timer = timer->expires;
                internal_add_timer(new_base, timer);
        }
 }
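
The separate active_timers count means purely deferrable timers no longer pin get_next_timer_interrupt(); for example, a driver polling with a deferrable timer (hypothetical driver code, 3.5-era timer API):

    static struct timer_list poll_timer;

    static void poll_fn(unsigned long data)
    {
            /* periodic housekeeping ... */
            mod_timer(&poll_timer, jiffies + HZ);          /* re-arm one second out */
    }

    static void poll_start(void)
    {
            init_timer_deferrable(&poll_timer);
            poll_timer.function = poll_fn;
            poll_timer.data = 0;
            mod_timer(&poll_timer, jiffies + HZ);
    }
    /* With only this timer queued, active_timers stays 0, so an idle CPU's
     * get_next_timer_interrupt() now reports now + NEXT_TIMER_MAX_DELTA. */
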
index a008663..b4f20fb 100644 (file)
@@ -312,7 +312,7 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-       if (ftrace_disabled)
+       if (unlikely(ftrace_disabled))
                return -ENODEV;
 
        if (FTRACE_WARN_ON(ops == &global_ops))
@@ -4299,16 +4299,12 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
        mutex_lock(&ftrace_lock);
 
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
-
        ret = __register_ftrace_function(ops);
        if (!ret)
                ret = ftrace_startup(ops, 0);
 
-
- out_unlock:
        mutex_unlock(&ftrace_lock);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(register_ftrace_function);
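
On the register path, the ftrace_disabled check now lives only in __register_ftrace_function(), wrapped in unlikely() since a disabled ftrace is the rare error case. In the kernel that macro is a thin wrapper around gcc's branch-prediction builtin; a standalone sketch of the idiom, with a hypothetical flag standing in for ftrace_disabled:

#include <stdio.h>

/* the usual kernel-style definition of the hint */
#define unlikely(x)     __builtin_expect(!!(x), 0)

static int tracer_disabled;     /* hypothetical stand-in for ftrace_disabled */

static int register_example(void)
{
        if (unlikely(tracer_disabled))
                return -1;      /* rarely taken error path, kept off the hot path */
        return 0;
}

int main(void)
{
        printf("register_example() = %d\n", register_example());
        return 0;
}
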
index 1d0f6a8..49491fa 100644 (file)
@@ -1075,6 +1075,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
        rb_init_page(bpage->page);
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+       INIT_LIST_HEAD(&cpu_buffer->new_pages);
 
        ret = rb_allocate_pages(cpu_buffer, nr_pages);
        if (ret < 0)
@@ -1346,10 +1347,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
                         * If something was added to this page, it was full
                         * since it is not the tail page. So we deduct the
                         * bytes consumed in ring buffer from here.
-                        * No need to update overruns, since this page is
-                        * deleted from ring buffer and its entries are
-                        * already accounted for.
+                        * Increment overrun to account for the lost events.
                         */
+                       local_add(page_entries, &cpu_buffer->overrun);
                        local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
                }
 
@@ -3239,6 +3239,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        if (cpu_buffer->commit_page == cpu_buffer->reader_page)
                goto out;
 
+       /* Don't bother swapping if the ring buffer is empty */
+       if (rb_num_of_entries(cpu_buffer) == 0)
+               goto out;
+
        /*
         * Reset the reader page to size zero.
         */
index 49249c2..a120f98 100644 (file)
@@ -830,6 +830,8 @@ int register_tracer(struct tracer *type)
                current_trace = saved_tracer;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
+                       /* Add the warning after printing 'FAILED' */
+                       WARN_ON(1);
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
@@ -1708,9 +1710,11 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
+       struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
+
        iter->idx++;
-       if (iter->buffer_iter[iter->cpu])
-               ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+       if (buf_iter)
+               ring_buffer_read(buf_iter, NULL);
 }
 
 static struct trace_entry *
@@ -1718,7 +1722,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
                unsigned long *lost_events)
 {
        struct ring_buffer_event *event;
-       struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
+       struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
 
        if (buf_iter)
                event = ring_buffer_iter_peek(buf_iter, ts);
@@ -1856,10 +1860,10 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 
        tr->data[cpu]->skipped_entries = 0;
 
-       if (!iter->buffer_iter[cpu])
+       buf_iter = trace_buffer_iter(iter, cpu);
+       if (!buf_iter)
                return;
 
-       buf_iter = iter->buffer_iter[cpu];
        ring_buffer_iter_reset(buf_iter);
 
        /*
@@ -2205,13 +2209,15 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 
 int trace_empty(struct trace_iterator *iter)
 {
+       struct ring_buffer_iter *buf_iter;
        int cpu;
 
        /* If we are looking at one CPU buffer, only check that one */
        if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
                cpu = iter->cpu_file;
-               if (iter->buffer_iter[cpu]) {
-                       if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+               buf_iter = trace_buffer_iter(iter, cpu);
+               if (buf_iter) {
+                       if (!ring_buffer_iter_empty(buf_iter))
                                return 0;
                } else {
                        if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2221,8 +2227,9 @@ int trace_empty(struct trace_iterator *iter)
        }
 
        for_each_tracing_cpu(cpu) {
-               if (iter->buffer_iter[cpu]) {
-                       if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+               buf_iter = trace_buffer_iter(iter, cpu);
+               if (buf_iter) {
+                       if (!ring_buffer_iter_empty(buf_iter))
                                return 0;
                } else {
                        if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2381,6 +2388,11 @@ __tracing_open(struct inode *inode, struct file *file)
        if (!iter)
                return ERR_PTR(-ENOMEM);
 
+       iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
+                                   GFP_KERNEL);
+       if (!iter->buffer_iter)
+               goto release;
+
        /*
         * We make a copy of the current tracer to avoid concurrent
         * changes on it while we are reading.
@@ -2441,6 +2453,8 @@ __tracing_open(struct inode *inode, struct file *file)
  fail:
        mutex_unlock(&trace_types_lock);
        kfree(iter->trace);
+       kfree(iter->buffer_iter);
+release:
        seq_release_private(inode, file);
        return ERR_PTR(-ENOMEM);
 }
@@ -2481,6 +2495,7 @@ static int tracing_release(struct inode *inode, struct file *file)
        mutex_destroy(&iter->mutex);
        free_cpumask_var(iter->started);
        kfree(iter->trace);
+       kfree(iter->buffer_iter);
        seq_release_private(inode, file);
        return 0;
 }
@@ -3609,6 +3624,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
@@ -3680,7 +3696,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        ret = splice_to_pipe(pipe, &spd);
 out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 
 out_err:
@@ -4231,6 +4247,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -4318,7 +4335,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        }
 
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 out:
        return ret;
 }
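
Both splice paths above gain an explicit .nr_pages_max = PIPE_DEF_BUFFERS and call the narrowed splice_shrink_spd(&spd). Since splice_to_pipe() consumes spd.nr_pages, the descriptor apparently has to carry its own record of how many page slots were allocated so the shrink step can release them without consulting the pipe again. A condensed sketch of the resulting pattern, using only fields and calls visible in these hunks:

        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0,                    /* filled while building the output */
                .nr_pages_max   = PIPE_DEF_BUFFERS,     /* slots actually allocated */
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
        };

        /* ... populate spd.pages / spd.partial ... */

        ret = splice_to_pipe(pipe, &spd);
        splice_shrink_spd(&spd);        /* now sized by spd.nr_pages_max, not the pipe */
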
index 5aec220..55e1f7f 100644 (file)
@@ -317,6 +317,14 @@ struct tracer {
 
 #define TRACE_PIPE_ALL_CPU     -1
 
+static inline struct ring_buffer_iter *
+trace_buffer_iter(struct trace_iterator *iter, int cpu)
+{
+       if (iter->buffer_iter && iter->buffer_iter[cpu])
+               return iter->buffer_iter[cpu];
+       return NULL;
+}
+
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
 void trace_wake_up(void);
index a7d2a4c..ce27c8b 100644 (file)
@@ -538,7 +538,7 @@ get_return_for_leaf(struct trace_iterator *iter,
                next = &data->ret;
        } else {
 
-               ring_iter = iter->buffer_iter[iter->cpu];
+               ring_iter = trace_buffer_iter(iter, iter->cpu);
 
                /* First peek to compare current entry and the next one */
                if (ring_iter)
index df611a0..123b189 100644 (file)
@@ -1325,4 +1325,4 @@ __init static int init_events(void)
 
        return 0;
 }
-device_initcall(init_events);
+early_initcall(init_events);
index 518aea7..66ce414 100644 (file)
@@ -78,7 +78,7 @@ static LIST_HEAD(free_entries);
 static DEFINE_SPINLOCK(free_entries_lock);
 
 /* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
+static u32 global_disable __read_mostly;
 
 /* Global error count */
 static u32 error_count;
@@ -657,7 +657,7 @@ static int dma_debug_fs_init(void)
 
        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent,
-                       (u32 *)&global_disable);
+                       &global_disable);
        if (!global_disable_dent)
                goto out_err;
 
index 6805453..f7210ad 100644 (file)
@@ -101,6 +101,10 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
 
 bool should_fail(struct fault_attr *attr, ssize_t size)
 {
+       /* No need to check any other properties if the probability is 0 */
+       if (attr->probability == 0)
+               return false;
+
        if (attr->task_filter && !fail_task(attr, current))
                return false;
 
index 23a5e03..c24c2f7 100644 (file)
@@ -87,12 +87,10 @@ void __list_add_rcu(struct list_head *new,
                    struct list_head *prev, struct list_head *next)
 {
        WARN(next->prev != prev,
-               "list_add_rcu corruption. next->prev should be "
-               "prev (%p), but was %p. (next=%p).\n",
+               "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
                prev, next->prev, next);
        WARN(prev->next != next,
-               "list_add_rcu corruption. prev->next should be "
-               "next (%p), but was %p. (prev=%p).\n",
+               "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
                next, prev->next, prev);
        new->next = next;
        new->prev = prev;
index ec4fcb7..bcb63ac 100644 (file)
@@ -698,7 +698,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
 {
@@ -710,6 +710,10 @@ again:
        if (ptr)
                return ptr;
 
+       /* do not panic in alloc_bootmem_bdata() */
+       if (limit && goal + size > limit)
+               limit = 0;
+
        ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;
index 7ea259d..2f42d95 100644 (file)
@@ -701,8 +701,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                if (err) {
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
+                       if (err == -ENOMEM) {
+                               ret = COMPACT_PARTIAL;
+                               goto out;
+                       }
                }
-
        }
 
 out:
index deff1b6..14d260f 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/ksm.h>
 #include <linux/fs.h>
+#include <linux/file.h>
 
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
@@ -204,14 +205,16 @@ static long madvise_remove(struct vm_area_struct *vma,
 {
        loff_t offset;
        int error;
+       struct file *f;
 
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
 
        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;
 
-       if (!vma->vm_file || !vma->vm_file->f_mapping
-               || !vma->vm_file->f_mapping->host) {
+       f = vma->vm_file;
+
+       if (!f || !f->f_mapping || !f->f_mapping->host) {
                        return -EINVAL;
        }
 
@@ -221,11 +224,18 @@ static long madvise_remove(struct vm_area_struct *vma,
        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
-       /* filesystem's fallocate may need to take i_mutex */
+       /*
+        * Filesystem's fallocate may need to take i_mutex.  We need to
+        * explicitly grab a reference because the vma (and hence the
+        * vma's reference to the file) can go away as soon as we drop
+        * mmap_sem.
+        */
+       get_file(f);
        up_read(&current->mm->mmap_sem);
-       error = do_fallocate(vma->vm_file,
+       error = do_fallocate(f,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, end - start);
+       fput(f);
        down_read(&current->mm->mmap_sem);
        return error;
 }
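
The madvise_remove() change is an instance of a general rule: everything reached through the vma, including vma->vm_file, can disappear as soon as mmap_sem is dropped, so the file has to be pinned with get_file() before the lock goes away and released with fput() afterwards. A toy, self-contained version of the same take-a-reference-before-dropping-the-lock pattern, with invented names and a plain mutex standing in for mmap_sem:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
        int refcount;                   /* protected by lock below */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void get_object(struct object *obj)
{
        obj->refcount++;
}

static void put_object(struct object *obj)
{
        if (--obj->refcount == 0)
                free(obj);
}

static void slow_operation(struct object *obj)
{
        (void)obj;                      /* stands in for do_fallocate(): may sleep */
}

static void use_object(struct object *obj)
{
        pthread_mutex_lock(&lock);
        get_object(obj);                /* pin the object before dropping the lock */
        pthread_mutex_unlock(&lock);

        slow_operation(obj);            /* safe: our reference keeps it alive */

        pthread_mutex_lock(&lock);
        put_object(obj);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        struct object *obj = calloc(1, sizeof(*obj));

        if (!obj)
                return 1;
        obj->refcount = 1;
        use_object(obj);

        pthread_mutex_lock(&lock);
        put_object(obj);                /* drop the initial reference */
        pthread_mutex_unlock(&lock);
        return 0;
}
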
index 32a0a5e..5cc6731 100644 (file)
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                           MAX_NUMNODES);
 }
 
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
-       if (memblock.reserved.regions == memblock_reserved_init_regions)
-               return 0;
-
-       return memblock_free(__pa(memblock.reserved.regions),
-                sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
-       if (memblock.reserved.regions == memblock_reserved_init_regions)
-               return 0;
-
-       return memblock_reserve(__pa(memblock.reserved.regions),
-                sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
        type->total_size -= type->regions[r].size;
@@ -184,9 +160,39 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
        }
 }
 
-static int __init_memblock memblock_double_array(struct memblock_type *type)
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+                                       phys_addr_t *addr)
+{
+       if (memblock.reserved.regions == memblock_reserved_init_regions)
+               return 0;
+
+       *addr = __pa(memblock.reserved.regions);
+
+       return PAGE_ALIGN(sizeof(struct memblock_region) *
+                         memblock.reserved.max);
+}
+
+/**
+ * memblock_double_array - double the size of the memblock regions array
+ * @type: memblock type of the regions array being doubled
+ * @new_area_start: starting address of memory range to avoid overlap with
+ * @new_area_size: size of memory range to avoid overlap with
+ *
+ * Double the size of the @type regions array. If memblock is being used to
+ * allocate memory for a new reserved regions array and there is a previously
+ * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
+ * waiting to be reserved, ensure the memory used by the new array does
+ * not overlap.
+ *
+ * RETURNS:
+ * 0 on success, -1 on failure.
+ */
+static int __init_memblock memblock_double_array(struct memblock_type *type,
+                                               phys_addr_t new_area_start,
+                                               phys_addr_t new_area_size)
 {
        struct memblock_region *new_array, *old_array;
+       phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;
@@ -200,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
+       /*
+        * We need to allocate the new array aligned to PAGE_SIZE,
+        * so we can free it completely later.
+        */
+       old_alloc_size = PAGE_ALIGN(old_size);
+       new_alloc_size = PAGE_ALIGN(new_size);
 
        /* Retrieve the slab flag */
        if (type == &memblock.memory)
@@ -222,7 +234,18 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
-               addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+               /* only exclude range when trying to double reserved.regions */
+               if (type != &memblock.reserved)
+                       new_area_start = new_area_size = 0;
+
+               addr = memblock_find_in_range(new_area_start + new_area_size,
+                                               memblock.current_limit,
+                                               new_alloc_size, PAGE_SIZE);
+               if (!addr && new_area_size)
+                       addr = memblock_find_in_range(0,
+                                       min(new_area_start, memblock.current_limit),
+                                       new_alloc_size, PAGE_SIZE);
+
                new_array = addr ? __va(addr) : 0;
        }
        if (!addr) {
@@ -251,13 +274,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
-               memblock_free(__pa(old_array), old_size);
+               memblock_free(__pa(old_array), old_alloc_size);
 
        /* Reserve the new array if that comes from the memblock.
         * Otherwise, we needn't do it
         */
        if (!use_slab)
-               BUG_ON(memblock_reserve(addr, new_size));
+               BUG_ON(memblock_reserve(addr, new_alloc_size));
 
        /* Update slab flag */
        *in_slab = use_slab;
@@ -399,7 +422,7 @@ repeat:
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
-                       if (memblock_double_array(type) < 0)
+                       if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
@@ -450,7 +473,7 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
 
        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
-               if (memblock_double_array(type) < 0)
+               if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;
 
        for (i = 0; i < type->cnt; i++) {
@@ -540,9 +563,9 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
  * __next_free_mem_range - next function for for_each_free_mem_range()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Find the first free area from *@idx which matches @nid, fill the out
  * parameters, and update *@idx for the next iteration.  The lower 32bit of
@@ -616,9 +639,9 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Reverse of __next_free_mem_range().
  */
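
memblock_double_array() now rounds both the old and the new array allocation up to a page (old_alloc_size / new_alloc_size), matching the new comment about freeing the arrays completely later, and the nobootmem.c hunk further down relies on the same PAGE_ALIGN()ed size when it hands the reserved-regions array back. A small standalone illustration of that rounding, with made-up sizes since sizeof(struct memblock_region) is not shown here:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long region_size = 40; /* hypothetical sizeof(struct memblock_region) */
        unsigned long max = 128;        /* hypothetical current array capacity */
        unsigned long old_size = max * region_size;
        unsigned long new_size = old_size << 1;

        printf("old_size=%lu  old_alloc_size=%lu\n", old_size, PAGE_ALIGN(old_size));
        printf("new_size=%lu  new_alloc_size=%lu\n", new_size, PAGE_ALIGN(new_size));
        return 0;
}

Without the rounding, the partial page at the end of the old array would never be handed back, since the free path below only releases whole pages between PFN_UP(start) and PFN_DOWN(end).
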
index ac35bcc..f72b5e5 100644 (file)
@@ -1148,7 +1148,7 @@ bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
 {
        if (root_memcg == memcg)
                return true;
-       if (!root_memcg->use_hierarchy)
+       if (!root_memcg->use_hierarchy || !memcg)
                return false;
        return css_is_ancestor(&memcg->css, &root_memcg->css);
 }
@@ -1234,7 +1234,7 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 
 /**
  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
- * @mem: the memory cgroup
+ * @memcg: the memory cgroup
  *
  * Returns the maximum amount of memory @mem can be charged with, in
  * pages.
@@ -1508,7 +1508,7 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
 
 /**
  * test_mem_cgroup_node_reclaimable
- * @mem: the target memcg
+ * @memcg: the target memcg
  * @nid: the node ID to be checked.
  * @noswap : specify true here if the user wants file only information.
  *
index 1b7dc66..2466d12 100644 (file)
@@ -1225,7 +1225,15 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
-                               VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
+#ifdef CONFIG_DEBUG_VM
+                               if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
+                                       pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
+                                               __func__, addr, end,
+                                               vma->vm_start,
+                                               vma->vm_end);
+                                       BUG();
+                               }
+#endif
                                split_huge_page_pmd(vma->vm_mm, pmd);
                        } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
@@ -1366,7 +1374,7 @@ void unmap_vmas(struct mmu_gather *tlb,
 /**
  * zap_page_range - remove user pages in a given range
  * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
+ * @start: starting address of pages to zap
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  *
index 0d7e3ec..427bb29 100644 (file)
@@ -618,7 +618,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
-                       goto out;
+                       goto error;
                new_pgdat = 1;
        }
 
index f15c1b2..1d771e4 100644 (file)
@@ -1177,7 +1177,7 @@ static long do_mbind(unsigned long start, unsigned long len,
                if (!list_empty(&pagelist)) {
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                (unsigned long)vma,
-                                               false, true);
+                                               false, MIGRATE_SYNC);
                        if (nr_failed)
                                putback_lru_pages(&pagelist);
                }
index d23415c..4055730 100644 (file)
@@ -105,27 +105,35 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
                __free_pages_bootmem(pfn_to_page(i), 0);
 }
 
+static unsigned long __init __free_memory_core(phys_addr_t start,
+                                phys_addr_t end)
+{
+       unsigned long start_pfn = PFN_UP(start);
+       unsigned long end_pfn = min_t(unsigned long,
+                                     PFN_DOWN(end), max_low_pfn);
+
+       if (start_pfn > end_pfn)
+               return 0;
+
+       __free_pages_memory(start_pfn, end_pfn);
+
+       return end_pfn - start_pfn;
+}
+
 unsigned long __init free_low_memory_core_early(int nodeid)
 {
        unsigned long count = 0;
-       phys_addr_t start, end;
+       phys_addr_t start, end, size;
        u64 i;
 
-       /* free reserved array temporarily so that it's treated as free area */
-       memblock_free_reserved_regions();
-
-       for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
-               unsigned long start_pfn = PFN_UP(start);
-               unsigned long end_pfn = min_t(unsigned long,
-                                             PFN_DOWN(end), max_low_pfn);
-               if (start_pfn < end_pfn) {
-                       __free_pages_memory(start_pfn, end_pfn);
-                       count += end_pfn - start_pfn;
-               }
-       }
+       for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+               count += __free_memory_core(start, end);
+
+       /* free the range used for the reserved array, if we allocated it */
+       size = get_allocated_memblock_reserved_regions_info(&start);
+       if (size)
+               count += __free_memory_core(start, start + size);
 
-       /* put region array back? */
-       memblock_reserve_reserved_regions();
        return count;
 }
 
@@ -274,7 +282,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                                   unsigned long size,
                                                   unsigned long align,
                                                   unsigned long goal,
index 416637f..ac300c9 100644 (file)
@@ -184,6 +184,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
                          const nodemask_t *nodemask, unsigned long totalpages)
 {
        long points;
+       long adj;
 
        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;
@@ -192,7 +193,8 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
        if (!p)
                return 0;
 
-       if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+       adj = p->signal->oom_score_adj;
+       if (adj == OOM_SCORE_ADJ_MIN) {
                task_unlock(p);
                return 0;
        }
@@ -210,14 +212,11 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
-               points -= 30 * totalpages / 1000;
+               adj -= 30;
 
-       /*
-        * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
-        * either completely disable oom killing or always prefer a certain
-        * task.
-        */
-       points += p->signal->oom_score_adj * totalpages / 1000;
+       /* Normalize to oom_score_adj units */
+       adj *= totalpages / 1000;
+       points += adj;
 
        /*
         * Never return 0 for an eligible task regardless of the root bonus and
@@ -366,7 +365,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 
 /**
  * dump_tasks - dump current memory state of all system tasks
- * @mem: current's memory controller, if constrained
+ * @memcg: current's memory controller, if constrained
  * @nodemask: nodemask passed to page allocator for mempolicy ooms
  *
  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
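
In the rewritten oom_badness() the CAP_SYS_ADMIN bonus is folded into adj and the normalization happens once: every oom_score_adj unit is worth totalpages / 1000 pages. A standalone example of the arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
        long totalpages = 2097152;      /* e.g. 8 GiB of 4 KiB pages (hypothetical) */
        long points = 300000;           /* rss + swap + page tables, made up */
        long adj = 300;                 /* /proc/<pid>/oom_score_adj */

        adj -= 30;                      /* root bonus for CAP_SYS_ADMIN tasks */
        adj *= totalpages / 1000;       /* normalize to oom_score_adj units */
        points += adj;

        printf("badness = %ld pages\n", points);        /* 300000 + 270 * 2097 = 866190 */
        return 0;
}
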
index 4403009..4a4f921 100644 (file)
@@ -5635,7 +5635,12 @@ static struct page *
 __alloc_contig_migrate_alloc(struct page *page, unsigned long private,
                             int **resultp)
 {
-       return alloc_page(GFP_HIGHUSER_MOVABLE);
+       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+       if (PageHighMem(page))
+               gfp_mask |= __GFP_HIGHMEM;
+
+       return alloc_page(gfp_mask);
 }
 
 /* [start, end) must belong to a single zone. */
index 1ccbd71..eb750f8 100644 (file)
@@ -392,7 +392,7 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
 
 /**
  * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
- * @end: swap entry to be cmpxchged
+ * @ent: swap entry to be cmpxchged
  * @old: old id
  * @new: new id
  *
@@ -422,7 +422,7 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
 /**
  * swap_cgroup_record - record mem_cgroup for this swp_entry.
  * @ent: swap entry to be recorded into
- * @mem: mem_cgroup to be recorded
+ * @id: mem_cgroup to be recorded
  *
  * Returns old value at success, 0 at failure.
  * (Of course, old value can be 0.)
index aa9701e..6c118d0 100644 (file)
@@ -162,7 +162,6 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
- * @mm: memory map to walk
  * @addr: starting address
  * @end: ending address
  * @walk: set of callbacks to invoke for each level of the tree
index 405d331..3707c71 100644 (file)
@@ -360,7 +360,6 @@ err_free:
  * @chunk: chunk to depopulate
  * @off: offset to the area to depopulate
  * @size: size of the area to depopulate in bytes
- * @flush: whether to flush cache and tlb or not
  *
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
  * from @chunk.  If @flush is true, vcache is flushed before unmapping
index a15a466..bd10636 100644 (file)
@@ -263,6 +263,24 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
        return 0;
 }
 
+/*
+ * Sometimes, before we decide whether to proceed or to fail, we must check
+ * that an entry was not already brought back from swap by a racing thread.
+ *
+ * Checking page is not enough: by the time a SwapCache page is locked, it
+ * might be reused, and again be SwapCache, using the same swap as before.
+ */
+static bool shmem_confirm_swap(struct address_space *mapping,
+                              pgoff_t index, swp_entry_t swap)
+{
+       void *item;
+
+       rcu_read_lock();
+       item = radix_tree_lookup(&mapping->page_tree, index);
+       rcu_read_unlock();
+       return item == swp_to_radix_entry(swap);
+}
+
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
@@ -270,40 +288,31 @@ static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, gfp_t gfp, void *expected)
 {
-       int error = 0;
+       int error;
 
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapBacked(page));
 
+       page_cache_get(page);
+       page->mapping = mapping;
+       page->index = index;
+
+       spin_lock_irq(&mapping->tree_lock);
        if (!expected)
-               error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+               error = radix_tree_insert(&mapping->page_tree, index, page);
+       else
+               error = shmem_radix_tree_replace(mapping, index, expected,
+                                                                page);
        if (!error) {
-               page_cache_get(page);
-               page->mapping = mapping;
-               page->index = index;
-
-               spin_lock_irq(&mapping->tree_lock);
-               if (!expected)
-                       error = radix_tree_insert(&mapping->page_tree,
-                                                       index, page);
-               else
-                       error = shmem_radix_tree_replace(mapping, index,
-                                                       expected, page);
-               if (!error) {
-                       mapping->nrpages++;
-                       __inc_zone_page_state(page, NR_FILE_PAGES);
-                       __inc_zone_page_state(page, NR_SHMEM);
-                       spin_unlock_irq(&mapping->tree_lock);
-               } else {
-                       page->mapping = NULL;
-                       spin_unlock_irq(&mapping->tree_lock);
-                       page_cache_release(page);
-               }
-               if (!expected)
-                       radix_tree_preload_end();
+               mapping->nrpages++;
+               __inc_zone_page_state(page, NR_FILE_PAGES);
+               __inc_zone_page_state(page, NR_SHMEM);
+               spin_unlock_irq(&mapping->tree_lock);
+       } else {
+               page->mapping = NULL;
+               spin_unlock_irq(&mapping->tree_lock);
+               page_cache_release(page);
        }
-       if (error)
-               mem_cgroup_uncharge_cache_page(page);
        return error;
 }
 
@@ -1124,9 +1133,9 @@ repeat:
                /* We have to do this with page locked to prevent races */
                lock_page(page);
                if (!PageSwapCache(page) || page_private(page) != swap.val ||
-                   page->mapping) {
+                   !shmem_confirm_swap(mapping, index, swap)) {
                        error = -EEXIST;        /* try again */
-                       goto failed;
+                       goto unlock;
                }
                if (!PageUptodate(page)) {
                        error = -EIO;
@@ -1142,9 +1151,12 @@ repeat:
 
                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
-               if (!error)
+               if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
                                                gfp, swp_to_radix_entry(swap));
+                       /* We already confirmed swap, and make no allocation */
+                       VM_BUG_ON(error);
+               }
                if (error)
                        goto failed;
 
@@ -1181,11 +1193,18 @@ repeat:
                __set_page_locked(page);
                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
-               if (!error)
-                       error = shmem_add_to_page_cache(page, mapping, index,
-                                               gfp, NULL);
                if (error)
                        goto decused;
+               error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+               if (!error) {
+                       error = shmem_add_to_page_cache(page, mapping, index,
+                                                       gfp, NULL);
+                       radix_tree_preload_end();
+               }
+               if (error) {
+                       mem_cgroup_uncharge_cache_page(page);
+                       goto decused;
+               }
                lru_cache_add_anon(page);
 
                spin_lock(&info->lock);
@@ -1245,14 +1264,10 @@ decused:
 unacct:
        shmem_unacct_blocks(info->flags, 1);
 failed:
-       if (swap.val && error != -EINVAL) {
-               struct page *test = find_get_page(mapping, index);
-               if (test && !radix_tree_exceptional_entry(test))
-                       page_cache_release(test);
-               /* Have another try if the entry has changed */
-               if (test != swp_to_radix_entry(swap))
-                       error = -EEXIST;
-       }
+       if (swap.val && error != -EINVAL &&
+           !shmem_confirm_swap(mapping, index, swap))
+               error = -EEXIST;
+unlock:
        if (page) {
                unlock_page(page);
                page_cache_release(page);
@@ -1264,7 +1279,7 @@ failed:
                spin_unlock(&info->lock);
                goto repeat;
        }
-       if (error == -EEXIST)
+       if (error == -EEXIST)   /* from above or from radix_tree_insert */
                goto repeat;
        return error;
 }
@@ -1594,6 +1609,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1682,7 +1698,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 
        if (error > 0) {
                *ppos += error;
@@ -1691,98 +1707,6 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        return error;
 }
 
-/*
- * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
- */
-static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
-                                   pgoff_t index, pgoff_t end, int origin)
-{
-       struct page *page;
-       struct pagevec pvec;
-       pgoff_t indices[PAGEVEC_SIZE];
-       bool done = false;
-       int i;
-
-       pagevec_init(&pvec, 0);
-       pvec.nr = 1;            /* start small: we may be there already */
-       while (!done) {
-               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-                                       pvec.nr, pvec.pages, indices);
-               if (!pvec.nr) {
-                       if (origin == SEEK_DATA)
-                               index = end;
-                       break;
-               }
-               for (i = 0; i < pvec.nr; i++, index++) {
-                       if (index < indices[i]) {
-                               if (origin == SEEK_HOLE) {
-                                       done = true;
-                                       break;
-                               }
-                               index = indices[i];
-                       }
-                       page = pvec.pages[i];
-                       if (page && !radix_tree_exceptional_entry(page)) {
-                               if (!PageUptodate(page))
-                                       page = NULL;
-                       }
-                       if (index >= end ||
-                           (page && origin == SEEK_DATA) ||
-                           (!page && origin == SEEK_HOLE)) {
-                               done = true;
-                               break;
-                       }
-               }
-               shmem_deswap_pagevec(&pvec);
-               pagevec_release(&pvec);
-               pvec.nr = PAGEVEC_SIZE;
-               cond_resched();
-       }
-       return index;
-}
-
-static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
-{
-       struct address_space *mapping;
-       struct inode *inode;
-       pgoff_t start, end;
-       loff_t new_offset;
-
-       if (origin != SEEK_DATA && origin != SEEK_HOLE)
-               return generic_file_llseek_size(file, offset, origin,
-                                                       MAX_LFS_FILESIZE);
-       mapping = file->f_mapping;
-       inode = mapping->host;
-       mutex_lock(&inode->i_mutex);
-       /* We're holding i_mutex so we can access i_size directly */
-
-       if (offset < 0)
-               offset = -EINVAL;
-       else if (offset >= inode->i_size)
-               offset = -ENXIO;
-       else {
-               start = offset >> PAGE_CACHE_SHIFT;
-               end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-               new_offset = shmem_seek_hole_data(mapping, start, end, origin);
-               new_offset <<= PAGE_CACHE_SHIFT;
-               if (new_offset > offset) {
-                       if (new_offset < inode->i_size)
-                               offset = new_offset;
-                       else if (origin == SEEK_DATA)
-                               offset = -ENXIO;
-                       else
-                               offset = inode->i_size;
-               }
-       }
-
-       if (offset >= 0 && offset != file->f_pos) {
-               file->f_pos = offset;
-               file->f_version = 0;
-       }
-       mutex_unlock(&inode->i_mutex);
-       return offset;
-}
-
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                                         loff_t len)
 {
@@ -2786,7 +2710,7 @@ static const struct address_space_operations shmem_aops = {
 static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
 #ifdef CONFIG_TMPFS
-       .llseek         = shmem_file_llseek,
+       .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = shmem_file_aio_read,
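
shmem_add_to_page_cache() above no longer preloads the radix tree or uncharges the memcg on failure; both moved to the caller, and the swap-in case skips the preload entirely because replacing an already-populated slot allocates no tree nodes. The caller-side sequence, condensed from the allocation-path hunk with commentary added:

        error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);     /* may sleep, done before tree_lock */
        if (!error) {
                error = shmem_add_to_page_cache(page, mapping, index, gfp, NULL);
                radix_tree_preload_end();                       /* drops the preload's preemption disable */
        }
        if (error) {
                mem_cgroup_uncharge_cache_page(page);           /* undo the charge taken just above */
                goto decused;
        }
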
index 6a4bf91..c7bb952 100644 (file)
@@ -275,8 +275,9 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
 {
-       pg_data_t *host_pgdat;
-       unsigned long goal;
+       unsigned long goal, limit;
+       unsigned long *p;
+       int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
@@ -287,10 +288,17 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
-       goal = __pa(pgdat) & PAGE_SECTION_MASK;
-       host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
-       return __alloc_bootmem_node_nopanic(host_pgdat, size,
-                                           SMP_CACHE_BYTES, goal);
+       goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+       limit = goal + (1UL << PA_SECTION_SHIFT);
+       nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
+again:
+       p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
+                                         SMP_CACHE_BYTES, goal, limit);
+       if (!p && limit) {
+               limit = 0;
+               goto again;
+       }
+       return p;
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
index eeb3bc9..66e4310 100644 (file)
@@ -2688,7 +2688,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
                 * them before going back to sleep.
                 */
                set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
-               schedule();
+
+               if (!kthread_should_stop())
+                       schedule();
+
                set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
        } else {
                if (remaining)
@@ -2955,14 +2958,17 @@ int kswapd_run(int nid)
 }
 
 /*
- * Called by memory hotplug when all memory in a node is offlined.
+ * Called by memory hotplug when all memory in a node is offlined.  Caller must
+ * hold lock_memory_hotplug().
  */
 void kswapd_stop(int nid)
 {
        struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
 
-       if (kswapd)
+       if (kswapd) {
                kthread_stop(kswapd);
+               NODE_DATA(nid)->kswapd = NULL;
+       }
 }
 
 static int __init kswapd_init(void)
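
The vmscan.c hunks make kswapd_try_to_sleep() re-check kthread_should_stop() right before schedule(), and make kswapd_stop() clear the node's kswapd pointer while the caller holds lock_memory_hotplug(), so a stop request issued during memory offline is not slept through and a stale task pointer is not left behind. The general kthread sleep loop this re-check belongs to looks roughly like the sketch below (kernel-style, illustrative only):

static int example_thread(void *data)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                /* re-check after setting the state, or a concurrent stop is slept through */
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }

                schedule();
                __set_current_state(TASK_RUNNING);

                /* ... per-iteration work would go here ... */
        }
        return 0;
}
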
index 6089f0c..9096bcb 100644 (file)
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                break;
 
        case NETDEV_DOWN:
+               if (dev->features & NETIF_F_HW_VLAN_FILTER)
+                       vlan_vid_del(dev, 0);
+
                /* Put all VLANs for this dev in the down state too.  */
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
index 9ee48cb..3d33ecf 100644 (file)
@@ -368,7 +368,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                                const char *sptr = va_arg(ap, const char *);
                                uint16_t len = 0;
                                if (sptr)
-                                       len = min_t(uint16_t, strlen(sptr),
+                                       len = min_t(size_t, strlen(sptr),
                                                                USHRT_MAX);
 
                                errcode = p9pdu_writef(pdu, proto_version,
index 5af18d1..2a16765 100644 (file)
@@ -192,10 +192,10 @@ static int pack_sg_list(struct scatterlist *sg, int start,
                s = rest_of_page(data);
                if (s > count)
                        s = count;
+               BUG_ON(index > limit);
                sg_set_buf(&sg[index++], data, s);
                count -= s;
                data += s;
-               BUG_ON(index > limit);
        }
 
        return index-start;
index 051f7ab..779095d 100644 (file)
@@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
                case AX25_P_NETROM:
                        if (ax25_protocol_is_registered(AX25_P_NETROM))
                                return -ESOCKTNOSUPPORT;
+                       break;
 #endif
 #ifdef CONFIG_ROSE_MODULE
                case AX25_P_ROSE:
index 8bf9751..c5863f4 100644 (file)
@@ -1351,6 +1351,7 @@ void bla_free(struct bat_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: the packet came in as a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  *  * we have to race for a claim
@@ -1361,7 +1362,8 @@ void bla_free(struct bat_priv *bat_priv)
  * process the skb.
  *
  */
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+          bool is_bcast)
 {
        struct ethhdr *ethhdr;
        struct claim search_claim, *claim = NULL;
@@ -1380,7 +1382,7 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 
        if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
                /* don't allow broadcasts while requests are in flight */
-               if (is_multicast_ether_addr(ethhdr->h_dest))
+               if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
                        goto handled;
 
        memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1406,8 +1408,13 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
        }
 
        /* if it is a broadcast ... */
-       if (is_multicast_ether_addr(ethhdr->h_dest)) {
-               /* ... drop it. the responsible gateway is in charge. */
+       if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+               /* ... drop it. the responsible gateway is in charge.
+                *
+                * We need to check is_bcast because with the gateway
+                * feature, broadcasts (like DHCP requests) may be sent
+                * using a unicast packet type.
+                */
                goto handled;
        } else {
                /* seems the client considers us as its best gateway.
index e39f93a..dc5227b 100644 (file)
@@ -23,7 +23,8 @@
 #define _NET_BATMAN_ADV_BLA_H_
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+          bool is_bcast);
 int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
 int bla_is_backbone_gw(struct sk_buff *skb,
                       struct orig_node *orig_node, int hdr_size);
@@ -41,7 +42,7 @@ void bla_free(struct bat_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
-                        short vid)
+                        short vid, bool is_bcast)
 {
        return 0;
 }
index 840e2c6..015471d 100644 (file)
@@ -617,6 +617,8 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
                         * changes */
                        if (skb_linearize(skb) < 0)
                                goto out;
+                       /* skb_linearize() possibly changed skb->data */
+                       tt_query = (struct tt_query_packet *)skb->data;
 
                        tt_len = tt_query->tt_data * sizeof(struct tt_change);
 
index 6e2530b..a0ec0e4 100644 (file)
@@ -256,7 +256,11 @@ void interface_rx(struct net_device *soft_iface,
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct ethhdr *ethhdr;
        struct vlan_ethhdr *vhdr;
+       struct batman_header *batadv_header = (struct batman_header *)skb->data;
        short vid __maybe_unused = -1;
+       bool is_bcast;
+
+       is_bcast = (batadv_header->packet_type == BAT_BCAST);
 
        /* check if enough space is available for pulling, and pull */
        if (!pskb_may_pull(skb, hdr_size))
@@ -302,7 +306,7 @@ void interface_rx(struct net_device *soft_iface,
        /* Let the bridge loop avoidance check the packet. If will
         * not handle it, we can safely push it up.
         */
-       if (bla_rx(bat_priv, skb, vid))
+       if (bla_rx(bat_priv, skb, vid, is_bcast))
                goto out;
 
        netif_rx(skb);
index a66c2dc..2ab83d7 100644 (file)
@@ -141,13 +141,14 @@ static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
        struct tt_orig_list_entry *orig_entry;
 
        orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
-       atomic_dec(&orig_entry->orig_node->tt_size);
        orig_node_free_ref(orig_entry->orig_node);
        kfree(orig_entry);
 }
 
 static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
 {
+       /* to avoid race conditions, immediately decrease the tt counter */
+       atomic_dec(&orig_entry->orig_node->tt_size);
        call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
 }
 
@@ -910,7 +911,6 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
                }
                spin_unlock_bh(list_lock);
        }
-       atomic_set(&orig_node->tt_size, 0);
        orig_node->tt_initialised = false;
 }
 
@@ -2031,10 +2031,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
 {
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
-       bool ret = true;
+       bool ret = false;
 
        if (!atomic_read(&bat_priv->ap_isolation))
-               return false;
+               goto out;
 
        tt_local_entry = tt_local_hash_find(bat_priv, dst);
        if (!tt_local_entry)
@@ -2044,10 +2044,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
        if (!tt_global_entry)
                goto out;
 
-       if (_is_ap_isolated(tt_local_entry, tt_global_entry))
+       if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
                goto out;
 
-       ret = false;
+       ret = true;
 
 out:
        if (tt_global_entry)
index 4eefb7f..94ad124 100644 (file)
@@ -3043,6 +3043,50 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
        hci_dev_unlock(hdev);
 }
 
+static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
+                                        struct sk_buff *skb)
+{
+       struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status %u handle %u", hdev->name, ev->status,
+              __le16_to_cpu(ev->handle));
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+       if (!conn)
+               goto unlock;
+
+       if (!ev->status)
+               conn->sec_level = conn->pending_sec_level;
+
+       clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+
+       if (ev->status && conn->state == BT_CONNECTED) {
+               hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
+               hci_conn_put(conn);
+               goto unlock;
+       }
+
+       if (conn->state == BT_CONFIG) {
+               if (!ev->status)
+                       conn->state = BT_CONNECTED;
+
+               hci_proto_connect_cfm(conn, ev->status);
+               hci_conn_put(conn);
+       } else {
+               hci_auth_cfm(conn, ev->status);
+
+               hci_conn_hold(conn);
+               conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+               hci_conn_put(conn);
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static inline u8 hci_get_auth_req(struct hci_conn *conn)
 {
        /* If remote requests dedicated bonding follow that lead */
@@ -3559,6 +3603,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_extended_inquiry_result_evt(hdev, skb);
                break;
 
+       case HCI_EV_KEY_REFRESH_COMPLETE:
+               hci_key_refresh_complete_evt(hdev, skb);
+               break;
+
        case HCI_EV_IO_CAPA_REQUEST:
                hci_io_capa_request_evt(hdev, skb);
                break;
index 4deaca7..9332bc7 100644 (file)
@@ -1,6 +1,6 @@
 config BT_HIDP
        tristate "HIDP protocol support"
-       depends on BT && INPUT && HID_SUPPORT
+       depends on BT && INPUT
        select HID
        help
          HIDP (Human Interface Device Protocol) is a transport layer
index 24f144b..4554e80 100644 (file)
@@ -1295,7 +1295,12 @@ static void security_timeout(struct work_struct *work)
        struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
                                                security_timer.work);
 
-       l2cap_conn_del(conn->hcon, ETIMEDOUT);
+       BT_DBG("conn %p", conn);
+
+       if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
+               smp_chan_destroy(conn);
+               l2cap_conn_del(conn->hcon, ETIMEDOUT);
+       }
 }
 
 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
@@ -2910,12 +2915,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
        while (len >= L2CAP_CONF_OPT_SIZE) {
                len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
 
-               switch (type) {
-               case L2CAP_CONF_RFC:
-                       if (olen == sizeof(rfc))
-                               memcpy(&rfc, (void *)val, olen);
-                       goto done;
-               }
+               if (type != L2CAP_CONF_RFC)
+                       continue;
+
+               if (olen != sizeof(rfc))
+                       break;
+
+               memcpy(&rfc, (void *)val, olen);
+               goto done;
        }
 
        /* Use sane default values in case a misbehaving remote device
index 25d2207..3e5e336 100644 (file)
@@ -1598,7 +1598,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
        else
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
 
-       if (!conn) {
+       if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
                err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
                                 MGMT_STATUS_NOT_CONNECTED);
                goto failed;
@@ -1873,6 +1873,22 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
                pairing_complete(cmd, mgmt_status(status));
 }
 
+static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
+{
+       struct pending_cmd *cmd;
+
+       BT_DBG("status %u", status);
+
+       if (!status)
+               return;
+
+       cmd = find_pairing(conn);
+       if (!cmd)
+               BT_DBG("Unable to find a pending command");
+       else
+               pairing_complete(cmd, mgmt_status(status));
+}
+
 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
 {
@@ -1934,6 +1950,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        /* For LE, just connecting isn't a proof that the pairing finished */
        if (cp->addr.type == BDADDR_BREDR)
                conn->connect_cfm_cb = pairing_complete_cb;
+       else
+               conn->connect_cfm_cb = le_connect_complete_cb;
 
        conn->security_cfm_cb = pairing_complete_cb;
        conn->disconn_cfm_cb = pairing_complete_cb;
index 6fc7c47..37df4e9 100644 (file)
@@ -648,7 +648,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
 
        auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
 
-       ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
+       ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
        if (ret)
                return SMP_UNSPECIFIED;
 
@@ -703,7 +703,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
        return 0;
 }
 
-static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
 {
        struct smp_ltk *key;
        struct hci_conn *hcon = conn->hcon;
@@ -712,6 +712,9 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
        if (!key)
                return 0;
 
+       if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
+               return 0;
+
        if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
                return 1;
 
@@ -732,7 +735,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
        hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
 
-       if (smp_ltk_encrypt(conn))
+       if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
                return 0;
 
        if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
@@ -771,7 +774,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
                return 1;
 
        if (hcon->link_mode & HCI_LM_MASTER)
-               if (smp_ltk_encrypt(conn))
+               if (smp_ltk_encrypt(conn, sec_level))
                        goto done;
 
        if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
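
The smp.c hunks above thread the requested security level into smp_ltk_encrypt() so that a stored LTK is only reused when it is strong enough: an unauthenticated key no longer satisfies a request above medium security. A minimal stand-alone sketch of that policy check (the constants and struct are illustrative, not the kernel's definitions):

/* Hypothetical sketch of the "authenticated key required for high
 * security" rule added above; names and values are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define BT_SECURITY_MEDIUM 2
#define BT_SECURITY_HIGH   3

struct ltk { bool authenticated; };

static bool ltk_usable(const struct ltk *key, int sec_level)
{
        if (!key)
                return false;
        /* an unauthenticated ("Just Works") key must not satisfy a
         * request for anything above medium security */
        if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
                return false;
        return true;
}

int main(void)
{
        struct ltk key = { .authenticated = false };

        printf("medium: %d, high: %d\n",
               ltk_usable(&key, BT_SECURITY_MEDIUM),
               ltk_usable(&key, BT_SECURITY_HIGH));
        return 0;
}
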
index 0a942fb..e1144e1 100644 (file)
@@ -240,6 +240,7 @@ int br_add_bridge(struct net *net, const char *name)
                return -ENOMEM;
 
        dev_net_set(dev, net);
+       dev->rtnl_link_ops = &br_link_ops;
 
        res = register_netdev(dev);
        if (res)
index 2080485..fe41260 100644 (file)
@@ -208,7 +208,7 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static struct rtnl_link_ops br_link_ops __read_mostly = {
+struct rtnl_link_ops br_link_ops __read_mostly = {
        .kind           = "bridge",
        .priv_size      = sizeof(struct net_bridge),
        .setup          = br_dev_setup,
index 1a8ad4f..a768b24 100644 (file)
@@ -549,6 +549,7 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
 #endif
 
 /* br_netlink.c */
+extern struct rtnl_link_ops br_link_ops;
 extern int br_netlink_init(void);
 extern void br_netlink_fini(void);
 extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
index aa6f716..8c83c17 100644 (file)
@@ -4,8 +4,7 @@
  * Author:     Sjur Brendeland/sjur.brandeland@stericsson.com
  * License terms: GNU General Public License (GPL) version 2
  *
- * Borrowed heavily from file: pn_dev.c. Thanks to
- *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
  *  and Sakari Ailus <sakari.ailus@nokia.com>
  */
 
@@ -562,9 +561,9 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-       unregister_pernet_subsys(&caif_net_ops);
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
+       unregister_pernet_subsys(&caif_net_ops);
 }
 
 module_init(caif_device_init);
index fb89443..78f1cda 100644 (file)
@@ -220,6 +220,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
                                                cfsk_hold, cfsk_put);
                cf_sk->sk.sk_state = CAIF_CONNECTED;
                set_tx_flow_on(cf_sk);
+               cf_sk->sk.sk_shutdown = 0;
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;
 
index cde1b4a..46cca3a 100644 (file)
@@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
        if (err < 0)
                goto free_skb;
 
-       /* to be able to check the received tx sock reference in raw_rcv() */
-       skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
-
        skb->dev = dev;
        skb->sk  = sk;
 
index a776f75..ba4323b 100644 (file)
@@ -504,13 +504,6 @@ void ceph_destroy_client(struct ceph_client *client)
        /* unmount */
        ceph_osdc_stop(&client->osdc);
 
-       /*
-        * make sure osd connections close out before destroying the
-        * auth module, which is needed to free those connections'
-        * ceph_authorizers.
-        */
-       ceph_msgr_flush();
-
        ceph_monc_stop(&client->monc);
 
        ceph_debugfs_client_cleanup(client);
index 524f4e4..10255e8 100644 (file)
@@ -563,6 +563,10 @@ static void prepare_write_message(struct ceph_connection *con)
                m->hdr.seq = cpu_to_le64(++con->out_seq);
                m->needs_out_seq = false;
        }
+#ifdef CONFIG_BLOCK
+       else
+               m->bio_iter = NULL;
+#endif
 
        dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
             m, con->out_seq, le16_to_cpu(m->hdr.type),
@@ -1419,7 +1423,7 @@ static int process_connect(struct ceph_connection *con)
                 * dropped messages.
                 */
                dout("process_connect got RESET peer seq %u\n",
-                    le32_to_cpu(con->in_connect.connect_seq));
+                    le32_to_cpu(con->in_reply.connect_seq));
                pr_err("%s%lld %s connection reset\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr));
@@ -1446,10 +1450,10 @@ static int process_connect(struct ceph_connection *con)
                 * If we sent a smaller connect_seq than the peer has, try
                 * again with a larger value.
                 */
-               dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
+               dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
                     le32_to_cpu(con->out_connect.connect_seq),
-                    le32_to_cpu(con->in_connect.connect_seq));
-               con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
+                    le32_to_cpu(con->in_reply.connect_seq));
+               con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
                ceph_con_out_kvec_reset(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
@@ -1464,9 +1468,9 @@ static int process_connect(struct ceph_connection *con)
                 */
                dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
                     con->peer_global_seq,
-                    le32_to_cpu(con->in_connect.global_seq));
+                    le32_to_cpu(con->in_reply.global_seq));
                get_global_seq(con->msgr,
-                              le32_to_cpu(con->in_connect.global_seq));
+                              le32_to_cpu(con->in_reply.global_seq));
                ceph_con_out_kvec_reset(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
index 10d6008..d0649a9 100644 (file)
@@ -847,6 +847,14 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
 
        mutex_unlock(&monc->mutex);
 
+       /*
+        * flush msgr queue before we destroy ourselves to ensure that:
+        *  - any work that references our embedded con is finished.
+        *  - any osd_client or other work that may reference an authorizer
+        *    finishes before we shut down the auth subsystem.
+        */
+       ceph_msgr_flush();
+
        ceph_auth_destroy(monc->auth);
 
        ceph_msg_put(monc->m_auth);
index 1ffebed..ca59e66 100644 (file)
@@ -139,15 +139,15 @@ void ceph_osdc_release_request(struct kref *kref)
 
        if (req->r_request)
                ceph_msg_put(req->r_request);
-       if (req->r_reply)
-               ceph_msg_put(req->r_reply);
        if (req->r_con_filling_msg) {
                dout("release_request revoking pages %p from con %p\n",
                     req->r_pages, req->r_con_filling_msg);
                ceph_con_revoke_message(req->r_con_filling_msg,
                                      req->r_reply);
-               ceph_con_put(req->r_con_filling_msg);
+               req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
        }
+       if (req->r_reply)
+               ceph_msg_put(req->r_reply);
        if (req->r_own_pages)
                ceph_release_page_vector(req->r_pages,
                                         req->r_num_pages);
@@ -1216,7 +1216,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
        if (req->r_con_filling_msg == con && req->r_reply == msg) {
                dout(" dropping con_filling_msg ref %p\n", con);
                req->r_con_filling_msg = NULL;
-               ceph_con_put(con);
+               con->ops->put(con);
        }
 
        if (!req->r_got_reply) {
@@ -2028,7 +2028,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
                dout("get_reply revoking msg %p from old con %p\n",
                     req->r_reply, req->r_con_filling_msg);
                ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
-               ceph_con_put(req->r_con_filling_msg);
+               req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
                req->r_con_filling_msg = NULL;
        }
 
@@ -2063,7 +2063,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 #endif
        }
        *skip = 0;
-       req->r_con_filling_msg = ceph_con_get(con);
+       req->r_con_filling_msg = con->ops->get(con);
        dout("get_reply tid %lld %p\n", tid, m);
 
 out:
index cd09819..1cb0d8a 100644 (file)
@@ -1136,8 +1136,8 @@ void dev_load(struct net *net, const char *name)
                no_module = request_module("netdev-%s", name);
        if (no_module && capable(CAP_SYS_MODULE)) {
                if (!request_module("%s", name))
-                       pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
-                              name);
+                       pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+                               name);
        }
 }
 EXPORT_SYMBOL(dev_load);
@@ -2089,25 +2089,6 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
        return 0;
 }
 
-/*
- * Try to orphan skb early, right before transmission by the device.
- * We cannot orphan skb if tx timestamp is requested or the sk-reference
- * is needed on driver level for other reasons, e.g. see net/can/raw.c
- */
-static inline void skb_orphan_try(struct sk_buff *skb)
-{
-       struct sock *sk = skb->sk;
-
-       if (sk && !skb_shinfo(skb)->tx_flags) {
-               /* skb_tx_hash() wont be able to get sk.
-                * We copy sk_hash into skb->rxhash
-                */
-               if (!skb->rxhash)
-                       skb->rxhash = sk->sk_hash;
-               skb_orphan(skb);
-       }
-}
-
 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
        return ((features & NETIF_F_GEN_CSUM) ||
@@ -2193,8 +2174,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                if (!list_empty(&ptype_all))
                        dev_queue_xmit_nit(skb, dev);
 
-               skb_orphan_try(skb);
-
                features = netif_skb_features(skb);
 
                if (vlan_tx_tag_present(skb) &&
@@ -2304,7 +2283,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
-               hash = (__force u16) skb->protocol ^ skb->rxhash;
+               hash = (__force u16) skb->protocol;
        hash = jhash_1word(hash, hashrnd);
 
        return (u16) (((u64) hash * qcount) >> 32) + qoffset;
@@ -2465,8 +2444,12 @@ static void skb_update_prio(struct sk_buff *skb)
 {
        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
-       if ((!skb->priority) && (skb->sk) && map)
-               skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+       if (!skb->priority && skb->sk && map) {
+               unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+               if (prioidx < map->priomap_len)
+                       skb->priority = map->priomap[prioidx];
+       }
 }
 #else
 #define skb_update_prio(skb)
@@ -6300,7 +6283,8 @@ static struct hlist_head *netdev_create_hash(void)
 /* Initialize per network namespace state */
 static int __net_init netdev_init(struct net *net)
 {
-       INIT_LIST_HEAD(&net->dev_base_head);
+       if (net != &init_net)
+               INIT_LIST_HEAD(&net->dev_base_head);
 
        net->dev_name_head = netdev_create_hash();
        if (net->dev_name_head == NULL)
index dddbacb..42f1e1c 100644 (file)
@@ -27,7 +27,9 @@ static DEFINE_MUTEX(net_mutex);
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
 
-struct net init_net;
+struct net init_net = {
+       .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
+};
 EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS   13 /* +1 for len +2 for rcu_head */
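
The net_namespace.c hunk initializes init_net.dev_base_head statically so the list is valid before any pernet init code runs, and the dev.c hunk then skips re-initializing it for init_net. A stand-alone illustration of the same idea with a hypothetical list type:

/* Stand-alone sketch: a list head initialized at compile time is usable
 * from program start, with no call to an init routine required. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

struct net { struct list_head dev_base_head; };

static struct net init_net = {
        .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};

static void net_init(struct net *net)
{
        /* only dynamically created namespaces still need runtime init */
        if (net != &init_net) {
                net->dev_base_head.next = &net->dev_base_head;
                net->dev_base_head.prev = &net->dev_base_head;
        }
}

int main(void)
{
        net_init(&init_net);
        printf("empty: %d\n",
               init_net.dev_base_head.next == &init_net.dev_base_head);
        return 0;
}
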
index 5b8aa2f..b2e9caa 100644 (file)
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
                return -ENOSPC;
        }
        set_bit(prioidx, prioidx_map);
+       if (atomic_read(&max_prioidx) < prioidx)
+               atomic_set(&max_prioidx, prioidx);
        spin_unlock_irqrestore(&prioidx_map_lock, flags);
-       atomic_set(&max_prioidx, prioidx);
        *prio = prioidx;
        return 0;
 }
@@ -64,7 +65,7 @@ static void put_prioidx(u32 idx)
        spin_unlock_irqrestore(&prioidx_map_lock, flags);
 }
 
-static void extend_netdev_table(struct net_device *dev, u32 new_len)
+static int extend_netdev_table(struct net_device *dev, u32 new_len)
 {
        size_t new_size = sizeof(struct netprio_map) +
                           ((sizeof(u32) * new_len));
@@ -76,7 +77,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 
        if (!new_priomap) {
                pr_warn("Unable to alloc new priomap!\n");
-               return;
+               return -ENOMEM;
        }
 
        for (i = 0;
@@ -89,46 +90,79 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
        rcu_assign_pointer(dev->priomap, new_priomap);
        if (old_priomap)
                kfree_rcu(old_priomap, rcu);
+       return 0;
 }
 
-static void update_netdev_tables(void)
+static int write_update_netdev_table(struct net_device *dev)
 {
+       int ret = 0;
+       u32 max_len;
+       struct netprio_map *map;
+
+       rtnl_lock();
+       max_len = atomic_read(&max_prioidx) + 1;
+       map = rtnl_dereference(dev->priomap);
+       if (!map || map->priomap_len < max_len)
+               ret = extend_netdev_table(dev, max_len);
+       rtnl_unlock();
+
+       return ret;
+}
+
+static int update_netdev_tables(void)
+{
+       int ret = 0;
        struct net_device *dev;
-       u32 max_len = atomic_read(&max_prioidx) + 1;
+       u32 max_len;
        struct netprio_map *map;
 
        rtnl_lock();
+       max_len = atomic_read(&max_prioidx) + 1;
        for_each_netdev(&init_net, dev) {
                map = rtnl_dereference(dev->priomap);
-               if ((!map) ||
-                   (map->priomap_len < max_len))
-                       extend_netdev_table(dev, max_len);
+               /*
+                * don't allocate priomap if we didn't
+                * change net_prio.ifpriomap (map == NULL),
+                * this will speed up skb_update_prio.
+                */
+               if (map && map->priomap_len < max_len) {
+                       ret = extend_netdev_table(dev, max_len);
+                       if (ret < 0)
+                               break;
+               }
        }
        rtnl_unlock();
+       return ret;
 }
 
 static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
        struct cgroup_netprio_state *cs;
-       int ret;
+       int ret = -EINVAL;
 
        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);
 
-       if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
-               kfree(cs);
-               return ERR_PTR(-EINVAL);
-       }
+       if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
+               goto out;
 
        ret = get_prioidx(&cs->prioidx);
-       if (ret != 0) {
+       if (ret < 0) {
                pr_warn("No space in priority index array\n");
-               kfree(cs);
-               return ERR_PTR(ret);
+               goto out;
+       }
+
+       ret = update_netdev_tables();
+       if (ret < 0) {
+               put_prioidx(cs->prioidx);
+               goto out;
        }
 
        return &cs->css;
+out:
+       kfree(cs);
+       return ERR_PTR(ret);
 }
 
 static void cgrp_destroy(struct cgroup *cgrp)
@@ -141,7 +175,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
        rtnl_lock();
        for_each_netdev(&init_net, dev) {
                map = rtnl_dereference(dev->priomap);
-               if (map)
+               if (map && cs->prioidx < map->priomap_len)
                        map->priomap[cs->prioidx] = 0;
        }
        rtnl_unlock();
@@ -165,7 +199,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                map = rcu_dereference(dev->priomap);
-               priority = map ? map->priomap[prioidx] : 0;
+               priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
                cb->fill(cb, dev->name, priority);
        }
        rcu_read_unlock();
@@ -220,13 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
        if (!dev)
                goto out_free_devname;
 
-       update_netdev_tables();
-       ret = 0;
+       ret = write_update_netdev_table(dev);
+       if (ret < 0)
+               goto out_put_dev;
+
        rcu_read_lock();
        map = rcu_dereference(dev->priomap);
        if (map)
                map->priomap[prioidx] = priority;
        rcu_read_unlock();
+
+out_put_dev:
        dev_put(dev);
 
 out_free_devname:
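
The netprio hunks above make the per-device priomap record its own length and index it only when the priority index is within that length, since the table is grown lazily and can be shorter than the highest allocated index. A small stand-alone sketch of the same pattern (types and names are illustrative, not the kernel's):

/* Stand-alone sketch of a table that records its own length and is only
 * indexed when idx < len, as in the read_priomap() guard above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct prio_map {
        unsigned int len;
        unsigned int prio[];
};

static struct prio_map *extend_table(struct prio_map *old, unsigned int new_len)
{
        struct prio_map *tbl = calloc(1, sizeof(*tbl) + new_len * sizeof(unsigned int));

        if (!tbl)
                return old;            /* caller keeps the old table on failure */
        if (old)
                memcpy(tbl->prio, old->prio, old->len * sizeof(unsigned int));
        tbl->len = new_len;
        free(old);
        return tbl;
}

static unsigned int read_prio(const struct prio_map *map, unsigned int idx)
{
        return (map && idx < map->len) ? map->prio[idx] : 0;
}

int main(void)
{
        struct prio_map *map = extend_table(NULL, 4);

        if (!map)
                return 1;
        map->prio[3] = 7;
        printf("%u %u\n", read_prio(map, 3), read_prio(map, 10));
        free(map);
        return 0;
}
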
index d78671e..d124306 100644 (file)
@@ -353,7 +353,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
        unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-       if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+       if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
                void *data = netdev_alloc_frag(fragsz);
 
                if (likely(data)) {
@@ -1755,6 +1755,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = MAX_SKB_FRAGS,
                .flags = flags,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
index 6fbb2ad..1670561 100644 (file)
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        mtu = dev->mtu;
        pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
+       if (size > mtu) {
+               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+               err = -EINVAL;
+               goto out_dev;
+       }
+
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        if (err < 0)
                goto out_skb;
 
-       if (size > mtu) {
-               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-               err = -EINVAL;
-               goto out_skb;
-       }
-
        skb->dev = dev;
        skb->sk  = sk;
        skb->protocol = htons(ETH_P_IEEE802154);
index c48adc5..667c1d4 100644 (file)
@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                case CIPSO_V4_TAG_LOCAL:
                        /* This is a non-standard tag that we only allow for
                         * local connections, so if the incoming interface is
-                        * not the loopback device drop the packet. */
-                       if (!(skb->dev->flags & IFF_LOOPBACK)) {
+                        * not the loopback device drop the packet. Further,
+                        * there is no legitimate reason for setting this from
+                        * userspace so reject it if skb is NULL. */
+                       if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
                                err_offset = opt_iter;
                                goto validate_return_locked;
                        }
index 74c21b9..6083276 100644 (file)
@@ -1349,8 +1349,8 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
                        if (w->leaf && fn->fn_flags & RTN_RTINFO) {
                                int err;
 
-                               if (w->count < w->skip) {
-                                       w->count++;
+                               if (w->skip) {
+                                       w->skip--;
                                        continue;
                                }
 
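
The ip6_fib.c walker hunk changes dump skipping to consume a remaining skip budget instead of comparing an incrementing counter against it. The same pattern in a tiny stand-alone form:

/* Stand-alone sketch of the "decrement the remaining skip budget"
 * pattern from the walker hunk above. */
#include <stdio.h>

static void dump(const int *items, int n, int skip)
{
        for (int i = 0; i < n; i++) {
                if (skip) {            /* consume the skip budget first */
                        skip--;
                        continue;
                }
                printf("%d\n", items[i]);
        }
}

int main(void)
{
        int items[] = { 10, 20, 30, 40 };

        dump(items, 4, 2);             /* prints 30 and 40 */
        return 0;
}
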
index 999a982..becb048 100644 (file)
@@ -2957,10 +2957,6 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
        net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
 
-#ifdef CONFIG_PROC_FS
-       proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
-       proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
-#endif
        net->ipv6.ip6_rt_gc_expire = 30*HZ;
 
        ret = 0;
@@ -2981,10 +2977,6 @@ out_ip6_dst_ops:
 
 static void __net_exit ip6_route_net_exit(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
-       proc_net_remove(net, "ipv6_route");
-       proc_net_remove(net, "rt6_stats");
-#endif
        kfree(net->ipv6.ip6_null_entry);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        kfree(net->ipv6.ip6_prohibit_entry);
@@ -2993,11 +2985,33 @@ static void __net_exit ip6_route_net_exit(struct net *net)
        dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
+static int __net_init ip6_route_net_init_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+       proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
+       proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+#endif
+       return 0;
+}
+
+static void __net_exit ip6_route_net_exit_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+       proc_net_remove(net, "ipv6_route");
+       proc_net_remove(net, "rt6_stats");
+#endif
+}
+
 static struct pernet_operations ip6_route_net_ops = {
        .init = ip6_route_net_init,
        .exit = ip6_route_net_exit,
 };
 
+static struct pernet_operations ip6_route_net_late_ops = {
+       .init = ip6_route_net_init_late,
+       .exit = ip6_route_net_exit_late,
+};
+
 static struct notifier_block ip6_route_dev_notifier = {
        .notifier_call = ip6_route_dev_notify,
        .priority = 0,
@@ -3047,19 +3061,25 @@ int __init ip6_route_init(void)
        if (ret)
                goto xfrm6_init;
 
+       ret = register_pernet_subsys(&ip6_route_net_late_ops);
+       if (ret)
+               goto fib6_rules_init;
+
        ret = -ENOBUFS;
        if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
            __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
            __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
-               goto fib6_rules_init;
+               goto out_register_late_subsys;
 
        ret = register_netdevice_notifier(&ip6_route_dev_notifier);
        if (ret)
-               goto fib6_rules_init;
+               goto out_register_late_subsys;
 
 out:
        return ret;
 
+out_register_late_subsys:
+       unregister_pernet_subsys(&ip6_route_net_late_ops);
 fib6_rules_init:
        fib6_rules_cleanup();
 xfrm6_init:
@@ -3078,6 +3098,7 @@ out_kmem_cache:
 void ip6_route_cleanup(void)
 {
        unregister_netdevice_notifier(&ip6_route_dev_notifier);
+       unregister_pernet_subsys(&ip6_route_net_late_ops);
        fib6_rules_cleanup();
        xfrm6_fini();
        fib6_gc_cleanup();
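
The ipv6 route hunks move the /proc registration into a separate "late" pernet_operations, registered after the core per-namespace state and unregistered before it is torn down. A stand-alone sketch of that two-stage init/exit ordering (types are illustrative, not the kernel's):

/* Stand-alone sketch: the late stage (here "proc entries") only runs once
 * the core state exists, and is torn down first on cleanup. */
#include <stdio.h>

struct pernet_ops {
        int  (*init)(void);
        void (*exit)(void);
};

static int  core_init(void) { printf("core init\n"); return 0; }
static void core_exit(void) { printf("core exit\n"); }
static int  late_init(void) { printf("late init (proc entries)\n"); return 0; }
static void late_exit(void) { printf("late exit (proc entries)\n"); }

static const struct pernet_ops ops[] = {
        { core_init, core_exit },
        { late_init, late_exit },
};

int main(void)
{
        int i;

        for (i = 0; i < 2; i++)        /* register order: core, then late */
                ops[i].init();
        for (i = 1; i >= 0; i--)       /* unregister in reverse order */
                ops[i].exit();
        return 0;
}
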
index 3a9aec2..9df64a5 100644 (file)
@@ -1212,7 +1212,8 @@ have_isn:
        tcp_rsk(req)->snt_isn = isn;
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
-       security_inet_conn_request(sk, skb, req);
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_release;
 
        if (tcp_v6_send_synack(sk, req,
                               (struct request_values *)&tmp_ext,
index 07d7d55..cd6f7a9 100644 (file)
@@ -372,7 +372,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                        skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = ETH_P_AF_IUCV;
-       skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return -ENOMEM;
index 185f12f..47b259f 100644 (file)
@@ -42,6 +42,11 @@ struct l2tp_eth {
        struct sock             *tunnel_sock;
        struct l2tp_session     *session;
        struct list_head        list;
+       atomic_long_t           tx_bytes;
+       atomic_long_t           tx_packets;
+       atomic_long_t           rx_bytes;
+       atomic_long_t           rx_packets;
+       atomic_long_t           rx_errors;
 };
 
 /* via l2tp_session_priv() */
@@ -88,24 +93,40 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        struct l2tp_eth *priv = netdev_priv(dev);
        struct l2tp_session *session = priv->session;
 
+       atomic_long_add(skb->len, &priv->tx_bytes);
+       atomic_long_inc(&priv->tx_packets);
+
        l2tp_xmit_skb(session, skb, session->hdr_len);
 
-       dev->stats.tx_bytes += skb->len;
-       dev->stats.tx_packets++;
+       return NETDEV_TX_OK;
+}
 
-       return 0;
+static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
+                                                     struct rtnl_link_stats64 *stats)
+{
+       struct l2tp_eth *priv = netdev_priv(dev);
+
+       stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
+       stats->tx_packets = atomic_long_read(&priv->tx_packets);
+       stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
+       stats->rx_packets = atomic_long_read(&priv->rx_packets);
+       stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+       return stats;
 }
 
+
 static struct net_device_ops l2tp_eth_netdev_ops = {
        .ndo_init               = l2tp_eth_dev_init,
        .ndo_uninit             = l2tp_eth_dev_uninit,
        .ndo_start_xmit         = l2tp_eth_dev_xmit,
+       .ndo_get_stats64        = l2tp_eth_get_stats64,
 };
 
 static void l2tp_eth_dev_setup(struct net_device *dev)
 {
        ether_setup(dev);
-       dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       dev->priv_flags         &= ~IFF_TX_SKB_SHARING;
+       dev->features           |= NETIF_F_LLTX;
        dev->netdev_ops         = &l2tp_eth_netdev_ops;
        dev->destructor         = free_netdev;
 }
@@ -114,17 +135,17 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
 {
        struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
        struct net_device *dev = spriv->dev;
+       struct l2tp_eth *priv = netdev_priv(dev);
 
        if (session->debug & L2TP_MSG_DATA) {
                unsigned int length;
-               u8 *ptr = skb->data;
 
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
                        goto error;
 
                pr_debug("%s: eth recv\n", session->name);
-               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
        }
 
        if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
@@ -139,15 +160,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
        nf_reset(skb);
 
        if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
-               dev->stats.rx_packets++;
-               dev->stats.rx_bytes += data_len;
-       } else
-               dev->stats.rx_errors++;
-
+               atomic_long_inc(&priv->rx_packets);
+               atomic_long_add(data_len, &priv->rx_bytes);
+       } else {
+               atomic_long_inc(&priv->rx_errors);
+       }
        return;
 
 error:
-       dev->stats.rx_errors++;
+       atomic_long_inc(&priv->rx_errors);
        kfree_skb(skb);
 }
 
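
The l2tp_eth hunks switch the device to NETIF_F_LLTX and replace the shared dev->stats updates with the driver's own atomic counters exposed through ndo_get_stats64, since lockless transmit means several senders can bump the counters concurrently. A stand-alone sketch of the counter pattern using C11 atomics rather than the kernel API:

/* Stand-alone sketch: concurrent transmit paths bump atomic counters,
 * a reader snapshots them on demand, no lock required. */
#include <stdatomic.h>
#include <stdio.h>

struct dev_stats {
        atomic_ulong tx_packets;
        atomic_ulong tx_bytes;
};

static void xmit(struct dev_stats *s, unsigned long len)
{
        atomic_fetch_add(&s->tx_packets, 1);
        atomic_fetch_add(&s->tx_bytes, len);
}

static void snapshot(struct dev_stats *s)
{
        printf("packets=%lu bytes=%lu\n",
               atomic_load(&s->tx_packets),
               atomic_load(&s->tx_bytes));
}

int main(void)
{
        struct dev_stats s;

        atomic_init(&s.tx_packets, 0);
        atomic_init(&s.tx_bytes, 0);
        xmit(&s, 1500);
        xmit(&s, 60);
        snapshot(&s);
        return 0;
}
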
index e9cecca..7d5108a 100644 (file)
@@ -2093,6 +2093,9 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
        int i, ret;
 
+       if (!ieee80211_sdata_running(sdata))
+               return -ENETDOWN;
+
        if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
                ret = drv_set_bitrate_mask(local, sdata, mask);
                if (ret)
index 91d84cc..0db5d34 100644 (file)
@@ -1342,7 +1342,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        u32 changed = 0;
-       u8 bssid[ETH_ALEN];
 
        ASSERT_MGD_MTX(ifmgd);
 
@@ -1352,10 +1351,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        if (WARN_ON(!ifmgd->associated))
                return;
 
-       memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
+       ieee80211_stop_poll(sdata);
 
        ifmgd->associated = NULL;
-       memset(ifmgd->bssid, 0, ETH_ALEN);
 
        /*
         * we need to commit the associated = NULL change because the
@@ -1375,7 +1373,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        netif_carrier_off(sdata->dev);
 
        mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, bssid);
+       sta = sta_info_get(sdata, ifmgd->bssid);
        if (sta) {
                set_sta_flag(sta, WLAN_STA_BLOCK_BA);
                ieee80211_sta_tear_down_BA_sessions(sta, tx);
@@ -1384,13 +1382,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        /* deauthenticate/disassociate now */
        if (tx || frame_buf)
-               ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
-                                              tx, frame_buf);
+               ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
+                                              reason, tx, frame_buf);
 
        /* flush out frame */
        if (tx)
                drv_flush(local, false);
 
+       /* clear bssid only after building the needed mgmt frames */
+       memset(ifmgd->bssid, 0, ETH_ALEN);
+
        /* remove AP and TDLS peers */
        sta_info_flush(local, sdata);
 
@@ -2173,15 +2174,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                       sdata->name, mgmt->sa, status_code);
                ieee80211_destroy_assoc_data(sdata, false);
        } else {
-               printk(KERN_DEBUG "%s: associated\n", sdata->name);
-
                if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
                        /* oops -- internal error -- send timeout for now */
-                       ieee80211_destroy_assoc_data(sdata, true);
-                       sta_info_destroy_addr(sdata, mgmt->bssid);
+                       ieee80211_destroy_assoc_data(sdata, false);
                        cfg80211_put_bss(*bss);
                        return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
                }
+               printk(KERN_DEBUG "%s: associated\n", sdata->name);
 
                /*
                 * destroy assoc_data afterwards, as otherwise an idle
@@ -2612,8 +2611,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
-       ieee80211_stop_poll(sdata);
-
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
                               false, frame_buf);
        mutex_unlock(&ifmgd->mtx);
index 2d1acc6..f9e51ef 100644 (file)
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
                        max_rates = sband->n_bitrates;
        }
 
-       msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+       msp = kzalloc(sizeof(*msp), gfp);
        if (!msp)
                return NULL;
 
index 7bcecf7..965e6ec 100644 (file)
@@ -2455,7 +2455,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
         * frames that we didn't handle, including returning unknown
         * ones. For all other modes we will return them to the sender,
         * setting the 0x80 bit in the action category, as required by
-        * 802.11-2007 7.3.1.11.
+        * 802.11-2012 9.24.4.
         * Newer versions of hostapd shall also use the management frame
         * registration mechanisms, but older ones still use cooked
         * monitor interfaces so push all frames there.
@@ -2465,6 +2465,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
                return RX_DROP_MONITOR;
 
+       if (is_multicast_ether_addr(mgmt->da))
+               return RX_DROP_MONITOR;
+
        /* do not return rejected action frames */
        if (mgmt->u.action.category & 0x80)
                return RX_DROP_UNUSABLE;
index 3bb24a1..a470e11 100644 (file)
@@ -271,6 +271,9 @@ struct sta_ampdu_mlme {
  * @plink_timer: peer link watch timer
  * @plink_timer_was_running: used by suspend/resume to restore timers
  * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ *     calculating clockdrift
+ * @ch_type: peer's channel type
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -278,6 +281,8 @@ struct sta_ampdu_mlme {
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
  * @beacon_loss_count: number of times beacon loss has triggered
+ * @supports_40mhz: tracks whether the station advertised 40 MHz support
+ *     as we overwrite its HT parameters with the currently used value
  */
 struct sta_info {
        /* General information, mostly static */
index 8781d8f..434b687 100644 (file)
@@ -83,9 +83,10 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
 {
        struct xmit_work *work;
 
-       if (!(priv->phy->channels_supported[page] & (1 << chan)))
+       if (!(priv->phy->channels_supported[page] & (1 << chan))) {
                WARN_ON(1);
                return NETDEV_TX_OK;
+       }
 
        if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
                u16 crc = crc_ccitt(0, skb->data, skb->len);
index 819c342..9730882 100644 (file)
@@ -639,6 +639,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
        return 0;
 }
 
+static int
+ip_set_none(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       return -EOPNOTSUPP;
+}
+
 static int
 ip_set_create(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
@@ -1539,6 +1547,10 @@ nlmsg_failure:
 }
 
 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+       [IPSET_CMD_NONE]        = {
+               .call           = ip_set_none,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+       },
        [IPSET_CMD_CREATE]      = {
                .call           = ip_set_create,
                .attr_count     = IPSET_ATTR_CMD_MAX,
index ee86394..d5d3607 100644 (file)
@@ -38,30 +38,6 @@ struct iface_node {
 
 #define iface_data(n)  (rb_entry(n, struct iface_node, node)->iface)
 
-static inline long
-ifname_compare(const char *_a, const char *_b)
-{
-       const long *a = (const long *)_a;
-       const long *b = (const long *)_b;
-
-       BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
-       if (a[0] != b[0])
-               return a[0] - b[0];
-       if (IFNAMSIZ > sizeof(long)) {
-               if (a[1] != b[1])
-                       return a[1] - b[1];
-       }
-       if (IFNAMSIZ > 2 * sizeof(long)) {
-               if (a[2] != b[2])
-                       return a[2] - b[2];
-       }
-       if (IFNAMSIZ > 3 * sizeof(long)) {
-               if (a[3] != b[3])
-                       return a[3] - b[3];
-       }
-       return 0;
-}
-
 static void
 rbtree_destroy(struct rb_root *root)
 {
@@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
 
        while (n) {
                const char *d = iface_data(n);
-               long res = ifname_compare(*iface, d);
+               int res = strcmp(*iface, d);
 
                if (res < 0)
                        n = n->rb_left;
@@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
 
        while (*n) {
                char *ifname = iface_data(*n);
-               long res = ifname_compare(*iface, ifname);
+               int res = strcmp(*iface, ifname);
 
                p = *n;
                if (res < 0)
@@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface4_elem data = { .cidr = HOST_MASK };
        u32 ip = 0, ip_to, last;
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem data = { .cidr = HOST_MASK };
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
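
The hash_netiface hunks drop the word-at-a-time ifname_compare() in favour of strcmp(), because the on-stack iface buffer is no longer zero-initialized and a whole-buffer compare would read the bytes after the terminator. A stand-alone demonstration of the difference:

/* Stand-alone demonstration: comparing fixed-size name buffers over their
 * full length (as the removed word-compare effectively did) is only safe
 * when both buffers are fully zero padded; strcmp stops at the NUL. */
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

int main(void)
{
        char a[IFNAMSIZ], b[IFNAMSIZ];

        memset(a, 0,    sizeof(a));     /* zero padded copy of "eth0" */
        memset(b, 0xff, sizeof(b));     /* garbage after the terminator */
        strcpy(a, "eth0");
        strcpy(b, "eth0");

        printf("strcmp differs: %d\n", strcmp(a, b) != 0);            /* 0 */
        printf("memcmp differs: %d\n", memcmp(a, b, sizeof(a)) != 0); /* 1 */
        return 0;
}
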
index dd811b8..84444dd 100644 (file)
@@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(struct net *net,
-                                   const struct in6_addr *addr)
+static bool __ip_vs_addr_is_local_v6(struct net *net,
+                                    const struct in6_addr *addr)
 {
-       struct rt6_info *rt;
        struct flowi6 fl6 = {
                .daddr = *addr,
        };
+       struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
+       bool is_local;
 
-       rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
-       if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
-               return 1;
+       is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
 
-       return 0;
+       dst_release(dst);
+       return is_local;
 }
 #endif
 
@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
 {
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_service *svc;
        struct ip_vs_dest *dest;
        unsigned int idx;
 
-       if (event != NETDEV_UNREGISTER)
+       if (event != NETDEV_UNREGISTER || !ipvs)
                return NOTIFY_DONE;
        IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
        EnterFunction(2);
@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
                }
        }
 
-       list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
+       list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
                __ip_vs_dev_reset(dest, dev);
        }
        mutex_unlock(&__ip_vs_mutex);
index 3e797d1..791d56b 100644 (file)
@@ -169,8 +169,10 @@ replay:
 
                err = nla_parse(cda, ss->cb[cb_id].attr_count,
                                attr, attrlen, ss->cb[cb_id].policy);
-               if (err < 0)
+               if (err < 0) {
+                       rcu_read_unlock();
                        return err;
+               }
 
                if (nc->call_rcu) {
                        err = nc->call_rcu(net->nfnl, skb, nlh,
index 035960e..c6f7db7 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
                info->del_set.flags, 0, UINT_MAX);
 
        /* Normalize to fit into jiffies */
-       if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+       if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+           add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
                add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
        if (info->add_set.index != IPSET_INVALID_ID)
                ip_set_add(info->add_set.index, skb, par, &add_opt);
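
The xt_set hunk only clamps the ipset timeout to the jiffies-safe maximum when it is a real timeout, so the IPSET_NO_TIMEOUT sentinel is preserved and a permanent entry does not silently gain a finite lifetime. The same guard in stand-alone form (constant names and values are illustrative):

/* Stand-alone sketch of clamping a timeout while preserving a
 * "no timeout" sentinel value. */
#include <limits.h>
#include <stdio.h>

#define MSEC_PER_SEC 1000U
#define NO_TIMEOUT   UINT_MAX          /* sentinel: entry never expires */

static unsigned int normalize_timeout(unsigned int timeout)
{
        /* clamp real timeouts so timeout * MSEC_PER_SEC cannot overflow,
         * but leave the sentinel alone */
        if (timeout != NO_TIMEOUT && timeout > UINT_MAX / MSEC_PER_SEC)
                timeout = UINT_MAX / MSEC_PER_SEC;
        return timeout;
}

int main(void)
{
        printf("%u\n", normalize_timeout(30));           /* unchanged */
        printf("%u\n", normalize_timeout(UINT_MAX - 1)); /* clamped */
        printf("%u\n", normalize_timeout(NO_TIMEOUT));   /* sentinel kept */
        return 0;
}
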
index 17a707d..e06d458 100644 (file)
@@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
        pr_debug("%p\n", sk);
 
-       if (llcp_sock == NULL)
+       if (llcp_sock == NULL || llcp_sock->dev == NULL)
                return -EBADFD;
 
        addr->sa_family = AF_NFC;
index cb26461..2ab196a 100644 (file)
@@ -106,7 +106,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
        nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
        data += 2;
 
-       nfca_poll->nfcid1_len = *data++;
+       nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
 
        pr_debug("sens_res 0x%x, nfcid1_len %d\n",
                 nfca_poll->sens_res, nfca_poll->nfcid1_len);
@@ -130,7 +130,7 @@ static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
                        struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
                                                     __u8 *data)
 {
-       nfcb_poll->sensb_res_len = *data++;
+       nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE);
 
        pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
 
@@ -145,7 +145,7 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
                                                     __u8 *data)
 {
        nfcf_poll->bit_rate = *data++;
-       nfcf_poll->sensf_res_len = *data++;
+       nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE);
 
        pr_debug("bit_rate %d, sensf_res_len %d\n",
                 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
@@ -331,7 +331,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
        switch (ntf->activation_rf_tech_and_mode) {
        case NCI_NFC_A_PASSIVE_POLL_MODE:
                nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
-               nfca_poll->rats_res_len = *data++;
+               nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
                pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
                if (nfca_poll->rats_res_len > 0) {
                        memcpy(nfca_poll->rats_res,
@@ -341,7 +341,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
 
        case NCI_NFC_B_PASSIVE_POLL_MODE:
                nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
-               nfcb_poll->attrib_res_len = *data++;
+               nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50);
                pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
                if (nfcb_poll->attrib_res_len > 0) {
                        memcpy(nfcb_poll->attrib_res,
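
The NCI hunks clamp every length byte taken from the received frame with min_t() before it is used to copy into a fixed-size destination, so a malformed frame cannot overflow the buffer. A stand-alone sketch of that defensive parse (buffer sizes are illustrative):

/* Stand-alone sketch: never trust a wire-supplied length more than the
 * size of the buffer it is copied into. */
#include <stdio.h>
#include <string.h>

#define NFCID1_MAXSIZE 10

struct target {
        unsigned char nfcid1_len;
        unsigned char nfcid1[NFCID1_MAXSIZE];
};

static void parse_nfcid1(struct target *t, const unsigned char *data)
{
        unsigned char len = *data++;

        t->nfcid1_len = len < NFCID1_MAXSIZE ? len : NFCID1_MAXSIZE;
        memcpy(t->nfcid1, data, t->nfcid1_len);
}

int main(void)
{
        unsigned char frame[32] = { 200, 1, 2, 3 };  /* bogus length byte */
        struct target t;

        parse_nfcid1(&t, frame);
        printf("clamped len = %u\n", t.nfcid1_len);  /* 10, not 200 */
        return 0;
}
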
index ec1134c..8b8a6a2 100644 (file)
@@ -54,7 +54,10 @@ static int rawsock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       pr_debug("sock=%p\n", sock);
+       pr_debug("sock=%p sk=%p\n", sock, sk);
+
+       if (!sk)
+               return 0;
 
        sock_orphan(sk);
        sock_put(sk);
index 779ce4f..5a940db 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index bf35b4e..12c30f3 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index d012089..a2fba7e 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 9dd4f92..576f22c 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 36f75a9..5bf6341 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index cfdf135..7dd762a 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Remi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 89cfa9c..0acc943 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 696348f..d6bbbbd 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 2754f09..bebaa43 100644 (file)
@@ -229,7 +229,7 @@ found_UDP_peer:
        return peer;
 
 new_UDP_peer:
-       _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+       _net("Rx UDP DGRAM from NEW peer");
        read_unlock_bh(&rxrpc_peer_lock);
        _leave(" = -EBUSY [new]");
        return ERR_PTR(-EBUSY);
index a2a95aa..c412ad0 100644 (file)
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
        return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
        struct sk_buff_head *list = &sch->q;
        psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-       struct sk_buff *skb;
-
-       if (likely(skb_queue_len(list) < sch->limit)) {
-               skb = skb_peek_tail(list);
-               /* Optimize for add at tail */
-               if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-                       return qdisc_enqueue_tail(nskb, sch);
+       struct sk_buff *skb = skb_peek_tail(list);
 
-               skb_queue_reverse_walk(list, skb) {
-                       if (tnext >= netem_skb_cb(skb)->time_to_send)
-                               break;
-               }
+       /* Optimize for add at tail */
+       if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+               return __skb_queue_tail(list, nskb);
 
-               __skb_queue_after(list, skb, nskb);
-               sch->qstats.backlog += qdisc_pkt_len(nskb);
-               return NET_XMIT_SUCCESS;
+       skb_queue_reverse_walk(list, skb) {
+               if (tnext >= netem_skb_cb(skb)->time_to_send)
+                       break;
        }
 
-       return qdisc_reshape_fail(nskb, sch);
+       __skb_queue_after(list, skb, nskb);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        /* We don't fill cb now as skb_unshare() may invalidate it */
        struct netem_skb_cb *cb;
        struct sk_buff *skb2;
-       int ret;
        int count = 1;
 
        /* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
        }
 
+       if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+               return qdisc_reshape_fail(skb, sch);
+
+       sch->qstats.backlog += qdisc_pkt_len(skb);
+
        cb = netem_skb_cb(skb);
        if (q->gap == 0 ||              /* not doing reordering */
            q->counter < q->gap - 1 ||  /* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
                cb->time_to_send = now + delay;
                ++q->counter;
-               ret = tfifo_enqueue(skb, sch);
+               tfifo_enqueue(skb, sch);
        } else {
                /*
                 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                q->counter = 0;
 
                __skb_queue_head(&sch->q, skb);
-               sch->qstats.backlog += qdisc_pkt_len(skb);
                sch->qstats.requeues++;
-               ret = NET_XMIT_SUCCESS;
-       }
-
-       if (ret != NET_XMIT_SUCCESS) {
-               if (net_xmit_drop_count(ret)) {
-                       sch->qstats.drops++;
-                       return ret;
-               }
        }
 
        return NET_XMIT_SUCCESS;
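
The sch_netem hunks move the queue-limit check and backlog accounting into netem_enqueue(), leaving tfifo_enqueue() as a plain insert that keeps the queue ordered by time_to_send. A simplified forward-walking version of that sorted insert follows; the kernel walks from the tail instead so the common append-at-tail case stays cheap:

/* Stand-alone sketch of a time-ordered insert (types simplified). */
#include <stdio.h>

struct pkt { long time_to_send; struct pkt *next; };

static void tfifo_insert(struct pkt **head, struct pkt *nskb)
{
        struct pkt **p = head;

        /* find the first entry scheduled later than the new one */
        while (*p && (*p)->time_to_send <= nskb->time_to_send)
                p = &(*p)->next;
        nskb->next = *p;
        *p = nskb;
}

int main(void)
{
        struct pkt a = { 30 }, b = { 10 }, c = { 20 };
        struct pkt *head = NULL, *it;

        tfifo_insert(&head, &a);
        tfifo_insert(&head, &b);
        tfifo_insert(&head, &c);
        for (it = head; it; it = it->next)
                printf("%ld\n", it->time_to_send);   /* 10 20 30 */
        return 0;
}
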
index 74305c8..30ea467 100644 (file)
@@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
 
        sch->qstats.backlog = q->qdisc->qstats.backlog;
        opts = nla_nest_start(skb, TCA_OPTIONS);
+       if (opts == NULL)
+               goto nla_put_failure;
        if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        return nla_nest_end(skb, opts);
index 5bc9ab1..b16517e 100644 (file)
@@ -271,6 +271,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         */
        asoc->peer.sack_needed = 1;
        asoc->peer.sack_cnt = 0;
+       asoc->peer.sack_generation = 1;
 
        /* Assume that the peer will tell us if he recognizes ASCONF
         * as part of INIT exchange.
index 80564fe..8b9b679 100644 (file)
@@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 
        epb = &ep->base;
 
-       if (hlist_unhashed(&epb->node))
-               return;
-
        epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
 
        head = &sctp_ep_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
 }
 
@@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
        head = &sctp_assoc_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
 }
 
index f1b7d4b..6ae47ac 100644 (file)
@@ -248,6 +248,11 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
                /* If the SACK timer is running, we have a pending SACK */
                if (timer_pending(timer)) {
                        struct sctp_chunk *sack;
+
+                       if (pkt->transport->sack_generation !=
+                           pkt->transport->asoc->peer.sack_generation)
+                               return retval;
+
                        asoc->a_rwnd = asoc->rwnd;
                        sack = sctp_make_sack(asoc);
                        if (sack) {
index 5942d27..9c90811 100644 (file)
@@ -673,7 +673,9 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                                SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
                        sctp_bh_unlock_sock(sk);
                }
+#if IS_ENABLED(CONFIG_IPV6)
 free_next:
+#endif
                list_del(&addrw->list);
                kfree(addrw);
        }
index a85eeeb..b6de71e 100644 (file)
@@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
        int len;
        __u32 ctsn;
        __u16 num_gabs, num_dup_tsns;
+       struct sctp_association *aptr = (struct sctp_association *)asoc;
        struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
        struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+       struct sctp_transport *trans;
 
        memset(gabs, 0, sizeof(gabs));
        ctsn = sctp_tsnmap_get_ctsn(map);
@@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
                sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
                                 sctp_tsnmap_get_dups(map));
 
+       /* Once we have a sack generated, check to see what our sack
+        * generation is; if it's 0, reset the transports to 0 and reset
+        * the association generation to 1.
+        *
+        * The idea is that zero is never used as a valid generation for
+        * the association, so no transport will match after a wrap event
+        * like this until the next sack.
+        */
+       if (++aptr->peer.sack_generation == 0) {
+               list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+                                   transports)
+                       trans->sack_generation = 0;
+               aptr->peer.sack_generation = 1;
+       }
 nodata:
        return retval;
 }
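
The SCTP hunks in this series introduce a per-association sack_generation counter: it is bumped each time a SACK is built, a transport records the current generation when a TSN is marked against it, and a SACK is only bundled on a transport whose generation is current. Zero is reserved so a counter wrap (where every transport would be reset to 0, elided below) can never produce a false match. A rough stand-alone sketch of the comparison (struct layout is illustrative):

#include <stdbool.h>
#include <stdio.h>

struct assoc { unsigned int sack_generation; };
struct transport { unsigned int sack_generation; struct assoc *asoc; };

/* bundling is only allowed when the transport saw data since the last
 * SACK, i.e. its generation matches the association's */
static bool may_bundle_sack(const struct transport *t)
{
        return t->sack_generation == t->asoc->sack_generation;
}

int main(void)
{
        struct assoc a = { .sack_generation = 1 };   /* starts at 1, never 0 */
        struct transport t = { .sack_generation = 0, .asoc = &a };

        printf("before data: %d\n", may_bundle_sack(&t)); /* 0: suppress */
        t.sack_generation = a.sack_generation;  /* done when a TSN is marked */
        printf("after data:  %d\n", may_bundle_sack(&t)); /* 1: bundle */

        /* when a new SACK is built the association bumps its generation;
         * 0 is skipped so a wrapped counter never matches a stale 0 */
        if (++a.sack_generation == 0)
                a.sack_generation = 1;
        printf("after sack:  %d\n", may_bundle_sack(&t)); /* 0 again */
        return 0;
}
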
index c96d1a8..8716da1 100644 (file)
@@ -1268,7 +1268,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_REPORT_TSN:
                        /* Record the arrival of a TSN.  */
                        error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                                                cmd->obj.u32);
+                                                cmd->obj.u32, NULL);
                        break;
 
                case SCTP_CMD_REPORT_FWDTSN:
index b3b8a8d..31c7bfc 100644 (file)
@@ -1231,8 +1231,14 @@ out_free:
        SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
                          " kaddrs: %p err: %d\n",
                          asoc, kaddrs, err);
-       if (asoc)
+       if (asoc) {
+               /* sctp_primitive_ASSOCIATE may have added this association
+                * to the hash table; try to unhash it just in case. It's a
+                * no-op if it wasn't hashed, so we're safe.
+                */
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
        return err;
 }
 
@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        goto out_unlock;
 
 out_free:
-       if (new_asoc)
+       if (new_asoc) {
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
 out_unlock:
        sctp_release_sock(sk);
 
index b026ba0..1dcceb6 100644 (file)
@@ -68,6 +68,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
        peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
        memset(&peer->saddr, 0, sizeof(union sctp_addr));
 
+       peer->sack_generation = 0;
+
        /* From 6.3.1 RTO Calculation:
         *
         * C1) Until an RTT measurement has been made for a packet sent to the
index f1e40ce..b5fb7c4 100644 (file)
@@ -114,7 +114,8 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
 
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
+int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
+                    struct sctp_transport *trans)
 {
        u16 gap;
 
@@ -133,6 +134,9 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
                 */
                map->max_tsn_seen++;
                map->cumulative_tsn_ack_point++;
+               if (trans)
+                       trans->sack_generation =
+                               trans->asoc->peer.sack_generation;
                map->base_tsn++;
        } else {
                /* Either we already have a gap, or about to record a gap, so
index 8a84017..33d8947 100644 (file)
@@ -715,7 +715,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         * can mark it as received so the tsn_map is updated correctly.
         */
        if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                            ntohl(chunk->subh.data_hdr->tsn)))
+                            ntohl(chunk->subh.data_hdr->tsn),
+                            chunk->transport))
                goto fail_mark;
 
        /* First calculate the padding, so we don't inadvertently
index f2d1de7..f5a6a4f 100644 (file)
@@ -1051,7 +1051,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
-               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
+               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);
 
                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
index 15f3474..baf5704 100644 (file)
@@ -1389,7 +1389,7 @@ static void reg_set_request_processed(void)
        spin_unlock(&reg_requests_lock);
 
        if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
-               cancel_delayed_work_sync(&reg_timeout);
+               cancel_delayed_work(&reg_timeout);
 
        if (need_more_processing)
                schedule_work(&reg_work);
index 8f2d68f..316cfd0 100644 (file)
@@ -804,7 +804,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
             ntype == NL80211_IFTYPE_P2P_CLIENT))
                return -EBUSY;
 
-       if (ntype != otype) {
+       if (ntype != otype && netif_running(dev)) {
                err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
                                                    ntype);
                if (err)
index 0948c6b..8b673dd 100755 (executable)
@@ -83,6 +83,8 @@ push(@signature_tags, "Signed-off-by:");
 push(@signature_tags, "Reviewed-by:");
 push(@signature_tags, "Acked-by:");
 
+my $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
+
 # rfc822 email address - preloaded methods go here.
 my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
 my $rfc822_char = '[\\000-\\377]';
@@ -473,7 +475,6 @@ my @subsystem = ();
 my @status = ();
 my %deduplicate_name_hash = ();
 my %deduplicate_address_hash = ();
-my $signature_pattern;
 
 my @maintainers = get_maintainers();
 
old mode 100644 (file)
new mode 100755 (executable)
index 3efc9b1..860aeb3 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mman.h>
 #include <linux/mount.h>
 #include <linux/personality.h>
+#include <linux/backing-dev.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR      2
index 372ec65..ffd8900 100644 (file)
@@ -2717,7 +2717,7 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
                        ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
                return dentry_has_perm(cred, dentry, FILE__SETATTR);
 
-       if (ia_valid & ATTR_SIZE)
+       if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
                av |= FILE__OPEN;
 
        return dentry_has_perm(cred, dentry, av);
index b8c5372..df2de54 100644 (file)
@@ -145,7 +145,9 @@ struct security_class_mapping secclass_map[] = {
            "node_bind", "name_connect", NULL } },
        { "memprotect", { "mmap_zero", NULL } },
        { "peer", { "recv", NULL } },
-       { "capability2", { "mac_override", "mac_admin", "syslog", NULL } },
+       { "capability2",
+         { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
+           NULL } },
        { "kernel_service", { "use_as_override", "create_files_as", NULL } },
        { "tun_socket",
          { COMMON_SOCK_PERMS, NULL } },
index 582aace..7eca25f 100644 (file)
@@ -37,8 +37,8 @@ MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Routines for control of TEA5757/5759 Philips AM/FM radio tuner chips");
 MODULE_LICENSE("GPL");
 
-#define FREQ_LO                 (76U * 16000)
-#define FREQ_HI                (108U * 16000)
+#define FREQ_LO                ((tea->tea5759 ? 760 :  875) * 1600U)
+#define FREQ_HI                ((tea->tea5759 ? 910 : 1080) * 1600U)
 
 /*
  * definitions
@@ -120,9 +120,9 @@ static u32 snd_tea575x_read(struct snd_tea575x *tea)
        return data;
 }
 
-static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
+static u32 snd_tea575x_val_to_freq(struct snd_tea575x *tea, u32 val)
 {
-       u32 freq = snd_tea575x_read(tea) & TEA575X_BIT_FREQ_MASK;
+       u32 freq = val & TEA575X_BIT_FREQ_MASK;
 
        if (freq == 0)
                return freq;
@@ -139,6 +139,11 @@ static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
        return clamp(freq * 16, FREQ_LO, FREQ_HI); /* from kHz */
 }
 
+static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
+{
+       return snd_tea575x_val_to_freq(tea, snd_tea575x_read(tea));
+}
+
 static void snd_tea575x_set_freq(struct snd_tea575x *tea)
 {
        u32 freq = tea->freq;
@@ -156,6 +161,7 @@ static void snd_tea575x_set_freq(struct snd_tea575x *tea)
        tea->val &= ~TEA575X_BIT_FREQ_MASK;
        tea->val |= freq & TEA575X_BIT_FREQ_MASK;
        snd_tea575x_write(tea, tea->val);
+       tea->freq = snd_tea575x_val_to_freq(tea, tea->val);
 }
 
 /*
@@ -317,7 +323,6 @@ static int tea575x_s_ctrl(struct v4l2_ctrl *ctrl)
 }
 
 static const struct v4l2_file_operations tea575x_fops = {
-       .owner          = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open           = v4l2_fh_open,
        .release        = v4l2_fh_release,
@@ -337,7 +342,6 @@ static const struct v4l2_ioctl_ops tea575x_ioctl_ops = {
 };
 
 static const struct video_device tea575x_radio = {
-       .fops           = &tea575x_fops,
        .ioctl_ops      = &tea575x_ioctl_ops,
        .release        = video_device_release_empty,
 };
@@ -349,7 +353,7 @@ static const struct v4l2_ctrl_ops tea575x_ctrl_ops = {
 /*
  * initialize all the tea575x chips
  */
-int snd_tea575x_init(struct snd_tea575x *tea)
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
 {
        int retval;
 
@@ -374,6 +378,9 @@ int snd_tea575x_init(struct snd_tea575x *tea)
        tea->vd.lock = &tea->mutex;
        tea->vd.v4l2_dev = tea->v4l2_dev;
        tea->vd.ctrl_handler = &tea->ctrl_handler;
+       tea->fops = tea575x_fops;
+       tea->fops.owner = owner;
+       tea->vd.fops = &tea->fops;
        set_bit(V4L2_FL_USE_FH_PRIO, &tea->vd.flags);
        /* disable hw_freq_seek if we can't use it */
        if (tea->cannot_read_data)
index 67f47d8..52b5c0b 100644 (file)
@@ -2769,7 +2769,7 @@ static int __devinit snd_es1968_create(struct snd_card *card,
        chip->tea.ops = &snd_es1968_tea_ops;
        strlcpy(chip->tea.card, "SF64-PCE2", sizeof(chip->tea.card));
        sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
-       if (!snd_tea575x_init(&chip->tea))
+       if (!snd_tea575x_init(&chip->tea, THIS_MODULE))
                printk(KERN_INFO "es1968: detected TEA575x radio\n");
 #endif
 
index f696623..b32e802 100644 (file)
@@ -1254,7 +1254,7 @@ static int __devinit snd_fm801_create(struct snd_card *card,
        sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
        if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
            (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
-               if (snd_tea575x_init(&chip->tea)) {
+               if (snd_tea575x_init(&chip->tea, THIS_MODULE)) {
                        snd_printk(KERN_ERR "TEA575x radio not found\n");
                        snd_fm801_free(chip);
                        return -ENODEV;
@@ -1263,7 +1263,7 @@ static int __devinit snd_fm801_create(struct snd_card *card,
                /* autodetect tuner connection */
                for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
                        chip->tea575x_tuner = tea575x_tuner;
-                       if (!snd_tea575x_init(&chip->tea)) {
+                       if (!snd_tea575x_init(&chip->tea, THIS_MODULE)) {
                                snd_printk(KERN_INFO "detected TEA575x radio type %s\n",
                                           get_tea575x_gpio(chip)->name);
                                break;
index 163b6b5..d030797 100644 (file)
@@ -97,19 +97,6 @@ config SND_HDA_CODEC_REALTEK
          snd-hda-codec-realtek.
          This module is automatically loaded at probing.
 
-config SND_HDA_ENABLE_REALTEK_QUIRKS
-       bool "Build static quirks for Realtek codecs"
-       depends on SND_HDA_CODEC_REALTEK
-       default y
-       help
-         Say Y here to build the static quirks codes for Realtek codecs.
-         If you need the "model" preset that the default BIOS auto-parser
-         can't handle, turn this option on.
-
-         If your device works with model=auto option, basically you don't
-         need the quirk code.  By turning this off, you can reduce the
-         module size quite a lot.
-
 config SND_HDA_CODEC_ANALOG
        bool "Build Analog Device HD-audio codec support"
        default y
index 6e9ef3e..f7520b9 100644 (file)
@@ -618,7 +618,6 @@ int snd_hda_gen_add_verbs(struct hda_gen_spec *spec,
                          const struct hda_verb *list)
 {
        const struct hda_verb **v;
-       snd_array_init(&spec->verbs, sizeof(struct hda_verb *), 8);
        v = snd_array_new(&spec->verbs);
        if (!v)
                return -ENOMEM;
index 2a7889d..632ad0a 100644 (file)
@@ -157,4 +157,14 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
                        const struct snd_pci_quirk *quirk,
                        const struct hda_fixup *fixlist);
 
+static inline void snd_hda_gen_init(struct hda_gen_spec *spec)
+{
+       snd_array_init(&spec->verbs, sizeof(struct hda_verb *), 8);
+}
+
+static inline void snd_hda_gen_free(struct hda_gen_spec *spec)
+{
+       snd_array_free(&spec->verbs);
+}
+
 #endif /* __SOUND_HDA_AUTO_PARSER_H */
index 41ca803..51cb2a2 100644 (file)
@@ -1184,6 +1184,7 @@ static void snd_hda_codec_free(struct hda_codec *codec)
 {
        if (!codec)
                return;
+       snd_hda_jack_tbl_clear(codec);
        restore_init_pincfgs(codec);
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        cancel_delayed_work(&codec->power_work);
@@ -1192,6 +1193,7 @@ static void snd_hda_codec_free(struct hda_codec *codec)
        list_del(&codec->list);
        snd_array_free(&codec->mixers);
        snd_array_free(&codec->nids);
+       snd_array_free(&codec->cvt_setups);
        snd_array_free(&codec->conn_lists);
        snd_array_free(&codec->spdif_out);
        codec->bus->caddr_tbl[codec->addr] = NULL;
@@ -2333,6 +2335,8 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        /* free only driver_pins so that init_pins + user_pins are restored */
        snd_array_free(&codec->driver_pins);
        restore_pincfgs(codec);
+       snd_array_free(&codec->cvt_setups);
+       snd_array_free(&codec->spdif_out);
        codec->num_pcms = 0;
        codec->pcm_info = NULL;
        codec->preset = NULL;
@@ -4393,20 +4397,19 @@ void snd_hda_update_power_acct(struct hda_codec *codec)
        codec->power_jiffies += delta;
 }
 
-/**
- * snd_hda_power_up - Power-up the codec
- * @codec: HD-audio codec
- *
- * Increment the power-up counter and power up the hardware really when
- * not turned on yet.
- */
-void snd_hda_power_up(struct hda_codec *codec)
+/* Transition to powered up; if wait_power_down is set, wait for a pending
+ * transition to D3 to complete first. A pending D3 transition is indicated
+ * by power_transition == -1. */
+static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
 {
        struct hda_bus *bus = codec->bus;
 
        spin_lock(&codec->power_lock);
        codec->power_count++;
-       if (codec->power_on || codec->power_transition > 0) {
+       /* Return if power_on or transitioning to power_on, unless currently
+        * powering down. */
+       if ((codec->power_on || codec->power_transition > 0) &&
+           !(wait_power_down && codec->power_transition < 0)) {
                spin_unlock(&codec->power_lock);
                return;
        }
@@ -4430,8 +4433,37 @@ void snd_hda_power_up(struct hda_codec *codec)
        codec->power_transition = 0;
        spin_unlock(&codec->power_lock);
 }
+
+/**
+ * snd_hda_power_up - Power-up the codec
+ * @codec: HD-audio codec
+ *
+ * Increment the power-up counter and power up the hardware really when
+ * not turned on yet.
+ */
+void snd_hda_power_up(struct hda_codec *codec)
+{
+       __snd_hda_power_up(codec, false);
+}
 EXPORT_SYMBOL_HDA(snd_hda_power_up);
 
+/**
+ * snd_hda_power_up_d3wait - Power up the codec after waiting for any pending
+ *   D3 transition to complete.  This differs from snd_hda_power_up() when
+ *   power_transition == -1: snd_hda_power_up() treats that case as a no-op,
+ *   whereas snd_hda_power_up_d3wait() waits for the D3 transition to complete
+ *   and then powers back up.
+ * @codec: HD-audio codec
+ *
+ * Cancel any power-down operation happening on the work queue, then power up.
+ */
+void snd_hda_power_up_d3wait(struct hda_codec *codec)
+{
+       /* This will cancel and wait for pending power_work to complete. */
+       __snd_hda_power_up(codec, true);
+}
+EXPORT_SYMBOL_HDA(snd_hda_power_up_d3wait);
+
 #define power_save(codec)      \
        ((codec)->bus->power_save ? *(codec)->bus->power_save : 0)
 
index 4fc3960..2fdaadb 100644 (file)
@@ -1056,10 +1056,12 @@ const char *snd_hda_get_jack_location(u32 cfg);
  */
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 void snd_hda_power_up(struct hda_codec *codec);
+void snd_hda_power_up_d3wait(struct hda_codec *codec);
 void snd_hda_power_down(struct hda_codec *codec);
 void snd_hda_update_power_acct(struct hda_codec *codec);
 #else
 static inline void snd_hda_power_up(struct hda_codec *codec) {}
+static inline void snd_hda_power_up_d3wait(struct hda_codec *codec) {}
 static inline void snd_hda_power_down(struct hda_codec *codec) {}
 #endif
 
index 0276382..7757536 100644 (file)
@@ -1766,7 +1766,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
                                   buff_step);
        snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                                   buff_step);
-       snd_hda_power_up(apcm->codec);
+       snd_hda_power_up_d3wait(apcm->codec);
        err = hinfo->ops.open(hinfo, apcm->codec, substream);
        if (err < 0) {
                azx_release_device(azx_dev);
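
For illustration only (not part of the patch): a minimal sketch of the power_transition convention the azx_pcm_open() change above relies on. The function name needs_real_power_up() and the boolean parameters are invented for the example; the real decision is made inside __snd_hda_power_up() in hda_codec.c.

#include <stdbool.h>

/* Convention assumed from the hunks above (illustrative only):
 *  power_transition  > 0  -> a power-up is in flight
 *  power_transition == -1 -> a power-down (D3) is pending on the work queue
 */
static bool needs_real_power_up(bool power_on, int power_transition,
                                bool wait_power_down)
{
        /* Mirror of the test added to __snd_hda_power_up(): skip the real
         * power-up only if we are already on (or going up) and we were not
         * asked to wait out a pending power-down.
         */
        if ((power_on || power_transition > 0) &&
            !(wait_power_down && power_transition < 0))
                return false;
        return true;
}

/* snd_hda_power_up()        ~ needs_real_power_up(on, t, false)
 * snd_hda_power_up_d3wait() ~ needs_real_power_up(on, t, true)
 * With power_transition == -1 and power_on still set, only the d3wait
 * variant proceeds to cancel the pending power-down and re-power the codec,
 * which is why the PCM open path now uses it.
 */
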
index 172370b..2bf99fc 100644 (file)
@@ -445,8 +445,10 @@ static int conexant_init(struct hda_codec *codec)
 
 static void conexant_free(struct hda_codec *codec)
 {
+       struct conexant_spec *spec = codec->spec;
+       snd_hda_gen_free(&spec->gen);
        snd_hda_detach_beep_device(codec);
-       kfree(codec->spec);
+       kfree(spec);
 }
 
 static const struct snd_kcontrol_new cxt_capture_mixers[] = {
@@ -4466,6 +4468,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
        {}
 };
 
@@ -4497,6 +4500,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
        if (!spec)
                return -ENOMEM;
        codec->spec = spec;
+       snd_hda_gen_init(&spec->gen);
 
        switch (codec->vendor_id) {
        case 0x14f15045:
index f8f4906..aa4c25e 100644 (file)
@@ -2289,6 +2289,7 @@ static void alc_free(struct hda_codec *codec)
        alc_shutup(codec);
        alc_free_kctls(codec);
        alc_free_bind_ctls(codec);
+       snd_hda_gen_free(&spec->gen);
        kfree(spec);
        snd_hda_detach_beep_device(codec);
 }
@@ -4253,6 +4254,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
                return -ENOMEM;
        codec->spec = spec;
        spec->mixer_nid = mixer_nid;
+       snd_hda_gen_init(&spec->gen);
 
        err = alc_codec_rename_from_preset(codec);
        if (err < 0) {
@@ -6686,6 +6688,31 @@ static const struct alc_model_fixup alc662_fixup_models[] = {
        {}
 };
 
+static void alc662_fill_coef(struct hda_codec *codec)
+{
+       int val, coef;
+
+       coef = alc_get_coef0(codec);
+
+       switch (codec->vendor_id) {
+       case 0x10ec0662:
+               if ((coef & 0x00f0) == 0x0030) {
+                       val = alc_read_coef_idx(codec, 0x4); /* EAPD Ctrl */
+                       alc_write_coef_idx(codec, 0x4, val & ~(1<<10));
+               }
+               break;
+       case 0x10ec0272:
+       case 0x10ec0273:
+       case 0x10ec0663:
+       case 0x10ec0665:
+       case 0x10ec0670:
+       case 0x10ec0671:
+       case 0x10ec0672:
+               val = alc_read_coef_idx(codec, 0xd); /* EAPD Ctrl */
+               alc_write_coef_idx(codec, 0xd, val | (1<<14));
+               break;
+       }
+}
 
 /*
  */
@@ -6705,12 +6732,8 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_fix_pll_init(codec, 0x20, 0x04, 15);
 
-       if ((alc_get_coef0(codec) & (1 << 14)) &&
-           codec->bus->pci->subsystem_vendor == 0x1025 &&
-           spec->cdefine.platform_type == 1) {
-               if (alc_codec_rename(codec, "ALC272X") < 0)
-                       goto error;
-       }
+       spec->init_hook = alc662_fill_coef;
+       alc662_fill_coef(codec);
 
        alc_pick_fixup(codec, alc662_fixup_models,
                       alc662_fixup_tbl, alc662_fixups);
@@ -6718,6 +6741,13 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_auto_parse_customize_define(codec);
 
+       if ((alc_get_coef0(codec) & (1 << 14)) &&
+           codec->bus->pci->subsystem_vendor == 0x1025 &&
+           spec->cdefine.platform_type == 1) {
+               if (alc_codec_rename(codec, "ALC272X") < 0)
+                       goto error;
+       }
+
        /* automatic parse from the BIOS config */
        err = alc662_parse_auto_config(codec);
        if (err < 0)
@@ -6800,6 +6830,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0272, .name = "ALC272", .patch = patch_alc662 },
        { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
        { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+       { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
        { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
          .patch = patch_alc861 },
        { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
index 7db8228..0767528 100644 (file)
@@ -4367,7 +4367,7 @@ static int stac92xx_init(struct hda_codec *codec)
                                         AC_PINCTL_IN_EN);
        for (i = 0; i < spec->num_pwrs; i++)  {
                hda_nid_t nid = spec->pwr_nids[i];
-               int pinctl, def_conf;
+               unsigned int pinctl, def_conf;
 
                def_conf = snd_hda_codec_get_pincfg(codec, nid);
                def_conf = get_defcfg_connect(def_conf);
@@ -4376,6 +4376,11 @@ static int stac92xx_init(struct hda_codec *codec)
                        stac_toggle_power_map(codec, nid, 0);
                        continue;
                }
+               if (def_conf == AC_JACK_PORT_FIXED) {
+                       /* no need for jack detection for fixed pins */
+                       stac_toggle_power_map(codec, nid, 1);
+                       continue;
+               }
                /* power on when no jack detection is available */
                /* or when the VREF is used for controlling LED */
                if (!spec->hp_detect ||
index 64d2a4f..e9b62b5 100644 (file)
@@ -935,9 +935,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
        }
 
 found:
-       data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
-       snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
-                     data | (pll_p << PLLP_SHIFT));
+       snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
        snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
                      pll_r << PLLR_SHIFT);
        snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
index 6f097fb..08c7f66 100644 (file)
 
 /* PLL registers bitfields */
 #define PLLP_SHIFT             0
+#define PLLP_MASK              7
 #define PLLQ_SHIFT             3
 #define PLLR_SHIFT             0
 #define PLLJ_SHIFT             2
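
For illustration only (not part of the patch): why the aic3x hunk above switches from a plain read/OR/write to a masked update. update_bits() below is a stand-in for the kernel's snd_soc_update_bits(); the register is modeled as a plain integer for the example.

#define PLLP_MASK 7  /* low three bits of AIC3X_PLL_PROGA_REG hold P */

/* Read-modify-write that clears the masked field before setting it,
 * in the style of snd_soc_update_bits(codec, reg, mask, val).
 */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
                                unsigned int val)
{
        return (reg & ~mask) | (val & mask);
}

/* Example: old P value 5 already in the register, new pll_p = 2.
 *   OR-only (old code):  5 | 2         = 7  -> wrong P programmed
 *   masked update:       (5 & ~7) | 2  = 2  -> correct P programmed
 */
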
index acbdc5f..32682c1 100644 (file)
@@ -1491,6 +1491,7 @@ static int wm2200_bclk_rates_dat[WM2200_NUM_BCLK_RATES] = {
 
 static int wm2200_bclk_rates_cd[WM2200_NUM_BCLK_RATES] = {
        5644800,
+       3763200,
        2882400,
        1881600,
        1411200,
index 65d525d..812acd8 100644 (file)
@@ -1863,6 +1863,7 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec,
                                return ret;
                        }
 
+                       regcache_cache_only(wm8904->regmap, false);
                        regcache_sync(wm8904->regmap);
 
                        /* Enable bias */
@@ -1899,14 +1900,8 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec,
                snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
                                    WM8904_BIAS_ENA, 0);
 
-#ifdef CONFIG_REGULATOR
-               /* Post 2.6.34 we will be able to get a callback when
-                * the regulators are disabled which we can use but
-                * for now just assume that the power will be cut if
-                * the regulator API is in use.
-                */
-               codec->cache_sync = 1;
-#endif
+               regcache_cache_only(wm8904->regmap, true);
+               regcache_mark_dirty(wm8904->regmap);
 
                regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies),
                                       wm8904->supplies);
@@ -2084,10 +2079,8 @@ static int wm8904_probe(struct snd_soc_codec *codec)
 {
        struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
        struct wm8904_pdata *pdata = wm8904->pdata;
-       u16 *reg_cache = codec->reg_cache;
        int ret, i;
 
-       codec->cache_sync = 1;
        codec->control_data = wm8904->regmap;
 
        switch (wm8904->devtype) {
@@ -2150,6 +2143,7 @@ static int wm8904_probe(struct snd_soc_codec *codec)
                goto err_enable;
        }
 
+       regcache_cache_only(wm8904->regmap, true);
        /* Change some default settings - latch VU and enable ZC */
        snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT,
                            WM8904_ADC_VU, WM8904_ADC_VU);
@@ -2180,14 +2174,18 @@ static int wm8904_probe(struct snd_soc_codec *codec)
                        if (!pdata->gpio_cfg[i])
                                continue;
 
-                       reg_cache[WM8904_GPIO_CONTROL_1 + i]
-                               = pdata->gpio_cfg[i] & 0xffff;
+                       regmap_update_bits(wm8904->regmap,
+                                          WM8904_GPIO_CONTROL_1 + i,
+                                          0xffff,
+                                          pdata->gpio_cfg[i]);
                }
 
                /* Zero is the default value for these anyway */
                for (i = 0; i < WM8904_MIC_REGS; i++)
-                       reg_cache[WM8904_MIC_BIAS_CONTROL_0 + i]
-                               = pdata->mic_cfg[i];
+                       regmap_update_bits(wm8904->regmap,
+                                          WM8904_MIC_BIAS_CONTROL_0 + i,
+                                          0xffff,
+                                          pdata->mic_cfg[i]);
        }
 
        /* Set Class W by default - this will be managed by the Class
index aa8c98b..1436b6c 100644 (file)
@@ -724,9 +724,6 @@ static void wm1811_jackdet_set_mode(struct snd_soc_codec *codec, u16 mode)
 {
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 
-       if (!wm8994->jackdet || !wm8994->jack_cb)
-               return;
-
        if (!wm8994->jackdet || !wm8994->jack_cb)
                return;
 
index 8af422e..dc9b42b 100644 (file)
@@ -2837,8 +2837,6 @@ static int wm8996_probe(struct snd_soc_codec *codec)
                }
        }
 
-       regcache_cache_only(codec->control_data, true);
-
        /* Apply platform data settings */
        snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL,
                            WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK,
@@ -3051,7 +3049,6 @@ static int wm8996_remove(struct snd_soc_codec *codec)
        for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++)
                regulator_unregister_notifier(wm8996->supplies[i].consumer,
                                              &wm8996->disable_nb[i]);
-       regulator_bulk_free(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
 
        return 0;
 }
@@ -3206,14 +3203,15 @@ static __devinit int wm8996_i2c_probe(struct i2c_client *i2c,
        dev_info(&i2c->dev, "revision %c\n",
                 (reg & WM8996_CHIP_REV_MASK) + 'A');
 
-       regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
-
        ret = wm8996_reset(wm8996);
        if (ret < 0) {
                dev_err(&i2c->dev, "Failed to issue reset\n");
                goto err_regmap;
        }
 
+       regcache_cache_only(wm8996->regmap, true);
+       regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
+
        wm8996_init_gpio(wm8996);
 
        ret = snd_soc_register_codec(&i2c->dev,
index 1c2aa7f..4da5fc5 100644 (file)
@@ -33,7 +33,6 @@
 
 #include <mach/hardware.h>
 #include <mach/dma.h>
-#include <mach/audio.h>
 
 #include "../../arm/pxa2xx-pcm.h"
 #include "pxa-ssp.h"
@@ -194,7 +193,7 @@ static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div)
 {
        u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
 
-       if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) {
+       if (ssp->type == PXA25x_SSP) {
                sscr0 &= ~0x0000ff00;
                sscr0 |= ((div - 2)/2) << 8; /* 2..512 */
        } else {
@@ -212,7 +211,7 @@ static u32 pxa_ssp_get_scr(struct ssp_device *ssp)
        u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
        u32 div;
 
-       if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP)
+       if (ssp->type == PXA25x_SSP)
                div = ((sscr0 >> 8) & 0xff) * 2 + 2;
        else
                div = ((sscr0 >> 8) & 0xfff) + 1;
@@ -242,7 +241,7 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
                break;
        case PXA_SSP_CLK_PLL:
                /* Internal PLL is fixed */
-               if (cpu_is_pxa25x())
+               if (ssp->type == PXA25x_SSP)
                        priv->sysclk = 1843200;
                else
                        priv->sysclk = 13000000;
@@ -266,11 +265,11 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
 
        /* The SSP clock must be disabled when changing SSP clock mode
         * on PXA2xx.  On PXA3xx it must be enabled when doing so. */
-       if (!cpu_is_pxa3xx())
+       if (ssp->type != PXA3xx_SSP)
                clk_disable(ssp->clk);
        val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0;
        pxa_ssp_write_reg(ssp, SSCR0, val);
-       if (!cpu_is_pxa3xx())
+       if (ssp->type != PXA3xx_SSP)
                clk_enable(ssp->clk);
 
        return 0;
@@ -294,24 +293,20 @@ static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
        case PXA_SSP_AUDIO_DIV_SCDB:
                val = pxa_ssp_read_reg(ssp, SSACD);
                val &= ~SSACD_SCDB;
-#if defined(CONFIG_PXA3xx)
-               if (cpu_is_pxa3xx())
+               if (ssp->type == PXA3xx_SSP)
                        val &= ~SSACD_SCDX8;
-#endif
                switch (div) {
                case PXA_SSP_CLK_SCDB_1:
                        val |= SSACD_SCDB;
                        break;
                case PXA_SSP_CLK_SCDB_4:
                        break;
-#if defined(CONFIG_PXA3xx)
                case PXA_SSP_CLK_SCDB_8:
-                       if (cpu_is_pxa3xx())
+                       if (ssp->type == PXA3xx_SSP)
                                val |= SSACD_SCDX8;
                        else
                                return -EINVAL;
                        break;
-#endif
                default:
                        return -EINVAL;
                }
@@ -337,10 +332,8 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
        struct ssp_device *ssp = priv->ssp;
        u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70;
 
-#if defined(CONFIG_PXA3xx)
-       if (cpu_is_pxa3xx())
+       if (ssp->type == PXA3xx_SSP)
                pxa_ssp_write_reg(ssp, SSACDD, 0);
-#endif
 
        switch (freq_out) {
        case 5622000:
@@ -365,11 +358,10 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
                break;
 
        default:
-#ifdef CONFIG_PXA3xx
                /* PXA3xx has a clock ditherer which can be used to generate
                 * a wider range of frequencies - calculate a value for it.
                 */
-               if (cpu_is_pxa3xx()) {
+               if (ssp->type == PXA3xx_SSP) {
                        u32 val;
                        u64 tmp = 19968;
                        tmp *= 1000000;
@@ -386,7 +378,6 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
                                val, freq_out);
                        break;
                }
-#endif
 
                return -EINVAL;
        }
@@ -590,10 +581,8 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
        /* bit size */
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
-#ifdef CONFIG_PXA3xx
-               if (cpu_is_pxa3xx())
+               if (ssp->type == PXA3xx_SSP)
                        sscr0 |= SSCR0_FPCKE;
-#endif
                sscr0 |= SSCR0_DataSize(16);
                break;
        case SNDRV_PCM_FORMAT_S24_LE:
@@ -618,9 +607,7 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
                        * trying and failing a lot; some of the registers
                        * needed for that mode are only available on PXA3xx.
                        */
-
-#ifdef CONFIG_PXA3xx
-                       if (!cpu_is_pxa3xx())
+                       if (ssp->type != PXA3xx_SSP)
                                return -EINVAL;
 
                        sspsp |= SSPSP_SFRMWDTH(width * 2);
@@ -628,9 +615,6 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
                        sspsp |= SSPSP_EDMYSTOP(3);
                        sspsp |= SSPSP_DMYSTOP(3);
                        sspsp |= SSPSP_DMYSTRT(1);
-#else
-                       return -EINVAL;
-#endif
                } else {
                        /* The frame width is the width the LRCLK is
                         * asserted for; the delay is expressed in
index 0b0df49..3b6da91 100644 (file)
@@ -346,6 +346,17 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
        return 0;
 }
 
+static int tegra_wm8903_remove(struct snd_soc_card *card)
+{
+       struct snd_soc_pcm_runtime *rtd = &(card->rtd[0]);
+       struct snd_soc_dai *codec_dai = rtd->codec_dai;
+       struct snd_soc_codec *codec = codec_dai->codec;
+
+       wm8903_mic_detect(codec, NULL, 0, 0);
+
+       return 0;
+}
+
 static struct snd_soc_dai_link tegra_wm8903_dai = {
        .name = "WM8903",
        .stream_name = "WM8903 PCM",
@@ -363,6 +374,8 @@ static struct snd_soc_card snd_soc_tegra_wm8903 = {
        .dai_link = &tegra_wm8903_dai,
        .num_links = 1,
 
+       .remove = tegra_wm8903_remove,
+
        .controls = tegra_wm8903_controls,
        .num_controls = ARRAY_SIZE(tegra_wm8903_controls),
        .dapm_widgets = tegra_wm8903_dapm_widgets,
index 6f9715a..56ad923 100644 (file)
@@ -209,7 +209,7 @@ static int usb6fire_fw_ezusb_upload(
        int ret;
        u8 data;
        struct usb_device *device = interface_to_usbdev(intf);
-       const struct firmware *fw = 0;
+       const struct firmware *fw = NULL;
        struct ihex_record *rec = kmalloc(sizeof(struct ihex_record),
                        GFP_KERNEL);
 
index e690690..0f647d2 100644 (file)
@@ -414,7 +414,7 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
 {
        struct list_head *p;
        struct snd_usb_endpoint *ep;
-       int ret, is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
+       int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
 
        mutex_lock(&chip->mutex);
 
@@ -434,16 +434,6 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
                    type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
                    ep_num);
 
-       /* select the alt setting once so the endpoints become valid */
-       ret = usb_set_interface(chip->dev, alts->desc.bInterfaceNumber,
-                               alts->desc.bAlternateSetting);
-       if (ret < 0) {
-               snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
-                                       __func__, ret);
-               ep = NULL;
-               goto __exit_unlock;
-       }
-
        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (!ep)
                goto __exit_unlock;
@@ -831,9 +821,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
        if (++ep->use_count != 1)
                return 0;
 
-       if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
-               return -EINVAL;
-
        /* just to be sure */
        deactivate_urbs(ep, 0, 1);
        wait_clear_urbs(ep);
@@ -911,9 +898,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
        if (snd_BUG_ON(ep->use_count == 0))
                return;
 
-       if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
-               return;
-
        if (--ep->use_count == 0) {
                deactivate_urbs(ep, force, can_sleep);
                ep->data_subs = NULL;
@@ -926,42 +910,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
        }
 }
 
-/**
- * snd_usb_endpoint_activate: activate an snd_usb_endpoint
- *
- * @ep: the endpoint to activate
- *
- * If the endpoint is not currently in use, this functions will select the
- * correct alternate interface setting for the interface of this endpoint.
- *
- * In case of any active users, this functions does nothing.
- *
- * Returns an error if usb_set_interface() failed, 0 in all other
- * cases.
- */
-int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep)
-{
-       if (ep->use_count != 0)
-               return 0;
-
-       if (!ep->chip->shutdown &&
-           !test_and_set_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
-               int ret;
-
-               ret = usb_set_interface(ep->chip->dev, ep->iface, ep->alt_idx);
-               if (ret < 0) {
-                       snd_printk(KERN_ERR "%s() usb_set_interface() failed, ret = %d\n",
-                                               __func__, ret);
-                       clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
-                       return ret;
-               }
-
-               return 0;
-       }
-
-       return -EBUSY;
-}
-
 /**
  * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
  *
@@ -980,24 +928,15 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
        if (!ep)
                return -EINVAL;
 
+       deactivate_urbs(ep, 1, 1);
+       wait_clear_urbs(ep);
+
        if (ep->use_count != 0)
                return 0;
 
-       if (!ep->chip->shutdown &&
-           test_and_clear_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
-               int ret;
-
-               ret = usb_set_interface(ep->chip->dev, ep->iface, 0);
-               if (ret < 0) {
-                       snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
-                                               __func__, ret);
-                       return ret;
-               }
+       clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
 
-               return 0;
-       }
-
-       return -EBUSY;
+       return 0;
 }
 
 /**
index 41daaa2..e71fe55 100644 (file)
@@ -341,6 +341,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .map = audigy2nx_map,
                .selector_map = audigy2nx_selectors,
        },
+       {       /* Logitech, Inc. QuickCam Pro for Notebooks */
+               .id = USB_ID(0x046d, 0x0991),
+               .ignore_ctl_error = 1,
+       },
+       {       /* Logitech, Inc. QuickCam E 3500 */
+               .id = USB_ID(0x046d, 0x09a4),
+               .ignore_ctl_error = 1,
+       },
        {
                /* Hercules DJ Console (Windows Edition) */
                .id = USB_ID(0x06f8, 0xb000),
index cdf8b76..a1298f3 100644 (file)
@@ -261,19 +261,6 @@ static void stop_endpoints(struct snd_usb_substream *subs,
                                      force, can_sleep, wait);
 }
 
-static int activate_endpoints(struct snd_usb_substream *subs)
-{
-       if (subs->sync_endpoint) {
-               int ret;
-
-               ret = snd_usb_endpoint_activate(subs->sync_endpoint);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return snd_usb_endpoint_activate(subs->data_endpoint);
-}
-
 static int deactivate_endpoints(struct snd_usb_substream *subs)
 {
        int reta, retb;
@@ -314,6 +301,33 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        if (fmt == subs->cur_audiofmt)
                return 0;
 
+       /* close the old interface */
+       if (subs->interface >= 0 && subs->interface != fmt->iface) {
+               err = usb_set_interface(subs->dev, subs->interface, 0);
+               if (err < 0) {
+                       snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed (%d)\n",
+                               dev->devnum, fmt->iface, fmt->altsetting, err);
+                       return -EIO;
+               }
+               subs->interface = -1;
+               subs->altset_idx = 0;
+       }
+
+       /* set interface */
+       if (subs->interface != fmt->iface ||
+           subs->altset_idx != fmt->altset_idx) {
+               err = usb_set_interface(dev, fmt->iface, fmt->altsetting);
+               if (err < 0) {
+                       snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed (%d)\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting, err);
+                       return -EIO;
+               }
+               snd_printdd(KERN_INFO "setting usb interface %d:%d\n",
+                               fmt->iface, fmt->altsetting);
+               subs->interface = fmt->iface;
+               subs->altset_idx = fmt->altset_idx;
+       }
+
        subs->data_endpoint = snd_usb_add_endpoint(subs->stream->chip,
                                                   alts, fmt->endpoint, subs->direction,
                                                   SND_USB_ENDPOINT_TYPE_DATA);
@@ -354,17 +368,21 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
                    (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
                     get_endpoint(alts, 1)->bSynchAddress != 0 &&
                     !implicit_fb)) {
-                       snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
-                                  dev->devnum, fmt->iface, fmt->altsetting);
+                       snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting,
+                                  get_endpoint(alts, 1)->bmAttributes,
+                                  get_endpoint(alts, 1)->bLength,
+                                  get_endpoint(alts, 1)->bSynchAddress);
                        return -EINVAL;
                }
                ep = get_endpoint(alts, 1)->bEndpointAddress;
-               if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+               if (!implicit_fb &&
+                   get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
                    (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
-                    (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)) ||
-                    ( is_playback && !implicit_fb))) {
-                       snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
-                                  dev->devnum, fmt->iface, fmt->altsetting);
+                    (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
+                       snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting,
+                                  is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
                        return -EINVAL;
                }
 
@@ -383,7 +401,7 @@ add_sync_ep:
                subs->data_endpoint->sync_master = subs->sync_endpoint;
        }
 
-       if ((err = snd_usb_init_pitch(subs->stream->chip, subs->interface, alts, fmt)) < 0)
+       if ((err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt)) < 0)
                return err;
 
        subs->cur_audiofmt = fmt;
@@ -446,7 +464,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                struct usb_interface *iface;
                iface = usb_ifnum_to_if(subs->dev, fmt->iface);
                alts = &iface->altsetting[fmt->altset_idx];
-               ret = snd_usb_init_sample_rate(subs->stream->chip, subs->interface, alts, fmt, rate);
+               ret = snd_usb_init_sample_rate(subs->stream->chip, fmt->iface, alts, fmt, rate);
                if (ret < 0)
                        return ret;
                subs->cur_rate = rate;
@@ -456,12 +474,6 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                mutex_lock(&subs->stream->chip->shutdown_mutex);
                /* format changed */
                stop_endpoints(subs, 0, 0, 0);
-               deactivate_endpoints(subs);
-
-               ret = activate_endpoints(subs);
-               if (ret < 0)
-                       goto unlock;
-
                ret = snd_usb_endpoint_set_params(subs->data_endpoint, hw_params, fmt,
                                                  subs->sync_endpoint);
                if (ret < 0)
@@ -496,6 +508,7 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
        subs->period_bytes = 0;
        mutex_lock(&subs->stream->chip->shutdown_mutex);
        stop_endpoints(subs, 0, 1, 1);
+       deactivate_endpoints(subs);
        mutex_unlock(&subs->stream->chip->shutdown_mutex);
        return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
@@ -934,16 +947,20 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
 
 static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
 {
-       int ret;
        struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
        struct snd_usb_substream *subs = &as->substream[direction];
 
        stop_endpoints(subs, 0, 0, 0);
-       ret = deactivate_endpoints(subs);
+
+       if (!as->chip->shutdown && subs->interface >= 0) {
+               usb_set_interface(subs->dev, subs->interface, 0);
+               subs->interface = -1;
+       }
+
        subs->pcm_substream = NULL;
        snd_usb_autosuspend(subs->stream->chip);
 
-       return ret;
+       return 0;
 }
 
 /* Since a URB can handle only a single linear buffer, we must use double
@@ -1147,7 +1164,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
        return -EINVAL;
 }
 
-int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd)
+static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream,
+                                            int cmd)
 {
        int err;
        struct snd_usb_substream *subs = substream->runtime->private_data;
index d89ab4c..79780fa 100644 (file)
@@ -1831,6 +1831,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       USB_DEVICE(0x0582, 0x014d),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               /* .vendor_name = "BOSS", */
+               /* .product_name = "GT-100", */
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = & (const struct snd_usb_midi_endpoint_info) {
+                                       .out_cables = 0x0001,
+                                       .in_cables  = 0x0001
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 
 /* Guillemot devices */
 {
index 146fd61..d9834b3 100644 (file)
@@ -701,14 +701,18 @@ int main(void)
        pfd.fd = fd;
 
        while (1) {
+               struct sockaddr *addr_p = (struct sockaddr *) &addr;
+               socklen_t addr_l = sizeof(addr);
                pfd.events = POLLIN;
                pfd.revents = 0;
                poll(&pfd, 1, -1);
 
-               len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0);
+               len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
+                               addr_p, &addr_l);
 
-               if (len < 0) {
-                       syslog(LOG_ERR, "recv failed; error:%d", len);
+               if (len < 0 || addr.nl_pid) {
+                       syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
+                                       addr.nl_pid, errno, strerror(errno));
                        close(fd);
                        return -1;
                }
index 3d69aa9..46c2f6b 100644 (file)
@@ -250,8 +250,12 @@ endef
 all_objs := $(sort $(ALL_OBJS))
 all_deps := $(all_objs:%.o=.%.d)
 
+# let the .d file also depend on the source and header files
 define check_deps
-               $(CC) -M $(CFLAGS) $< > $@;
+               @set -e; $(RM) $@; \
+               $(CC) -M $(CFLAGS) $< > $@.$$$$; \
+               sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
+               $(RM) $@.$$$$
 endef
 
 $(gui_deps): ks_version.h
@@ -270,11 +274,13 @@ endif
 
 tags:  force
        $(RM) tags
-       find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px
+       find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
+       --regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'
 
 TAGS:  force
        $(RM) TAGS
-       find . -name '*.[ch]' | xargs etags
+       find . -name '*.[ch]' | xargs etags \
+       --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'
 
 define do_install
        $(print_install)                                \
@@ -290,7 +296,7 @@ install_lib: all_cmd install_plugins install_python
 install: install_lib
 
 clean:
-       $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES).*.d
+       $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
        $(RM) tags TAGS
 
 endif # skip-makefile
index 5548282..5f34aa3 100644 (file)
@@ -467,8 +467,10 @@ int pevent_register_function(struct pevent *pevent, char *func,
                item->mod = NULL;
        item->addr = addr;
 
-       pevent->funclist = item;
+       if (!item->func || (mod && !item->mod))
+               die("malloc func");
 
+       pevent->funclist = item;
        pevent->func_count++;
 
        return 0;
@@ -511,12 +513,12 @@ struct printk_list {
 
 static int printk_cmp(const void *a, const void *b)
 {
-       const struct func_map *fa = a;
-       const struct func_map *fb = b;
+       const struct printk_map *pa = a;
+       const struct printk_map *pb = b;
 
-       if (fa->addr < fb->addr)
+       if (pa->addr < pb->addr)
                return -1;
-       if (fa->addr > fb->addr)
+       if (pa->addr > pb->addr)
                return 1;
 
        return 0;
@@ -583,10 +585,13 @@ int pevent_register_print_string(struct pevent *pevent, char *fmt,
        item = malloc_or_die(sizeof(*item));
 
        item->next = pevent->printklist;
-       pevent->printklist = item;
        item->printk = strdup(fmt);
        item->addr = addr;
 
+       if (!item->printk)
+               die("malloc fmt");
+
+       pevent->printklist = item;
        pevent->printk_count++;
 
        return 0;
@@ -616,7 +621,9 @@ static struct event_format *alloc_event(void)
 {
        struct event_format *event;
 
-       event = malloc_or_die(sizeof(*event));
+       event = malloc(sizeof(*event));
+       if (!event)
+               return NULL;
        memset(event, 0, sizeof(*event));
 
        return event;
@@ -626,12 +633,8 @@ static void add_event(struct pevent *pevent, struct event_format *event)
 {
        int i;
 
-       if (!pevent->events)
-               pevent->events = malloc_or_die(sizeof(event));
-       else
-               pevent->events =
-                       realloc(pevent->events, sizeof(event) *
-                               (pevent->nr_events + 1));
+       pevent->events = realloc(pevent->events, sizeof(event) *
+                                (pevent->nr_events + 1));
        if (!pevent->events)
                die("Can not allocate events");
 
@@ -697,6 +700,10 @@ static void free_arg(struct print_arg *arg)
                free_arg(arg->symbol.field);
                free_flag_sym(arg->symbol.symbols);
                break;
+       case PRINT_HEX:
+               free_arg(arg->hex.field);
+               free_arg(arg->hex.size);
+               break;
        case PRINT_TYPE:
                free(arg->typecast.type);
                free_arg(arg->typecast.item);
@@ -775,6 +782,25 @@ int pevent_peek_char(void)
        return __peek_char();
 }
 
+static int extend_token(char **tok, char *buf, int size)
+{
+       char *newtok = realloc(*tok, size);
+
+       if (!newtok) {
+               free(*tok);
+               *tok = NULL;
+               return -1;
+       }
+
+       if (!*tok)
+               strcpy(newtok, buf);
+       else
+               strcat(newtok, buf);
+       *tok = newtok;
+
+       return 0;
+}
+
 static enum event_type force_token(const char *str, char **tok);
 
 static enum event_type __read_token(char **tok)
@@ -859,17 +885,10 @@ static enum event_type __read_token(char **tok)
                do {
                        if (i == (BUFSIZ - 1)) {
                                buf[i] = 0;
-                               if (*tok) {
-                                       *tok = realloc(*tok, tok_size + BUFSIZ);
-                                       if (!*tok)
-                                               return EVENT_NONE;
-                                       strcat(*tok, buf);
-                               } else
-                                       *tok = strdup(buf);
+                               tok_size += BUFSIZ;
 
-                               if (!*tok)
+                               if (extend_token(tok, buf, tok_size) < 0)
                                        return EVENT_NONE;
-                               tok_size += BUFSIZ;
                                i = 0;
                        }
                        last_ch = ch;
@@ -908,17 +927,10 @@ static enum event_type __read_token(char **tok)
        while (get_type(__peek_char()) == type) {
                if (i == (BUFSIZ - 1)) {
                        buf[i] = 0;
-                       if (*tok) {
-                               *tok = realloc(*tok, tok_size + BUFSIZ);
-                               if (!*tok)
-                                       return EVENT_NONE;
-                               strcat(*tok, buf);
-                       } else
-                               *tok = strdup(buf);
+                       tok_size += BUFSIZ;
 
-                       if (!*tok)
+                       if (extend_token(tok, buf, tok_size) < 0)
                                return EVENT_NONE;
-                       tok_size += BUFSIZ;
                        i = 0;
                }
                ch = __read_char();
@@ -927,14 +939,7 @@ static enum event_type __read_token(char **tok)
 
  out:
        buf[i] = 0;
-       if (*tok) {
-               *tok = realloc(*tok, tok_size + i);
-               if (!*tok)
-                       return EVENT_NONE;
-               strcat(*tok, buf);
-       } else
-               *tok = strdup(buf);
-       if (!*tok)
+       if (extend_token(tok, buf, tok_size + i + 1) < 0)
                return EVENT_NONE;
 
        if (type == EVENT_ITEM) {
@@ -1255,9 +1260,15 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                                        field->flags |= FIELD_IS_POINTER;
 
                                if (field->type) {
-                                       field->type = realloc(field->type,
-                                                             strlen(field->type) +
-                                                             strlen(last_token) + 2);
+                                       char *new_type;
+                                       new_type = realloc(field->type,
+                                                          strlen(field->type) +
+                                                          strlen(last_token) + 2);
+                                       if (!new_type) {
+                                               free(last_token);
+                                               goto fail;
+                                       }
+                                       field->type = new_type;
                                        strcat(field->type, " ");
                                        strcat(field->type, last_token);
                                        free(last_token);
@@ -1282,6 +1293,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                if (strcmp(token, "[") == 0) {
                        enum event_type last_type = type;
                        char *brackets = token;
+                       char *new_brackets;
                        int len;
 
                        field->flags |= FIELD_IS_ARRAY;
@@ -1301,9 +1313,14 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                                        len = 1;
                                last_type = type;
 
-                               brackets = realloc(brackets,
-                                                  strlen(brackets) +
-                                                  strlen(token) + len);
+                               new_brackets = realloc(brackets,
+                                                      strlen(brackets) +
+                                                      strlen(token) + len);
+                               if (!new_brackets) {
+                                       free(brackets);
+                                       goto fail;
+                               }
+                               brackets = new_brackets;
                                if (len == 2)
                                        strcat(brackets, " ");
                                strcat(brackets, token);
@@ -1319,7 +1336,12 @@ static int event_read_fields(struct event_format *event, struct format_field **f
 
                        free_token(token);
 
-                       brackets = realloc(brackets, strlen(brackets) + 2);
+                       new_brackets = realloc(brackets, strlen(brackets) + 2);
+                       if (!new_brackets) {
+                               free(brackets);
+                               goto fail;
+                       }
+                       brackets = new_brackets;
                        strcat(brackets, "]");
 
                        /* add brackets to type */
@@ -1330,10 +1352,16 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                         * the format: type [] item;
                         */
                        if (type == EVENT_ITEM) {
-                               field->type = realloc(field->type,
-                                                     strlen(field->type) +
-                                                     strlen(field->name) +
-                                                     strlen(brackets) + 2);
+                               char *new_type;
+                               new_type = realloc(field->type,
+                                                  strlen(field->type) +
+                                                  strlen(field->name) +
+                                                  strlen(brackets) + 2);
+                               if (!new_type) {
+                                       free(brackets);
+                                       goto fail;
+                               }
+                               field->type = new_type;
                                strcat(field->type, " ");
                                strcat(field->type, field->name);
                                free_token(field->name);
@@ -1341,9 +1369,15 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                                field->name = token;
                                type = read_token(&token);
                        } else {
-                               field->type = realloc(field->type,
-                                                     strlen(field->type) +
-                                                     strlen(brackets) + 1);
+                               char *new_type;
+                               new_type = realloc(field->type,
+                                                  strlen(field->type) +
+                                                  strlen(brackets) + 1);
+                               if (!new_type) {
+                                       free(brackets);
+                                       goto fail;
+                               }
+                               field->type = new_type;
                                strcat(field->type, brackets);
                        }
                        free(brackets);
@@ -1726,10 +1760,16 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
                /* could just be a type pointer */
                if ((strcmp(arg->op.op, "*") == 0) &&
                    type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
+                       char *new_atom;
+
                        if (left->type != PRINT_ATOM)
                                die("bad pointer type");
-                       left->atom.atom = realloc(left->atom.atom,
+                       new_atom = realloc(left->atom.atom,
                                            strlen(left->atom.atom) + 3);
+                       if (!new_atom)
+                               goto out_free;
+
+                       left->atom.atom = new_atom;
                        strcat(left->atom.atom, " *");
                        free(arg->op.op);
                        *arg = *left;
@@ -2146,6 +2186,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
                if (value == NULL)
                        goto out_free;
                field->value = strdup(value);
+               if (field->value == NULL)
+                       goto out_free;
 
                free_arg(arg);
                arg = alloc_arg();
@@ -2159,6 +2201,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
                if (value == NULL)
                        goto out_free;
                field->str = strdup(value);
+               if (field->str == NULL)
+                       goto out_free;
                free_arg(arg);
                arg = NULL;
 
@@ -2259,6 +2303,45 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
        return EVENT_ERROR;
 }
 
+static enum event_type
+process_hex(struct event_format *event, struct print_arg *arg, char **tok)
+{
+       struct print_arg *field;
+       enum event_type type;
+       char *token;
+
+       memset(arg, 0, sizeof(*arg));
+       arg->type = PRINT_HEX;
+
+       field = alloc_arg();
+       type = process_arg(event, field, &token);
+
+       if (test_type_token(type, token, EVENT_DELIM, ","))
+               goto out_free;
+
+       arg->hex.field = field;
+
+       free_token(token);
+
+       field = alloc_arg();
+       type = process_arg(event, field, &token);
+
+       if (test_type_token(type, token, EVENT_DELIM, ")"))
+               goto out_free;
+
+       arg->hex.size = field;
+
+       free_token(token);
+       type = read_token_item(tok);
+       return type;
+
+ out_free:
+       free_arg(field);
+       free_token(token);
+       *tok = NULL;
+       return EVENT_ERROR;
+}
+
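process_hex() parses the two arguments of the __print_hex() helper that tracepoints may use in their TP_printk() format: the first argument is the field holding the bytes, the second the number of bytes to dump. A hypothetical print format of the kind this handles (event and field names made up for illustration):

	print fmt: "buf=%s", __print_hex(REC->buf, REC->buf_len)

The PRINT_HEX rendering added to print_str_arg() later in this diff turns the referenced bytes into space-separated two-digit hex values.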
 static enum event_type
 process_dynamic_array(struct event_format *event, struct print_arg *arg, char **tok)
 {
@@ -2488,6 +2571,10 @@ process_function(struct event_format *event, struct print_arg *arg,
                is_symbolic_field = 1;
                return process_symbols(event, arg, tok);
        }
+       if (strcmp(token, "__print_hex") == 0) {
+               free_token(token);
+               return process_hex(event, arg, tok);
+       }
        if (strcmp(token, "__get_str") == 0) {
                free_token(token);
                return process_str(event, arg, tok);
@@ -2541,7 +2628,16 @@ process_arg_token(struct event_format *event, struct print_arg *arg,
                }
                /* atoms can be more than one token long */
                while (type == EVENT_ITEM) {
-                       atom = realloc(atom, strlen(atom) + strlen(token) + 2);
+                       char *new_atom;
+                       new_atom = realloc(atom,
+                                          strlen(atom) + strlen(token) + 2);
+                       if (!new_atom) {
+                               free(atom);
+                               *tok = NULL;
+                               free_token(token);
+                               return EVENT_ERROR;
+                       }
+                       atom = new_atom;
                        strcat(atom, " ");
                        strcat(atom, token);
                        free_token(token);
@@ -2835,7 +2931,7 @@ static int get_common_info(struct pevent *pevent,
        event = pevent->events[0];
        field = pevent_find_common_field(event, type);
        if (!field)
-               die("field '%s' not found", type);
+               return -1;
 
        *offset = field->offset;
        *size = field->size;
@@ -2886,15 +2982,16 @@ static int parse_common_flags(struct pevent *pevent, void *data)
 
 static int parse_common_lock_depth(struct pevent *pevent, void *data)
 {
-       int ret;
-
-       ret = __parse_common(pevent, data,
-                            &pevent->ld_size, &pevent->ld_offset,
-                            "common_lock_depth");
-       if (ret < 0)
-               return -1;
+       return __parse_common(pevent, data,
+                             &pevent->ld_size, &pevent->ld_offset,
+                             "common_lock_depth");
+}
 
-       return ret;
+static int parse_common_migrate_disable(struct pevent *pevent, void *data)
+{
+       return __parse_common(pevent, data,
+                             &pevent->ld_size, &pevent->ld_offset,
+                             "common_migrate_disable");
 }
 
 static int events_id_cmp(const void *a, const void *b);
@@ -2995,6 +3092,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                break;
        case PRINT_FLAGS:
        case PRINT_SYMBOL:
+       case PRINT_HEX:
                break;
        case PRINT_TYPE:
                val = eval_num_arg(data, size, event, arg->typecast.item);
@@ -3214,11 +3312,13 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
 {
        struct pevent *pevent = event->pevent;
        struct print_flag_sym *flag;
+       struct format_field *field;
        unsigned long long val, fval;
        unsigned long addr;
        char *str;
+       unsigned char *hex;
        int print;
-       int len;
+       int i, len;
 
        switch (arg->type) {
        case PRINT_NULL:
@@ -3228,27 +3328,29 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                print_str_to_seq(s, format, len_arg, arg->atom.atom);
                return;
        case PRINT_FIELD:
-               if (!arg->field.field) {
-                       arg->field.field = pevent_find_any_field(event, arg->field.name);
-                       if (!arg->field.field)
+               field = arg->field.field;
+               if (!field) {
+                       field = pevent_find_any_field(event, arg->field.name);
+                       if (!field)
                                die("field %s not found", arg->field.name);
+                       arg->field.field = field;
                }
                /* Zero sized fields, mean the rest of the data */
-               len = arg->field.field->size ? : size - arg->field.field->offset;
+               len = field->size ? : size - field->offset;
 
                /*
                 * Some events pass in pointers. If this is not an array
                 * and the size is the same as long_size, assume that it
                 * is a pointer.
                 */
-               if (!(arg->field.field->flags & FIELD_IS_ARRAY) &&
-                   arg->field.field->size == pevent->long_size) {
-                       addr = *(unsigned long *)(data + arg->field.field->offset);
+               if (!(field->flags & FIELD_IS_ARRAY) &&
+                   field->size == pevent->long_size) {
+                       addr = *(unsigned long *)(data + field->offset);
                        trace_seq_printf(s, "%lx", addr);
                        break;
                }
                str = malloc_or_die(len + 1);
-               memcpy(str, data + arg->field.field->offset, len);
+               memcpy(str, data + field->offset, len);
                str[len] = 0;
                print_str_to_seq(s, format, len_arg, str);
                free(str);
@@ -3281,6 +3383,23 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                        }
                }
                break;
+       case PRINT_HEX:
+               field = arg->hex.field->field.field;
+               if (!field) {
+                       str = arg->hex.field->field.name;
+                       field = pevent_find_any_field(event, str);
+                       if (!field)
+                               die("field %s not found", str);
+                       arg->hex.field->field.field = field;
+               }
+               hex = data + field->offset;
+               len = eval_num_arg(data, size, event, arg->hex.size);
+               for (i = 0; i < len; i++) {
+                       if (i)
+                               trace_seq_putc(s, ' ');
+                       trace_seq_printf(s, "%02x", hex[i]);
+               }
+               break;
 
        case PRINT_TYPE:
                break;
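The PRINT_HEX case resolves the field on first use, evaluates the size argument against the record, and prints one two-digit hex value per byte. A worked example with made-up data:

	/* field bytes = { 0xde, 0xad, 0xbe, 0xef }, size arg evaluates to 4 */
	/* appended to the trace_seq:  "de ad be ef"                         */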
@@ -3299,7 +3418,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                break;
        }
        case PRINT_BSTRING:
-               trace_seq_printf(s, format, arg->string.string);
+               print_str_to_seq(s, format, len_arg, arg->string.string);
                break;
        case PRINT_OP:
                /*
@@ -3363,6 +3482,10 @@ process_defined_func(struct trace_seq *s, void *data, int size,
                        string = malloc_or_die(sizeof(*string));
                        string->next = strings;
                        string->str = strdup(str.buffer);
+                       if (!string->str)
+                               die("malloc str");
+
+                       args[i] = (unsigned long long)string->str;
                        strings = string;
                        trace_seq_destroy(&str);
                        break;
@@ -3400,6 +3523,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
        unsigned long long ip, val;
        char *ptr;
        void *bptr;
+       int vsize;
 
        field = pevent->bprint_buf_field;
        ip_field = pevent->bprint_ip_field;
@@ -3448,6 +3572,8 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
                                goto process_again;
                        case '0' ... '9':
                                goto process_again;
+                       case '.':
+                               goto process_again;
                        case 'p':
                                ls = 1;
                                /* fall through */
@@ -3455,23 +3581,30 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
                        case 'u':
                        case 'x':
                        case 'i':
-                               /* the pointers are always 4 bytes aligned */
-                               bptr = (void *)(((unsigned long)bptr + 3) &
-                                               ~3);
                                switch (ls) {
                                case 0:
-                                       ls = 4;
+                                       vsize = 4;
                                        break;
                                case 1:
-                                       ls = pevent->long_size;
+                                       vsize = pevent->long_size;
                                        break;
                                case 2:
-                                       ls = 8;
+                                       vsize = 8;
+                                       break;
                                default:
+                                       vsize = ls; /* ? */
                                        break;
                                }
-                               val = pevent_read_number(pevent, bptr, ls);
-                               bptr += ls;
+                       /* fall through */
+                       case '*':
+                               if (*ptr == '*')
+                                       vsize = 4;
+
+                               /* the pointers are always 4 bytes aligned */
+                               bptr = (void *)(((unsigned long)bptr + 3) &
+                                               ~3);
+                               val = pevent_read_number(pevent, bptr, vsize);
+                               bptr += vsize;
                                arg = alloc_arg();
                                arg->next = NULL;
                                arg->type = PRINT_ATOM;
@@ -3479,12 +3612,21 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
                                sprintf(arg->atom.atom, "%lld", val);
                                *next = arg;
                                next = &arg->next;
+                               /*
+                                * The '*' case means that an arg is used as the length.
+                                * We must keep processing to find out what it applies to.
+                                */
+                               if (*ptr == '*')
+                                       goto process_again;
+
                                break;
                        case 's':
                                arg = alloc_arg();
                                arg->next = NULL;
                                arg->type = PRINT_BSTRING;
                                arg->string.string = strdup(bptr);
+                               if (!arg->string.string)
+                                       break;
                                bptr += strlen(bptr) + 1;
                                *next = arg;
                                next = &arg->next;
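With the '.' and '*' cases added above, a width or precision passed as an argument is consumed as its own 4-byte value before the conversion itself is read. A worked example of the control flow for a hypothetical trace_printk("%*d", 8, 42):

	/* '*' -> vsize = 4, width argument (8) read as its own PRINT_ATOM,
	 *        then goto process_again                                    */
	/* 'd' -> vsize = 4, value argument (42) read as a PRINT_ATOM        */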
@@ -3589,6 +3731,16 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
        trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
 }
 
+static int is_printable_array(char *p, unsigned int len)
+{
+       unsigned int i;
+
+       for (i = 0; i < len && p[i]; i++)
+               if (!isprint(p[i]))
+                   return 0;
+       return 1;
+}
+
 static void print_event_fields(struct trace_seq *s, void *data, int size,
                               struct event_format *event)
 {
@@ -3608,7 +3760,8 @@ static void print_event_fields(struct trace_seq *s, void *data, int size,
                                len = offset >> 16;
                                offset &= 0xffff;
                        }
-                       if (field->flags & FIELD_IS_STRING) {
+                       if (field->flags & FIELD_IS_STRING &&
+                           is_printable_array(data + offset, len)) {
                                trace_seq_printf(s, "%s", (char *)data + offset);
                        } else {
                                trace_seq_puts(s, "ARRAY[");
@@ -3619,6 +3772,7 @@ static void print_event_fields(struct trace_seq *s, void *data, int size,
                                                         *((unsigned char *)data + offset + i));
                                }
                                trace_seq_putc(s, ']');
+                               field->flags &= ~FIELD_IS_STRING;
                        }
                } else {
                        val = pevent_read_number(event->pevent, data + field->offset,
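is_printable_array() accepts data only if every byte up to a terminating NUL (or the full length) is printable; print_event_fields() then prints FIELD_IS_STRING data as a string only when that check passes, otherwise it falls back to the ARRAY[..] byte dump and clears FIELD_IS_STRING so later records skip the probe. Roughly, with made-up field contents:

	/* { 'h', 'i', 0 }       -> printed as:  hi                 */
	/* { 0x01, 0x02, 0x00 }  -> printed as:  ARRAY[01, 02, 00]  */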
@@ -3758,6 +3912,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
                                } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
                                        print_mac_arg(s, *(ptr+1), data, size, event, arg);
                                        ptr++;
+                                       arg = arg->next;
                                        break;
                                }
 
@@ -3794,14 +3949,15 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
                                                break;
                                        }
                                }
-                               if (pevent->long_size == 8 && ls) {
+                               if (pevent->long_size == 8 && ls &&
+                                   sizeof(long) != 8) {
                                        char *p;
 
                                        ls = 2;
                                        /* make %l into %ll */
                                        p = strchr(format, 'l');
                                        if (p)
-                                               memmove(p, p+1, strlen(p)+1);
+                                               memmove(p+1, p, strlen(p)+1);
                                        else if (strcmp(format, "%p") == 0)
                                                strcpy(format, "0x%llx");
                                }
@@ -3878,8 +4034,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
  * pevent_data_lat_fmt - parse the data for the latency format
  * @pevent: a handle to the pevent
  * @s: the trace_seq to write to
- * @data: the raw data to read from
- * @size: currently unused.
+ * @record: the record to read from
  *
  * This parses out the Latency format (interrupts disabled,
  * need rescheduling, in hard/soft interrupt, preempt count
@@ -3889,10 +4044,13 @@ void pevent_data_lat_fmt(struct pevent *pevent,
                         struct trace_seq *s, struct pevent_record *record)
 {
        static int check_lock_depth = 1;
+       static int check_migrate_disable = 1;
        static int lock_depth_exists;
+       static int migrate_disable_exists;
        unsigned int lat_flags;
        unsigned int pc;
        int lock_depth;
+       int migrate_disable;
        int hardirq;
        int softirq;
        void *data = record->data;
@@ -3900,18 +4058,26 @@ void pevent_data_lat_fmt(struct pevent *pevent,
        lat_flags = parse_common_flags(pevent, data);
        pc = parse_common_pc(pevent, data);
        /* lock_depth may not always exist */
-       if (check_lock_depth) {
-               struct format_field *field;
-               struct event_format *event;
-
-               check_lock_depth = 0;
-               event = pevent->events[0];
-               field = pevent_find_common_field(event, "common_lock_depth");
-               if (field)
-                       lock_depth_exists = 1;
-       }
        if (lock_depth_exists)
                lock_depth = parse_common_lock_depth(pevent, data);
+       else if (check_lock_depth) {
+               lock_depth = parse_common_lock_depth(pevent, data);
+               if (lock_depth < 0)
+                       check_lock_depth = 0;
+               else
+                       lock_depth_exists = 1;
+       }
+
+       /* migrate_disable may not always exist */
+       if (migrate_disable_exists)
+               migrate_disable = parse_common_migrate_disable(pevent, data);
+       else if (check_migrate_disable) {
+               migrate_disable = parse_common_migrate_disable(pevent, data);
+               if (migrate_disable < 0)
+                       check_migrate_disable = 0;
+               else
+                       migrate_disable_exists = 1;
+       }
 
        hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
        softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
@@ -3930,6 +4096,13 @@ void pevent_data_lat_fmt(struct pevent *pevent,
        else
                trace_seq_putc(s, '.');
 
+       if (migrate_disable_exists) {
+               if (migrate_disable < 0)
+                       trace_seq_putc(s, '.');
+               else
+                       trace_seq_printf(s, "%d", migrate_disable);
+       }
+
        if (lock_depth_exists) {
                if (lock_depth < 0)
                        trace_seq_putc(s, '.');
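With the two probe-once blocks above, a missing common_lock_depth or common_migrate_disable field costs one failed lookup on the first record and is then skipped. Roughly what the latency prefix looks like for an assumed record taken with interrupts disabled, in hard-interrupt context, preempt_count of 1, common_migrate_disable present and zero, and no common_lock_depth field (illustrative only; the exact characters depend on the flag bits):

	/* irqs-off  need-resched  hard/soft-irq  preempt  migrate-disable */
	/*    'd'         '.'           'h'          '1'          '0'      */
	/*                        ->  "d.h10"                              */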
@@ -3996,10 +4169,7 @@ const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid)
  * pevent_data_comm_from_pid - parse the data into the print format
  * @s: the trace_seq to write to
  * @event: the handle to the event
- * @cpu: the cpu the event was recorded on
- * @data: the raw data
- * @size: the size of the raw data
- * @nsecs: the timestamp of the event
+ * @record: the record to read from
  *
  * This parses the raw @data using the given @event information and
  * writes the print format into the trace_seq.
@@ -4279,6 +4449,13 @@ static void print_args(struct print_arg *args)
                trace_seq_destroy(&s);
                printf(")");
                break;
+       case PRINT_HEX:
+               printf("__print_hex(");
+               print_args(args->hex.field);
+               printf(", ");
+               print_args(args->hex.size);
+               printf(")");
+               break;
        case PRINT_STRING:
        case PRINT_BSTRING:
                printf("__get_str(%s)", args->string.string);
@@ -4541,6 +4718,8 @@ int pevent_parse_event(struct pevent *pevent,
                die("failed to read event id");
 
        event->system = strdup(sys);
+       if (!event->system)
+               die("failed to allocate system");
 
        /* Add pevent to event so that it can be referenced */
        event->pevent = pevent;
@@ -4582,6 +4761,11 @@ int pevent_parse_event(struct pevent *pevent,
                        list = &arg->next;
                        arg->type = PRINT_FIELD;
                        arg->field.name = strdup(field->name);
+                       if (!arg->field.name) {
+                               do_warning("failed to allocate field name");
+                               event->flags |= EVENT_FL_FAILED;
+                               return -1;
+                       }
                        arg->field.field = field;
                }
                return 0;
@@ -4753,7 +4937,7 @@ int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event,
  * @record: The record with the field name.
  * @err: print default error if failed.
  *
- * Returns: 0 on success, -1 field not fould, or 1 if buffer is full.
+ * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
  */
 int pevent_print_num_field(struct trace_seq *s, const char *fmt,
                           struct event_format *event, const char *name,
@@ -4795,11 +4979,12 @@ static void free_func_handle(struct pevent_function_handler *func)
  * pevent_register_print_function - register a helper function
  * @pevent: the handle to the pevent
  * @func: the function to process the helper function
+ * @ret_type: the return type of the helper function
  * @name: the name of the helper function
  * @parameters: A list of enum pevent_func_arg_type
  *
  * Some events may have helper functions in the print format arguments.
- * This allows a plugin to dynmically create a way to process one
+ * This allows a plugin to dynamically create a way to process one
  * of these functions.
  *
  * The @parameters is a variable list of pevent_func_arg_type enums that
@@ -4870,12 +5055,13 @@ int pevent_register_print_function(struct pevent *pevent,
 }
 
 /**
- * pevent_register_event_handle - register a way to parse an event
+ * pevent_register_event_handler - register a way to parse an event
  * @pevent: the handle to the pevent
  * @id: the id of the event to register
  * @sys_name: the system name the event belongs to
  * @event_name: the name of the event
  * @func: the function to call to parse the event information
+ * @context: the data to be passed to @func
  *
  * This function allows a developer to override the parsing of
  * a given event. If for some reason the default print format
@@ -4925,6 +5111,11 @@ int pevent_register_event_handler(struct pevent *pevent,
        if (sys_name)
                handle->sys_name = strdup(sys_name);
 
+       if ((event_name && !handle->event_name) ||
+           (sys_name && !handle->sys_name)) {
+               die("Failed to allocate event/sys name");
+       }
+
        handle->func = func;
        handle->next = pevent->handlers;
        pevent->handlers = handle;
index ac997bc..5772ad8 100644
@@ -226,6 +226,11 @@ struct print_arg_symbol {
        struct print_flag_sym   *symbols;
 };
 
+struct print_arg_hex {
+       struct print_arg        *field;
+       struct print_arg        *size;
+};
+
 struct print_arg_dynarray {
        struct format_field     *field;
        struct print_arg        *index;
@@ -253,6 +258,7 @@ enum print_arg_type {
        PRINT_FIELD,
        PRINT_FLAGS,
        PRINT_SYMBOL,
+       PRINT_HEX,
        PRINT_TYPE,
        PRINT_STRING,
        PRINT_BSTRING,
@@ -270,6 +276,7 @@ struct print_arg {
                struct print_arg_typecast       typecast;
                struct print_arg_flags          flags;
                struct print_arg_symbol         symbol;
+               struct print_arg_hex            hex;
                struct print_arg_func           func;
                struct print_arg_string         string;
                struct print_arg_op             op;
index dfcfe2c..ad17855 100644
@@ -96,7 +96,7 @@ static enum event_type read_token(char **tok)
            (strcmp(token, "=") == 0 || strcmp(token, "!") == 0) &&
            pevent_peek_char() == '~') {
                /* append it */
-               *tok = malloc(3);
+               *tok = malloc_or_die(3);
                sprintf(*tok, "%c%c", *token, '~');
                free_token(token);
                /* Now remove the '~' from the buffer */
@@ -148,17 +148,11 @@ add_filter_type(struct event_filter *filter, int id)
        if (filter_type)
                return filter_type;
 
-       if (!filter->filters)
-               filter->event_filters =
-                       malloc_or_die(sizeof(*filter->event_filters));
-       else {
-               filter->event_filters =
-                       realloc(filter->event_filters,
-                               sizeof(*filter->event_filters) *
-                               (filter->filters + 1));
-               if (!filter->event_filters)
-                       die("Could not allocate filter");
-       }
+       filter->event_filters = realloc(filter->event_filters,
+                                       sizeof(*filter->event_filters) *
+                                       (filter->filters + 1));
+       if (!filter->event_filters)
+               die("Could not allocate filter");
 
        for (i = 0; i < filter->filters; i++) {
                if (filter->event_filters[i].event_id > id)
@@ -1480,7 +1474,7 @@ void pevent_filter_clear_trivial(struct event_filter *filter,
 {
        struct filter_type *filter_type;
        int count = 0;
-       int *ids;
+       int *ids = NULL;
        int i;
 
        if (!filter->filters)
@@ -1504,10 +1498,8 @@ void pevent_filter_clear_trivial(struct event_filter *filter,
                default:
                        break;
                }
-               if (count)
-                       ids = realloc(ids, sizeof(*ids) * (count + 1));
-               else
-                       ids = malloc(sizeof(*ids));
+
+               ids = realloc(ids, sizeof(*ids) * (count + 1));
                if (!ids)
                        die("Can't allocate ids");
                ids[count++] = filter_type->event_id;
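Both simplifications above lean on the fact that realloc(NULL, size) behaves exactly like malloc(size), so the first allocation needs no special case. One call now covers both paths (overwriting the pointer is harmless here because failure calls die() and never returns):

	/* first call: ids == NULL, so realloc() acts as malloc() */
	ids = realloc(ids, sizeof(*ids) * (count + 1));
	if (!ids)
		die("Can't allocate ids");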
@@ -1710,18 +1702,43 @@ static int test_num(struct event_format *event,
 
 static const char *get_field_str(struct filter_arg *arg, struct pevent_record *record)
 {
-       const char *val = record->data + arg->str.field->offset;
+       struct event_format *event;
+       struct pevent *pevent;
+       unsigned long long addr;
+       const char *val = NULL;
+       char hex[64];
 
-       /*
-        * We need to copy the data since we can't be sure the field
-        * is null terminated.
-        */
-       if (*(val + arg->str.field->size - 1)) {
-               /* copy it */
-               memcpy(arg->str.buffer, val, arg->str.field->size);
-               /* the buffer is already NULL terminated */
-               val = arg->str.buffer;
+       /* If the field is not a string convert it */
+       if (arg->str.field->flags & FIELD_IS_STRING) {
+               val = record->data + arg->str.field->offset;
+
+               /*
+                * We need to copy the data since we can't be sure the field
+                * is null terminated.
+                */
+               if (*(val + arg->str.field->size - 1)) {
+                       /* copy it */
+                       memcpy(arg->str.buffer, val, arg->str.field->size);
+                       /* the buffer is already NULL terminated */
+                       val = arg->str.buffer;
+               }
+
+       } else {
+               event = arg->str.field->event;
+               pevent = event->pevent;
+               addr = get_value(event, arg->str.field, record);
+
+               if (arg->str.field->flags & (FIELD_IS_POINTER | FIELD_IS_LONG))
+                       /* convert to a kernel symbol */
+                       val = pevent_find_function(pevent, addr);
+
+               if (val == NULL) {
+                       /* just use the hex of the string name */
+                       snprintf(hex, 64, "0x%llx", addr);
+                       val = hex;
+               }
        }
+
        return val;
 }
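When the filter compares against a field that is not a string, the value is now resolved before the string comparison: pointer/long values are looked up as kernel symbols via pevent_find_function(), with a hex rendering as the fallback. A hypothetical filter illustrating the effect (field and symbol chosen only for illustration):

	/* filter expression:  call_site == "kmem_cache_alloc"               */
	/* get_field_str() yields "kmem_cache_alloc" when the address        */
	/* resolves to a symbol, otherwise something like "0xffffffff8112..." */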
 
@@ -2001,11 +2018,13 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg)
        char *lstr;
        char *rstr;
        char *op;
-       char *str;
+       char *str = NULL;
        int len;
 
        lstr = arg_to_str(filter, arg->exp.left);
        rstr = arg_to_str(filter, arg->exp.right);
+       if (!lstr || !rstr)
+               goto out;
 
        switch (arg->exp.type) {
        case FILTER_EXP_ADD:
@@ -2045,6 +2064,7 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg)
        len = strlen(op) + strlen(lstr) + strlen(rstr) + 4;
        str = malloc_or_die(len);
        snprintf(str, len, "%s %s %s", lstr, op, rstr);
+out:
        free(lstr);
        free(rstr);
 
@@ -2061,6 +2081,8 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg)
 
        lstr = arg_to_str(filter, arg->num.left);
        rstr = arg_to_str(filter, arg->num.right);
+       if (!lstr || !rstr)
+               goto out;
 
        switch (arg->num.type) {
        case FILTER_CMP_EQ:
@@ -2097,6 +2119,7 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg)
                break;
        }
 
+out:
        free(lstr);
        free(rstr);
        return str;
@@ -2247,7 +2270,12 @@ int pevent_filter_compare(struct event_filter *filter1, struct event_filter *fil
                /* The best way to compare complex filters is with strings */
                str1 = arg_to_str(filter1, filter_type1->filter);
                str2 = arg_to_str(filter2, filter_type2->filter);
-               result = strcmp(str1, str2) != 0;
+               if (str1 && str2)
+                       result = strcmp(str1, str2) != 0;
+               else
+                       /* bail out if allocation fails */
+                       result = 1;
+
                free(str1);
                free(str2);
                if (result)
index a3dbadb..7065cd6 100644
@@ -12,7 +12,7 @@ SYNOPSIS
 
 DESCRIPTION
 -----------
-This 'perf bench' command is general framework for benchmark suites.
+This 'perf bench' command is a general framework for benchmark suites.
 
 COMMON OPTIONS
 --------------
@@ -45,14 +45,20 @@ SUBSYSTEM
 'sched'::
        Scheduler and IPC mechanisms.
 
+'mem'::
+       Memory access performance.
+
+'all'::
+       All benchmark subsystems.
+
 SUITES FOR 'sched'
 ~~~~~~~~~~~~~~~~~~
 *messaging*::
 Suite for evaluating performance of scheduler and IPC mechanisms.
 Based on hackbench by Rusty Russell.
 
-Options of *pipe*
-^^^^^^^^^^^^^^^^^
+Options of *messaging*
+^^^^^^^^^^^^^^^^^^^^^^
 -p::
 --pipe::
 Use pipe() instead of socketpair()
@@ -115,6 +121,72 @@ Example of *pipe*
                 59004 ops/sec
 ---------------------
 
+SUITES FOR 'mem'
+~~~~~~~~~~~~~~~~
+*memcpy*::
+Suite for evaluating performance of simple memory copy in various ways.
+
+Options of *memcpy*
+^^^^^^^^^^^^^^^^^^^
+-l::
+--length::
+Specify length of memory to copy (default: 1MB).
+Available units are B, KB, MB, GB and TB (case insensitive).
+
+-r::
+--routine::
+Specify routine to copy (default: default).
+Available routines depend on the architecture.
+On x86-64, x86-64-unrolled, x86-64-movsq and x86-64-movsb are supported.
+
+-i::
+--iterations::
+Repeat memcpy invocation this number of times.
+
+-c::
+--cycle::
+Use perf's cpu-cycles event instead of the gettimeofday syscall.
+
+-o::
+--only-prefault::
+Show only the result with page faults before memcpy.
+
+-n::
+--no-prefault::
+Show only the result without page faults before memcpy.
+
+*memset*::
+Suite for evaluating performance of simple memory set in various ways.
+
+Options of *memset*
+^^^^^^^^^^^^^^^^^^^
+-l::
+--length::
+Specify length of memory to set (default: 1MB).
+Available units are B, KB, MB, GB and TB (case insensitive).
+
+-r::
+--routine::
+Specify routine to set (default: default).
+Available routines depend on the architecture.
+On x86-64, x86-64-unrolled, x86-64-stosq and x86-64-stosb are supported.
+
+-i::
+--iterations::
+Repeat memset invocation this number of times.
+
+-c::
+--cycle::
+Use perf's cpu-cycles event instead of the gettimeofday syscall.
+
+-o::
+--only-prefault::
+Show only the result with page faults before memset.
+
+-n::
+--no-prefault::
+Show only the result without page faults before memset.
+
 SEE ALSO
 --------
 linkperf:perf[1]
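The new 'mem' sections above document options only; an invocation along the lines they describe, using only the options listed (output omitted, shown purely for orientation), would look like:

	$ perf bench mem memcpy -l 1MB -i 10 -c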
index 2d89f02..495210a 100644
@@ -57,7 +57,7 @@ OPTIONS
 
 -s::
 --sort=::
-       Sort by key(s): pid, comm, dso, symbol, parent.
+       Sort by key(s): pid, comm, dso, symbol, parent, srcline.
 
 -p::
 --parent=<regex>::
index 4a5680c..5b80d84 100644
@@ -112,7 +112,7 @@ Default is to monitor all CPUS.
 
 -s::
 --sort::
-       Sort by key(s): pid, comm, dso, symbol, parent
+       Sort by key(s): pid, comm, dso, symbol, parent, srcline.
 
 -n::
 --show-nr-samples::
index 0eee64c..75d74e5 100644
@@ -155,7 +155,7 @@ endif
 
 ### --- END CONFIGURATION SECTION ---
 
-BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 BASIC_LDFLAGS =
 
 # Guard against environment variables
@@ -503,6 +503,7 @@ else
                LIB_OBJS += $(OUTPUT)ui/progress.o
                LIB_OBJS += $(OUTPUT)ui/util.o
                LIB_OBJS += $(OUTPUT)ui/tui/setup.o
+               LIB_OBJS += $(OUTPUT)ui/tui/util.o
                LIB_H += ui/browser.h
                LIB_H += ui/browsers/map.h
                LIB_H += ui/helpline.h
@@ -522,13 +523,18 @@ else
                msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
                BASIC_CFLAGS += -DNO_GTK2_SUPPORT
        else
+               ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y)
+                       BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR
+               endif
                BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
                EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
                LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
                LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
+               LIB_OBJS += $(OUTPUT)ui/gtk/util.o
                # Make sure that it'd be included only once.
                ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),)
                        LIB_OBJS += $(OUTPUT)ui/setup.o
+                       LIB_OBJS += $(OUTPUT)ui/util.o
                endif
        endif
 endif
index 7155722..02dad5d 100644
 static const char      *length_str     = "1MB";
 static const char      *routine        = "default";
 static int             iterations      = 1;
-static bool            use_clock;
-static int             clock_fd;
+static bool            use_cycle;
+static int             cycle_fd;
 static bool            only_prefault;
 static bool            no_prefault;
 
 static const struct option options[] = {
        OPT_STRING('l', "length", &length_str, "1MB",
                    "Specify length of memory to copy. "
-                   "available unit: B, MB, GB (upper and lower)"),
+                   "Available units: B, KB, MB, GB and TB (upper and lower)"),
        OPT_STRING('r', "routine", &routine, "default",
                    "Specify routine to copy"),
        OPT_INTEGER('i', "iterations", &iterations,
                    "repeat memcpy() invocation this number of times"),
-       OPT_BOOLEAN('c', "clock", &use_clock,
-                   "Use CPU clock for measuring"),
+       OPT_BOOLEAN('c', "cycle", &use_cycle,
+                   "Use cycles event instead of gettimeofday() for measuring"),
        OPT_BOOLEAN('o', "only-prefault", &only_prefault,
                    "Show only the result with page faults before memcpy()"),
        OPT_BOOLEAN('n', "no-prefault", &no_prefault,
@@ -76,27 +76,27 @@ static const char * const bench_mem_memcpy_usage[] = {
        NULL
 };
 
-static struct perf_event_attr clock_attr = {
+static struct perf_event_attr cycle_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES
 };
 
-static void init_clock(void)
+static void init_cycle(void)
 {
-       clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0);
+       cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0);
 
-       if (clock_fd < 0 && errno == ENOSYS)
+       if (cycle_fd < 0 && errno == ENOSYS)
                die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
        else
-               BUG_ON(clock_fd < 0);
+               BUG_ON(cycle_fd < 0);
 }
 
-static u64 get_clock(void)
+static u64 get_cycle(void)
 {
        int ret;
        u64 clk;
 
-       ret = read(clock_fd, &clk, sizeof(u64));
+       ret = read(cycle_fd, &clk, sizeof(u64));
        BUG_ON(ret != sizeof(u64));
 
        return clk;
@@ -119,9 +119,9 @@ static void alloc_mem(void **dst, void **src, size_t length)
                die("memory allocation failed - maybe length is too large?\n");
 }
 
-static u64 do_memcpy_clock(memcpy_t fn, size_t len, bool prefault)
+static u64 do_memcpy_cycle(memcpy_t fn, size_t len, bool prefault)
 {
-       u64 clock_start = 0ULL, clock_end = 0ULL;
+       u64 cycle_start = 0ULL, cycle_end = 0ULL;
        void *src = NULL, *dst = NULL;
        int i;
 
@@ -130,14 +130,14 @@ static u64 do_memcpy_clock(memcpy_t fn, size_t len, bool prefault)
        if (prefault)
                fn(dst, src, len);
 
-       clock_start = get_clock();
+       cycle_start = get_cycle();
        for (i = 0; i < iterations; ++i)
                fn(dst, src, len);
-       clock_end = get_clock();
+       cycle_end = get_cycle();
 
        free(src);
        free(dst);
-       return clock_end - clock_start;
+       return cycle_end - cycle_start;
 }
 
 static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault)
@@ -182,17 +182,17 @@ int bench_mem_memcpy(int argc, const char **argv,
        int i;
        size_t len;
        double result_bps[2];
-       u64 result_clock[2];
+       u64 result_cycle[2];
 
        argc = parse_options(argc, argv, options,
                             bench_mem_memcpy_usage, 0);
 
-       if (use_clock)
-               init_clock();
+       if (use_cycle)
+               init_cycle();
 
        len = (size_t)perf_atoll((char *)length_str);
 
-       result_clock[0] = result_clock[1] = 0ULL;
+       result_cycle[0] = result_cycle[1] = 0ULL;
        result_bps[0] = result_bps[1] = 0.0;
 
        if ((s64)len <= 0) {
@@ -223,11 +223,11 @@ int bench_mem_memcpy(int argc, const char **argv,
 
        if (!only_prefault && !no_prefault) {
                /* show both of results */
-               if (use_clock) {
-                       result_clock[0] =
-                               do_memcpy_clock(routines[i].fn, len, false);
-                       result_clock[1] =
-                               do_memcpy_clock(routines[i].fn, len, true);
+               if (use_cycle) {
+                       result_cycle[0] =
+                               do_memcpy_cycle(routines[i].fn, len, false);
+                       result_cycle[1] =
+                               do_memcpy_cycle(routines[i].fn, len, true);
                } else {
                        result_bps[0] =
                                do_memcpy_gettimeofday(routines[i].fn,
@@ -237,9 +237,9 @@ int bench_mem_memcpy(int argc, const char **argv,
                                                len, true);
                }
        } else {
-               if (use_clock) {
-                       result_clock[pf] =
-                               do_memcpy_clock(routines[i].fn,
+               if (use_cycle) {
+                       result_cycle[pf] =
+                               do_memcpy_cycle(routines[i].fn,
                                                len, only_prefault);
                } else {
                        result_bps[pf] =
@@ -251,12 +251,12 @@ int bench_mem_memcpy(int argc, const char **argv,
        switch (bench_format) {
        case BENCH_FORMAT_DEFAULT:
                if (!only_prefault && !no_prefault) {
-                       if (use_clock) {
-                               printf(" %14lf Clock/Byte\n",
-                                       (double)result_clock[0]
+                       if (use_cycle) {
+                               printf(" %14lf Cycle/Byte\n",
+                                       (double)result_cycle[0]
                                        / (double)len);
-                               printf(" %14lf Clock/Byte (with prefault)\n",
-                                       (double)result_clock[1]
+                               printf(" %14lf Cycle/Byte (with prefault)\n",
+                                       (double)result_cycle[1]
                                        / (double)len);
                        } else {
                                print_bps(result_bps[0]);
@@ -265,9 +265,9 @@ int bench_mem_memcpy(int argc, const char **argv,
                                printf(" (with prefault)\n");
                        }
                } else {
-                       if (use_clock) {
-                               printf(" %14lf Clock/Byte",
-                                       (double)result_clock[pf]
+                       if (use_cycle) {
+                               printf(" %14lf Cycle/Byte",
+                                       (double)result_cycle[pf]
                                        / (double)len);
                        } else
                                print_bps(result_bps[pf]);
@@ -277,17 +277,17 @@ int bench_mem_memcpy(int argc, const char **argv,
                break;
        case BENCH_FORMAT_SIMPLE:
                if (!only_prefault && !no_prefault) {
-                       if (use_clock) {
+                       if (use_cycle) {
                                printf("%lf %lf\n",
-                                       (double)result_clock[0] / (double)len,
-                                       (double)result_clock[1] / (double)len);
+                                       (double)result_cycle[0] / (double)len,
+                                       (double)result_cycle[1] / (double)len);
                        } else {
                                printf("%lf %lf\n",
                                        result_bps[0], result_bps[1]);
                        }
                } else {
-                       if (use_clock) {
-                               printf("%lf\n", (double)result_clock[pf]
+                       if (use_cycle) {
+                               printf("%lf\n", (double)result_cycle[pf]
                                        / (double)len);
                        } else
                                printf("%lf\n", result_bps[pf]);
index e907918..350cc95 100644
 static const char      *length_str     = "1MB";
 static const char      *routine        = "default";
 static int             iterations      = 1;
-static bool            use_clock;
-static int             clock_fd;
+static bool            use_cycle;
+static int             cycle_fd;
 static bool            only_prefault;
 static bool            no_prefault;
 
 static const struct option options[] = {
        OPT_STRING('l', "length", &length_str, "1MB",
-                   "Specify length of memory to copy. "
-                   "available unit: B, MB, GB (upper and lower)"),
+                   "Specify length of memory to set. "
+                   "Available units: B, KB, MB, GB and TB (upper and lower)"),
        OPT_STRING('r', "routine", &routine, "default",
-                   "Specify routine to copy"),
+                   "Specify routine to set"),
        OPT_INTEGER('i', "iterations", &iterations,
                    "repeat memset() invocation this number of times"),
-       OPT_BOOLEAN('c', "clock", &use_clock,
-                   "Use CPU clock for measuring"),
+       OPT_BOOLEAN('c', "cycle", &use_cycle,
+                   "Use cycles event instead of gettimeofday() for measuring"),
        OPT_BOOLEAN('o', "only-prefault", &only_prefault,
                    "Show only the result with page faults before memset()"),
        OPT_BOOLEAN('n', "no-prefault", &no_prefault,
@@ -76,27 +76,27 @@ static const char * const bench_mem_memset_usage[] = {
        NULL
 };
 
-static struct perf_event_attr clock_attr = {
+static struct perf_event_attr cycle_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES
 };
 
-static void init_clock(void)
+static void init_cycle(void)
 {
-       clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0);
+       cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0);
 
-       if (clock_fd < 0 && errno == ENOSYS)
+       if (cycle_fd < 0 && errno == ENOSYS)
                die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
        else
-               BUG_ON(clock_fd < 0);
+               BUG_ON(cycle_fd < 0);
 }
 
-static u64 get_clock(void)
+static u64 get_cycle(void)
 {
        int ret;
        u64 clk;
 
-       ret = read(clock_fd, &clk, sizeof(u64));
+       ret = read(cycle_fd, &clk, sizeof(u64));
        BUG_ON(ret != sizeof(u64));
 
        return clk;
@@ -115,9 +115,9 @@ static void alloc_mem(void **dst, size_t length)
                die("memory allocation failed - maybe length is too large?\n");
 }
 
-static u64 do_memset_clock(memset_t fn, size_t len, bool prefault)
+static u64 do_memset_cycle(memset_t fn, size_t len, bool prefault)
 {
-       u64 clock_start = 0ULL, clock_end = 0ULL;
+       u64 cycle_start = 0ULL, cycle_end = 0ULL;
        void *dst = NULL;
        int i;
 
@@ -126,13 +126,13 @@ static u64 do_memset_clock(memset_t fn, size_t len, bool prefault)
        if (prefault)
                fn(dst, -1, len);
 
-       clock_start = get_clock();
+       cycle_start = get_cycle();
        for (i = 0; i < iterations; ++i)
                fn(dst, i, len);
-       clock_end = get_clock();
+       cycle_end = get_cycle();
 
        free(dst);
-       return clock_end - clock_start;
+       return cycle_end - cycle_start;
 }
 
 static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault)
@@ -176,17 +176,17 @@ int bench_mem_memset(int argc, const char **argv,
        int i;
        size_t len;
        double result_bps[2];
-       u64 result_clock[2];
+       u64 result_cycle[2];
 
        argc = parse_options(argc, argv, options,
                             bench_mem_memset_usage, 0);
 
-       if (use_clock)
-               init_clock();
+       if (use_cycle)
+               init_cycle();
 
        len = (size_t)perf_atoll((char *)length_str);
 
-       result_clock[0] = result_clock[1] = 0ULL;
+       result_cycle[0] = result_cycle[1] = 0ULL;
        result_bps[0] = result_bps[1] = 0.0;
 
        if ((s64)len <= 0) {
@@ -217,11 +217,11 @@ int bench_mem_memset(int argc, const char **argv,
 
        if (!only_prefault && !no_prefault) {
                /* show both of results */
-               if (use_clock) {
-                       result_clock[0] =
-                               do_memset_clock(routines[i].fn, len, false);
-                       result_clock[1] =
-                               do_memset_clock(routines[i].fn, len, true);
+               if (use_cycle) {
+                       result_cycle[0] =
+                               do_memset_cycle(routines[i].fn, len, false);
+                       result_cycle[1] =
+                               do_memset_cycle(routines[i].fn, len, true);
                } else {
                        result_bps[0] =
                                do_memset_gettimeofday(routines[i].fn,
@@ -231,9 +231,9 @@ int bench_mem_memset(int argc, const char **argv,
                                                len, true);
                }
        } else {
-               if (use_clock) {
-                       result_clock[pf] =
-                               do_memset_clock(routines[i].fn,
+               if (use_cycle) {
+                       result_cycle[pf] =
+                               do_memset_cycle(routines[i].fn,
                                                len, only_prefault);
                } else {
                        result_bps[pf] =
@@ -245,12 +245,12 @@ int bench_mem_memset(int argc, const char **argv,
        switch (bench_format) {
        case BENCH_FORMAT_DEFAULT:
                if (!only_prefault && !no_prefault) {
-                       if (use_clock) {
-                               printf(" %14lf Clock/Byte\n",
-                                       (double)result_clock[0]
+                       if (use_cycle) {
+                               printf(" %14lf Cycle/Byte\n",
+                                       (double)result_cycle[0]
                                        / (double)len);
-                               printf(" %14lf Clock/Byte (with prefault)\n ",
-                                       (double)result_clock[1]
+                               printf(" %14lf Cycle/Byte (with prefault)\n ",
+                                       (double)result_cycle[1]
                                        / (double)len);
                        } else {
                                print_bps(result_bps[0]);
@@ -259,9 +259,9 @@ int bench_mem_memset(int argc, const char **argv,
                                printf(" (with prefault)\n");
                        }
                } else {
-                       if (use_clock) {
-                               printf(" %14lf Clock/Byte",
-                                       (double)result_clock[pf]
+                       if (use_cycle) {
+                               printf(" %14lf Cycle/Byte",
+                                       (double)result_cycle[pf]
                                        / (double)len);
                        } else
                                print_bps(result_bps[pf]);
@@ -271,17 +271,17 @@ int bench_mem_memset(int argc, const char **argv,
                break;
        case BENCH_FORMAT_SIMPLE:
                if (!only_prefault && !no_prefault) {
-                       if (use_clock) {
+                       if (use_cycle) {
                                printf("%lf %lf\n",
-                                       (double)result_clock[0] / (double)len,
-                                       (double)result_clock[1] / (double)len);
+                                       (double)result_cycle[0] / (double)len,
+                                       (double)result_cycle[1] / (double)len);
                        } else {
                                printf("%lf %lf\n",
                                        result_bps[0], result_bps[1]);
                        }
                } else {
-                       if (use_clock) {
-                               printf("%lf\n", (double)result_clock[pf]
+                       if (use_cycle) {
+                               printf("%lf\n", (double)result_cycle[pf]
                                        / (double)len);
                        } else
                                printf("%lf\n", result_bps[pf]);
index b0e74ab..1f31002 100644
@@ -33,7 +33,7 @@ struct bench_suite {
 };
                                                \
 /* sentinel: easy for help */
-#define suite_all { "all", "test all suite (pseudo suite)", NULL }
+#define suite_all { "all", "Test all benchmark suites", NULL }
 
 static struct bench_suite sched_suites[] = {
        { "messaging",
@@ -75,7 +75,7 @@ static struct bench_subsys subsystems[] = {
          "memory access performance",
          mem_suites },
        { "all",                /* sentinel: easy for help */
-         "test all subsystem (pseudo subsystem)",
+         "all benchmark subsystem",
          NULL },
        { NULL,
          NULL,
index acd78dc..0dd5a05 100644 (file)
@@ -60,7 +60,7 @@ static int __cmd_evlist(const char *input_name, struct perf_attr_details *detail
        list_for_each_entry(pos, &session->evlist->entries, node) {
                bool first = true;
 
-               printf("%s", event_name(pos));
+               printf("%s", perf_evsel__name(pos));
 
                if (details->verbose || details->freq) {
                        comma_printf(&first, " sample_freq=%" PRIu64,
index 547af48..ce35015 100644 (file)
@@ -57,6 +57,11 @@ static unsigned long nr_allocs, nr_cross_allocs;
 
 #define PATH_SYS_NODE  "/sys/devices/system/node"
 
+struct perf_kmem {
+       struct perf_tool    tool;
+       struct perf_session *session;
+};
+
 static void init_cpunode_map(void)
 {
        FILE *fp;
@@ -278,14 +283,16 @@ static void process_free_event(void *data,
        s_alloc->alloc_cpu = -1;
 }
 
-static void process_raw_event(union perf_event *raw_event __used, void *data,
+static void process_raw_event(struct perf_tool *tool,
+                             union perf_event *raw_event __used, void *data,
                              int cpu, u64 timestamp, struct thread *thread)
 {
+       struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool);
        struct event_format *event;
        int type;
 
-       type = trace_parse_common_type(data);
-       event = trace_find_event(type);
+       type = trace_parse_common_type(kmem->session->pevent, data);
+       event = pevent_find_event(kmem->session->pevent, type);
 
        if (!strcmp(event->name, "kmalloc") ||
            !strcmp(event->name, "kmem_cache_alloc")) {
@@ -306,7 +313,7 @@ static void process_raw_event(union perf_event *raw_event __used, void *data,
        }
 }
 
-static int process_sample_event(struct perf_tool *tool __used,
+static int process_sample_event(struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct perf_evsel *evsel __used,
@@ -322,16 +329,18 @@ static int process_sample_event(struct perf_tool *tool __used,
 
        dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
 
-       process_raw_event(event, sample->raw_data, sample->cpu,
+       process_raw_event(tool, event, sample->raw_data, sample->cpu,
                          sample->time, thread);
 
        return 0;
 }
 
-static struct perf_tool perf_kmem = {
-       .sample                 = process_sample_event,
-       .comm                   = perf_event__process_comm,
-       .ordered_samples        = true,
+static struct perf_kmem perf_kmem = {
+       .tool = {
+               .sample                 = process_sample_event,
+               .comm                   = perf_event__process_comm,
+               .ordered_samples        = true,
+       },
 };
 
 static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -486,11 +495,15 @@ static void sort_result(void)
 static int __cmd_kmem(void)
 {
        int err = -EINVAL;
-       struct perf_session *session = perf_session__new(input_name, O_RDONLY,
-                                                        0, false, &perf_kmem);
+       struct perf_session *session;
+
+       session = perf_session__new(input_name, O_RDONLY, 0, false,
+                                   &perf_kmem.tool);
        if (session == NULL)
                return -ENOMEM;
 
+       perf_kmem.session = session;
+
        if (perf_session__create_kernel_maps(session) < 0)
                goto out_delete;
 
@@ -498,7 +511,7 @@ static int __cmd_kmem(void)
                goto out_delete;
 
        setup_pager();
-       err = perf_session__process_events(session, &perf_kmem);
+       err = perf_session__process_events(session, &perf_kmem.tool);
        if (err != 0)
                goto out_delete;
        sort_result();
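
The builtin-kmem change above is one instance of a pattern repeated throughout this series: the generic struct perf_tool callbacks are embedded in a tool-private struct so that a callback handed only the perf_tool pointer can recover per-tool state (here the session, and through it the pevent handle) with container_of. A minimal, self-contained sketch of that idiom, using stand-in types rather than perf's real headers:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct perf_tool {                      /* stand-in for tools/perf's struct */
        int (*sample)(struct perf_tool *tool, int cpu);
};

struct perf_kmem {                      /* tool-private wrapper */
        struct perf_tool tool;
        const char *session;            /* stands in for struct perf_session * */
};

static int process_sample_event(struct perf_tool *tool, int cpu)
{
        struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool);

        printf("sample on cpu %d, session %s\n", cpu, kmem->session);
        return 0;
}

int main(void)
{
        struct perf_kmem perf_kmem = {
                .tool    = { .sample = process_sample_event },
                .session = "perf.data",
        };

        /* Generic code only ever sees &perf_kmem.tool, just as
         * perf_session__new()/perf_session__process_events() do above. */
        return perf_kmem.tool.sample(&perf_kmem.tool, 0);
}
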
index fd53319..b3c4285 100644 (file)
@@ -724,8 +724,8 @@ process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
        struct event_format *event;
        int type;
 
-       type = trace_parse_common_type(data);
-       event = trace_find_event(type);
+       type = trace_parse_common_type(session->pevent, data);
+       event = pevent_find_event(session->pevent, type);
 
        if (!strcmp(event->name, "lock_acquire"))
                process_lock_acquire_event(data, event, cpu, timestamp, thread);
index f95840d..f5a6452 100644 (file)
@@ -265,7 +265,7 @@ try_again:
 
                        if (err == ENOENT) {
                                ui__error("The %s event is not supported.\n",
-                                           event_name(pos));
+                                         perf_evsel__name(pos));
                                exit(EXIT_FAILURE);
                        }
 
@@ -916,7 +916,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
                usage_with_options(record_usage, record_options);
 
        list_for_each_entry(pos, &evsel_list->entries, node) {
-               if (perf_header__push_event(pos->attr.config, event_name(pos)))
+               if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos)))
                        goto out_free_fd;
        }
 
index 25249f7..69b1c11 100644 (file)
@@ -69,7 +69,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
 
        if ((sort__has_parent || symbol_conf.use_callchain)
            && sample->callchain) {
-               err = machine__resolve_callchain(machine, evsel, al->thread,
+               err = machine__resolve_callchain(machine, al->thread,
                                                 sample->callchain, &parent);
                if (err)
                        return err;
@@ -140,7 +140,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
        struct hist_entry *he;
 
        if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
-               err = machine__resolve_callchain(machine, evsel, al->thread,
+               err = machine__resolve_callchain(machine, al->thread,
                                                 sample->callchain, &parent);
                if (err)
                        return err;
@@ -230,7 +230,7 @@ static int process_read_event(struct perf_tool *tool,
        struct perf_report *rep = container_of(tool, struct perf_report, tool);
 
        if (rep->show_threads) {
-               const char *name = evsel ? event_name(evsel) : "unknown";
+               const char *name = evsel ? perf_evsel__name(evsel) : "unknown";
                perf_read_values_add_value(&rep->show_threads_values,
                                           event->read.pid, event->read.tid,
                                           event->read.id,
@@ -239,17 +239,18 @@ static int process_read_event(struct perf_tool *tool,
        }
 
        dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
-                   evsel ? event_name(evsel) : "FAIL",
+                   evsel ? perf_evsel__name(evsel) : "FAIL",
                    event->read.value);
 
        return 0;
 }
 
+/* For pipe mode, sample_type is not currently set */
 static int perf_report__setup_sample_type(struct perf_report *rep)
 {
        struct perf_session *self = rep->session;
 
-       if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+       if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
                if (sort__has_parent) {
                        ui__error("Selected --sort parent, but no "
                                    "callchain data. Did you call "
@@ -272,7 +273,8 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
        }
 
        if (sort__branch_mode == 1) {
-               if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
+               if (!self->fd_pipe &&
+                   !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
                        ui__error("Selected -b but no branch data. "
                                  "Did you call perf record without -b?\n");
                        return -1;
@@ -314,7 +316,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
 
        list_for_each_entry(pos, &evlist->entries, node) {
                struct hists *hists = &pos->hists;
-               const char *evname = event_name(pos);
+               const char *evname = perf_evsel__name(pos);
 
                hists__fprintf_nr_sample_events(hists, evname, stdout);
                hists__fprintf(hists, NULL, false, true, 0, 0, stdout);
index b125e07..7a9ad2b 100644 (file)
@@ -43,6 +43,11 @@ static u64                   sleep_measurement_overhead;
 
 static unsigned long           nr_tasks;
 
+struct perf_sched {
+       struct perf_tool    tool;
+       struct perf_session *session;
+};
+
 struct sched_atom;
 
 struct task_desc {
@@ -1597,11 +1602,13 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
                                                 struct perf_evsel *evsel,
                                                 struct machine *machine)
 {
+       struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+       struct pevent *pevent = sched->session->pevent;
        struct thread *thread = machine__findnew_thread(machine, sample->pid);
 
        if (thread == NULL) {
                pr_debug("problem processing %s event, skipping it.\n",
-                        evsel->name);
+                        perf_evsel__name(evsel));
                return -1;
        }
 
@@ -1612,7 +1619,8 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
                tracepoint_handler f = evsel->handler.func;
 
                if (evsel->handler.data == NULL)
-                       evsel->handler.data = trace_find_event(evsel->attr.config);
+                       evsel->handler.data = pevent_find_event(pevent,
+                                                         evsel->attr.config);
 
                f(tool, evsel->handler.data, sample, machine, thread);
        }
@@ -1620,12 +1628,14 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
        return 0;
 }
 
-static struct perf_tool perf_sched = {
-       .sample                 = perf_sched__process_tracepoint_sample,
-       .comm                   = perf_event__process_comm,
-       .lost                   = perf_event__process_lost,
-       .fork                   = perf_event__process_task,
-       .ordered_samples        = true,
+static struct perf_sched perf_sched = {
+       .tool = {
+               .sample          = perf_sched__process_tracepoint_sample,
+               .comm            = perf_event__process_comm,
+               .lost            = perf_event__process_lost,
+               .fork            = perf_event__process_task,
+               .ordered_samples = true,
+       },
 };
 
 static void read_events(bool destroy, struct perf_session **psession)
@@ -1640,16 +1650,20 @@ static void read_events(bool destroy, struct perf_session **psession)
                { "sched:sched_process_exit", process_sched_exit_event, },
                { "sched:sched_migrate_task", process_sched_migrate_task_event, },
        };
-       struct perf_session *session = perf_session__new(input_name, O_RDONLY,
-                                                        0, false, &perf_sched);
+       struct perf_session *session;
+
+       session = perf_session__new(input_name, O_RDONLY, 0, false,
+                                   &perf_sched.tool);
        if (session == NULL)
                die("No Memory");
 
-       err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers);
+       perf_sched.session = session;
+
+       err = perf_session__set_tracepoints_handlers(session, handlers);
        assert(err == 0);
 
        if (perf_session__has_traces(session, "record -R")) {
-               err = perf_session__process_events(session, &perf_sched);
+               err = perf_session__process_events(session, &perf_sched.tool);
                if (err)
                        die("Failed to process events, error %d", err);
 
index 8e395a5..1e60ab7 100644 (file)
@@ -28,6 +28,11 @@ static bool                  system_wide;
 static const char              *cpu_list;
 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 
+struct perf_script {
+       struct perf_tool    tool;
+       struct perf_session *session;
+};
+
 enum perf_output_field {
        PERF_OUTPUT_COMM            = 1U << 0,
        PERF_OUTPUT_TID             = 1U << 1,
@@ -137,10 +142,11 @@ static const char *output_field2str(enum perf_output_field field)
 
 #define PRINT_FIELD(x)  (output[attr->type].fields & PERF_OUTPUT_##x)
 
-static int perf_event_attr__check_stype(struct perf_event_attr *attr,
-                                 u64 sample_type, const char *sample_msg,
-                                 enum perf_output_field field)
+static int perf_evsel__check_stype(struct perf_evsel *evsel,
+                                  u64 sample_type, const char *sample_msg,
+                                  enum perf_output_field field)
 {
+       struct perf_event_attr *attr = &evsel->attr;
        int type = attr->type;
        const char *evname;
 
@@ -148,7 +154,7 @@ static int perf_event_attr__check_stype(struct perf_event_attr *attr,
                return 0;
 
        if (output[type].user_set) {
-               evname = __event_name(attr->type, attr->config);
+               evname = perf_evsel__name(evsel);
                pr_err("Samples for '%s' event do not have %s attribute set. "
                       "Cannot print '%s' field.\n",
                       evname, sample_msg, output_field2str(field));
@@ -157,7 +163,7 @@ static int perf_event_attr__check_stype(struct perf_event_attr *attr,
 
        /* user did not ask for it explicitly so remove from the default list */
        output[type].fields &= ~field;
-       evname = __event_name(attr->type, attr->config);
+       evname = perf_evsel__name(evsel);
        pr_debug("Samples for '%s' event do not have %s attribute set. "
                 "Skipping '%s' field.\n",
                 evname, sample_msg, output_field2str(field));
@@ -175,8 +181,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
                return -EINVAL;
 
        if (PRINT_FIELD(IP)) {
-               if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP",
-                                          PERF_OUTPUT_IP))
+               if (perf_evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP",
+                                           PERF_OUTPUT_IP))
                        return -EINVAL;
 
                if (!no_callchain &&
@@ -185,8 +191,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
        }
 
        if (PRINT_FIELD(ADDR) &&
-               perf_event_attr__check_stype(attr, PERF_SAMPLE_ADDR, "ADDR",
-                                      PERF_OUTPUT_ADDR))
+               perf_evsel__check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR",
+                                       PERF_OUTPUT_ADDR))
                return -EINVAL;
 
        if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
@@ -208,18 +214,18 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
        }
 
        if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
-               perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID",
-                                      PERF_OUTPUT_TID|PERF_OUTPUT_PID))
+               perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID",
+                                       PERF_OUTPUT_TID|PERF_OUTPUT_PID))
                return -EINVAL;
 
        if (PRINT_FIELD(TIME) &&
-               perf_event_attr__check_stype(attr, PERF_SAMPLE_TIME, "TIME",
-                                      PERF_OUTPUT_TIME))
+               perf_evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME",
+                                       PERF_OUTPUT_TIME))
                return -EINVAL;
 
        if (PRINT_FIELD(CPU) &&
-               perf_event_attr__check_stype(attr, PERF_SAMPLE_CPU, "CPU",
-                                      PERF_OUTPUT_CPU))
+               perf_evsel__check_stype(evsel, PERF_SAMPLE_CPU, "CPU",
+                                       PERF_OUTPUT_CPU))
                return -EINVAL;
 
        return 0;
@@ -256,11 +262,13 @@ static int perf_session__check_output_opt(struct perf_session *session)
        return 0;
 }
 
-static void print_sample_start(struct perf_sample *sample,
+static void print_sample_start(struct pevent *pevent,
+                              struct perf_sample *sample,
                               struct thread *thread,
-                              struct perf_event_attr *attr)
+                              struct perf_evsel *evsel)
 {
        int type;
+       struct perf_event_attr *attr = &evsel->attr;
        struct event_format *event;
        const char *evname = NULL;
        unsigned long secs;
@@ -300,12 +308,18 @@ static void print_sample_start(struct perf_sample *sample,
 
        if (PRINT_FIELD(EVNAME)) {
                if (attr->type == PERF_TYPE_TRACEPOINT) {
-                       type = trace_parse_common_type(sample->raw_data);
-                       event = trace_find_event(type);
+                       /*
+                        * XXX Do we really need this here?
+                        * perf_evlist__set_tracepoint_names should have done
+                        * this already
+                        */
+                       type = trace_parse_common_type(pevent,
+                                                      sample->raw_data);
+                       event = pevent_find_event(pevent, type);
                        if (event)
                                evname = event->name;
                } else
-                       evname = __event_name(attr->type, attr->config);
+                       evname = perf_evsel__name(evsel);
 
                printf("%s: ", evname ? evname : "[unknown]");
        }
@@ -387,7 +401,7 @@ static void print_sample_bts(union perf_event *event,
                        printf(" ");
                else
                        printf("\n");
-               perf_event__print_ip(event, sample, machine, evsel,
+               perf_event__print_ip(event, sample, machine,
                                     PRINT_FIELD(SYM), PRINT_FIELD(DSO),
                                     PRINT_FIELD(SYMOFFSET));
        }
@@ -402,6 +416,7 @@ static void print_sample_bts(union perf_event *event,
 }
 
 static void process_event(union perf_event *event __unused,
+                         struct pevent *pevent,
                          struct perf_sample *sample,
                          struct perf_evsel *evsel,
                          struct machine *machine,
@@ -412,7 +427,7 @@ static void process_event(union perf_event *event __unused,
        if (output[attr->type].fields == 0)
                return;
 
-       print_sample_start(sample, thread, attr);
+       print_sample_start(pevent, sample, thread, evsel);
 
        if (is_bts_event(attr)) {
                print_sample_bts(event, sample, evsel, machine, thread);
@@ -420,7 +435,7 @@ static void process_event(union perf_event *event __unused,
        }
 
        if (PRINT_FIELD(TRACE))
-               print_trace_event(sample->cpu, sample->raw_data,
+               print_trace_event(pevent, sample->cpu, sample->raw_data,
                                  sample->raw_size);
 
        if (PRINT_FIELD(ADDR))
@@ -431,7 +446,7 @@ static void process_event(union perf_event *event __unused,
                        printf(" ");
                else
                        printf("\n");
-               perf_event__print_ip(event, sample, machine, evsel,
+               perf_event__print_ip(event, sample, machine,
                                     PRINT_FIELD(SYM), PRINT_FIELD(DSO),
                                     PRINT_FIELD(SYMOFFSET));
        }
@@ -451,7 +466,8 @@ static int default_stop_script(void)
        return 0;
 }
 
-static int default_generate_script(const char *outfile __unused)
+static int default_generate_script(struct pevent *pevent __unused,
+                                  const char *outfile __unused)
 {
        return 0;
 }
@@ -489,6 +505,7 @@ static int process_sample_event(struct perf_tool *tool __used,
                                struct machine *machine)
 {
        struct addr_location al;
+       struct perf_script *scr = container_of(tool, struct perf_script, tool);
        struct thread *thread = machine__findnew_thread(machine, event->ip.tid);
 
        if (thread == NULL) {
@@ -520,24 +537,27 @@ static int process_sample_event(struct perf_tool *tool __used,
        if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
                return 0;
 
-       scripting_ops->process_event(event, sample, evsel, machine, thread);
+       scripting_ops->process_event(event, scr->session->pevent,
+                                    sample, evsel, machine, thread);
 
        evsel->hists.stats.total_period += sample->period;
        return 0;
 }
 
-static struct perf_tool perf_script = {
-       .sample          = process_sample_event,
-       .mmap            = perf_event__process_mmap,
-       .comm            = perf_event__process_comm,
-       .exit            = perf_event__process_task,
-       .fork            = perf_event__process_task,
-       .attr            = perf_event__process_attr,
-       .event_type      = perf_event__process_event_type,
-       .tracing_data    = perf_event__process_tracing_data,
-       .build_id        = perf_event__process_build_id,
-       .ordered_samples = true,
-       .ordering_requires_timestamps = true,
+static struct perf_script perf_script = {
+       .tool = {
+               .sample          = process_sample_event,
+               .mmap            = perf_event__process_mmap,
+               .comm            = perf_event__process_comm,
+               .exit            = perf_event__process_task,
+               .fork            = perf_event__process_task,
+               .attr            = perf_event__process_attr,
+               .event_type      = perf_event__process_event_type,
+               .tracing_data    = perf_event__process_tracing_data,
+               .build_id        = perf_event__process_build_id,
+               .ordered_samples = true,
+               .ordering_requires_timestamps = true,
+       },
 };
 
 extern volatile int session_done;
@@ -553,7 +573,7 @@ static int __cmd_script(struct perf_session *session)
 
        signal(SIGINT, sig_handler);
 
-       ret = perf_session__process_events(session, &perf_script);
+       ret = perf_session__process_events(session, &perf_script.tool);
 
        if (debug_mode)
                pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -1335,10 +1355,13 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
        if (!script_name)
                setup_pager();
 
-       session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_script);
+       session = perf_session__new(input_name, O_RDONLY, 0, false,
+                                   &perf_script.tool);
        if (session == NULL)
                return -ENOMEM;
 
+       perf_script.session = session;
+
        if (cpu_list) {
                if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap))
                        return -1;
@@ -1384,7 +1407,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
                        return -1;
                }
 
-               err = scripting_ops->generate_script("perf-script");
+               err = scripting_ops->generate_script(session->pevent,
+                                                    "perf-script");
                goto out;
        }
 
index 2625899..861f0ae 100644 (file)
@@ -391,7 +391,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
 
        if (verbose) {
                fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
-                       event_name(counter), count[0], count[1], count[2]);
+                       perf_evsel__name(counter), count[0], count[1], count[2]);
        }
 
        /*
@@ -496,7 +496,7 @@ static int run_perf_stat(int argc __used, const char **argv)
                            errno == ENXIO) {
                                if (verbose)
                                        ui__warning("%s event is not supported by the kernel.\n",
-                                                   event_name(counter));
+                                                   perf_evsel__name(counter));
                                counter->supported = false;
                                continue;
                        }
@@ -594,7 +594,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
                        csv_output ? 0 : -4,
                        evsel_list->cpus->map[cpu], csv_sep);
 
-       fprintf(output, fmt, cpustr, msecs, csv_sep, event_name(evsel));
+       fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel));
 
        if (evsel->cgrp)
                fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
@@ -792,7 +792,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
        else
                cpu = 0;
 
-       fprintf(output, fmt, cpustr, avg, csv_sep, event_name(evsel));
+       fprintf(output, fmt, cpustr, avg, csv_sep, perf_evsel__name(evsel));
 
        if (evsel->cgrp)
                fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
@@ -908,7 +908,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
                        counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
                        csv_sep,
                        csv_output ? 0 : -24,
-                       event_name(counter));
+                       perf_evsel__name(counter));
 
                if (counter->cgrp)
                        fprintf(output, "%s%s", csv_sep, counter->cgrp->name);
@@ -961,7 +961,7 @@ static void print_counter(struct perf_evsel *counter)
                                counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
                                csv_sep,
                                csv_output ? 0 : -24,
-                               event_name(counter));
+                               perf_evsel__name(counter));
 
                        if (counter->cgrp)
                                fprintf(output, "%s%s",
@@ -1179,6 +1179,12 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
                fprintf(stderr, "cannot use both --output and --log-fd\n");
                usage_with_options(stat_usage, options);
        }
+
+       if (output_fd < 0) {
+               fprintf(stderr, "argument to --log-fd must be a > 0\n");
+               usage_with_options(stat_usage, options);
+       }
+
        if (!output) {
                struct timespec tm;
                mode = append_file ? "a" : "w";
@@ -1190,7 +1196,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
                }
                clock_gettime(CLOCK_REALTIME, &tm);
                fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
-       } else if (output_fd != 2) {
+       } else if (output_fd > 0) {
                mode = append_file ? "a" : "w";
                output = fdopen(output_fd, mode);
                if (!output) {
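
The two hunks above tighten the --log-fd handling in builtin-stat: a negative descriptor is rejected up front, and any positive one is wrapped with fdopen() so the rest of the code keeps writing through a FILE *. A small stand-alone sketch of that flow, with a hypothetical main() in place of perf's option parsing:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        int output_fd = argc > 1 ? atoi(argv[1]) : 0;   /* 0: no --log-fd given */
        FILE *output;

        if (output_fd < 0) {
                fprintf(stderr, "argument to --log-fd must be > 0\n");
                return 1;
        }

        if (output_fd > 0) {
                output = fdopen(output_fd, "w");        /* reuse caller's fd */
                if (!output) {
                        perror("fdopen");
                        return 1;
                }
        } else {
                output = stderr;                        /* default destination */
        }

        fprintf(output, "# stat output would go here\n");
        fflush(output);
        return 0;
}
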
index 5a8727c..5ce3030 100644 (file)
@@ -583,7 +583,7 @@ static int test__basic_mmap(void)
                if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
                        pr_debug("expected %d %s events, got %d\n",
                                 expected_nr_events[evsel->idx],
-                                event_name(evsel), nr_events[evsel->idx]);
+                                perf_evsel__name(evsel), nr_events[evsel->idx]);
                        goto out_munmap;
                }
        }
index 6bb0277..e3cab5f 100644 (file)
@@ -245,7 +245,7 @@ static void perf_top__show_details(struct perf_top *top)
        if (notes->src == NULL)
                goto out_unlock;
 
-       printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name);
+       printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
        printf("  Events  Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
 
        more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
@@ -408,7 +408,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
        fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top->print_entries);
 
        if (top->evlist->nr_entries > 1)
-               fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(top->sym_evsel));
+               fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", perf_evsel__name(top->sym_evsel));
 
        fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);
 
@@ -503,13 +503,13 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
                                fprintf(stderr, "\nAvailable events:");
 
                                list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
-                                       fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel));
+                                       fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
 
                                prompt_integer(&counter, "Enter details event counter");
 
                                if (counter >= top->evlist->nr_entries) {
                                        top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
-                                       fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel));
+                                       fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
                                        sleep(1);
                                        break;
                                }
@@ -774,7 +774,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
 
                if ((sort__has_parent || symbol_conf.use_callchain) &&
                    sample->callchain) {
-                       err = machine__resolve_callchain(machine, evsel, al.thread,
+                       err = machine__resolve_callchain(machine, al.thread,
                                                         sample->callchain, &parent);
                        if (err)
                                return;
@@ -960,7 +960,7 @@ try_again:
 
                        if (err == ENOENT) {
                                ui__error("The %s event is not supported.\n",
-                                           event_name(counter));
+                                         perf_evsel__name(counter));
                                goto out_err;
                        } else if (err == EMFILE) {
                                ui__error("Too many events are opened.\n"
index d9084e0..6c18785 100644 (file)
@@ -78,6 +78,19 @@ int main(int argc, char *argv[])
         return 0;
 }
 endef
+
+define SOURCE_GTK2_INFOBAR
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error \"-Wstrict-prototypes\"
+
+int main(void)
+{
+       gtk_info_bar_new();
+
+       return 0;
+}
+endef
 endif
 
 ifndef NO_LIBPERL
index 34b1c46..67a2703 100644 (file)
@@ -814,7 +814,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 {
        struct disasm_line *pos, *n;
        struct annotation *notes;
-       const size_t size = symbol__size(sym);
+       size_t size;
        struct map_symbol ms = {
                .map = map,
                .sym = sym,
@@ -834,6 +834,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
        if (sym == NULL)
                return -1;
 
+       size = symbol__size(sym);
+
        if (map->dso->annotate_warned)
                return -1;
 
index 53f6697..482f051 100644 (file)
@@ -23,6 +23,7 @@ struct hist_browser {
        struct hists        *hists;
        struct hist_entry   *he_selection;
        struct map_symbol   *selection;
+       int                  print_seq;
        bool                 has_symbols;
 };
 
@@ -800,6 +801,196 @@ do_offset:
        }
 }
 
+static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *browser,
+                                                       struct callchain_node *chain_node,
+                                                       u64 total, int level,
+                                                       FILE *fp)
+{
+       struct rb_node *node;
+       int offset = level * LEVEL_OFFSET_STEP;
+       u64 new_total, remaining;
+       int printed = 0;
+
+       if (callchain_param.mode == CHAIN_GRAPH_REL)
+               new_total = chain_node->children_hit;
+       else
+               new_total = total;
+
+       remaining = new_total;
+       node = rb_first(&chain_node->rb_root);
+       while (node) {
+               struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+               struct rb_node *next = rb_next(node);
+               u64 cumul = callchain_cumul_hits(child);
+               struct callchain_list *chain;
+               char folded_sign = ' ';
+               int first = true;
+               int extra_offset = 0;
+
+               remaining -= cumul;
+
+               list_for_each_entry(chain, &child->val, list) {
+                       char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
+                       const char *str;
+                       bool was_first = first;
+
+                       if (first)
+                               first = false;
+                       else
+                               extra_offset = LEVEL_OFFSET_STEP;
+
+                       folded_sign = callchain_list__folded(chain);
+
+                       alloc_str = NULL;
+                       str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+                       if (was_first) {
+                               double percent = cumul * 100.0 / new_total;
+
+                               if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
+                                       str = "Not enough memory!";
+                               else
+                                       str = alloc_str;
+                       }
+
+                       printed += fprintf(fp, "%*s%c %s\n", offset + extra_offset, " ", folded_sign, str);
+                       free(alloc_str);
+                       if (folded_sign == '+')
+                               break;
+               }
+
+               if (folded_sign == '-') {
+                       const int new_level = level + (extra_offset ? 2 : 1);
+                       printed += hist_browser__fprintf_callchain_node_rb_tree(browser, child, new_total,
+                                                                               new_level, fp);
+               }
+
+               node = next;
+       }
+
+       return printed;
+}
+
+static int hist_browser__fprintf_callchain_node(struct hist_browser *browser,
+                                               struct callchain_node *node,
+                                               int level, FILE *fp)
+{
+       struct callchain_list *chain;
+       int offset = level * LEVEL_OFFSET_STEP;
+       char folded_sign = ' ';
+       int printed = 0;
+
+       list_for_each_entry(chain, &node->val, list) {
+               char ipstr[BITS_PER_LONG / 4 + 1], *s;
+
+               folded_sign = callchain_list__folded(chain);
+               s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+               printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s);
+       }
+
+       if (folded_sign == '-')
+               printed += hist_browser__fprintf_callchain_node_rb_tree(browser, node,
+                                                                       browser->hists->stats.total_period,
+                                                                       level + 1,  fp);
+       return printed;
+}
+
+static int hist_browser__fprintf_callchain(struct hist_browser *browser,
+                                          struct rb_root *chain, int level, FILE *fp)
+{
+       struct rb_node *nd;
+       int printed = 0;
+
+       for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+               struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+
+               printed += hist_browser__fprintf_callchain_node(browser, node, level, fp);
+       }
+
+       return printed;
+}
+
+static int hist_browser__fprintf_entry(struct hist_browser *browser,
+                                      struct hist_entry *he, FILE *fp)
+{
+       char s[8192];
+       double percent;
+       int printed = 0;
+       char folded_sign = ' ';
+
+       if (symbol_conf.use_callchain)
+               folded_sign = hist_entry__folded(he);
+
+       hist_entry__snprintf(he, s, sizeof(s), browser->hists);
+       percent = (he->period * 100.0) / browser->hists->stats.total_period;
+
+       if (symbol_conf.use_callchain)
+               printed += fprintf(fp, "%c ", folded_sign);
+
+       printed += fprintf(fp, " %5.2f%%", percent);
+
+       if (symbol_conf.show_nr_samples)
+               printed += fprintf(fp, " %11u", he->nr_events);
+
+       if (symbol_conf.show_total_period)
+               printed += fprintf(fp, " %12" PRIu64, he->period);
+
+       printed += fprintf(fp, "%s\n", rtrim(s));
+
+       if (folded_sign == '-')
+               printed += hist_browser__fprintf_callchain(browser, &he->sorted_chain, 1, fp);
+
+       return printed;
+}
+
+static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp)
+{
+       struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries));
+       int printed = 0;
+
+       while (nd) {
+               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+               printed += hist_browser__fprintf_entry(browser, h, fp);
+               nd = hists__filter_entries(rb_next(nd));
+       }
+
+       return printed;
+}
+
+static int hist_browser__dump(struct hist_browser *browser)
+{
+       char filename[64];
+       FILE *fp;
+
+       while (1) {
+               scnprintf(filename, sizeof(filename), "perf.hist.%d", browser->print_seq);
+               if (access(filename, F_OK))
+                       break;
+               /*
+                * XXX: Just an arbitrary lazy upper limit
+                */
+               if (++browser->print_seq == 8192) {
+                       ui_helpline__fpush("Too many perf.hist.N files, nothing written!");
+                       return -1;
+               }
+       }
+
+       fp = fopen(filename, "w");
+       if (fp == NULL) {
+               char bf[64];
+               strerror_r(errno, bf, sizeof(bf));
+               ui_helpline__fpush("Couldn't write to %s: %s", filename, bf);
+               return -1;
+       }
+
+       ++browser->print_seq;
+       hist_browser__fprintf(browser, fp);
+       fclose(fp);
+       ui_helpline__fpush("%s written!", filename);
+
+       return 0;
+}
+
 static struct hist_browser *hist_browser__new(struct hists *hists)
 {
        struct hist_browser *browser = zalloc(sizeof(*browser));
@@ -937,6 +1128,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                            browser->selection->map->dso->annotate_warned)
                                continue;
                        goto do_annotate;
+               case 'P':
+                       hist_browser__dump(browser);
+                       continue;
                case 'd':
                        goto zoom_dso;
                case 't':
@@ -969,6 +1163,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                        "E             Expand all callchains\n"
                                        "d             Zoom into current DSO\n"
                                        "t             Zoom into current Thread\n"
+                                       "P             Print histograms to perf.hist.N\n"
                                        "/             Filter symbol by name");
                        continue;
                case K_ENTER:
@@ -1172,7 +1367,7 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
        struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
        bool current_entry = ui_browser__is_current_entry(browser, row);
        unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE];
-       const char *ev_name = event_name(evsel);
+       const char *ev_name = perf_evsel__name(evsel);
        char bf[256], unit;
        const char *warn = " ";
        size_t printed;
@@ -1240,7 +1435,7 @@ browse_hists:
                         */
                        if (timer)
                                timer(arg);
-                       ev_name = event_name(pos);
+                       ev_name = perf_evsel__name(pos);
                        key = perf_evsel__hists_browse(pos, nr_events, help,
                                                       ev_name, true, timer,
                                                       arg, delay_secs);
@@ -1309,17 +1504,11 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
        ui_helpline__push("Press ESC to exit");
 
        list_for_each_entry(pos, &evlist->entries, node) {
-               const char *ev_name = event_name(pos);
+               const char *ev_name = perf_evsel__name(pos);
                size_t line_len = strlen(ev_name) + 7;
 
                if (menu.b.width < line_len)
                        menu.b.width = line_len;
-               /*
-                * Cache the evsel name, tracepoints have a _high_ cost per
-                * event_name() call.
-                */
-               if (pos->name == NULL)
-                       pos->name = strdup(ev_name);
        }
 
        return perf_evsel_menu__run(&menu, evlist->nr_entries, help, timer,
@@ -1330,11 +1519,10 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
                                  void(*timer)(void *arg), void *arg,
                                  int delay_secs)
 {
-
        if (evlist->nr_entries == 1) {
                struct perf_evsel *first = list_entry(evlist->entries.next,
                                                      struct perf_evsel, node);
-               const char *ev_name = event_name(first);
+               const char *ev_name = perf_evsel__name(first);
                return perf_evsel__hists_browse(first, evlist->nr_entries, help,
                                                ev_name, false, timer, arg,
                                                delay_secs);
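
The new 'P' key handler above dumps the current histogram browser to a file named perf.hist.N, probing with access() for the first unused sequence number and giving up after an arbitrary cap. A self-contained toy version of that naming loop; the real code writes the browser contents rather than a placeholder line:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char filename[64];
        int seq = 0;
        FILE *fp;

        while (1) {
                snprintf(filename, sizeof(filename), "perf.hist.%d", seq);
                if (access(filename, F_OK))     /* name not taken yet */
                        break;
                if (++seq == 8192) {            /* arbitrary lazy upper limit */
                        fprintf(stderr, "too many perf.hist.N files\n");
                        return 1;
                }
        }

        fp = fopen(filename, "w");
        if (fp == NULL) {
                perror(filename);
                return 1;
        }

        fprintf(fp, "histogram dump would go here\n");
        fclose(fp);
        printf("%s written\n", filename);
        return 0;
}
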
index 0656c38..ec12e0b 100644 (file)
@@ -11,8 +11,8 @@
 
 static void perf_gtk__signal(int sig)
 {
+       perf_gtk__exit(false);
        psignal(sig, "perf");
-       gtk_main_quit();
 }
 
 static void perf_gtk__resize_window(GtkWidget *window)
@@ -122,13 +122,59 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
        gtk_container_add(GTK_CONTAINER(window), view);
 }
 
+#ifdef HAVE_GTK_INFO_BAR
+static GtkWidget *perf_gtk__setup_info_bar(void)
+{
+       GtkWidget *info_bar;
+       GtkWidget *label;
+       GtkWidget *content_area;
+
+       info_bar = gtk_info_bar_new();
+       gtk_widget_set_no_show_all(info_bar, TRUE);
+
+       label = gtk_label_new("");
+       gtk_widget_show(label);
+
+       content_area = gtk_info_bar_get_content_area(GTK_INFO_BAR(info_bar));
+       gtk_container_add(GTK_CONTAINER(content_area), label);
+
+       gtk_info_bar_add_button(GTK_INFO_BAR(info_bar), GTK_STOCK_OK,
+                               GTK_RESPONSE_OK);
+       g_signal_connect(info_bar, "response",
+                        G_CALLBACK(gtk_widget_hide), NULL);
+
+       pgctx->info_bar = info_bar;
+       pgctx->message_label = label;
+
+       return info_bar;
+}
+#endif
+
+static GtkWidget *perf_gtk__setup_statusbar(void)
+{
+       GtkWidget *stbar;
+       unsigned ctxid;
+
+       stbar = gtk_statusbar_new();
+
+       ctxid = gtk_statusbar_get_context_id(GTK_STATUSBAR(stbar),
+                                            "perf report");
+       pgctx->statbar = stbar;
+       pgctx->statbar_ctx_id = ctxid;
+
+       return stbar;
+}
+
 int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
                                  const char *help __used,
                                  void (*timer) (void *arg)__used,
                                  void *arg __used, int delay_secs __used)
 {
        struct perf_evsel *pos;
+       GtkWidget *vbox;
        GtkWidget *notebook;
+       GtkWidget *info_bar;
+       GtkWidget *statbar;
        GtkWidget *window;
 
        signal(SIGSEGV, perf_gtk__signal);
@@ -143,11 +189,17 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
 
        g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
 
+       pgctx = perf_gtk__activate_context(window);
+       if (!pgctx)
+               return -1;
+
+       vbox = gtk_vbox_new(FALSE, 0);
+
        notebook = gtk_notebook_new();
 
        list_for_each_entry(pos, &evlist->entries, node) {
                struct hists *hists = &pos->hists;
-               const char *evname = event_name(pos);
+               const char *evname = perf_evsel__name(pos);
                GtkWidget *scrolled_window;
                GtkWidget *tab_label;
 
@@ -164,7 +216,16 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
                gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
        }
 
-       gtk_container_add(GTK_CONTAINER(window), notebook);
+       gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
+
+       info_bar = perf_gtk__setup_info_bar();
+       if (info_bar)
+               gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0);
+
+       statbar = perf_gtk__setup_statusbar();
+       gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
+
+       gtk_container_add(GTK_CONTAINER(window), vbox);
 
        gtk_widget_show_all(window);
 
@@ -174,5 +235,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
 
        gtk_main();
 
+       perf_gtk__deactivate_context(&pgctx);
+
        return 0;
 }
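
The GTK report browser above no longer adds the notebook straight into the window; it packs the notebook, the optional info bar and a status bar into a vertical box instead. A minimal GTK 2 sketch of that layout (assuming GTK 2 is available, with a plain label standing in for the hists notebook):

#include <gtk/gtk.h>

int main(int argc, char **argv)
{
        GtkWidget *window, *vbox, *body, *statbar;

        gtk_init(&argc, &argv);

        window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
        g_signal_connect(window, "destroy", G_CALLBACK(gtk_main_quit), NULL);

        vbox = gtk_vbox_new(FALSE, 0);

        body = gtk_label_new("report notebook goes here");
        gtk_box_pack_start(GTK_BOX(vbox), body, TRUE, TRUE, 0);      /* expands */

        statbar = gtk_statusbar_new();
        gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); /* fixed height */

        gtk_container_add(GTK_CONTAINER(window), vbox);
        gtk_widget_show_all(window);

        gtk_main();
        return 0;
}
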
index 75177ee..a4d0f2b 100644 (file)
@@ -1,8 +1,39 @@
 #ifndef _PERF_GTK_H_
 #define _PERF_GTK_H_ 1
 
+#include <stdbool.h>
+
 #pragma GCC diagnostic ignored "-Wstrict-prototypes"
 #include <gtk/gtk.h>
 #pragma GCC diagnostic error "-Wstrict-prototypes"
 
+
+struct perf_gtk_context {
+       GtkWidget *main_window;
+
+#ifdef HAVE_GTK_INFO_BAR
+       GtkWidget *info_bar;
+       GtkWidget *message_label;
+#endif
+       GtkWidget *statbar;
+       guint statbar_ctx_id;
+};
+
+extern struct perf_gtk_context *pgctx;
+
+static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx)
+{
+       return ctx && ctx->main_window;
+}
+
+struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window);
+int perf_gtk__deactivate_context(struct perf_gtk_context **ctx);
+
+#ifndef HAVE_GTK_INFO_BAR
+static inline GtkWidget *perf_gtk__setup_info_bar(void)
+{
+       return NULL;
+}
+#endif
+
 #endif /* _PERF_GTK_H_ */
index 8295299..92879ce 100644 (file)
@@ -1,12 +1,17 @@
 #include "gtk.h"
 #include "../../util/cache.h"
+#include "../../util/debug.h"
+
+extern struct perf_error_ops perf_gtk_eops;
 
 int perf_gtk__init(void)
 {
+       perf_error__register(&perf_gtk_eops);
        return gtk_init_check(NULL, NULL) ? 0 : -1;
 }
 
 void perf_gtk__exit(bool wait_for_ok __used)
 {
+       perf_error__unregister(&perf_gtk_eops);
        gtk_main_quit();
 }
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c
new file mode 100644 (file)
index 0000000..0ead373
--- /dev/null
@@ -0,0 +1,129 @@
+#include "../util.h"
+#include "../../util/debug.h"
+#include "gtk.h"
+
+#include <string.h>
+
+
+struct perf_gtk_context *pgctx;
+
+struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window)
+{
+       struct perf_gtk_context *ctx;
+
+       ctx = malloc(sizeof(*pgctx));
+       if (ctx)
+               ctx->main_window = window;
+
+       return ctx;
+}
+
+int perf_gtk__deactivate_context(struct perf_gtk_context **ctx)
+{
+       if (!perf_gtk__is_active_context(*ctx))
+               return -1;
+
+       free(*ctx);
+       *ctx = NULL;
+       return 0;
+}
+
+static int perf_gtk__error(const char *format, va_list args)
+{
+       char *msg;
+       GtkWidget *dialog;
+
+       if (!perf_gtk__is_active_context(pgctx) ||
+           vasprintf(&msg, format, args) < 0) {
+               fprintf(stderr, "Error:\n");
+               vfprintf(stderr, format, args);
+               fprintf(stderr, "\n");
+               return -1;
+       }
+
+       dialog = gtk_message_dialog_new_with_markup(GTK_WINDOW(pgctx->main_window),
+                                       GTK_DIALOG_DESTROY_WITH_PARENT,
+                                       GTK_MESSAGE_ERROR,
+                                       GTK_BUTTONS_CLOSE,
+                                       "<b>Error</b>\n\n%s", msg);
+       gtk_dialog_run(GTK_DIALOG(dialog));
+
+       gtk_widget_destroy(dialog);
+       free(msg);
+       return 0;
+}
+
+#ifdef HAVE_GTK_INFO_BAR
+static int perf_gtk__warning_info_bar(const char *format, va_list args)
+{
+       char *msg;
+
+       if (!perf_gtk__is_active_context(pgctx) ||
+           vasprintf(&msg, format, args) < 0) {
+               fprintf(stderr, "Warning:\n");
+               vfprintf(stderr, format, args);
+               fprintf(stderr, "\n");
+               return -1;
+       }
+
+       gtk_label_set_text(GTK_LABEL(pgctx->message_label), msg);
+       gtk_info_bar_set_message_type(GTK_INFO_BAR(pgctx->info_bar),
+                                     GTK_MESSAGE_WARNING);
+       gtk_widget_show(pgctx->info_bar);
+
+       free(msg);
+       return 0;
+}
+#else
+static int perf_gtk__warning_statusbar(const char *format, va_list args)
+{
+       char *msg, *p;
+
+       if (!perf_gtk__is_active_context(pgctx) ||
+           vasprintf(&msg, format, args) < 0) {
+               fprintf(stderr, "Warning:\n");
+               vfprintf(stderr, format, args);
+               fprintf(stderr, "\n");
+               return -1;
+       }
+
+       gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar),
+                         pgctx->statbar_ctx_id);
+
+       /* Only first line can be displayed */
+       p = strchr(msg, '\n');
+       if (p)
+               *p = '\0';
+
+       gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar),
+                          pgctx->statbar_ctx_id, msg);
+
+       free(msg);
+       return 0;
+}
+#endif
+
+struct perf_error_ops perf_gtk_eops = {
+       .error          = perf_gtk__error,
+#ifdef HAVE_GTK_INFO_BAR
+       .warning        = perf_gtk__warning_info_bar,
+#else
+       .warning        = perf_gtk__warning_statusbar,
+#endif
+};
+
+/*
+ * FIXME: Functions below should be implemented properly.
+ *        For now, just add stubs for NO_NEWT=1 build.
+ */
+#ifdef NO_NEWT_SUPPORT
+int ui_helpline__show_help(const char *format __used, va_list ap __used)
+{
+       return 0;
+}
+
+void ui_progress__update(u64 curr __used, u64 total __used,
+                        const char *title __used)
+{
+}
+#endif
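
The new gtk/util.c above (and the matching TUI code further down) route errors and warnings through a registered struct perf_error_ops, falling back to stderr when no UI backend is active. A compact sketch of that registration scheme; the types and function names here are assumed stand-ins, not the exact util/debug.h interface:

#include <stdarg.h>
#include <stdio.h>

struct perf_error_ops {
        int (*error)(const char *format, va_list args);
        int (*warning)(const char *format, va_list args);
};

static struct perf_error_ops *eops;     /* currently registered UI backend */

static void perf_error__register(struct perf_error_ops *ops)
{
        eops = ops;
}

static void perf_error__unregister(struct perf_error_ops *ops)
{
        if (eops == ops)
                eops = NULL;
}

static int ui__error(const char *format, ...)
{
        va_list args;
        int ret;

        va_start(args, format);
        if (eops && eops->error)
                ret = eops->error(format, args);        /* backend dialog/helpline */
        else
                ret = vfprintf(stderr, format, args);   /* plain stdio fallback */
        va_end(args);
        return ret;
}

static int tui_error(const char *format, va_list args)
{
        fputs("TUI error: ", stderr);
        return vfprintf(stderr, format, args);
}

static struct perf_error_ops tui_eops = { .error = tui_error };

int main(void)
{
        ui__error("no backend registered yet: %d\n", 1);
        perf_error__register(&tui_eops);
        ui__error("routed through the backend: %d\n", 2);
        perf_error__unregister(&tui_eops);
        return 0;
}
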
index d33e943..e813c1d 100644 (file)
@@ -15,6 +15,8 @@ pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
 
 static volatile int ui__need_resize;
 
+extern struct perf_error_ops perf_tui_eops;
+
 void ui__refresh_dimensions(bool force)
 {
        if (force || ui__need_resize) {
@@ -122,6 +124,8 @@ int ui__init(void)
        signal(SIGINT, ui__signal);
        signal(SIGQUIT, ui__signal);
        signal(SIGTERM, ui__signal);
+
+       perf_error__register(&perf_tui_eops);
 out:
        return err;
 }
@@ -137,4 +141,6 @@ void ui__exit(bool wait_for_ok)
        SLsmg_refresh();
        SLsmg_reset_smg();
        SLang_reset_tty();
+
+       perf_error__unregister(&perf_tui_eops);
 }
diff --git a/tools/perf/ui/tui/util.c b/tools/perf/ui/tui/util.c
new file mode 100644 (file)
index 0000000..092902e
--- /dev/null
@@ -0,0 +1,243 @@
+#include "../../util/util.h"
+#include <signal.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/ttydefaults.h>
+
+#include "../../util/cache.h"
+#include "../../util/debug.h"
+#include "../browser.h"
+#include "../keysyms.h"
+#include "../helpline.h"
+#include "../ui.h"
+#include "../util.h"
+#include "../libslang.h"
+
+static void ui_browser__argv_write(struct ui_browser *browser,
+                                  void *entry, int row)
+{
+       char **arg = entry;
+       bool current_entry = ui_browser__is_current_entry(browser, row);
+
+       ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
+                                                      HE_COLORSET_NORMAL);
+       slsmg_write_nstring(*arg, browser->width);
+}
+
+static int popup_menu__run(struct ui_browser *menu)
+{
+       int key;
+
+       if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
+               return -1;
+
+       while (1) {
+               key = ui_browser__run(menu, 0);
+
+               switch (key) {
+               case K_RIGHT:
+               case K_ENTER:
+                       key = menu->index;
+                       break;
+               case K_LEFT:
+               case K_ESC:
+               case 'q':
+               case CTRL('c'):
+                       key = -1;
+                       break;
+               default:
+                       continue;
+               }
+
+               break;
+       }
+
+       ui_browser__hide(menu);
+       return key;
+}
+
+int ui__popup_menu(int argc, char * const argv[])
+{
+       struct ui_browser menu = {
+               .entries    = (void *)argv,
+               .refresh    = ui_browser__argv_refresh,
+               .seek       = ui_browser__argv_seek,
+               .write      = ui_browser__argv_write,
+               .nr_entries = argc,
+       };
+
+       return popup_menu__run(&menu);
+}
+
+int ui_browser__input_window(const char *title, const char *text, char *input,
+                            const char *exit_msg, int delay_secs)
+{
+       int x, y, len, key;
+       int max_len = 60, nr_lines = 0;
+       static char buf[50];
+       const char *t;
+
+       t = text;
+       while (1) {
+               const char *sep = strchr(t, '\n');
+
+               if (sep == NULL)
+                       sep = strchr(t, '\0');
+               len = sep - t;
+               if (max_len < len)
+                       max_len = len;
+               ++nr_lines;
+               if (*sep == '\0')
+                       break;
+               t = sep + 1;
+       }
+
+       max_len += 2;
+       nr_lines += 8;
+       y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+       x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+       SLsmg_set_color(0);
+       SLsmg_draw_box(y, x++, nr_lines, max_len);
+       if (title) {
+               SLsmg_gotorc(y, x + 1);
+               SLsmg_write_string((char *)title);
+       }
+       SLsmg_gotorc(++y, x);
+       nr_lines -= 7;
+       max_len -= 2;
+       SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+                                  nr_lines, max_len, 1);
+       y += nr_lines;
+       len = 5;
+       while (len--) {
+               SLsmg_gotorc(y + len - 1, x);
+               SLsmg_write_nstring((char *)" ", max_len);
+       }
+       SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
+
+       SLsmg_gotorc(y + 3, x);
+       SLsmg_write_nstring((char *)exit_msg, max_len);
+       SLsmg_refresh();
+
+       x += 2;
+       len = 0;
+       key = ui__getch(delay_secs);
+       while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
+               if (key == K_BKSPC) {
+                       if (len == 0)
+                               goto next_key;
+                       SLsmg_gotorc(y, x + --len);
+                       SLsmg_write_char(' ');
+               } else {
+                       buf[len] = key;
+                       SLsmg_gotorc(y, x + len++);
+                       SLsmg_write_char(key);
+               }
+               SLsmg_refresh();
+
+               /* XXX more graceful overflow handling needed */
+               if (len == sizeof(buf) - 1) {
+                       ui_helpline__push("maximum size of symbol name reached!");
+                       key = K_ENTER;
+                       break;
+               }
+next_key:
+               key = ui__getch(delay_secs);
+       }
+
+       buf[len] = '\0';
+       strncpy(input, buf, len+1);
+       return key;
+}
+
+int ui__question_window(const char *title, const char *text,
+                       const char *exit_msg, int delay_secs)
+{
+       int x, y;
+       int max_len = 0, nr_lines = 0;
+       const char *t;
+
+       t = text;
+       while (1) {
+               const char *sep = strchr(t, '\n');
+               int len;
+
+               if (sep == NULL)
+                       sep = strchr(t, '\0');
+               len = sep - t;
+               if (max_len < len)
+                       max_len = len;
+               ++nr_lines;
+               if (*sep == '\0')
+                       break;
+               t = sep + 1;
+       }
+
+       max_len += 2;
+       nr_lines += 4;
+       y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+       x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+       SLsmg_set_color(0);
+       SLsmg_draw_box(y, x++, nr_lines, max_len);
+       if (title) {
+               SLsmg_gotorc(y, x + 1);
+               SLsmg_write_string((char *)title);
+       }
+       SLsmg_gotorc(++y, x);
+       nr_lines -= 2;
+       max_len -= 2;
+       SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+                                  nr_lines, max_len, 1);
+       SLsmg_gotorc(y + nr_lines - 2, x);
+       SLsmg_write_nstring((char *)" ", max_len);
+       SLsmg_gotorc(y + nr_lines - 1, x);
+       SLsmg_write_nstring((char *)exit_msg, max_len);
+       SLsmg_refresh();
+       return ui__getch(delay_secs);
+}
+
+int ui__help_window(const char *text)
+{
+       return ui__question_window("Help", text, "Press any key...", 0);
+}
+
+int ui__dialog_yesno(const char *msg)
+{
+       return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
+}
+
+static int __ui__warning(const char *title, const char *format, va_list args)
+{
+       char *s;
+
+       if (vasprintf(&s, format, args) > 0) {
+               int key;
+
+               pthread_mutex_lock(&ui__lock);
+               key = ui__question_window(title, s, "Press any key...", 0);
+               pthread_mutex_unlock(&ui__lock);
+               free(s);
+               return key;
+       }
+
+       fprintf(stderr, "%s\n", title);
+       vfprintf(stderr, format, args);
+       return K_ESC;
+}
+
+static int perf_tui__error(const char *format, va_list args)
+{
+       return __ui__warning("Error:", format, args);
+}
+
+static int perf_tui__warning(const char *format, va_list args)
+{
+       return __ui__warning("Warning:", format, args);
+}
+
+struct perf_error_ops perf_tui_eops = {
+       .error          = perf_tui__error,
+       .warning        = perf_tui__warning,
+};
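For context, ui__popup_menu() above returns the index of the entry chosen with Enter/Right, or -1 on ESC, 'q', Ctrl-C or Left. A minimal caller sketch; the option strings and the wrapper function name are illustrative, not part of this patch:

        static int prompt_action(void)
        {
                char *options[] = { "Annotate", "Zoom into thread", "Exit" };
                int choice = ui__popup_menu(3, options);

                if (choice < 0)         /* menu dismissed */
                        return -1;
                return choice;          /* 0-based index of the highlighted entry */
        }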
index ad4374a..4f98977 100644 (file)
-#include "../util.h"
-#include <signal.h>
-#include <stdbool.h>
-#include <string.h>
-#include <sys/ttydefaults.h>
-
-#include "../cache.h"
-#include "../debug.h"
-#include "browser.h"
-#include "keysyms.h"
-#include "helpline.h"
-#include "ui.h"
 #include "util.h"
-#include "libslang.h"
-
-static void ui_browser__argv_write(struct ui_browser *browser,
-                                  void *entry, int row)
-{
-       char **arg = entry;
-       bool current_entry = ui_browser__is_current_entry(browser, row);
-
-       ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
-                                                      HE_COLORSET_NORMAL);
-       slsmg_write_nstring(*arg, browser->width);
-}
-
-static int popup_menu__run(struct ui_browser *menu)
-{
-       int key;
-
-       if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
-               return -1;
+#include "../debug.h"
 
-       while (1) {
-               key = ui_browser__run(menu, 0);
-
-               switch (key) {
-               case K_RIGHT:
-               case K_ENTER:
-                       key = menu->index;
-                       break;
-               case K_LEFT:
-               case K_ESC:
-               case 'q':
-               case CTRL('c'):
-                       key = -1;
-                       break;
-               default:
-                       continue;
-               }
-
-               break;
-       }
-
-       ui_browser__hide(menu);
-       return key;
-}
 
-int ui__popup_menu(int argc, char * const argv[])
+/*
+ * Default error logging functions
+ */
+static int perf_stdio__error(const char *format, va_list args)
 {
-       struct ui_browser menu = {
-               .entries    = (void *)argv,
-               .refresh    = ui_browser__argv_refresh,
-               .seek       = ui_browser__argv_seek,
-               .write      = ui_browser__argv_write,
-               .nr_entries = argc,
-       };
-
-       return popup_menu__run(&menu);
+       fprintf(stderr, "Error:\n");
+       vfprintf(stderr, format, args);
+       return 0;
 }
 
-int ui_browser__input_window(const char *title, const char *text, char *input,
-                            const char *exit_msg, int delay_secs)
+static int perf_stdio__warning(const char *format, va_list args)
 {
-       int x, y, len, key;
-       int max_len = 60, nr_lines = 0;
-       static char buf[50];
-       const char *t;
-
-       t = text;
-       while (1) {
-               const char *sep = strchr(t, '\n');
-
-               if (sep == NULL)
-                       sep = strchr(t, '\0');
-               len = sep - t;
-               if (max_len < len)
-                       max_len = len;
-               ++nr_lines;
-               if (*sep == '\0')
-                       break;
-               t = sep + 1;
-       }
-
-       max_len += 2;
-       nr_lines += 8;
-       y = SLtt_Screen_Rows / 2 - nr_lines / 2;
-       x = SLtt_Screen_Cols / 2 - max_len / 2;
-
-       SLsmg_set_color(0);
-       SLsmg_draw_box(y, x++, nr_lines, max_len);
-       if (title) {
-               SLsmg_gotorc(y, x + 1);
-               SLsmg_write_string((char *)title);
-       }
-       SLsmg_gotorc(++y, x);
-       nr_lines -= 7;
-       max_len -= 2;
-       SLsmg_write_wrapped_string((unsigned char *)text, y, x,
-                                  nr_lines, max_len, 1);
-       y += nr_lines;
-       len = 5;
-       while (len--) {
-               SLsmg_gotorc(y + len - 1, x);
-               SLsmg_write_nstring((char *)" ", max_len);
-       }
-       SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
-
-       SLsmg_gotorc(y + 3, x);
-       SLsmg_write_nstring((char *)exit_msg, max_len);
-       SLsmg_refresh();
-
-       x += 2;
-       len = 0;
-       key = ui__getch(delay_secs);
-       while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
-               if (key == K_BKSPC) {
-                       if (len == 0)
-                               goto next_key;
-                       SLsmg_gotorc(y, x + --len);
-                       SLsmg_write_char(' ');
-               } else {
-                       buf[len] = key;
-                       SLsmg_gotorc(y, x + len++);
-                       SLsmg_write_char(key);
-               }
-               SLsmg_refresh();
-
-               /* XXX more graceful overflow handling needed */
-               if (len == sizeof(buf) - 1) {
-                       ui_helpline__push("maximum size of symbol name reached!");
-                       key = K_ENTER;
-                       break;
-               }
-next_key:
-               key = ui__getch(delay_secs);
-       }
-
-       buf[len] = '\0';
-       strncpy(input, buf, len+1);
-       return key;
+       fprintf(stderr, "Warning:\n");
+       vfprintf(stderr, format, args);
+       return 0;
 }
 
-int ui__question_window(const char *title, const char *text,
-                       const char *exit_msg, int delay_secs)
+static struct perf_error_ops default_eops =
 {
-       int x, y;
-       int max_len = 0, nr_lines = 0;
-       const char *t;
-
-       t = text;
-       while (1) {
-               const char *sep = strchr(t, '\n');
-               int len;
-
-               if (sep == NULL)
-                       sep = strchr(t, '\0');
-               len = sep - t;
-               if (max_len < len)
-                       max_len = len;
-               ++nr_lines;
-               if (*sep == '\0')
-                       break;
-               t = sep + 1;
-       }
-
-       max_len += 2;
-       nr_lines += 4;
-       y = SLtt_Screen_Rows / 2 - nr_lines / 2,
-       x = SLtt_Screen_Cols / 2 - max_len / 2;
-
-       SLsmg_set_color(0);
-       SLsmg_draw_box(y, x++, nr_lines, max_len);
-       if (title) {
-               SLsmg_gotorc(y, x + 1);
-               SLsmg_write_string((char *)title);
-       }
-       SLsmg_gotorc(++y, x);
-       nr_lines -= 2;
-       max_len -= 2;
-       SLsmg_write_wrapped_string((unsigned char *)text, y, x,
-                                  nr_lines, max_len, 1);
-       SLsmg_gotorc(y + nr_lines - 2, x);
-       SLsmg_write_nstring((char *)" ", max_len);
-       SLsmg_gotorc(y + nr_lines - 1, x);
-       SLsmg_write_nstring((char *)exit_msg, max_len);
-       SLsmg_refresh();
-       return ui__getch(delay_secs);
-}
+       .error          = perf_stdio__error,
+       .warning        = perf_stdio__warning,
+};
 
-int ui__help_window(const char *text)
-{
-       return ui__question_window("Help", text, "Press any key...", 0);
-}
+static struct perf_error_ops *perf_eops = &default_eops;
 
-int ui__dialog_yesno(const char *msg)
-{
-       return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
-}
 
-int __ui__warning(const char *title, const char *format, va_list args)
+int ui__error(const char *format, ...)
 {
-       char *s;
-
-       if (use_browser > 0 && vasprintf(&s, format, args) > 0) {
-               int key;
+       int ret;
+       va_list args;
 
-               pthread_mutex_lock(&ui__lock);
-               key = ui__question_window(title, s, "Press any key...", 0);
-               pthread_mutex_unlock(&ui__lock);
-               free(s);
-               return key;
-       }
+       va_start(args, format);
+       ret = perf_eops->error(format, args);
+       va_end(args);
 
-       fprintf(stderr, "%s:\n", title);
-       vfprintf(stderr, format, args);
-       return K_ESC;
+       return ret;
 }
 
 int ui__warning(const char *format, ...)
 {
-       int key;
+       int ret;
        va_list args;
 
        va_start(args, format);
-       key = __ui__warning("Warning", format, args);
+       ret = perf_eops->warning(format, args);
        va_end(args);
-       return key;
+
+       return ret;
 }
 
-int ui__error(const char *format, ...)
+
+/**
+ * perf_error__register - Register error logging functions
+ * @eops: Pointer to the error logging function struct
+ *
+ * Register UI-specific error logging functions. Any previously
+ * registered logging functions must be unregistered first.
+ */
+int perf_error__register(struct perf_error_ops *eops)
 {
-       int key;
-       va_list args;
+       if (perf_eops != &default_eops)
+               return -1;
 
-       va_start(args, format);
-       key = __ui__warning("Error", format, args);
-       va_end(args);
-       return key;
+       perf_eops = eops;
+       return 0;
+}
+
+/**
+ * perf_error__unregister - Unregister error logging functions
+ * @eops: Pointer to the error logging function struct
+ *
+ * Unregister the previously registered error logging functions.
+ */
+int perf_error__unregister(struct perf_error_ops *eops)
+{
+       if (perf_eops != eops)
+               return -1;
+
+       perf_eops = &default_eops;
+       return 0;
 }
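A hedged sketch of how a UI front end is expected to drive this registration API; only perf_error__register(), perf_error__unregister() and perf_tui_eops come from this patch, the setup/teardown wrapper names are hypothetical:

        static int my_tui_setup(void)
        {
                /* fails with -1 if another UI already registered its ops */
                return perf_error__register(&perf_tui_eops);
        }

        static void my_tui_teardown(void)
        {
                /* restores the default stdio ops */
                perf_error__unregister(&perf_tui_eops);
        }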
index 2d1738b..361f08c 100644 (file)
@@ -9,6 +9,13 @@ int ui__help_window(const char *text);
 int ui__dialog_yesno(const char *msg);
 int ui__question_window(const char *title, const char *text,
                        const char *exit_msg, int delay_secs);
-int __ui__warning(const char *title, const char *format, va_list args);
+
+struct perf_error_ops {
+       int (*error)(const char *format, va_list args);
+       int (*warning)(const char *format, va_list args);
+};
+
+int perf_error__register(struct perf_error_ops *eops);
+int perf_error__unregister(struct perf_error_ops *eops);
 
 #endif /* _PERF_UI_UTIL_H_ */
index efb1fce..4dfe0bb 100644 (file)
@@ -47,7 +47,7 @@ int dump_printf(const char *fmt, ...)
        return ret;
 }
 
-#ifdef NO_NEWT_SUPPORT
+#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT)
 int ui__warning(const char *format, ...)
 {
        va_list args;
index 6bebe7f..015c91d 100644 (file)
@@ -12,8 +12,9 @@ int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(union perf_event *event);
 
 struct ui_progress;
+struct perf_error_ops;
 
-#ifdef NO_NEWT_SUPPORT
+#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT)
 static inline int ui_helpline__show_help(const char *format __used, va_list ap __used)
 {
        return 0;
@@ -23,12 +24,28 @@ static inline void ui_progress__update(u64 curr __used, u64 total __used,
                                       const char *title __used) {}
 
 #define ui__error(format, arg...) ui__warning(format, ##arg)
-#else
+
+static inline int
+perf_error__register(struct perf_error_ops *eops __used)
+{
+       return 0;
+}
+
+static inline int
+perf_error__unregister(struct perf_error_ops *eops __used)
+{
+       return 0;
+}
+
+#else /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */
+
 extern char ui_helpline__last_msg[];
 int ui_helpline__show_help(const char *format, va_list ap);
 #include "../ui/progress.h"
 int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
-#endif
+#include "../ui/util.h"
+
+#endif /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */
 
 int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
 int ui__error_paranoid(void);
index 7400fb3..f74e956 100644 (file)
@@ -224,8 +224,8 @@ out_free_attrs:
        return err;
 }
 
-static struct perf_evsel *
-       perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
 {
        struct perf_evsel *evsel;
 
index 989bee9..40d4d3c 100644 (file)
@@ -73,6 +73,9 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
 #define perf_evlist__set_tracepoints_handlers_array(evlist, array) \
        perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
 
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
+
 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id);
 
index 9f6cebd..e817713 100644 (file)
@@ -15,7 +15,7 @@
 #include "cpumap.h"
 #include "thread_map.h"
 #include "target.h"
-#include "../../include/linux/perf_event.h"
+#include "../../../include/linux/hw_breakpoint.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -78,7 +78,7 @@ static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
        "ref-cycles",
 };
 
-const char *__perf_evsel__hw_name(u64 config)
+static const char *__perf_evsel__hw_name(u64 config)
 {
        if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
                return perf_evsel__hw_names[config];
@@ -86,16 +86,15 @@ const char *__perf_evsel__hw_name(u64 config)
        return "unknown-hardware";
 }
 
-static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
 {
-       int colon = 0;
+       int colon = 0, r = 0;
        struct perf_event_attr *attr = &evsel->attr;
-       int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config));
        bool exclude_guest_default = false;
 
 #define MOD_PRINT(context, mod)        do {                                    \
                if (!attr->exclude_##context) {                         \
-                       if (!colon) colon = r++;                        \
+                       if (!colon) colon = ++r;                        \
                        r += scnprintf(bf + r, size - r, "%c", mod);    \
                } } while(0)
 
@@ -108,7 +107,7 @@ static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
 
        if (attr->precise_ip) {
                if (!colon)
-                       colon = r++;
+                       colon = ++r;
                r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
                exclude_guest_default = true;
        }
@@ -119,39 +118,211 @@ static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
        }
 #undef MOD_PRINT
        if (colon)
-               bf[colon] = ':';
+               bf[colon - 1] = ':';
        return r;
 }
 
-int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
+       return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
+       "cpu-clock",
+       "task-clock",
+       "page-faults",
+       "context-switches",
+       "CPU-migrations",
+       "minor-faults",
+       "major-faults",
+       "alignment-faults",
+       "emulation-faults",
+};
+
+static const char *__perf_evsel__sw_name(u64 config)
 {
-       int ret;
+       if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
+               return perf_evsel__sw_names[config];
+       return "unknown-software";
+}
+
+static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
+       return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
+{
+       int r;
+
+       r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
+
+       if (type & HW_BREAKPOINT_R)
+               r += scnprintf(bf + r, size - r, "r");
+
+       if (type & HW_BREAKPOINT_W)
+               r += scnprintf(bf + r, size - r, "w");
+
+       if (type & HW_BREAKPOINT_X)
+               r += scnprintf(bf + r, size - r, "x");
+
+       return r;
+}
+
+static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       struct perf_event_attr *attr = &evsel->attr;
+       int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
+       return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_EVSEL__MAX_ALIASES] = {
+ { "L1-dcache",        "l1-d",         "l1d",          "L1-data",              },
+ { "L1-icache",        "l1-i",         "l1i",          "L1-instruction",       },
+ { "LLC",      "L2",                                                   },
+ { "dTLB",     "d-tlb",        "Data-TLB",                             },
+ { "iTLB",     "i-tlb",        "Instruction-TLB",                      },
+ { "branch",   "branches",     "bpu",          "btb",          "bpc",  },
+ { "node",                                                             },
+};
+
+const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+                                  [PERF_EVSEL__MAX_ALIASES] = {
+ { "load",     "loads",        "read",                                 },
+ { "store",    "stores",       "write",                                },
+ { "prefetch", "prefetches",   "speculative-read", "speculative-load", },
+};
+
+const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+                                      [PERF_EVSEL__MAX_ALIASES] = {
+ { "refs",     "Reference",    "ops",          "access",               },
+ { "misses",   "miss",                                                 },
+};
+
+#define C(x)           PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ     (1 << C(OP_READ))
+#define CACHE_WRITE    (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x)         (1 << x)
+
+/*
+ * cache operation stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
+ [C(L1D)]      = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)]      = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)]       = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)]     = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)]     = (CACHE_READ),
+ [C(BPU)]      = (CACHE_READ),
+ [C(NODE)]     = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+};
+
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
+{
+       if (perf_evsel__hw_cache_stat[type] & COP(op))
+               return true;    /* valid */
+       else
+               return false;   /* invalid */
+}
+
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+                                           char *bf, size_t size)
+{
+       if (result) {
+               return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
+                                perf_evsel__hw_cache_op[op][0],
+                                perf_evsel__hw_cache_result[result][0]);
+       }
+
+       return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
+                        perf_evsel__hw_cache_op[op][1]);
+}
+
+static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
+{
+       u8 op, result, type = (config >>  0) & 0xff;
+       const char *err = "unknown-ext-hardware-cache-type";
+
+       if (type >= PERF_COUNT_HW_CACHE_MAX)
+               goto out_err;
+
+       op = (config >>  8) & 0xff;
+       err = "unknown-ext-hardware-cache-op";
+       if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
+               goto out_err;
+
+       result = (config >> 16) & 0xff;
+       err = "unknown-ext-hardware-cache-result";
+       if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+               goto out_err;
+
+       err = "invalid-cache";
+       if (!perf_evsel__is_cache_op_valid(type, op))
+               goto out_err;
+
+       return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
+out_err:
+       return scnprintf(bf, size, "%s", err);
+}
+
+static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
+       return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+       int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+       return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+const char *perf_evsel__name(struct perf_evsel *evsel)
+{
+       char bf[128];
+
+       if (evsel->name)
+               return evsel->name;
 
        switch (evsel->attr.type) {
        case PERF_TYPE_RAW:
-               ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+               perf_evsel__raw_name(evsel, bf, sizeof(bf));
                break;
 
        case PERF_TYPE_HARDWARE:
-               ret = perf_evsel__hw_name(evsel, bf, size);
+               perf_evsel__hw_name(evsel, bf, sizeof(bf));
+               break;
+
+       case PERF_TYPE_HW_CACHE:
+               perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
+               break;
+
+       case PERF_TYPE_SOFTWARE:
+               perf_evsel__sw_name(evsel, bf, sizeof(bf));
                break;
+
+       case PERF_TYPE_TRACEPOINT:
+               scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
+               break;
+
+       case PERF_TYPE_BREAKPOINT:
+               perf_evsel__bp_name(evsel, bf, sizeof(bf));
+               break;
+
        default:
-               /*
-                * FIXME
-                *
-                * This is the minimal perf_evsel__name so that we can
-                * reconstruct event names taking into account event modifiers.
-                *
-                * The old event_name uses it now for raw anr hw events, so that
-                * we don't drag all the parsing stuff into the python binding.
-                *
-                * On the next devel cycle the rest of the event naming will be
-                * brought here.
-                */
-               return 0;
-       }
-
-       return ret;
+               scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
+               break;
+       }
+
+       evsel->name = strdup(bf);
+
+       return evsel->name ?: "unknown";
 }
 
 void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
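For reference, the hw-cache names decoded above come from attr.config, which packs the cache type, operation and result into its low three bytes ((config >> 0/8/16) & 0xff, as in __perf_evsel__hw_cache_name()). A small hand-built example using the standard perf_event.h enums:

        /* L1-dcache load misses: type in bits 0-7, op in bits 8-15, result in bits 16-23 */
        u64 config = PERF_COUNT_HW_CACHE_L1D |
                     (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
        /* __perf_evsel__hw_cache_name() renders this as "L1-dcache-load-misses" */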
index 4ba8b56..67cc503 100644 (file)
@@ -83,8 +83,19 @@ void perf_evsel__config(struct perf_evsel *evsel,
                        struct perf_record_opts *opts,
                        struct perf_evsel *first);
 
-const char* __perf_evsel__hw_name(u64 config);
-int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size);
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op);
+
+#define PERF_EVSEL__MAX_ALIASES 8
+
+extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+                                      [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+                                         [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+                                              [PERF_EVSEL__MAX_ALIASES];
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+                                           char *bf, size_t size);
+const char *perf_evsel__name(struct perf_evsel *evsel);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
index 2dd5edf..5a47aba 100644 (file)
@@ -641,7 +641,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
                /*
                 * write event string as passed on cmdline
                 */
-               ret = do_write_string(fd, event_name(attr));
+               ret = do_write_string(fd, perf_evsel__name(attr));
                if (ret < 0)
                        return ret;
                /*
@@ -1474,15 +1474,15 @@ out:
 
 static int process_tracing_data(struct perf_file_section *section __unused,
                              struct perf_header *ph __unused,
-                             int feat __unused, int fd)
+                             int feat __unused, int fd, void *data)
 {
-       trace_report(fd, false);
+       trace_report(fd, data, false);
        return 0;
 }
 
 static int process_build_id(struct perf_file_section *section,
                            struct perf_header *ph,
-                           int feat __unused, int fd)
+                           int feat __unused, int fd, void *data __used)
 {
        if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
                pr_debug("Failed to read buildids, continuing...\n");
@@ -1493,7 +1493,7 @@ struct feature_ops {
        int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
        void (*print)(struct perf_header *h, int fd, FILE *fp);
        int (*process)(struct perf_file_section *section,
-                      struct perf_header *h, int feat, int fd);
+                      struct perf_header *h, int feat, int fd, void *data);
        const char *name;
        bool full_only;
 };
@@ -1942,7 +1942,6 @@ int perf_file_header__read(struct perf_file_header *header,
                else
                        return -1;
        } else if (ph->needs_swap) {
-               unsigned int i;
                /*
                 * feature bitmap is declared as an array of unsigned longs --
                 * not good since its size can differ between the host that
@@ -1958,14 +1957,17 @@ int perf_file_header__read(struct perf_file_header *header,
                 * file), punt and fallback to the original behavior --
                 * clearing all feature bits and setting buildid.
                 */
-               for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i)
-                       header->adds_features[i] = bswap_64(header->adds_features[i]);
+               mem_bswap_64(&header->adds_features,
+                           BITS_TO_U64(HEADER_FEAT_BITS));
 
                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
-                       for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) {
-                               header->adds_features[i] = bswap_64(header->adds_features[i]);
-                               header->adds_features[i] = bswap_32(header->adds_features[i]);
-                       }
+                       /* unswap as u64 */
+                       mem_bswap_64(&header->adds_features,
+                                   BITS_TO_U64(HEADER_FEAT_BITS));
+
+                       /* unswap as u32 */
+                       mem_bswap_32(&header->adds_features,
+                                   BITS_TO_U32(HEADER_FEAT_BITS));
                }
 
                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
@@ -1986,7 +1988,7 @@ int perf_file_header__read(struct perf_file_header *header,
 
 static int perf_file_section__process(struct perf_file_section *section,
                                      struct perf_header *ph,
-                                     int feat, int fd, void *data __used)
+                                     int feat, int fd, void *data)
 {
        if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
                pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
@@ -2002,7 +2004,7 @@ static int perf_file_section__process(struct perf_file_section *section,
        if (!feat_ops[feat].process)
                return 0;
 
-       return feat_ops[feat].process(section, ph, feat, fd);
+       return feat_ops[feat].process(section, ph, feat, fd, data);
 }
 
 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
@@ -2091,6 +2093,38 @@ static int read_attr(int fd, struct perf_header *ph,
        return ret <= 0 ? -1 : 0;
 }
 
+static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel,
+                                          struct pevent *pevent)
+{
+       struct event_format *event = pevent_find_event(pevent,
+                                                      evsel->attr.config);
+       char bf[128];
+
+       if (event == NULL)
+               return -1;
+
+       snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
+       evsel->name = strdup(bf);
+       if (evsel->name == NULL)
+               return -1;
+
+       return 0;
+}
+
+static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist,
+                                            struct pevent *pevent)
+{
+       struct perf_evsel *pos;
+
+       list_for_each_entry(pos, &evlist->entries, node) {
+               if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
+                   perf_evsel__set_tracepoint_name(pos, pevent))
+                       return -1;
+       }
+
+       return 0;
+}
+
 int perf_session__read_header(struct perf_session *session, int fd)
 {
        struct perf_header *header = &session->header;
@@ -2167,11 +2201,14 @@ int perf_session__read_header(struct perf_session *session, int fd)
                event_count =  f_header.event_types.size / sizeof(struct perf_trace_event_type);
        }
 
-       perf_header__process_sections(header, fd, NULL,
+       perf_header__process_sections(header, fd, &session->pevent,
                                      perf_file_section__process);
 
        lseek(fd, header->data_offset, SEEK_SET);
 
+       if (perf_evlist__set_tracepoint_names(session->evlist, session->pevent))
+               goto out_delete_evlist;
+
        header->frozen = 1;
        return 0;
 out_errno:
@@ -2385,8 +2422,8 @@ int perf_event__process_tracing_data(union perf_event *event,
        lseek(session->fd, offset + sizeof(struct tracing_data_event),
              SEEK_SET);
 
-       size_read = trace_report(session->fd, session->repipe);
-
+       size_read = trace_report(session->fd, &session->pevent,
+                                session->repipe);
        padding = ALIGN(size_read, sizeof(u64)) - size_read;
 
        if (read(session->fd, buf, padding) < 0)
index 34bb556..0b096c2 100644 (file)
@@ -47,6 +47,7 @@ enum hist_column {
        HISTC_SYMBOL_TO,
        HISTC_DSO_FROM,
        HISTC_DSO_TO,
+       HISTC_SRCLINE,
        HISTC_NR_COLS, /* Last entry */
 };
 
index f158483..587a230 100644 (file)
@@ -8,6 +8,8 @@
 #define BITS_PER_LONG __WORDSIZE
 #define BITS_PER_BYTE           8
 #define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
 
 #define for_each_set_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size));            \
index 1eb804f..b6842c1 100644 (file)
@@ -108,4 +108,14 @@ int eprintf(int level,
 #define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
 
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
 #endif
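To illustrate the rounding helpers added above (the second argument must be a power of two, and the __round_mask cast keeps the ~ as wide as x):

        size_t padded = round_up(13, 8);        /* (12 | 7) + 1 == 16 */
        size_t base   = round_down(13, 8);      /* 13 & ~7      ==  8 */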
index 35ae568..a1f4e36 100644 (file)
@@ -669,25 +669,26 @@ struct machine *machines__find(struct rb_root *self, pid_t pid)
 struct machine *machines__findnew(struct rb_root *self, pid_t pid)
 {
        char path[PATH_MAX];
-       const char *root_dir;
+       const char *root_dir = "";
        struct machine *machine = machines__find(self, pid);
 
-       if (!machine || machine->pid != pid) {
-               if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
-                       root_dir = "";
-               else {
-                       if (!symbol_conf.guestmount)
-                               goto out;
-                       sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
-                       if (access(path, R_OK)) {
-                               pr_err("Can't access file %s\n", path);
-                               goto out;
-                       }
-                       root_dir = path;
+       if (machine && (machine->pid == pid))
+               goto out;
+
+       if ((pid != HOST_KERNEL_ID) &&
+           (pid != DEFAULT_GUEST_KERNEL_ID) &&
+           (symbol_conf.guestmount)) {
+               sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+               if (access(path, R_OK)) {
+                       pr_err("Can't access file %s\n", path);
+                       machine = NULL;
+                       goto out;
                }
-               machine = machines__add(self, pid, root_dir);
+               root_dir = path;
        }
 
+       machine = machines__add(self, pid, root_dir);
+
 out:
        return machine;
 }
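A sketch of how this lookup is typically driven; the rb_root variable and the guest pid are illustrative, while HOST_KERNEL_ID and symbol_conf.guestmount are existing perf symbols:

        struct rb_root machines = RB_ROOT;
        struct machine *host, *guest;

        host  = machines__findnew(&machines, HOST_KERNEL_ID);  /* empty root_dir */
        guest = machines__findnew(&machines, 4242);    /* uses <guestmount>/4242 if set */
        if (guest == NULL)
                pr_err("no machine for guest pid 4242\n");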
index 81371ba..c14c665 100644 (file)
@@ -157,7 +157,7 @@ void machine__exit(struct machine *self);
 void machine__delete(struct machine *self);
 
 int machine__resolve_callchain(struct machine *machine,
-                              struct perf_evsel *evsel, struct thread *thread,
+                              struct thread *thread,
                               struct ip_callchain *chain,
                               struct symbol **parent);
 int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
index 76b98e2..1b997d2 100644 (file)
@@ -181,6 +181,22 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
        return 0;
 }
 
+static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong type",
+                       PERF_TYPE_BREAKPOINT == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong bp_type",
+               (HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->attr.bp_type);
+       TEST_ASSERT_VAL("wrong bp_len",
+                       HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
+       return 0;
+}
+
 static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -309,6 +325,8 @@ static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong name",
+                       !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:u"));
 
        return test__checkevent_breakpoint(evlist);
 }
@@ -322,6 +340,8 @@ static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong name",
+                       !strcmp(perf_evsel__name(evsel), "mem:0x0:x:k"));
 
        return test__checkevent_breakpoint_x(evlist);
 }
@@ -335,6 +355,8 @@ static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong name",
+                       !strcmp(perf_evsel__name(evsel), "mem:0x0:r:hp"));
 
        return test__checkevent_breakpoint_r(evlist);
 }
@@ -348,10 +370,27 @@ static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong name",
+                       !strcmp(perf_evsel__name(evsel), "mem:0x0:w:up"));
 
        return test__checkevent_breakpoint_w(evlist);
 }
 
+static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong name",
+                       !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:kp"));
+
+       return test__checkevent_breakpoint_rw(evlist);
+}
+
 static int test__checkevent_pmu(struct perf_evlist *evlist)
 {
 
@@ -413,19 +452,63 @@ static int test__checkevent_pmu_name(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
-       /* cpu/config=1,name=krava1/u */
+       /* cpu/config=1,name=krava/u */
        evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
        TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",  1 == evsel->attr.config);
-       TEST_ASSERT_VAL("wrong name", !strcmp(evsel->name, "krava"));
+       TEST_ASSERT_VAL("wrong name", !strcmp(perf_evsel__name(evsel), "krava"));
 
-       /* cpu/config=2/" */
+       /* cpu/config=2/u" */
        evsel = list_entry(evsel->node.next, struct perf_evsel, node);
        TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
        TEST_ASSERT_VAL("wrong config",  2 == evsel->attr.config);
-       TEST_ASSERT_VAL("wrong name", !strcmp(evsel->name, "raw 0x2"));
+       TEST_ASSERT_VAL("wrong name",
+                       !strcmp(perf_evsel__name(evsel), "raw 0x2:u"));
+
+       return 0;
+}
+
+static int test__checkterms_simple(struct list_head *terms)
+{
+       struct parse_events__term *term;
+
+       /* config=10 */
+       term = list_entry(terms->next, struct parse_events__term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 10);
+       TEST_ASSERT_VAL("wrong config", !term->config);
+
+       /* config1 */
+       term = list_entry(term->list.next, struct parse_events__term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+       TEST_ASSERT_VAL("wrong config", !term->config);
+
+       /* config2=3 */
+       term = list_entry(term->list.next, struct parse_events__term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 3);
+       TEST_ASSERT_VAL("wrong config", !term->config);
+
+       /* umask=1 */
+       term = list_entry(term->list.next, struct parse_events__term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_USER);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+       TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "umask"));
 
        return 0;
 }
@@ -541,10 +624,16 @@ static struct test__event_st test__events[] = {
                .name  = "instructions:H",
                .check = test__checkevent_exclude_guest_modifier,
        },
+       [26] = {
+               .name  = "mem:0:rw",
+               .check = test__checkevent_breakpoint_rw,
+       },
+       [27] = {
+               .name  = "mem:0:rw:kp",
+               .check = test__checkevent_breakpoint_rw_modifier,
+       },
 };
 
-#define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
-
 static struct test__event_st test__events_pmu[] = {
        [0] = {
                .name  = "cpu/config=10,config1,config2=3,period=1000/u",
@@ -556,10 +645,23 @@ static struct test__event_st test__events_pmu[] = {
        },
 };
 
-#define TEST__EVENTS_PMU_CNT (sizeof(test__events_pmu) / \
-                             sizeof(struct test__event_st))
+struct test__term {
+       const char *str;
+       __u32 type;
+       int (*check)(struct list_head *terms);
+};
+
+static struct test__term test__terms[] = {
+       [0] = {
+               .str   = "config=10,config1,config2=3,umask=1",
+               .check = test__checkterms_simple,
+       },
+};
+
+#define TEST__TERMS_CNT (sizeof(test__terms) / \
+                        sizeof(struct test__term))
 
-static int test(struct test__event_st *e)
+static int test_event(struct test__event_st *e)
 {
        struct perf_evlist *evlist;
        int ret;
@@ -590,7 +692,48 @@ static int test_events(struct test__event_st *events, unsigned cnt)
                struct test__event_st *e = &events[i];
 
                pr_debug("running test %d '%s'\n", i, e->name);
-               ret = test(e);
+               ret = test_event(e);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int test_term(struct test__term *t)
+{
+       struct list_head *terms;
+       int ret;
+
+       terms = malloc(sizeof(*terms));
+       if (!terms)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(terms);
+
+       ret = parse_events_terms(terms, t->str);
+       if (ret) {
+               pr_debug("failed to parse terms '%s', err %d\n",
+                        t->str, ret);
+               return ret;
+       }
+
+       ret = t->check(terms);
+       parse_events__free_terms(terms);
+
+       return ret;
+}
+
+static int test_terms(struct test__term *terms, unsigned cnt)
+{
+       int ret = 0;
+       unsigned i;
+
+       for (i = 0; i < cnt; i++) {
+               struct test__term *t = &terms[i];
+
+               pr_debug("running test %d '%s'\n", i, t->str);
+               ret = test_term(t);
                if (ret)
                        break;
        }
@@ -617,9 +760,17 @@ int parse_events__test(void)
 {
        int ret;
 
-       ret = test_events(test__events, TEST__EVENTS_CNT);
-       if (!ret && test_pmu())
-               ret = test_events(test__events_pmu, TEST__EVENTS_PMU_CNT);
+#define TEST_EVENTS(tests)                             \
+do {                                                   \
+       ret = test_events(tests, ARRAY_SIZE(tests));    \
+       if (ret)                                        \
+               return ret;                             \
+} while (0)
 
-       return ret;
+       TEST_EVENTS(test__events);
+
+       if (test_pmu())
+               TEST_EVENTS(test__events_pmu);
+
+       return test_terms(test__terms, ARRAY_SIZE(test__terms));
 }
index 05dbc8b..1aa721d 100644 (file)
 #include "cache.h"
 #include "header.h"
 #include "debugfs.h"
+#include "parse-events-bison.h"
+#define YY_EXTRA_TYPE int
 #include "parse-events-flex.h"
 #include "pmu.h"
 
 #define MAX_NAME_LEN 100
 
 struct event_symbol {
-       u8              type;
-       u64             config;
        const char      *symbol;
        const char      *alias;
 };
@@ -26,32 +26,88 @@ struct event_symbol {
 #ifdef PARSER_DEBUG
 extern int parse_events_debug;
 #endif
-int parse_events_parse(struct list_head *list, int *idx);
-
-#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
-#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
-
-static struct event_symbol event_symbols[] = {
-  { CHW(CPU_CYCLES),                   "cpu-cycles",                   "cycles"                },
-  { CHW(STALLED_CYCLES_FRONTEND),      "stalled-cycles-frontend",      "idle-cycles-frontend"  },
-  { CHW(STALLED_CYCLES_BACKEND),       "stalled-cycles-backend",       "idle-cycles-backend"   },
-  { CHW(INSTRUCTIONS),                 "instructions",                 ""                      },
-  { CHW(CACHE_REFERENCES),             "cache-references",             ""                      },
-  { CHW(CACHE_MISSES),                 "cache-misses",                 ""                      },
-  { CHW(BRANCH_INSTRUCTIONS),          "branch-instructions",          "branches"              },
-  { CHW(BRANCH_MISSES),                        "branch-misses",                ""                      },
-  { CHW(BUS_CYCLES),                   "bus-cycles",                   ""                      },
-  { CHW(REF_CPU_CYCLES),               "ref-cycles",                   ""                      },
-
-  { CSW(CPU_CLOCK),                    "cpu-clock",                    ""                      },
-  { CSW(TASK_CLOCK),                   "task-clock",                   ""                      },
-  { CSW(PAGE_FAULTS),                  "page-faults",                  "faults"                },
-  { CSW(PAGE_FAULTS_MIN),              "minor-faults",                 ""                      },
-  { CSW(PAGE_FAULTS_MAJ),              "major-faults",                 ""                      },
-  { CSW(CONTEXT_SWITCHES),             "context-switches",             "cs"                    },
-  { CSW(CPU_MIGRATIONS),               "cpu-migrations",               "migrations"            },
-  { CSW(ALIGNMENT_FAULTS),             "alignment-faults",             ""                      },
-  { CSW(EMULATION_FAULTS),             "emulation-faults",             ""                      },
+int parse_events_parse(void *data, void *scanner);
+
+static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES] = {
+               .symbol = "cpu-cycles",
+               .alias  = "cycles",
+       },
+       [PERF_COUNT_HW_INSTRUCTIONS] = {
+               .symbol = "instructions",
+               .alias  = "",
+       },
+       [PERF_COUNT_HW_CACHE_REFERENCES] = {
+               .symbol = "cache-references",
+               .alias  = "",
+       },
+       [PERF_COUNT_HW_CACHE_MISSES] = {
+               .symbol = "cache-misses",
+               .alias  = "",
+       },
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
+               .symbol = "branch-instructions",
+               .alias  = "branches",
+       },
+       [PERF_COUNT_HW_BRANCH_MISSES] = {
+               .symbol = "branch-misses",
+               .alias  = "",
+       },
+       [PERF_COUNT_HW_BUS_CYCLES] = {
+               .symbol = "bus-cycles",
+               .alias  = "",
+       },
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
+               .symbol = "stalled-cycles-frontend",
+               .alias  = "idle-cycles-frontend",
+       },
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
+               .symbol = "stalled-cycles-backend",
+               .alias  = "idle-cycles-backend",
+       },
+       [PERF_COUNT_HW_REF_CPU_CYCLES] = {
+               .symbol = "ref-cycles",
+               .alias  = "",
+       },
+};
+
+static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
+       [PERF_COUNT_SW_CPU_CLOCK] = {
+               .symbol = "cpu-clock",
+               .alias  = "",
+       },
+       [PERF_COUNT_SW_TASK_CLOCK] = {
+               .symbol = "task-clock",
+               .alias  = "",
+       },
+       [PERF_COUNT_SW_PAGE_FAULTS] = {
+               .symbol = "page-faults",
+               .alias  = "faults",
+       },
+       [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
+               .symbol = "context-switches",
+               .alias  = "cs",
+       },
+       [PERF_COUNT_SW_CPU_MIGRATIONS] = {
+               .symbol = "cpu-migrations",
+               .alias  = "migrations",
+       },
+       [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
+               .symbol = "minor-faults",
+               .alias  = "",
+       },
+       [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
+               .symbol = "major-faults",
+               .alias  = "",
+       },
+       [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
+               .symbol = "alignment-faults",
+               .alias  = "",
+       },
+       [PERF_COUNT_SW_EMULATION_FAULTS] = {
+               .symbol = "emulation-faults",
+               .alias  = "",
+       },
 };
 
 #define __PERF_EVENT_FIELD(config, name) \
@@ -62,63 +118,6 @@ static struct event_symbol event_symbols[] = {
 #define PERF_EVENT_TYPE(config)                __PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config)          __PERF_EVENT_FIELD(config, EVENT)
 
-static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
-       "cpu-clock",
-       "task-clock",
-       "page-faults",
-       "context-switches",
-       "CPU-migrations",
-       "minor-faults",
-       "major-faults",
-       "alignment-faults",
-       "emulation-faults",
-};
-
-#define MAX_ALIASES 8
-
-static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = {
- { "L1-dcache",        "l1-d",         "l1d",          "L1-data",              },
- { "L1-icache",        "l1-i",         "l1i",          "L1-instruction",       },
- { "LLC",      "L2",                                                   },
- { "dTLB",     "d-tlb",        "Data-TLB",                             },
- { "iTLB",     "i-tlb",        "Instruction-TLB",                      },
- { "branch",   "branches",     "bpu",          "btb",          "bpc",  },
- { "node",                                                             },
-};
-
-static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = {
- { "load",     "loads",        "read",                                 },
- { "store",    "stores",       "write",                                },
- { "prefetch", "prefetches",   "speculative-read", "speculative-load", },
-};
-
-static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
-                                 [MAX_ALIASES] = {
- { "refs",     "Reference",    "ops",          "access",               },
- { "misses",   "miss",                                                 },
-};
-
-#define C(x)           PERF_COUNT_HW_CACHE_##x
-#define CACHE_READ     (1 << C(OP_READ))
-#define CACHE_WRITE    (1 << C(OP_WRITE))
-#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
-#define COP(x)         (1 << x)
-
-/*
- * cache operartion stat
- * L1I : Read and prefetch only
- * ITLB and BPU : Read-only
- */
-static unsigned long hw_cache_stat[C(MAX)] = {
- [C(L1D)]      = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(L1I)]      = (CACHE_READ | CACHE_PREFETCH),
- [C(LL)]       = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(DTLB)]     = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(ITLB)]     = (CACHE_READ),
- [C(BPU)]      = (CACHE_READ),
- [C(NODE)]     = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
-};
-
 #define for_each_subsystem(sys_dir, sys_dirent, sys_next)             \
        while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)        \
        if (sys_dirent.d_type == DT_DIR &&                                     \
@@ -218,48 +217,6 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
        return NULL;
 }
 
-#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
-static const char *tracepoint_id_to_name(u64 config)
-{
-       static char buf[TP_PATH_LEN];
-       struct tracepoint_path *path;
-
-       path = tracepoint_id_to_path(config);
-       if (path) {
-               snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
-               free(path->name);
-               free(path->system);
-               free(path);
-       } else
-               snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
-
-       return buf;
-}
-
-static int is_cache_op_valid(u8 cache_type, u8 cache_op)
-{
-       if (hw_cache_stat[cache_type] & COP(cache_op))
-               return 1;       /* valid */
-       else
-               return 0;       /* invalid */
-}
-
-static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
-{
-       static char name[50];
-
-       if (cache_result) {
-               sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
-                       hw_cache_op[cache_op][0],
-                       hw_cache_result[cache_result][0]);
-       } else {
-               sprintf(name, "%s-%s", hw_cache[cache_type][0],
-                       hw_cache_op[cache_op][1]);
-       }
-
-       return name;
-}
-
 const char *event_type(int type)
 {
        switch (type) {
@@ -282,76 +239,6 @@ const char *event_type(int type)
        return "unknown";
 }
 
-const char *event_name(struct perf_evsel *evsel)
-{
-       u64 config = evsel->attr.config;
-       int type = evsel->attr.type;
-
-       if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
-               /*
-                * XXX minimal fix, see comment on perf_evsen__name, this static buffer
-                * will go away together with event_name in the next devel cycle.
-                */
-               static char bf[128];
-               perf_evsel__name(evsel, bf, sizeof(bf));
-               return bf;
-       }
-
-       if (evsel->name)
-               return evsel->name;
-
-       return __event_name(type, config);
-}
-
-const char *__event_name(int type, u64 config)
-{
-       static char buf[32];
-
-       if (type == PERF_TYPE_RAW) {
-               sprintf(buf, "raw 0x%" PRIx64, config);
-               return buf;
-       }
-
-       switch (type) {
-       case PERF_TYPE_HARDWARE:
-               return __perf_evsel__hw_name(config);
-
-       case PERF_TYPE_HW_CACHE: {
-               u8 cache_type, cache_op, cache_result;
-
-               cache_type   = (config >>  0) & 0xff;
-               if (cache_type > PERF_COUNT_HW_CACHE_MAX)
-                       return "unknown-ext-hardware-cache-type";
-
-               cache_op     = (config >>  8) & 0xff;
-               if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
-                       return "unknown-ext-hardware-cache-op";
-
-               cache_result = (config >> 16) & 0xff;
-               if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
-                       return "unknown-ext-hardware-cache-result";
-
-               if (!is_cache_op_valid(cache_type, cache_op))
-                       return "invalid-cache";
-
-               return event_cache_name(cache_type, cache_op, cache_result);
-       }
-
-       case PERF_TYPE_SOFTWARE:
-               if (config < PERF_COUNT_SW_MAX && sw_event_names[config])
-                       return sw_event_names[config];
-               return "unknown-software";
-
-       case PERF_TYPE_TRACEPOINT:
-               return tracepoint_id_to_name(config);
-
-       default:
-               break;
-       }
-
-       return "unknown";
-}
-
 static int add_event(struct list_head **_list, int *idx,
                     struct perf_event_attr *attr, char *name)
 {
@@ -373,19 +260,20 @@ static int add_event(struct list_head **_list, int *idx,
                return -ENOMEM;
        }
 
-       evsel->name = strdup(name);
+       if (name)
+               evsel->name = strdup(name);
        list_add_tail(&evsel->node, list);
        *_list = list;
        return 0;
 }
 
-static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
+static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
 {
        int i, j;
        int n, longest = -1;
 
        for (i = 0; i < size; i++) {
-               for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
+               for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
                        n = strlen(names[i][j]);
                        if (n > longest && !strncasecmp(str, names[i][j], n))
                                longest = n;
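
The parse_aliases() change above only switches it to the tables that now live with the evsel code (perf_evsel__hw_cache and friends), but the matching rule it implements is worth spelling out: an alias wins when it is the longest case-insensitive prefix of the user string. A small standalone sketch of that rule, using an illustrative two-row table rather than perf's real one:

#include <string.h>
#include <strings.h>

static const char *ops[][3] = {
        { "load",  "loads",  "read"  },
        { "store", "stores", "write" },
};

/* Return the row whose longest alias is a case-insensitive prefix of str. */
static int match_alias(const char *str)
{
        int i, j, best = -1, longest = -1;

        for (i = 0; i < 2; i++)
                for (j = 0; j < 3 && ops[i][j]; j++) {
                        int n = strlen(ops[i][j]);

                        if (n > longest && !strncasecmp(str, ops[i][j], n)) {
                                longest = n;
                                best = i;
                        }
                }
        return best;    /* match_alias("stores") == 1 */
}
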
@@ -410,7 +298,7 @@ int parse_events_add_cache(struct list_head **list, int *idx,
         * No fallback - if we cannot get a clear cache type
         * then bail out:
         */
-       cache_type = parse_aliases(type, hw_cache,
+       cache_type = parse_aliases(type, perf_evsel__hw_cache,
                                   PERF_COUNT_HW_CACHE_MAX);
        if (cache_type == -1)
                return -EINVAL;
@@ -423,18 +311,18 @@ int parse_events_add_cache(struct list_head **list, int *idx,
                snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str);
 
                if (cache_op == -1) {
-                       cache_op = parse_aliases(str, hw_cache_op,
+                       cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
                                                 PERF_COUNT_HW_CACHE_OP_MAX);
                        if (cache_op >= 0) {
-                               if (!is_cache_op_valid(cache_type, cache_op))
+                               if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
                                        return -EINVAL;
                                continue;
                        }
                }
 
                if (cache_result == -1) {
-                       cache_result = parse_aliases(str, hw_cache_result,
-                                               PERF_COUNT_HW_CACHE_RESULT_MAX);
+                       cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
+                                                    PERF_COUNT_HW_CACHE_RESULT_MAX);
                        if (cache_result >= 0)
                                continue;
                }
@@ -549,21 +437,31 @@ parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
                if (!type || !type[i])
                        break;
 
+#define CHECK_SET_TYPE(bit)            \
+do {                                   \
+       if (attr->bp_type & bit)        \
+               return -EINVAL;         \
+       else                            \
+               attr->bp_type |= bit;   \
+} while (0)
+
                switch (type[i]) {
                case 'r':
-                       attr->bp_type |= HW_BREAKPOINT_R;
+                       CHECK_SET_TYPE(HW_BREAKPOINT_R);
                        break;
                case 'w':
-                       attr->bp_type |= HW_BREAKPOINT_W;
+                       CHECK_SET_TYPE(HW_BREAKPOINT_W);
                        break;
                case 'x':
-                       attr->bp_type |= HW_BREAKPOINT_X;
+                       CHECK_SET_TYPE(HW_BREAKPOINT_X);
                        break;
                default:
                        return -EINVAL;
                }
        }
 
+#undef CHECK_SET_TYPE
+
        if (!attr->bp_type) /* Default */
                attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
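
CHECK_SET_TYPE above, together with the lexer change further down that widens modifier_bp to [rwx]{1,3}, turns a repeated breakpoint access modifier into an error: mem:0x1000:rw still works, while something like mem:0x1000:rr now fails with -EINVAL instead of silently collapsing into a single bit. A minimal sketch of the same reject-on-duplicate idea, with illustrative names unrelated to perf's internals:

#include <errno.h>

enum { BP_R = 1, BP_W = 2, BP_X = 4 };  /* stand-ins for HW_BREAKPOINT_* */

/* Set 'bit' in *type, refusing a modifier that was already given. */
static int set_bp_bit_once(unsigned int *type, unsigned int bit)
{
        if (*type & bit)
                return -EINVAL;         /* e.g. "rr", "ww", "xx" */
        *type |= bit;
        return 0;
}
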
 
@@ -574,7 +472,6 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
                                void *ptr, char *type)
 {
        struct perf_event_attr attr;
-       char name[MAX_NAME_LEN];
 
        memset(&attr, 0, sizeof(attr));
        attr.bp_addr = (unsigned long) ptr;
@@ -593,8 +490,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
 
        attr.type = PERF_TYPE_BREAKPOINT;
 
-       snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw");
-       return add_event(list, idx, &attr, name);
+       return add_event(list, idx, &attr, NULL);
 }
 
 static int config_term(struct perf_event_attr *attr,
@@ -666,8 +562,7 @@ int parse_events_add_numeric(struct list_head **list, int *idx,
            config_attr(&attr, head_config, 1))
                return -EINVAL;
 
-       return add_event(list, idx, &attr,
-                        (char *) __event_name(type, config));
+       return add_event(list, idx, &attr, NULL);
 }
 
 static int parse_events__is_name_term(struct parse_events__term *term)
@@ -675,8 +570,7 @@ static int parse_events__is_name_term(struct parse_events__term *term)
        return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
 }
 
-static char *pmu_event_name(struct perf_event_attr *attr,
-                           struct list_head *head_terms)
+static char *pmu_event_name(struct list_head *head_terms)
 {
        struct parse_events__term *term;
 
@@ -684,7 +578,7 @@ static char *pmu_event_name(struct perf_event_attr *attr,
                if (parse_events__is_name_term(term))
                        return term->val.str;
 
-       return (char *) __event_name(PERF_TYPE_RAW, attr->config);
+       return NULL;
 }
 
 int parse_events_add_pmu(struct list_head **list, int *idx,
@@ -699,6 +593,9 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
 
        memset(&attr, 0, sizeof(attr));
 
+       if (perf_pmu__check_alias(pmu, head_config))
+               return -EINVAL;
+
        /*
         * Configure hardcoded terms first, no need to check
         * return value when called with fail == 0 ;)
@@ -709,7 +606,7 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
                return -EINVAL;
 
        return add_event(list, idx, &attr,
-                        pmu_event_name(&attr, head_config));
+                        pmu_event_name(head_config));
 }
 
 void parse_events_update_lists(struct list_head *list_event,
@@ -787,27 +684,62 @@ int parse_events_modifier(struct list_head *list, char *str)
        return 0;
 }
 
-int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
+static int parse_events__scanner(const char *str, void *data, int start_token)
 {
-       LIST_HEAD(list);
-       LIST_HEAD(list_tmp);
        YY_BUFFER_STATE buffer;
-       int ret, idx = evlist->nr_entries;
+       void *scanner;
+       int ret;
+
+       ret = parse_events_lex_init_extra(start_token, &scanner);
+       if (ret)
+               return ret;
 
-       buffer = parse_events__scan_string(str);
+       buffer = parse_events__scan_string(str, scanner);
 
 #ifdef PARSER_DEBUG
        parse_events_debug = 1;
 #endif
-       ret = parse_events_parse(&list, &idx);
+       ret = parse_events_parse(data, scanner);
 
-       parse_events__flush_buffer(buffer);
-       parse_events__delete_buffer(buffer);
-       parse_events_lex_destroy();
+       parse_events__flush_buffer(buffer, scanner);
+       parse_events__delete_buffer(buffer, scanner);
+       parse_events_lex_destroy(scanner);
+       return ret;
+}
+
+/*
+ * Parse an event config string and return a list of event terms.
+ */
+int parse_events_terms(struct list_head *terms, const char *str)
+{
+       struct parse_events_data__terms data = {
+               .terms = NULL,
+       };
+       int ret;
 
+       ret = parse_events__scanner(str, &data, PE_START_TERMS);
        if (!ret) {
-               int entries = idx - evlist->nr_entries;
-               perf_evlist__splice_list_tail(evlist, &list, entries);
+               list_splice(data.terms, terms);
+               free(data.terms);
+               return 0;
+       }
+
+       parse_events__free_terms(data.terms);
+       return ret;
+}
+
+int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
+{
+       struct parse_events_data__events data = {
+               .list = LIST_HEAD_INIT(data.list),
+               .idx  = evlist->nr_entries,
+       };
+       int ret;
+
+       ret = parse_events__scanner(str, &data, PE_START_EVENTS);
+       if (!ret) {
+               int entries = data.idx - evlist->nr_entries;
+               perf_evlist__splice_list_tail(evlist, &data.list, entries);
                return 0;
        }
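
With the scanner made reentrant there are now two entry points over the same lexer and grammar: parse_events() fills an evlist from full event syntax, while the new parse_events_terms() only collects a term list (the PMU alias code later in this series is its first user). A hedged usage sketch; the strings are made up and error handling is trimmed:

/*
 * Sketch only: assumes util/parse-events.h and the kernel-style list
 * helpers are included; the event and term strings are illustrative.
 */
static int parse_example(struct perf_evlist *evlist)
{
        LIST_HEAD(terms);
        int err;

        /* A bare term list, the form a sysfs alias file provides. */
        err = parse_events_terms(&terms, "config=0x10,period=1000");
        if (err)
                return err;
        parse_events__free_terms(&terms);

        /* Full event syntax goes through the same scanner and grammar. */
        return parse_events(evlist, "cycles:u,instructions", 0);
}
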
 
@@ -946,16 +878,13 @@ int is_valid_tracepoint(const char *event_string)
        return 0;
 }
 
-void print_events_type(u8 type)
+static void __print_events_type(u8 type, struct event_symbol *syms,
+                               unsigned max)
 {
-       struct event_symbol *syms = event_symbols;
-       unsigned int i;
        char name[64];
+       unsigned i;
 
-       for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
-               if (type != syms->type)
-                       continue;
-
+       for (i = 0; i < max ; i++, syms++) {
                if (strlen(syms->alias))
                        snprintf(name, sizeof(name),  "%s OR %s",
                                 syms->symbol, syms->alias);
@@ -967,19 +896,28 @@ void print_events_type(u8 type)
        }
 }
 
+void print_events_type(u8 type)
+{
+       if (type == PERF_TYPE_SOFTWARE)
+               __print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX);
+       else
+               __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX);
+}
+
 int print_hwcache_events(const char *event_glob)
 {
        unsigned int type, op, i, printed = 0;
+       char name[64];
 
        for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
                for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
                        /* skip invalid cache type */
-                       if (!is_cache_op_valid(type, op))
+                       if (!perf_evsel__is_cache_op_valid(type, op))
                                continue;
 
                        for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
-                               char *name = event_cache_name(type, op, i);
-
+                               __perf_evsel__hw_cache_type_op_res_name(type, op, i,
+                                                                       name, sizeof(name));
                                if (event_glob != NULL && !strglobmatch(name, event_glob))
                                        continue;
 
@@ -993,26 +931,13 @@ int print_hwcache_events(const char *event_glob)
        return printed;
 }
 
-/*
- * Print the help text for the event symbols:
- */
-void print_events(const char *event_glob)
+static void print_symbol_events(const char *event_glob, unsigned type,
+                               struct event_symbol *syms, unsigned max)
 {
-       unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
-       struct event_symbol *syms = event_symbols;
+       unsigned i, printed = 0;
        char name[MAX_NAME_LEN];
 
-       printf("\n");
-       printf("List of pre-defined events (to be used in -e):\n");
-
-       for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
-               type = syms->type;
-
-               if (type != prev_type && printed) {
-                       printf("\n");
-                       printed = 0;
-                       ntypes_printed++;
-               }
+       for (i = 0; i < max; i++, syms++) {
 
                if (event_glob != NULL && 
                    !(strglobmatch(syms->symbol, event_glob) ||
@@ -1023,17 +948,31 @@ void print_events(const char *event_glob)
                        snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
                else
                        strncpy(name, syms->symbol, MAX_NAME_LEN);
-               printf("  %-50s [%s]\n", name,
-                       event_type_descriptors[type]);
 
-               prev_type = type;
-               ++printed;
+               printf("  %-50s [%s]\n", name, event_type_descriptors[type]);
+
+               printed++;
        }
 
-       if (ntypes_printed) {
-               printed = 0;
+       if (printed)
                printf("\n");
-       }
+}
+
+/*
+ * Print the help text for the event symbols:
+ */
+void print_events(const char *event_glob)
+{
+
+       printf("\n");
+       printf("List of pre-defined events (to be used in -e):\n");
+
+       print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
+                           event_symbols_hw, PERF_COUNT_HW_MAX);
+
+       print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
+                           event_symbols_sw, PERF_COUNT_SW_MAX);
+
        print_hwcache_events(event_glob);
 
        if (event_glob != NULL)
@@ -1106,6 +1045,13 @@ int parse_events__term_str(struct parse_events__term **term,
                        config, str, 0);
 }
 
+int parse_events__term_clone(struct parse_events__term **new,
+                            struct parse_events__term *term)
+{
+       return new_term(new, term->type_val, term->type_term, term->config,
+                       term->val.str, term->val.num);
+}
+
 void parse_events__free_terms(struct list_head *terms)
 {
        struct parse_events__term *term, *h;
index 8cac57a..ee9c218 100644 (file)
@@ -26,13 +26,12 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
 extern bool have_tracepoints(struct list_head *evlist);
 
 const char *event_type(int type);
-const char *event_name(struct perf_evsel *event);
-extern const char *__event_name(int type, u64 config);
 
 extern int parse_events_option(const struct option *opt, const char *str,
                               int unset);
 extern int parse_events(struct perf_evlist *evlist, const char *str,
                        int unset);
+extern int parse_events_terms(struct list_head *terms, const char *str);
 extern int parse_filter(const struct option *opt, const char *str, int unset);
 
 #define EVENTS_HELP_MAX (128*1024)
@@ -63,11 +62,22 @@ struct parse_events__term {
        struct list_head list;
 };
 
+struct parse_events_data__events {
+       struct list_head list;
+       int idx;
+};
+
+struct parse_events_data__terms {
+       struct list_head *terms;
+};
+
 int parse_events__is_hardcoded_term(struct parse_events__term *term);
 int parse_events__term_num(struct parse_events__term **_term,
                           int type_term, char *config, long num);
 int parse_events__term_str(struct parse_events__term **_term,
                           int type_term, char *config, char *str);
+int parse_events__term_clone(struct parse_events__term **new,
+                            struct parse_events__term *term);
 void parse_events__free_terms(struct list_head *terms);
 int parse_events_modifier(struct list_head *list, char *str);
 int parse_events_add_tracepoint(struct list_head **list, int *idx,
@@ -83,8 +93,7 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
                         char *pmu , struct list_head *head_config);
 void parse_events_update_lists(struct list_head *list_event,
                               struct list_head *list_all);
-void parse_events_error(struct list_head *list_all,
-                       int *idx, char const *msg);
+void parse_events_error(void *data, void *scanner, char const *msg);
 int parse_events__test(void);
 
 void print_events(const char *event_glob);
index 618a8e7..384ca74 100644 (file)
@@ -1,4 +1,6 @@
 
+%option reentrant
+%option bison-bridge
 %option prefix="parse_events_"
 %option stack
 
@@ -8,7 +10,10 @@
 #include "parse-events-bison.h"
 #include "parse-events.h"
 
-static int __value(char *str, int base, int token)
+char *parse_events_get_text(yyscan_t yyscanner);
+YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
+
+static int __value(YYSTYPE *yylval, char *str, int base, int token)
 {
        long num;
 
@@ -17,35 +22,48 @@ static int __value(char *str, int base, int token)
        if (errno)
                return PE_ERROR;
 
-       parse_events_lval.num = num;
+       yylval->num = num;
        return token;
 }
 
-static int value(int base)
+static int value(yyscan_t scanner, int base)
 {
-       return __value(parse_events_text, base, PE_VALUE);
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+       char *text = parse_events_get_text(scanner);
+
+       return __value(yylval, text, base, PE_VALUE);
 }
 
-static int raw(void)
+static int raw(yyscan_t scanner)
 {
-       return __value(parse_events_text + 1, 16, PE_RAW);
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+       char *text = parse_events_get_text(scanner);
+
+       return __value(yylval, text + 1, 16, PE_RAW);
 }
 
-static int str(int token)
+static int str(yyscan_t scanner, int token)
 {
-       parse_events_lval.str = strdup(parse_events_text);
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+       char *text = parse_events_get_text(scanner);
+
+       yylval->str = strdup(text);
        return token;
 }
 
-static int sym(int type, int config)
+static int sym(yyscan_t scanner, int type, int config)
 {
-       parse_events_lval.num = (type << 16) + config;
-       return PE_VALUE_SYM;
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+       yylval->num = (type << 16) + config;
+       return type == PERF_TYPE_HARDWARE ? PE_VALUE_SYM_HW : PE_VALUE_SYM_SW;
 }
 
-static int term(int type)
+static int term(yyscan_t scanner, int type)
 {
-       parse_events_lval.num = type;
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+       yylval->num = type;
        return PE_TERM;
 }
 
@@ -58,28 +76,41 @@ num_hex             0x[a-fA-F0-9]+
 num_raw_hex    [a-fA-F0-9]+
 name           [a-zA-Z_*?][a-zA-Z0-9_*?]*
 modifier_event [ukhpGH]{1,8}
-modifier_bp    [rwx]
+modifier_bp    [rwx]{1,3}
 
 %%
-cpu-cycles|cycles                              { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
-stalled-cycles-frontend|idle-cycles-frontend   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
-stalled-cycles-backend|idle-cycles-backend     { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
-instructions                                   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
-cache-references                               { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
-cache-misses                                   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
-branch-instructions|branches                   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
-branch-misses                                  { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
-bus-cycles                                     { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
-ref-cycles                                     { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
-cpu-clock                                      { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
-task-clock                                     { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
-page-faults|faults                             { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
-minor-faults                                   { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
-major-faults                                   { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
-context-switches|cs                            { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
-cpu-migrations|migrations                      { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
-alignment-faults                               { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
-emulation-faults                               { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+
+%{
+       {
+               int start_token;
+
+               start_token = (int) parse_events_get_extra(yyscanner);
+               if (start_token) {
+                       parse_events_set_extra(NULL, yyscanner);
+                       return start_token;
+               }
+         }
+%}
+
+cpu-cycles|cycles                              { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
+stalled-cycles-frontend|idle-cycles-frontend   { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
+stalled-cycles-backend|idle-cycles-backend     { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
+instructions                                   { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
+cache-references                               { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
+cache-misses                                   { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
+branch-instructions|branches                   { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
+branch-misses                                  { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
+bus-cycles                                     { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
+ref-cycles                                     { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu-clock                                      { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
+task-clock                                     { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
+page-faults|faults                             { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
+minor-faults                                   { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
+major-faults                                   { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
+context-switches|cs                            { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
+cpu-migrations|migrations                      { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
+alignment-faults                               { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
+emulation-faults                               { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
 
 L1-dcache|l1-d|l1d|L1-data             |
 L1-icache|l1-i|l1i|L1-instruction      |
@@ -87,14 +118,14 @@ LLC|L2                                     |
 dTLB|d-tlb|Data-TLB                    |
 iTLB|i-tlb|Instruction-TLB             |
 branch|branches|bpu|btb|bpc            |
-node                                   { return str(PE_NAME_CACHE_TYPE); }
+node                                   { return str(yyscanner, PE_NAME_CACHE_TYPE); }
 
 load|loads|read                                |
 store|stores|write                     |
 prefetch|prefetches                    |
 speculative-read|speculative-load      |
 refs|Reference|ops|access              |
-misses|miss                            { return str(PE_NAME_CACHE_OP_RESULT); }
+misses|miss                            { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); }
 
        /*
         * These are event config hardcoded term names to be specified
@@ -102,38 +133,39 @@ misses|miss                               { return str(PE_NAME_CACHE_OP_RESULT); }
         * so we can put them here directly. In case we have a conflict
         * in future, this needs to go into '//' condition block.
         */
-config                 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG); }
-config1                        { return term(PARSE_EVENTS__TERM_TYPE_CONFIG1); }
-config2                        { return term(PARSE_EVENTS__TERM_TYPE_CONFIG2); }
-name                   { return term(PARSE_EVENTS__TERM_TYPE_NAME); }
-period                 { return term(PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
-branch_type            { return term(PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+config                 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+name                   { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
+period                 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type            { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
 
 mem:                   { BEGIN(mem); return PE_PREFIX_MEM; }
-r{num_raw_hex}         { return raw(); }
-{num_dec}              { return value(10); }
-{num_hex}              { return value(16); }
+r{num_raw_hex}         { return raw(yyscanner); }
+{num_dec}              { return value(yyscanner, 10); }
+{num_hex}              { return value(yyscanner, 16); }
 
-{modifier_event}       { return str(PE_MODIFIER_EVENT); }
-{name}                 { return str(PE_NAME); }
+{modifier_event}       { return str(yyscanner, PE_MODIFIER_EVENT); }
+{name}                 { return str(yyscanner, PE_NAME); }
 "/"                    { return '/'; }
 -                      { return '-'; }
 ,                      { return ','; }
 :                      { return ':'; }
 =                      { return '='; }
+\n                     { }
 
 <mem>{
-{modifier_bp}          { return str(PE_MODIFIER_BP); }
+{modifier_bp}          { return str(yyscanner, PE_MODIFIER_BP); }
 :                      { return ':'; }
-{num_dec}              { return value(10); }
-{num_hex}              { return value(16); }
+{num_dec}              { return value(yyscanner, 10); }
+{num_hex}              { return value(yyscanner, 16); }
        /*
         * We need to separate the 'mem:' scanner part, in order to get specific
         * modifier bits parsed out. Otherwise we would need to handle PE_NAME
         * and we'd need to parse it manually. During the escape from <mem>
         * state we need to put the escaping char back, so we don't miss it.
         */
-.                      { unput(*parse_events_text); BEGIN(INITIAL); }
+.                      { unput(*yytext); BEGIN(INITIAL); }
        /*
         * We destroy the scanner after reaching EOF,
         * but anyway just to be sure get back to INIT state.
@@ -143,7 +175,7 @@ r{num_raw_hex}              { return raw(); }
 
 %%
 
-int parse_events_wrap(void)
+int parse_events_wrap(void *scanner __used)
 {
        return 1;
 }
index 362cc59..2bc5fbf 100644 (file)
@@ -1,7 +1,8 @@
-
+%pure-parser
 %name-prefix "parse_events_"
-%parse-param {struct list_head *list_all}
-%parse-param {int *idx}
+%parse-param {void *_data}
+%parse-param {void *scanner}
+%lex-param {void* scanner}
 
 %{
 
@@ -12,8 +13,9 @@
 #include "types.h"
 #include "util.h"
 #include "parse-events.h"
+#include "parse-events-bison.h"
 
-extern int parse_events_lex (void);
+extern int parse_events_lex (YYSTYPE* lvalp, void* scanner);
 
 #define ABORT_ON(val) \
 do { \
@@ -23,14 +25,16 @@ do { \
 
 %}
 
-%token PE_VALUE PE_VALUE_SYM PE_RAW PE_TERM
+%token PE_START_EVENTS PE_START_TERMS
+%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
 %token PE_NAME
 %token PE_MODIFIER_EVENT PE_MODIFIER_BP
 %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
 %token PE_PREFIX_MEM PE_PREFIX_RAW
 %token PE_ERROR
 %type <num> PE_VALUE
-%type <num> PE_VALUE_SYM
+%type <num> PE_VALUE_SYM_HW
+%type <num> PE_VALUE_SYM_SW
 %type <num> PE_RAW
 %type <num> PE_TERM
 %type <str> PE_NAME
@@ -38,6 +42,7 @@ do { \
 %type <str> PE_NAME_CACHE_OP_RESULT
 %type <str> PE_MODIFIER_EVENT
 %type <str> PE_MODIFIER_BP
+%type <num> value_sym
 %type <head> event_config
 %type <term> event_term
 %type <head> event_pmu
@@ -58,24 +63,33 @@ do { \
 }
 %%
 
+start:
+PE_START_EVENTS events
+|
+PE_START_TERMS  terms
+
 events:
 events ',' event | event
 
 event:
 event_def PE_MODIFIER_EVENT
 {
+       struct parse_events_data__events *data = _data;
+
        /*
         * Apply modifier on all events added by single event definition
         * (there could be more events added for multiple tracepoint
         * definitions via '*?'.
         */
        ABORT_ON(parse_events_modifier($1, $2));
-       parse_events_update_lists($1, list_all);
+       parse_events_update_lists($1, &data->list);
 }
 |
 event_def
 {
-       parse_events_update_lists($1, list_all);
+       struct parse_events_data__events *data = _data;
+
+       parse_events_update_lists($1, &data->list);
 }
 
 event_def: event_pmu |
@@ -89,104 +103,131 @@ event_def: event_pmu |
 event_pmu:
 PE_NAME '/' event_config '/'
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_pmu(&list, idx, $1, $3));
+       ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3));
        parse_events__free_terms($3);
        $$ = list;
 }
 
+value_sym:
+PE_VALUE_SYM_HW
+|
+PE_VALUE_SYM_SW
+
 event_legacy_symbol:
-PE_VALUE_SYM '/' event_config '/'
+value_sym '/' event_config '/'
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
        int type = $1 >> 16;
        int config = $1 & 255;
 
-       ABORT_ON(parse_events_add_numeric(&list, idx, type, config, $3));
+       ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+                                         type, config, $3));
        parse_events__free_terms($3);
        $$ = list;
 }
 |
-PE_VALUE_SYM sep_slash_dc
+value_sym sep_slash_dc
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
        int type = $1 >> 16;
        int config = $1 & 255;
 
-       ABORT_ON(parse_events_add_numeric(&list, idx, type, config, NULL));
+       ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+                                         type, config, NULL));
        $$ = list;
 }
 
 event_legacy_cache:
 PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_cache(&list, idx, $1, $3, $5));
+       ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5));
        $$ = list;
 }
 |
 PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_cache(&list, idx, $1, $3, NULL));
+       ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL));
        $$ = list;
 }
 |
 PE_NAME_CACHE_TYPE
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_cache(&list, idx, $1, NULL, NULL));
+       ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL));
        $$ = list;
 }
 
 event_legacy_mem:
 PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_breakpoint(&list, idx, (void *) $2, $4));
+       ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+                                            (void *) $2, $4));
        $$ = list;
 }
 |
 PE_PREFIX_MEM PE_VALUE sep_dc
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_breakpoint(&list, idx, (void *) $2, NULL));
+       ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+                                            (void *) $2, NULL));
        $$ = list;
 }
 
 event_legacy_tracepoint:
 PE_NAME ':' PE_NAME
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_tracepoint(&list, idx, $1, $3));
+       ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3));
        $$ = list;
 }
 
 event_legacy_numeric:
 PE_VALUE ':' PE_VALUE
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_numeric(&list, idx, $1, $3, NULL));
+       ABORT_ON(parse_events_add_numeric(&list, &data->idx, $1, $3, NULL));
        $$ = list;
 }
 
 event_legacy_raw:
 PE_RAW
 {
+       struct parse_events_data__events *data = _data;
        struct list_head *list = NULL;
 
-       ABORT_ON(parse_events_add_numeric(&list, idx, PERF_TYPE_RAW, $1, NULL));
+       ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+                                         PERF_TYPE_RAW, $1, NULL));
        $$ = list;
 }
 
+terms: event_config
+{
+       struct parse_events_data__terms *data = _data;
+       data->terms = $1;
+}
+
 event_config:
 event_config ',' event_term
 {
@@ -267,8 +308,7 @@ sep_slash_dc: '/' | ':' |
 
 %%
 
-void parse_events_error(struct list_head *list_all __used,
-                       int *idx __used,
+void parse_events_error(void *data __used, void *scanner __used,
                        char const *msg __used)
 {
 }
index a119a53..67715a4 100644 (file)
@@ -72,7 +72,7 @@ static int pmu_format(char *name, struct list_head *format)
                 "%s/bus/event_source/devices/%s/format", sysfs, name);
 
        if (stat(path, &st) < 0)
-               return -1;
+               return 0;       /* no error if format does not exist */
 
        if (pmu_format_parse(path, format))
                return -1;
@@ -80,6 +80,114 @@ static int pmu_format(char *name, struct list_head *format)
        return 0;
 }
 
+static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file)
+{
+       struct perf_pmu__alias *alias;
+       char buf[256];
+       int ret;
+
+       ret = fread(buf, 1, sizeof(buf), file);
+       if (ret == 0)
+               return -EINVAL;
+       buf[ret] = 0;
+
+       alias = malloc(sizeof(*alias));
+       if (!alias)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&alias->terms);
+       ret = parse_events_terms(&alias->terms, buf);
+       if (ret) {
+               free(alias);
+               return ret;
+       }
+
+       alias->name = strdup(name);
+       list_add_tail(&alias->list, list);
+       return 0;
+}
+
+/*
+ * Process all the sysfs attributes located under the directory
+ * specified in the 'dir' parameter.
+ */
+static int pmu_aliases_parse(char *dir, struct list_head *head)
+{
+       struct dirent *evt_ent;
+       DIR *event_dir;
+       int ret = 0;
+
+       event_dir = opendir(dir);
+       if (!event_dir)
+               return -EINVAL;
+
+       while (!ret && (evt_ent = readdir(event_dir))) {
+               char path[PATH_MAX];
+               char *name = evt_ent->d_name;
+               FILE *file;
+
+               if (!strcmp(name, ".") || !strcmp(name, ".."))
+                       continue;
+
+               snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+               ret = -EINVAL;
+               file = fopen(path, "r");
+               if (!file)
+                       break;
+               ret = perf_pmu__new_alias(head, name, file);
+               fclose(file);
+       }
+
+       closedir(event_dir);
+       return ret;
+}
+
+/*
+ * Read the PMU event alias definitions, which should be located at:
+ * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
+ */
+static int pmu_aliases(char *name, struct list_head *head)
+{
+       struct stat st;
+       char path[PATH_MAX];
+       const char *sysfs;
+
+       sysfs = sysfs_find_mountpoint();
+       if (!sysfs)
+               return -1;
+
+       snprintf(path, PATH_MAX,
+                "%s/bus/event_source/devices/%s/events", sysfs, name);
+
+       if (stat(path, &st) < 0)
+               return -1;
+
+       if (pmu_aliases_parse(path, head))
+               return -1;
+
+       return 0;
+}
+
+static int pmu_alias_terms(struct perf_pmu__alias *alias,
+                          struct list_head *terms)
+{
+       struct parse_events__term *term, *clone;
+       LIST_HEAD(list);
+       int ret;
+
+       list_for_each_entry(term, &alias->terms, list) {
+               ret = parse_events__term_clone(&clone, term);
+               if (ret) {
+                       parse_events__free_terms(&list);
+                       return ret;
+               }
+               list_add_tail(&clone->list, &list);
+       }
+       list_splice(&list, terms);
+       return 0;
+}
+
 /*
  * Reading/parsing the default pmu type value, which should be
  * located at:
@@ -118,6 +226,7 @@ static struct perf_pmu *pmu_lookup(char *name)
 {
        struct perf_pmu *pmu;
        LIST_HEAD(format);
+       LIST_HEAD(aliases);
        __u32 type;
 
        /*
@@ -135,10 +244,15 @@ static struct perf_pmu *pmu_lookup(char *name)
        if (!pmu)
                return NULL;
 
+       pmu_aliases(name, &aliases);
+
        INIT_LIST_HEAD(&pmu->format);
+       INIT_LIST_HEAD(&pmu->aliases);
        list_splice(&format, &pmu->format);
+       list_splice(&aliases, &pmu->aliases);
        pmu->name = strdup(name);
        pmu->type = type;
+       list_add_tail(&pmu->list, &pmus);
        return pmu;
 }
 
@@ -279,6 +393,59 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
        return pmu_config(&pmu->format, attr, head_terms);
 }
 
+static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu,
+                                             struct parse_events__term *term)
+{
+       struct perf_pmu__alias *alias;
+       char *name;
+
+       if (parse_events__is_hardcoded_term(term))
+               return NULL;
+
+       if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+               if (term->val.num != 1)
+                       return NULL;
+               if (pmu_find_format(&pmu->format, term->config))
+                       return NULL;
+               name = term->config;
+       } else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
+               if (strcasecmp(term->config, "event"))
+                       return NULL;
+               name = term->val.str;
+       } else {
+               return NULL;
+       }
+
+       list_for_each_entry(alias, &pmu->aliases, list) {
+               if (!strcasecmp(alias->name, name))
+                       return alias;
+       }
+       return NULL;
+}
+
+/*
+ * Find alias in the terms list and replace it with the terms
+ * defined for the alias
+ */
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
+{
+       struct parse_events__term *term, *h;
+       struct perf_pmu__alias *alias;
+       int ret;
+
+       list_for_each_entry_safe(term, h, head_terms, list) {
+               alias = pmu_find_alias(pmu, term);
+               if (!alias)
+                       continue;
+               ret = pmu_alias_terms(alias, &term->list);
+               if (ret)
+                       return ret;
+               list_del(&term->list);
+               free(term);
+       }
+       return 0;
+}
+
 int perf_pmu__new_format(struct list_head *list, char *name,
                         int config, unsigned long *bits)
 {
index 68c0db9..535f2c5 100644 (file)
@@ -19,17 +19,26 @@ struct perf_pmu__format {
        struct list_head list;
 };
 
+struct perf_pmu__alias {
+       char *name;
+       struct list_head terms;
+       struct list_head list;
+};
+
 struct perf_pmu {
        char *name;
        __u32 type;
        struct list_head format;
+       struct list_head aliases;
        struct list_head list;
 };
 
 struct perf_pmu *perf_pmu__find(char *name);
 int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
                     struct list_head *head_terms);
-
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms);
+struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
+                               struct list_head *head_terms);
 int perf_pmu_wrap(void);
 void perf_pmu_error(struct list_head *list, char *name, char const *msg);
 
index 4c1b3d7..02dfa19 100644 (file)
@@ -209,6 +209,10 @@ static void define_event_symbols(struct event_format *event,
                define_symbolic_values(args->symbol.symbols, ev_name,
                                       cur_field_name);
                break;
+       case PRINT_HEX:
+               define_event_symbols(event, ev_name, args->hex.field);
+               define_event_symbols(event, ev_name, args->hex.size);
+               break;
        case PRINT_BSTRING:
        case PRINT_DYNAMIC_ARRAY:
        case PRINT_STRING:
@@ -233,7 +237,8 @@ static void define_event_symbols(struct event_format *event,
                define_event_symbols(event, ev_name, args->next);
 }
 
-static inline struct event_format *find_cache_event(int type)
+static inline
+struct event_format *find_cache_event(struct pevent *pevent, int type)
 {
        static char ev_name[256];
        struct event_format *event;
@@ -241,7 +246,7 @@ static inline struct event_format *find_cache_event(int type)
        if (events[type])
                return events[type];
 
-       events[type] = event = trace_find_event(type);
+       events[type] = event = pevent_find_event(pevent, type);
        if (!event)
                return NULL;
 
@@ -252,7 +257,8 @@ static inline struct event_format *find_cache_event(int type)
        return event;
 }
 
-static void perl_process_tracepoint(union perf_event *pevent __unused,
+static void perl_process_tracepoint(union perf_event *perf_event __unused,
+                                   struct pevent *pevent,
                                    struct perf_sample *sample,
                                    struct perf_evsel *evsel,
                                    struct machine *machine __unused,
@@ -275,13 +281,13 @@ static void perl_process_tracepoint(union perf_event *pevent __unused,
        if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
                return;
 
-       type = trace_parse_common_type(data);
+       type = trace_parse_common_type(pevent, data);
 
-       event = find_cache_event(type);
+       event = find_cache_event(pevent, type);
        if (!event)
                die("ug! no event found for type %d", type);
 
-       pid = trace_parse_common_pid(data);
+       pid = trace_parse_common_pid(pevent, data);
 
        sprintf(handler, "%s::%s", event->system, event->name);
 
@@ -314,7 +320,8 @@ static void perl_process_tracepoint(union perf_event *pevent __unused,
                                offset = field->offset;
                        XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0)));
                } else { /* FIELD_IS_NUMERIC */
-                       val = read_size(data + field->offset, field->size);
+                       val = read_size(pevent, data + field->offset,
+                                       field->size);
                        if (field->flags & FIELD_IS_SIGNED) {
                                XPUSHs(sv_2mortal(newSViv(val)));
                        } else {
@@ -368,14 +375,15 @@ static void perl_process_event_generic(union perf_event *pevent __unused,
        LEAVE;
 }
 
-static void perl_process_event(union perf_event *pevent,
+static void perl_process_event(union perf_event *event,
+                              struct pevent *pevent,
                               struct perf_sample *sample,
                               struct perf_evsel *evsel,
                               struct machine *machine,
                               struct thread *thread)
 {
-       perl_process_tracepoint(pevent, sample, evsel, machine, thread);
-       perl_process_event_generic(pevent, sample, evsel, machine, thread);
+       perl_process_tracepoint(event, pevent, sample, evsel, machine, thread);
+       perl_process_event_generic(event, sample, evsel, machine, thread);
 }
 
 static void run_start_sub(void)
@@ -448,7 +456,7 @@ static int perl_stop_script(void)
        return 0;
 }
 
-static int perl_generate_script(const char *outfile)
+static int perl_generate_script(struct pevent *pevent, const char *outfile)
 {
        struct event_format *event = NULL;
        struct format_field *f;
@@ -495,7 +503,7 @@ static int perl_generate_script(const char *outfile)
        fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n");
        fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n");
 
-       while ((event = trace_find_next_event(event))) {
+       while ((event = trace_find_next_event(pevent, event))) {
                fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
                fprintf(ofp, "\tmy (");
 
index acb9795..ce4d1b0 100644 (file)
@@ -166,6 +166,10 @@ static void define_event_symbols(struct event_format *event,
                define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
                              cur_field_name);
                break;
+       case PRINT_HEX:
+               define_event_symbols(event, ev_name, args->hex.field);
+               define_event_symbols(event, ev_name, args->hex.size);
+               break;
        case PRINT_STRING:
                break;
        case PRINT_TYPE:
@@ -190,7 +194,8 @@ static void define_event_symbols(struct event_format *event,
                define_event_symbols(event, ev_name, args->next);
 }
 
-static inline struct event_format *find_cache_event(int type)
+static inline
+struct event_format *find_cache_event(struct pevent *pevent, int type)
 {
        static char ev_name[256];
        struct event_format *event;
@@ -198,7 +203,7 @@ static inline struct event_format *find_cache_event(int type)
        if (events[type])
                return events[type];
 
-       events[type] = event = trace_find_event(type);
+       events[type] = event = pevent_find_event(pevent, type);
        if (!event)
                return NULL;
 
@@ -209,7 +214,8 @@ static inline struct event_format *find_cache_event(int type)
        return event;
 }
 
-static void python_process_event(union perf_event *pevent __unused,
+static void python_process_event(union perf_event *perf_event __unused,
+                                struct pevent *pevent,
                                 struct perf_sample *sample,
                                 struct perf_evsel *evsel __unused,
                                 struct machine *machine __unused,
@@ -233,13 +239,13 @@ static void python_process_event(union perf_event *pevent __unused,
        if (!t)
                Py_FatalError("couldn't create Python tuple");
 
-       type = trace_parse_common_type(data);
+       type = trace_parse_common_type(pevent, data);
 
-       event = find_cache_event(type);
+       event = find_cache_event(pevent, type);
        if (!event)
                die("ug! no event found for type %d", type);
 
-       pid = trace_parse_common_pid(data);
+       pid = trace_parse_common_pid(pevent, data);
 
        sprintf(handler_name, "%s__%s", event->system, event->name);
 
@@ -284,7 +290,8 @@ static void python_process_event(union perf_event *pevent __unused,
                                offset = field->offset;
                        obj = PyString_FromString((char *)data + offset);
                } else { /* FIELD_IS_NUMERIC */
-                       val = read_size(data + field->offset, field->size);
+                       val = read_size(pevent, data + field->offset,
+                                       field->size);
                        if (field->flags & FIELD_IS_SIGNED) {
                                if ((long long)val >= LONG_MIN &&
                                    (long long)val <= LONG_MAX)
@@ -438,7 +445,7 @@ out:
        return err;
 }
 
-static int python_generate_script(const char *outfile)
+static int python_generate_script(struct pevent *pevent, const char *outfile)
 {
        struct event_format *event = NULL;
        struct format_field *f;
@@ -487,7 +494,7 @@ static int python_generate_script(const char *outfile)
        fprintf(ofp, "def trace_end():\n");
        fprintf(ofp, "\tprint \"in trace_end\"\n\n");
 
-       while ((event = trace_find_next_event(event))) {
+       while ((event = trace_find_next_event(pevent, event))) {
                fprintf(ofp, "def %s__%s(", event->system, event->name);
                fprintf(ofp, "event_name, ");
                fprintf(ofp, "context, ");
index 2600916..8e48559 100644 (file)
@@ -14,6 +14,7 @@
 #include "sort.h"
 #include "util.h"
 #include "cpumap.h"
+#include "event-parse.h"
 
 static int perf_session__open(struct perf_session *self, bool force)
 {
@@ -289,7 +290,6 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
 }
 
 int machine__resolve_callchain(struct machine *self,
-                              struct perf_evsel *evsel __used,
                               struct thread *thread,
                               struct ip_callchain *chain,
                               struct symbol **parent)
@@ -442,6 +442,16 @@ static void perf_tool__fill_defaults(struct perf_tool *tool)
                        tool->finished_round = process_finished_round_stub;
        }
 }
+void mem_bswap_32(void *src, int byte_size)
+{
+       u32 *m = src;
+       while (byte_size > 0) {
+               *m = bswap_32(*m);
+               byte_size -= sizeof(u32);
+               ++m;
+       }
+}
 
 void mem_bswap_64(void *src, int byte_size)
 {
@@ -916,7 +926,7 @@ static struct machine *
                else
                        pid = event->ip.pid;
 
-               return perf_session__find_machine(session, pid);
+               return perf_session__findnew_machine(session, pid);
        }
 
        return perf_session__find_host_machine(session);
@@ -1439,7 +1449,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
        ret += hists__fprintf_nr_events(&session->hists, fp);
 
        list_for_each_entry(pos, &session->evlist->entries, node) {
-               ret += fprintf(fp, "%s stats:\n", event_name(pos));
+               ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
                ret += hists__fprintf_nr_events(&pos->hists, fp);
        }
 
@@ -1480,8 +1490,8 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 }
 
 void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
-                         struct machine *machine, struct perf_evsel *evsel,
-                         int print_sym, int print_dso, int print_symoffset)
+                         struct machine *machine, int print_sym,
+                         int print_dso, int print_symoffset)
 {
        struct addr_location al;
        struct callchain_cursor_node *node;
@@ -1495,7 +1505,7 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
 
        if (symbol_conf.use_callchain && sample->callchain) {
 
-               if (machine__resolve_callchain(machine, evsel, al.thread,
+               if (machine__resolve_callchain(machine, al.thread,
                                                sample->callchain, NULL) != 0) {
                        if (verbose)
                                error("Failed to resolve callchain. Skipping\n");
@@ -1601,3 +1611,58 @@ void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
        perf_header__fprintf_info(session, fp, full);
        fprintf(fp, "# ========\n#\n");
 }
+
+
+int __perf_session__set_tracepoints_handlers(struct perf_session *session,
+                                            const struct perf_evsel_str_handler *assocs,
+                                            size_t nr_assocs)
+{
+       struct perf_evlist *evlist = session->evlist;
+       struct event_format *format;
+       struct perf_evsel *evsel;
+       char *tracepoint, *name;
+       size_t i;
+       int err;
+
+       for (i = 0; i < nr_assocs; i++) {
+               err = -ENOMEM;
+               tracepoint = strdup(assocs[i].name);
+               if (tracepoint == NULL)
+                       goto out;
+
+               err = -ENOENT;
+               name = strchr(tracepoint, ':');
+               if (name == NULL)
+                       goto out_free;
+
+               *name++ = '\0';
+               format = pevent_find_event_by_name(session->pevent,
+                                                  tracepoint, name);
+               if (format == NULL) {
+                       /*
+                        * Adding a handler for an event not in the session,
+                        * just ignore it.
+                        */
+                       goto next;
+               }
+
+               evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
+               if (evsel == NULL)
+                       goto next;
+
+               err = -EEXIST;
+               if (evsel->handler.func != NULL)
+                       goto out_free;
+               evsel->handler.func = assocs[i].handler;
+next:
+               free(tracepoint);
+       }
+
+       err = 0;
+out:
+       return err;
+
+out_free:
+       free(tracepoint);
+       goto out;
+}
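
__perf_session__set_tracepoints_handlers() (with the perf_session__set_tracepoints_handlers() wrapper added to the header below) lets a builtin attach a callback to each tracepoint it cares about by system:name; tracepoints missing from the session are simply skipped and a doubly-assigned handler yields -EEXIST. A usage sketch along those lines; the handler functions are hypothetical stand-ins, not part of this patch:

/*
 * Sketch: attach callbacks to tracepoints recorded in a session.
 */
static int process_sched_switch(struct perf_evsel *evsel,
                                struct perf_sample *sample);
static int process_sched_wakeup(struct perf_evsel *evsel,
                                struct perf_sample *sample);

static const struct perf_evsel_str_handler handlers[] = {
        { "sched:sched_switch", process_sched_switch },
        { "sched:sched_wakeup", process_sched_wakeup },
};

static int setup_handlers(struct perf_session *session)
{
        return perf_session__set_tracepoints_handlers(session, handlers);
}
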
index 7a5434c..7c435bd 100644 (file)
@@ -33,6 +33,7 @@ struct perf_session {
        struct machine          host_machine;
        struct rb_root          machines;
        struct perf_evlist      *evlist;
+       struct pevent           *pevent;
        /*
         * FIXME: Need to split this up further, we need global
         *        stats + per event stats. 'perf diff' also needs
@@ -80,6 +81,7 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
 bool perf_session__has_traces(struct perf_session *self, const char *msg);
 
 void mem_bswap_64(void *src, int byte_size);
+void mem_bswap_32(void *src, int byte_size);
 void perf_event__attr_swap(struct perf_event_attr *attr);
 
 int perf_session__create_kernel_maps(struct perf_session *self);
@@ -150,11 +152,20 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
                                            unsigned int type);
 
 void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
-                         struct machine *machine, struct perf_evsel *evsel,
-                         int print_sym, int print_dso, int print_symoffset);
+                         struct machine *machine, int print_sym,
+                         int print_dso, int print_symoffset);
 
 int perf_session__cpu_bitmap(struct perf_session *session,
                             const char *cpu_list, unsigned long *cpu_bitmap);
 
 void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full);
+
+struct perf_evsel_str_handler;
+
+int __perf_session__set_tracepoints_handlers(struct perf_session *session,
+                                            const struct perf_evsel_str_handler *assocs,
+                                            size_t nr_assocs);
+
+#define perf_session__set_tracepoints_handlers(session, array) \
+       __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
 #endif /* __PERF_SESSION_H */
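A rough caller-side sketch (not part of this commit) of the new association helper, assuming the usual perf util headers are in scope; process_sched_switch and its signature are hypothetical and stand in for whatever callback a builtin wants attached to the evsel:

        /* hypothetical per-tracepoint callback; the stored pointer ends up in
         * evsel->handler.func, so its exact signature is up to the tool */
        static int process_sched_switch(struct perf_tool *tool, union perf_event *event,
                                        struct perf_sample *sample, struct perf_evsel *evsel,
                                        struct machine *machine);

        static const struct perf_evsel_str_handler handlers[] = {
                { "sched:sched_switch", process_sched_switch },
        };

        static int setup_handlers(struct perf_session *session)
        {
                /* events absent from the session are silently skipped;
                 * re-setting an already assigned handler fails with -EEXIST */
                if (perf_session__set_tracepoints_handlers(session, handlers))
                        return -1;
                return 0;
        }
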
index a272374..0f5a0a4 100644 (file)
@@ -241,6 +241,54 @@ struct sort_entry sort_sym = {
        .se_width_idx   = HISTC_SYMBOL,
 };
 
+/* --sort srcline */
+
+static int64_t
+sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       return (int64_t)(right->ip - left->ip);
+}
+
+static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
+                                  size_t size, unsigned int width __used)
+{
+       FILE *fp;
+       char cmd[PATH_MAX + 2], *path = self->srcline, *nl;
+       size_t line_len;
+
+       if (path != NULL)
+               goto out_path;
+
+       snprintf(cmd, sizeof(cmd), "addr2line -e %s %016" PRIx64,
+                self->ms.map->dso->long_name, self->ip);
+       fp = popen(cmd, "r");
+       if (!fp)
+               goto out_ip;
+
+       if (getline(&path, &line_len, fp) < 0 || !line_len)
+               goto out_ip;
+       fclose(fp);
+       self->srcline = strdup(path);
+       if (self->srcline == NULL)
+               goto out_ip;
+
+       nl = strchr(self->srcline, '\n');
+       if (nl != NULL)
+               *nl = '\0';
+       path = self->srcline;
+out_path:
+       return repsep_snprintf(bf, size, "%s", path);
+out_ip:
+       return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip);
+}
+
+struct sort_entry sort_srcline = {
+       .se_header      = "Source:Line",
+       .se_cmp         = sort__srcline_cmp,
+       .se_snprintf    = hist_entry__srcline_snprintf,
+       .se_width_idx   = HISTC_SRCLINE,
+};
+
 /* --sort parent */
 
 static int64_t
@@ -439,6 +487,7 @@ static struct sort_dimension sort_dimensions[] = {
        DIM(SORT_PARENT, "parent", sort_parent),
        DIM(SORT_CPU, "cpu", sort_cpu),
        DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
+       DIM(SORT_SRCLINE, "srcline", sort_srcline),
 };
 
 int sort_dimension__add(const char *tok)
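The new srcline column resolves an instruction address to file:line by shelling out to addr2line, as hist_entry__srcline_snprintf() does above. A minimal standalone sketch of that technique (the path and address in main() are made up for illustration):

        #define _GNU_SOURCE             /* for getline() */
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        /* Resolve one address to "file:line" by running addr2line, the same
         * technique hist_entry__srcline_snprintf() uses.  Returns a malloc'd
         * string, or NULL if addr2line could not be run or gave no output. */
        static char *resolve_srcline(const char *object, unsigned long long ip)
        {
                char cmd[4096], *line = NULL, *nl;
                size_t len = 0;
                FILE *fp;

                snprintf(cmd, sizeof(cmd), "addr2line -e %s %016llx", object, ip);
                fp = popen(cmd, "r");
                if (fp == NULL)
                        return NULL;

                if (getline(&line, &len, fp) < 0) {
                        free(line);
                        line = NULL;
                } else if ((nl = strchr(line, '\n')) != NULL) {
                        *nl = '\0';     /* strip the trailing newline */
                }

                pclose(fp);
                return line;
        }

        int main(void)
        {
                /* object path and address are illustrative only */
                char *srcline = resolve_srcline("/bin/true", 0x4005d0ULL);

                printf("%s\n", srcline ? srcline : "??:0");
                free(srcline);
                return 0;
        }
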
index 472aa5a..e724b26 100644 (file)
@@ -71,6 +71,7 @@ struct hist_entry {
        char                    level;
        bool                    used;
        u8                      filtered;
+       char                    *srcline;
        struct symbol           *parent;
        union {
                unsigned long     position;
@@ -93,6 +94,7 @@ enum sort_type {
        SORT_SYM_FROM,
        SORT_SYM_TO,
        SORT_MISPREDICT,
+       SORT_SRCLINE,
 };
 
 /*
index d583638..199bc4d 100644 (file)
@@ -313,3 +313,25 @@ int strtailcmp(const char *s1, const char *s2)
        return 0;
 }
 
+/**
+ * rtrim - Removes trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns @s.
+ */
+char *rtrim(char *s)
+{
+       size_t size = strlen(s);
+       char *end;
+
+       if (!size)
+               return s;
+
+       end = s + size - 1;
+       while (end >= s && isspace(*end))
+               end--;
+       *(end + 1) = '\0';
+
+       return s;
+}
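For reference, a tiny standalone check of the behaviour documented above; the function body is copied from the rtrim() just added, and the buffer contents are only an example:

        #include <ctype.h>
        #include <stdio.h>
        #include <string.h>

        /* same logic as the rtrim() added above */
        static char *rtrim(char *s)
        {
                size_t size = strlen(s);
                char *end;

                if (!size)
                        return s;

                end = s + size - 1;
                while (end >= s && isspace(*end))
                        end--;
                *(end + 1) = '\0';

                return s;
        }

        int main(void)
        {
                char buf[] = "  ftrace:function \t\n";

                /* only trailing whitespace is stripped; leading spaces stay */
                printf("'%s'\n", rtrim(buf));   /* prints '  ftrace:function' */
                return 0;
        }
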
index 3e2e5ea..50958bb 100644 (file)
@@ -1478,14 +1478,31 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
                goto out;
        }
 
-       sec = elf_section_by_name(elf, &ehdr, &shdr,
-                                 ".note.gnu.build-id", NULL);
-       if (sec == NULL) {
+       /*
+        * Check following sections for notes:
+        *   '.note.gnu.build-id'
+        *   '.notes'
+        *   '.note' (VDSO specific)
+        */
+       do {
+               sec = elf_section_by_name(elf, &ehdr, &shdr,
+                                         ".note.gnu.build-id", NULL);
+               if (sec)
+                       break;
+
                sec = elf_section_by_name(elf, &ehdr, &shdr,
                                          ".notes", NULL);
-               if (sec == NULL)
-                       goto out;
-       }
+               if (sec)
+                       break;
+
+               sec = elf_section_by_name(elf, &ehdr, &shdr,
+                                         ".note", NULL);
+               if (sec)
+                       break;
+
+               return err;
+
+       } while (0);
 
        data = elf_getdata(sec, NULL);
        if (data == NULL)
@@ -1590,11 +1607,62 @@ out:
        return err;
 }
 
+static int filename__read_debuglink(const char *filename,
+                                   char *debuglink, size_t size)
+{
+       int fd, err = -1;
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       GElf_Shdr shdr;
+       Elf_Data *data;
+       Elf_Scn *sec;
+       Elf_Kind ek;
+
+       fd = open(filename, O_RDONLY);
+       if (fd < 0)
+               goto out;
+
+       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+       if (elf == NULL) {
+               pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
+               goto out_close;
+       }
+
+       ek = elf_kind(elf);
+       if (ek != ELF_K_ELF)
+               goto out_close;
+
+       if (gelf_getehdr(elf, &ehdr) == NULL) {
+               pr_err("%s: cannot get elf header.\n", __func__);
+               goto out_close;
+       }
+
+       sec = elf_section_by_name(elf, &ehdr, &shdr,
+                                 ".gnu_debuglink", NULL);
+       if (sec == NULL)
+               goto out_close;
+
+       data = elf_getdata(sec, NULL);
+       if (data == NULL)
+               goto out_close;
+
+       /* the start of this section is a zero-terminated string */
+       strncpy(debuglink, data->d_buf, size);
+
+       elf_end(elf);
+
+out_close:
+       close(fd);
+out:
+       return err;
+}
+
 char dso__symtab_origin(const struct dso *dso)
 {
        static const char origin[] = {
                [SYMTAB__KALLSYMS]            = 'k',
                [SYMTAB__JAVA_JIT]            = 'j',
+               [SYMTAB__DEBUGLINK]           = 'l',
                [SYMTAB__BUILD_ID_CACHE]      = 'B',
                [SYMTAB__FEDORA_DEBUGINFO]    = 'f',
                [SYMTAB__UBUNTU_DEBUGINFO]    = 'u',
@@ -1662,10 +1730,22 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
         */
        want_symtab = 1;
 restart:
-       for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE;
+       for (dso->symtab_type = SYMTAB__DEBUGLINK;
             dso->symtab_type != SYMTAB__NOT_FOUND;
             dso->symtab_type++) {
                switch (dso->symtab_type) {
+               case SYMTAB__DEBUGLINK: {
+                       char *debuglink;
+                       strncpy(name, dso->long_name, size);
+                       debuglink = name + dso->long_name_len;
+                       while (debuglink != name && *debuglink != '/')
+                               debuglink--;
+                       if (*debuglink == '/')
+                               debuglink++;
+                       filename__read_debuglink(dso->long_name, debuglink,
+                                                size - (debuglink - name));
+                       }
+                       break;
                case SYMTAB__BUILD_ID_CACHE:
                        /* skip the locally configured cache if a symfs is given */
                        if (symbol_conf.symfs[0] ||
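The new SYMTAB__DEBUGLINK step locates a separate debug object through the .gnu_debuglink section, as filename__read_debuglink() does above. A standalone sketch of the same lookup with plain libelf (assumes an elfutils-style libelf that provides elf_getshdrstrndx(); compile with -lelf):

        #include <fcntl.h>
        #include <gelf.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        /* Print the debug-object filename stored in .gnu_debuglink, if any.
         * The section starts with the NUL-terminated filename (a CRC follows). */
        int main(int argc, char **argv)
        {
                GElf_Shdr shdr;
                Elf_Scn *scn = NULL;
                size_t shstrndx;
                Elf *elf;
                int fd, ret = 1;

                if (argc != 2 || elf_version(EV_CURRENT) == EV_NONE)
                        return 1;

                fd = open(argv[1], O_RDONLY);
                if (fd < 0)
                        return 1;

                elf = elf_begin(fd, ELF_C_READ, NULL);
                if (elf == NULL || elf_getshdrstrndx(elf, &shstrndx) != 0)
                        goto out;

                while ((scn = elf_nextscn(elf, scn)) != NULL) {
                        const char *name;
                        Elf_Data *data;

                        if (gelf_getshdr(scn, &shdr) == NULL)
                                continue;
                        name = elf_strptr(elf, shstrndx, shdr.sh_name);
                        if (name == NULL || strcmp(name, ".gnu_debuglink"))
                                continue;

                        data = elf_getdata(scn, NULL);
                        if (data && data->d_buf)
                                printf("%s\n", (const char *)data->d_buf);
                        ret = 0;
                        break;
                }
        out:
                if (elf)
                        elf_end(elf);
                close(fd);
                return ret;
        }
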
index af0752b..a884b99 100644 (file)
@@ -257,6 +257,7 @@ enum symtab_type {
        SYMTAB__KALLSYMS = 0,
        SYMTAB__GUEST_KALLSYMS,
        SYMTAB__JAVA_JIT,
+       SYMTAB__DEBUGLINK,
        SYMTAB__BUILD_ID_CACHE,
        SYMTAB__FEDORA_DEBUGINFO,
        SYMTAB__UBUNTU_DEBUGINFO,
index abe0e8e..7eeebce 100644 (file)
@@ -65,7 +65,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
                                top->freq ? "Hz" : "");
        }
 
-       ret += SNPRINTF(bf + ret, size - ret, "%s", event_name(top->sym_evsel));
+       ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel));
 
        ret += SNPRINTF(bf + ret, size - ret, "], ");
 
index df2fddb..0715c84 100644 (file)
@@ -32,29 +32,25 @@ int header_page_size_size;
 int header_page_ts_size;
 int header_page_data_offset;
 
-struct pevent *perf_pevent;
-static struct pevent *pevent;
-
 bool latency_format;
 
-int read_trace_init(int file_bigendian, int host_bigendian)
+struct pevent *read_trace_init(int file_bigendian, int host_bigendian)
 {
-       if (pevent)
-               return 0;
-
-       perf_pevent = pevent_alloc();
-       pevent = perf_pevent;
+       struct pevent *pevent = pevent_alloc();
 
-       pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
-       pevent_set_file_bigendian(pevent, file_bigendian);
-       pevent_set_host_bigendian(pevent, host_bigendian);
+       if (pevent != NULL) {
+               pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
+               pevent_set_file_bigendian(pevent, file_bigendian);
+               pevent_set_host_bigendian(pevent, host_bigendian);
+       }
 
-       return 0;
+       return pevent;
 }
 
 static int get_common_field(struct scripting_context *context,
                            int *offset, int *size, const char *type)
 {
+       struct pevent *pevent = context->pevent;
        struct event_format *event;
        struct format_field *field;
 
@@ -150,7 +146,7 @@ void *raw_field_ptr(struct event_format *event, const char *name, void *data)
        return data + field->offset;
 }
 
-int trace_parse_common_type(void *data)
+int trace_parse_common_type(struct pevent *pevent, void *data)
 {
        struct pevent_record record;
 
@@ -158,7 +154,7 @@ int trace_parse_common_type(void *data)
        return pevent_data_type(pevent, &record);
 }
 
-int trace_parse_common_pid(void *data)
+int trace_parse_common_pid(struct pevent *pevent, void *data)
 {
        struct pevent_record record;
 
@@ -166,27 +162,21 @@ int trace_parse_common_pid(void *data)
        return pevent_data_pid(pevent, &record);
 }
 
-unsigned long long read_size(void *ptr, int size)
+unsigned long long read_size(struct pevent *pevent, void *ptr, int size)
 {
        return pevent_read_number(pevent, ptr, size);
 }
 
-struct event_format *trace_find_event(int type)
-{
-       return pevent_find_event(pevent, type);
-}
-
-
-void print_trace_event(int cpu, void *data, int size)
+void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
 {
        struct event_format *event;
        struct pevent_record record;
        struct trace_seq s;
        int type;
 
-       type = trace_parse_common_type(data);
+       type = trace_parse_common_type(pevent, data);
 
-       event = trace_find_event(type);
+       event = pevent_find_event(pevent, type);
        if (!event) {
                warning("ug! no event found for type %d", type);
                return;
@@ -198,13 +188,12 @@ void print_trace_event(int cpu, void *data, int size)
        record.data = data;
 
        trace_seq_init(&s);
-       pevent_print_event(pevent, &s, &record);
+       pevent_event_info(&s, event, &record);
        trace_seq_do_printf(&s);
-       printf("\n");
 }
 
-void print_event(int cpu, void *data, int size, unsigned long long nsecs,
-                 char *comm)
+void print_event(struct pevent *pevent, int cpu, void *data, int size,
+                unsigned long long nsecs, char *comm)
 {
        struct pevent_record record;
        struct trace_seq s;
@@ -227,7 +216,8 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs,
        printf("\n");
 }
 
-void parse_proc_kallsyms(char *file, unsigned int size __unused)
+void parse_proc_kallsyms(struct pevent *pevent,
+                        char *file, unsigned int size __unused)
 {
        unsigned long long addr;
        char *func;
@@ -258,7 +248,8 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused)
        }
 }
 
-void parse_ftrace_printk(char *file, unsigned int size __unused)
+void parse_ftrace_printk(struct pevent *pevent,
+                        char *file, unsigned int size __unused)
 {
        unsigned long long addr;
        char *printk;
@@ -282,17 +273,19 @@ void parse_ftrace_printk(char *file, unsigned int size __unused)
        }
 }
 
-int parse_ftrace_file(char *buf, unsigned long size)
+int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size)
 {
        return pevent_parse_event(pevent, buf, size, "ftrace");
 }
 
-int parse_event_file(char *buf, unsigned long size, char *sys)
+int parse_event_file(struct pevent *pevent,
+                    char *buf, unsigned long size, char *sys)
 {
        return pevent_parse_event(pevent, buf, size, sys);
 }
 
-struct event_format *trace_find_next_event(struct event_format *event)
+struct event_format *trace_find_next_event(struct pevent *pevent,
+                                          struct event_format *event)
 {
        static int idx;
 
index f097e0d..719ed74 100644 (file)
@@ -114,20 +114,20 @@ static void skip(int size)
        };
 }
 
-static unsigned int read4(void)
+static unsigned int read4(struct pevent *pevent)
 {
        unsigned int data;
 
        read_or_die(&data, 4);
-       return __data2host4(perf_pevent, data);
+       return __data2host4(pevent, data);
 }
 
-static unsigned long long read8(void)
+static unsigned long long read8(struct pevent *pevent)
 {
        unsigned long long data;
 
        read_or_die(&data, 8);
-       return __data2host8(perf_pevent, data);
+       return __data2host8(pevent, data);
 }
 
 static char *read_string(void)
@@ -168,12 +168,12 @@ static char *read_string(void)
        return str;
 }
 
-static void read_proc_kallsyms(void)
+static void read_proc_kallsyms(struct pevent *pevent)
 {
        unsigned int size;
        char *buf;
 
-       size = read4();
+       size = read4(pevent);
        if (!size)
                return;
 
@@ -181,29 +181,29 @@ static void read_proc_kallsyms(void)
        read_or_die(buf, size);
        buf[size] = '\0';
 
-       parse_proc_kallsyms(buf, size);
+       parse_proc_kallsyms(pevent, buf, size);
 
        free(buf);
 }
 
-static void read_ftrace_printk(void)
+static void read_ftrace_printk(struct pevent *pevent)
 {
        unsigned int size;
        char *buf;
 
-       size = read4();
+       size = read4(pevent);
        if (!size)
                return;
 
        buf = malloc_or_die(size);
        read_or_die(buf, size);
 
-       parse_ftrace_printk(buf, size);
+       parse_ftrace_printk(pevent, buf, size);
 
        free(buf);
 }
 
-static void read_header_files(void)
+static void read_header_files(struct pevent *pevent)
 {
        unsigned long long size;
        char *header_event;
@@ -214,7 +214,7 @@ static void read_header_files(void)
        if (memcmp(buf, "header_page", 12) != 0)
                die("did not read header page");
 
-       size = read8();
+       size = read8(pevent);
        skip(size);
 
        /*
@@ -227,47 +227,48 @@ static void read_header_files(void)
        if (memcmp(buf, "header_event", 13) != 0)
                die("did not read header event");
 
-       size = read8();
+       size = read8(pevent);
        header_event = malloc_or_die(size);
        read_or_die(header_event, size);
        free(header_event);
 }
 
-static void read_ftrace_file(unsigned long long size)
+static void read_ftrace_file(struct pevent *pevent, unsigned long long size)
 {
        char *buf;
 
        buf = malloc_or_die(size);
        read_or_die(buf, size);
-       parse_ftrace_file(buf, size);
+       parse_ftrace_file(pevent, buf, size);
        free(buf);
 }
 
-static void read_event_file(char *sys, unsigned long long size)
+static void read_event_file(struct pevent *pevent, char *sys,
+                           unsigned long long size)
 {
        char *buf;
 
        buf = malloc_or_die(size);
        read_or_die(buf, size);
-       parse_event_file(buf, size, sys);
+       parse_event_file(pevent, buf, size, sys);
        free(buf);
 }
 
-static void read_ftrace_files(void)
+static void read_ftrace_files(struct pevent *pevent)
 {
        unsigned long long size;
        int count;
        int i;
 
-       count = read4();
+       count = read4(pevent);
 
        for (i = 0; i < count; i++) {
-               size = read8();
-               read_ftrace_file(size);
+               size = read8(pevent);
+               read_ftrace_file(pevent, size);
        }
 }
 
-static void read_event_files(void)
+static void read_event_files(struct pevent *pevent)
 {
        unsigned long long size;
        char *sys;
@@ -275,15 +276,15 @@ static void read_event_files(void)
        int count;
        int i,x;
 
-       systems = read4();
+       systems = read4(pevent);
 
        for (i = 0; i < systems; i++) {
                sys = read_string();
 
-               count = read4();
+               count = read4(pevent);
                for (x=0; x < count; x++) {
-                       size = read8();
-                       read_event_file(sys, size);
+                       size = read8(pevent);
+                       read_event_file(pevent, sys, size);
                }
        }
 }
@@ -377,7 +378,7 @@ static int calc_index(void *ptr, int cpu)
        return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
 }
 
-struct pevent_record *trace_peek_data(int cpu)
+struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu)
 {
        struct pevent_record *data;
        void *page = cpu_data[cpu].page;
@@ -399,15 +400,15 @@ struct pevent_record *trace_peek_data(int cpu)
                /* FIXME: handle header page */
                if (header_page_ts_size != 8)
                        die("expected a long long type for timestamp");
-               cpu_data[cpu].timestamp = data2host8(perf_pevent, ptr);
+               cpu_data[cpu].timestamp = data2host8(pevent, ptr);
                ptr += 8;
                switch (header_page_size_size) {
                case 4:
-                       cpu_data[cpu].page_size = data2host4(perf_pevent, ptr);
+                       cpu_data[cpu].page_size = data2host4(pevent, ptr);
                        ptr += 4;
                        break;
                case 8:
-                       cpu_data[cpu].page_size = data2host8(perf_pevent, ptr);
+                       cpu_data[cpu].page_size = data2host8(pevent, ptr);
                        ptr += 8;
                        break;
                default:
@@ -421,10 +422,10 @@ read_again:
 
        if (idx >= cpu_data[cpu].page_size) {
                get_next_page(cpu);
-               return trace_peek_data(cpu);
+               return trace_peek_data(pevent, cpu);
        }
 
-       type_len_ts = data2host4(perf_pevent, ptr);
+       type_len_ts = data2host4(pevent, ptr);
        ptr += 4;
 
        type_len = type_len4host(type_len_ts);
@@ -434,14 +435,14 @@ read_again:
        case RINGBUF_TYPE_PADDING:
                if (!delta)
                        die("error, hit unexpected end of page");
-               length = data2host4(perf_pevent, ptr);
+               length = data2host4(pevent, ptr);
                ptr += 4;
                length *= 4;
                ptr += length;
                goto read_again;
 
        case RINGBUF_TYPE_TIME_EXTEND:
-               extend = data2host4(perf_pevent, ptr);
+               extend = data2host4(pevent, ptr);
                ptr += 4;
                extend <<= TS_SHIFT;
                extend += delta;
@@ -452,7 +453,7 @@ read_again:
                ptr += 12;
                break;
        case 0:
-               length = data2host4(perf_pevent, ptr);
+               length = data2host4(pevent, ptr);
                ptr += 4;
                die("here! length=%d", length);
                break;
@@ -477,17 +478,17 @@ read_again:
        return data;
 }
 
-struct pevent_record *trace_read_data(int cpu)
+struct pevent_record *trace_read_data(struct pevent *pevent, int cpu)
 {
        struct pevent_record *data;
 
-       data = trace_peek_data(cpu);
+       data = trace_peek_data(pevent, cpu);
        cpu_data[cpu].next = NULL;
 
        return data;
 }
 
-ssize_t trace_report(int fd, bool __repipe)
+ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
 {
        char buf[BUFSIZ];
        char test[] = { 23, 8, 68 };
@@ -519,30 +520,32 @@ ssize_t trace_report(int fd, bool __repipe)
        file_bigendian = buf[0];
        host_bigendian = bigendian();
 
-       read_trace_init(file_bigendian, host_bigendian);
+       *ppevent = read_trace_init(file_bigendian, host_bigendian);
+       if (*ppevent == NULL)
+               die("read_trace_init failed");
 
        read_or_die(buf, 1);
        long_size = buf[0];
 
-       page_size = read4();
+       page_size = read4(*ppevent);
 
-       read_header_files();
+       read_header_files(*ppevent);
 
-       read_ftrace_files();
-       read_event_files();
-       read_proc_kallsyms();
-       read_ftrace_printk();
+       read_ftrace_files(*ppevent);
+       read_event_files(*ppevent);
+       read_proc_kallsyms(*ppevent);
+       read_ftrace_printk(*ppevent);
 
        size = calc_data_size - 1;
        calc_data_size = 0;
        repipe = false;
 
        if (show_funcs) {
-               pevent_print_funcs(perf_pevent);
+               pevent_print_funcs(*ppevent);
                return size;
        }
        if (show_printk) {
-               pevent_print_printk(perf_pevent);
+               pevent_print_printk(*ppevent);
                return size;
        }
 
index 18ae6c1..474aa7a 100644 (file)
@@ -36,6 +36,7 @@ static int stop_script_unsupported(void)
 }
 
 static void process_event_unsupported(union perf_event *event __unused,
+                                     struct pevent *pevent __unused,
                                      struct perf_sample *sample __unused,
                                      struct perf_evsel *evsel __unused,
                                      struct machine *machine __unused,
@@ -61,7 +62,8 @@ static int python_start_script_unsupported(const char *script __unused,
        return -1;
 }
 
-static int python_generate_script_unsupported(const char *outfile __unused)
+static int python_generate_script_unsupported(struct pevent *pevent __unused,
+                                             const char *outfile __unused)
 {
        print_python_unsupported_msg();
 
@@ -122,7 +124,8 @@ static int perl_start_script_unsupported(const char *script __unused,
        return -1;
 }
 
-static int perl_generate_script_unsupported(const char *outfile __unused)
+static int perl_generate_script_unsupported(struct pevent *pevent __unused,
+                                           const char *outfile __unused)
 {
        print_perl_unsupported_msg();
 
index 639852a..8fef1d6 100644 (file)
@@ -8,6 +8,7 @@
 struct machine;
 struct perf_sample;
 union perf_event;
+struct perf_tool;
 struct thread;
 
 extern int header_page_size_size;
@@ -29,35 +30,36 @@ enum {
 
 int bigendian(void);
 
-int read_trace_init(int file_bigendian, int host_bigendian);
-void print_trace_event(int cpu, void *data, int size);
+struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
+void print_trace_event(struct pevent *pevent, int cpu, void *data, int size);
 
-void print_event(int cpu, void *data, int size, unsigned long long nsecs,
-                 char *comm);
+void print_event(struct pevent *pevent, int cpu, void *data, int size,
+                unsigned long long nsecs, char *comm);
 
-int parse_ftrace_file(char *buf, unsigned long size);
-int parse_event_file(char *buf, unsigned long size, char *sys);
+int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size);
+int parse_event_file(struct pevent *pevent,
+                    char *buf, unsigned long size, char *sys);
 
-struct pevent_record *trace_peek_data(int cpu);
-struct event_format *trace_find_event(int type);
+struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu);
 
 unsigned long long
 raw_field_value(struct event_format *event, const char *name, void *data);
 void *raw_field_ptr(struct event_format *event, const char *name, void *data);
 
-void parse_proc_kallsyms(char *file, unsigned int size __unused);
-void parse_ftrace_printk(char *file, unsigned int size __unused);
+void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size);
+void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size);
 
-ssize_t trace_report(int fd, bool repipe);
+ssize_t trace_report(int fd, struct pevent **pevent, bool repipe);
 
-int trace_parse_common_type(void *data);
-int trace_parse_common_pid(void *data);
+int trace_parse_common_type(struct pevent *pevent, void *data);
+int trace_parse_common_pid(struct pevent *pevent, void *data);
 
-struct event_format *trace_find_next_event(struct event_format *event);
-unsigned long long read_size(void *ptr, int size);
+struct event_format *trace_find_next_event(struct pevent *pevent,
+                                          struct event_format *event);
+unsigned long long read_size(struct pevent *pevent, void *ptr, int size);
 unsigned long long eval_flag(const char *flag);
 
-struct pevent_record *trace_read_data(int cpu);
+struct pevent_record *trace_read_data(struct pevent *pevent, int cpu);
 int read_tracing_data(int fd, struct list_head *pattrs);
 
 struct tracing_data {
@@ -77,11 +79,12 @@ struct scripting_ops {
        int (*start_script) (const char *script, int argc, const char **argv);
        int (*stop_script) (void);
        void (*process_event) (union perf_event *event,
+                              struct pevent *pevent,
                               struct perf_sample *sample,
                               struct perf_evsel *evsel,
                               struct machine *machine,
                               struct thread *thread);
-       int (*generate_script) (const char *outfile);
+       int (*generate_script) (struct pevent *pevent, const char *outfile);
 };
 
 int script_spec_register(const char *spec, struct scripting_ops *ops);
@@ -90,6 +93,7 @@ void setup_perl_scripting(void);
 void setup_python_scripting(void);
 
 struct scripting_context {
+       struct pevent *pevent;
        void *event_data;
 };
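With the file-scope perf_pevent gone, callers keep the handle produced by read_trace_init()/trace_report() and thread it through every call. A rough caller-side fragment against the declarations above (not part of this commit; sample is assumed to be the usual struct perf_sample with cpu/raw_data/raw_size fields):

        struct pevent *pevent;
        ssize_t size;

        size = trace_report(fd, &pevent, false);        /* allocates and fills the handle */
        if (size < 0)
                return -1;

        /* decoding one raw sample later on: */
        print_trace_event(pevent, sample->cpu, sample->raw_data, sample->raw_size);

        if (trace_parse_common_pid(pevent, sample->raw_data) == 0)
                printf("idle task\n");
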
 
index 2daaedb..b13c733 100644 (file)
@@ -264,4 +264,6 @@ bool is_power_of_2(unsigned long n)
 
 size_t hex_width(u64 v);
 
+char *rtrim(char *s);
+
 #endif
index 01f572c..23a41a9 100644 (file)
@@ -334,6 +334,11 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
 }
 
 #ifdef __KVM_HAVE_MSI
+static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
 static int assigned_device_enable_host_msi(struct kvm *kvm,
                                           struct kvm_assigned_dev_kernel *dev)
 {
@@ -346,7 +351,7 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
        }
 
        dev->host_irq = dev->dev->irq;
-       if (request_threaded_irq(dev->host_irq, NULL,
+       if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
                                 kvm_assigned_dev_thread_msi, 0,
                                 dev->irq_name, dev)) {
                pci_disable_msi(dev->dev);
@@ -358,6 +363,11 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
 #endif
 
 #ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
 static int assigned_device_enable_host_msix(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
 {
@@ -374,7 +384,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 
        for (i = 0; i < dev->entries_nr; i++) {
                r = request_threaded_irq(dev->host_msix_entries[i].vector,
-                                        NULL, kvm_assigned_dev_thread_msix,
+                                        kvm_assigned_dev_msix,
+                                        kvm_assigned_dev_thread_msix,
                                         0, dev->irq_name, dev);
                if (r)
                        goto err;
@@ -635,7 +646,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;
-       u8 header_type;
 
        if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
                return -EINVAL;
@@ -668,8 +678,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        }
 
        /* Don't allow bridges to be assigned */
-       pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-       if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+       if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
                r = -EPERM;
                goto out_put;
        }
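The MSI and MSI-X paths above now pass a trivial primary handler instead of NULL, presumably so the interrupt can be requested without the IRQF_ONESHOT that a NULL primary handler would otherwise demand, while the real work stays in the threaded handler. The generic shape of that pattern, for reference (the demo_* names and "demo" label are made up):

        /* primary handler: just defer to the thread */
        static irqreturn_t demo_primary(int irq, void *dev_id)
        {
                return IRQ_WAKE_THREAD;
        }

        /* threaded handler: may sleep */
        static irqreturn_t demo_thread(int irq, void *dev_id)
        {
                return IRQ_HANDLED;
        }

        /* in the setup path: */
        err = request_threaded_irq(irq, demo_primary, demo_thread, 0, "demo", dev);
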
index f59c1e8..7d7e2aa 100644 (file)
@@ -198,7 +198,7 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
 }
 
 static int
-kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct kvm_irq_routing_table *irq_rt;
        struct _irqfd *irqfd, *tmp;
@@ -212,12 +212,12 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
                return -ENOMEM;
 
        irqfd->kvm = kvm;
-       irqfd->gsi = gsi;
+       irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
 
-       file = eventfd_fget(fd);
+       file = eventfd_fget(args->fd);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto fail;
@@ -298,19 +298,19 @@ kvm_eventfd_init(struct kvm *kvm)
  * shutdown any irqfd's that match fd+gsi
  */
 static int
-kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct _irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;
 
-       eventfd = eventfd_ctx_fdget(fd);
+       eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);
 
        spin_lock_irq(&kvm->irqfds.lock);
 
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-               if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+               if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
                         * This rcu_assign_pointer is needed for when
                         * another thread calls kvm_irq_routing_update before
@@ -338,12 +338,15 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
 }
 
 int
-kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
-       if (flags & KVM_IRQFD_FLAG_DEASSIGN)
-               return kvm_irqfd_deassign(kvm, fd, gsi);
+       if (args->flags & ~KVM_IRQFD_FLAG_DEASSIGN)
+               return -EINVAL;
+
+       if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
+               return kvm_irqfd_deassign(kvm, args);
 
-       return kvm_irqfd_assign(kvm, fd, gsi);
+       return kvm_irqfd_assign(kvm, args);
 }
 
 /*
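On the userspace side the ioctl argument is the same struct kvm_irqfd that kvm_irqfd() now receives whole, and unknown flag bits are rejected with -EINVAL. A minimal hypothetical caller wiring an eventfd to a guest GSI:

        #include <linux/kvm.h>
        #include <string.h>
        #include <sys/eventfd.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        /* Wire an eventfd to a guest GSI; writing 1 to the returned fd injects
         * the interrupt.  Returns the eventfd or -1 on error. */
        static int wire_irqfd(int vm_fd, unsigned int gsi)
        {
                struct kvm_irqfd irqfd;
                int efd = eventfd(0, EFD_CLOEXEC);

                if (efd < 0)
                        return -1;

                memset(&irqfd, 0, sizeof(irqfd));
                irqfd.fd = efd;
                irqfd.gsi = gsi;
                irqfd.flags = 0;        /* KVM_IRQFD_FLAG_DEASSIGN would tear it down */

                if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
                        close(efd);
                        return -1;
                }
                return efd;
        }
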
index 7e14068..44ee712 100644 (file)
@@ -2047,7 +2047,7 @@ static long kvm_vm_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof data))
                        goto out;
-               r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
+               r = kvm_irqfd(kvm, &data);
                break;
        }
        case KVM_IOEVENTFD: {
@@ -2845,6 +2845,7 @@ void kvm_exit(void)
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        free_cpumask_var(cpus_hardware_enabled);
+       __free_page(fault_page);
        __free_page(hwpoison_page);
        __free_page(bad_page);
 }