Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 19 Dec 2014 00:05:28 +0000 (16:05 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 19 Dec 2014 00:05:28 +0000 (16:05 -0800)
Pull KVM update from Paolo Bonzini:
 "3.19 changes for KVM:

   - spring cleaning: removed support for IA64, and for hardware-
     assisted virtualization on the PPC970

   - ARM, PPC, s390 all had only small fixes

  For x86:
   - small performance improvements (though only on weird guests)
   - usual round of hardware-compliancy fixes from Nadav
   - APICv fixes
   - XSAVES support for hosts and guests.  XSAVES hosts were broken
     because the (non-KVM) XSAVES patches inadvertently changed the KVM
     userspace ABI whenever XSAVES was enabled; hence, this part is
     going to stable.  Guest support is just a matter of exposing the
     feature and CPUID leaves support"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (179 commits)
  KVM: move APIC types to arch/x86/
  KVM: PPC: Book3S: Enable in-kernel XICS emulation by default
  KVM: PPC: Book3S HV: Improve H_CONFER implementation
  KVM: PPC: Book3S HV: Fix endianness of instruction obtained from HEIR register
  KVM: PPC: Book3S HV: Remove code for PPC970 processors
  KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
  KVM: PPC: Book3S HV: Simplify locking around stolen time calculations
  arch: powerpc: kvm: book3s_paired_singles.c: Remove unused function
  arch: powerpc: kvm: book3s_pr.c: Remove unused function
  arch: powerpc: kvm: book3s.c: Remove some unused functions
  arch: powerpc: kvm: book3s_32_mmu.c: Remove unused function
  KVM: PPC: Book3S HV: Check wait conditions before sleeping in kvmppc_vcore_blocked
  KVM: PPC: Book3S HV: ptes are big endian
  KVM: PPC: Book3S HV: Fix inaccuracies in ICP emulation for H_IPI
  KVM: PPC: Book3S HV: Fix KSM memory corruption
  KVM: PPC: Book3S HV: Fix an issue where guest is paused on receiving HMI
  KVM: PPC: Book3S HV: Fix computation of tlbie operand
  KVM: PPC: Book3S HV: Add missing HPTE unlock
  KVM: PPC: BookE: Improve irq inject tracepoint
  arm/arm64: KVM: Require in-kernel vgic for the arch timers
  ...

13 files changed:
1  2 
MAINTAINERS
arch/arm/kvm/mmu.c
arch/ia64/Kconfig
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/e500.c
arch/s390/include/asm/pgalloc.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/mm/pgtable.c
arch/x86/kvm/emulate.c
virt/kvm/arm/vgic.c

diff --combined MAINTAINERS
@@@ -618,16 -618,6 +618,16 @@@ S:       Maintaine
  F:    drivers/iommu/amd_iommu*.[ch]
  F:    include/linux/amd-iommu.h
  
 +AMD KFD
 +M:      Oded Gabbay <oded.gabbay@amd.com>
 +L:      dri-devel@lists.freedesktop.org
 +T:      git git://people.freedesktop.org/~gabbayo/linux.git
 +S:      Supported
 +F:      drivers/gpu/drm/amd/amdkfd/
 +F:      drivers/gpu/drm/radeon/radeon_kfd.c
 +F:      drivers/gpu/drm/radeon/radeon_kfd.h
 +F:      include/uapi/linux/kfd_ioctl.h
 +
  AMD MICROCODE UPDATE SUPPORT
  M:    Andreas Herrmann <herrmann.der.user@googlemail.com>
  L:    amd64-microcode@amd64.org
@@@ -860,7 -850,6 +860,7 @@@ ARM/Amlogic MesonX SoC suppor
  M:    Carlo Caione <carlo@caione.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 +F:    drivers/media/rc/meson-ir.c
  N:    meson[x68]
  
  ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
@@@ -872,7 -861,6 +872,7 @@@ W: http://maxim.org.za/at91_26.htm
  W:    http://www.linux4sam.org
  S:    Supported
  F:    arch/arm/mach-at91/
 +F:    include/soc/at91/
  F:    arch/arm/boot/dts/at91*.dts
  F:    arch/arm/boot/dts/at91*.dtsi
  F:    arch/arm/boot/dts/sama*.dts
@@@ -930,15 -918,6 +930,15 @@@ M:       Hubert Feurstein <hubert.feurstein@c
  S:    Maintained
  F:    arch/arm/mach-ep93xx/micro9.c
  
 +ARM/CORESIGHT FRAMEWORK AND DRIVERS
 +M:    Mathieu Poirier <mathieu.poirier@linaro.org>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    drivers/coresight/*
 +F:    Documentation/trace/coresight.txt
 +F:    Documentation/devicetree/bindings/arm/coresight.txt
 +F:    Documentation/ABI/testing/sysfs-bus-coresight-devices-*
 +
  ARM/CORGI MACHINE SUPPORT
  M:    Richard Purdie <rpurdie@rpsys.net>
  S:    Maintained
@@@ -1329,22 -1308,30 +1329,22 @@@ F:   drivers/*/*rockchip
  F:    drivers/*/*/*rockchip*
  F:    sound/soc/rockchip/
  
 -ARM/SAMSUNG ARM ARCHITECTURES
 -M:    Ben Dooks <ben-linux@fluff.org>
 -M:    Kukjin Kim <kgene.kim@samsung.com>
 +ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 +M:    Kukjin Kim <kgene@kernel.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 -W:    http://www.fluff.org/ben/linux/
  S:    Maintained
  F:    arch/arm/boot/dts/s3c*
  F:    arch/arm/boot/dts/exynos*
  F:    arch/arm/plat-samsung/
  F:    arch/arm/mach-s3c24*/
  F:    arch/arm/mach-s3c64xx/
 +F:    arch/arm/mach-s5p*/
 +F:    arch/arm/mach-exynos*/
  F:    drivers/*/*s3c2410*
  F:    drivers/*/*/*s3c2410*
  F:    drivers/spi/spi-s3c*
  F:    sound/soc/samsung/*
 -
 -ARM/S5P EXYNOS ARM ARCHITECTURES
 -M:    Kukjin Kim <kgene.kim@samsung.com>
 -L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 -S:    Maintained
 -F:    arch/arm/mach-s5p*/
 -F:    arch/arm/mach-exynos*/
  N:    exynos
  
  ARM/SAMSUNG MOBILE MACHINE SUPPORT
@@@ -1394,12 -1381,12 +1394,12 @@@ F:   arch/arm/boot/dts/sh
  F:    arch/arm/configs/ape6evm_defconfig
  F:    arch/arm/configs/armadillo800eva_defconfig
  F:    arch/arm/configs/bockw_defconfig
 -F:    arch/arm/configs/koelsch_defconfig
  F:    arch/arm/configs/kzm9g_defconfig
  F:    arch/arm/configs/lager_defconfig
  F:    arch/arm/configs/mackerel_defconfig
  F:    arch/arm/configs/marzen_defconfig
  F:    arch/arm/configs/shmobile_defconfig
 +F:    arch/arm/include/debug/renesas-scif.S
  F:    arch/arm/mach-shmobile/
  F:    drivers/sh/
  
@@@ -1443,7 -1430,6 +1443,7 @@@ F:      drivers/tty/serial/st-asc.
  F:    drivers/usb/dwc3/dwc3-st.c
  F:    drivers/usb/host/ehci-st.c
  F:    drivers/usb/host/ohci-st.c
 +F:    drivers/ata/ahci_st.c
  
  ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
  M:    Lennert Buytenhek <kernel@wantstofly.org>
@@@ -1517,19 -1503,6 +1517,19 @@@ S:    Maintaine
  F:    drivers/clk/ux500/
  F:    include/linux/platform_data/clk-ux500.h
  
 +ARM/VERSATILE EXPRESS PLATFORM
 +M:    Liviu Dudau <liviu.dudau@arm.com>
 +M:    Sudeep Holla <sudeep.holla@arm.com>
 +M:    Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    arch/arm/boot/dts/vexpress*
 +F:    arch/arm/mach-vexpress/
 +F:    */*/vexpress*
 +F:    */*/*/vexpress*
 +F:    drivers/clk/versatile/clk-vexpress-osc.c
 +F:    drivers/clocksource/versatile.c
 +
  ARM/VFP SUPPORT
  M:    Russell King <linux@arm.linux.org.uk>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1570,7 -1543,6 +1570,7 @@@ F:      arch/arm/mach-pxa/include/mach/z2.
  
  ARM/ZYNQ ARCHITECTURE
  M:    Michal Simek <michal.simek@xilinx.com>
 +R:    Sören Brinkmann <soren.brinkmann@xilinx.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  W:    http://wiki.xilinx.com
  T:    git git://git.xilinx.com/linux-xlnx.git
@@@ -1741,13 -1713,6 +1741,13 @@@ F:    drivers/dma/at_hdmac.
  F:    drivers/dma/at_hdmac_regs.h
  F:    include/linux/platform_data/dma-atmel.h
  
 +ATMEL XDMA DRIVER
 +M:    Ludovic Desroches <ludovic.desroches@atmel.com>
 +L:    linux-arm-kernel@lists.infradead.org
 +L:    dmaengine@vger.kernel.org
 +S:    Supported
 +F:    drivers/dma/at_xdmac.c
 +
  ATMEL I2C DRIVER
  M:    Ludovic Desroches <ludovic.desroches@atmel.com>
  L:    linux-i2c@vger.kernel.org
@@@ -1820,11 -1785,10 +1820,11 @@@ S:   Supporte
  F:    drivers/scsi/esas2r
  
  AUDIT SUBSYSTEM
 +M:    Paul Moore <paul@paul-moore.com>
  M:    Eric Paris <eparis@redhat.com>
 -L:    linux-audit@redhat.com (subscribers-only)
 +L:    linux-audit@redhat.com (moderated for non-subscribers)
  W:    http://people.redhat.com/sgrubb/audit/
 -T:    git git://git.infradead.org/users/eparis/audit.git
 +T:    git git://git.infradead.org/users/pcmoore/audit
  S:    Maintained
  F:    include/linux/audit.h
  F:    include/uapi/linux/audit.h
@@@ -1863,7 -1827,7 +1863,7 @@@ F:      include/net/ax25.
  F:    net/ax25/
  
  AZ6007 DVB DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
@@@ -1897,6 -1861,7 +1897,6 @@@ F:      drivers/net/wireless/b43legacy
  
  BACKLIGHT CLASS/SUBSYSTEM
  M:    Jingoo Han <jg1.han@samsung.com>
 -M:    Bryan Wu <cooloney@gmail.com>
  M:    Lee Jones <lee.jones@linaro.org>
  S:    Maintained
  F:    drivers/video/backlight/
@@@ -1925,6 -1890,13 +1925,6 @@@ W:     http://bcache.evilpiepirate.or
  S:    Maintained:
  F:    drivers/md/bcache/
  
 -BECEEM BCS200/BCS220-3/BCSM250 WIMAX SUPPORT
 -M: Kevin McKinney <klmckinney1@gmail.com>
 -M: Matthias Beyer <mail@beyermatthias.de>
 -L: devel@driverdev.osuosl.org
 -S: Maintained
 -F: drivers/staging/bcm*
 -
  BEFS FILE SYSTEM
  S:    Orphan
  F:    Documentation/filesystems/befs.txt
@@@ -2099,20 -2071,11 +2099,20 @@@ F:   drivers/clocksource/bcm_kona_timer.
  
  BROADCOM BCM2835 ARM ARCHITECTURE
  M:    Stephen Warren <swarren@wwwdotorg.org>
 +M:    Lee Jones <lee@kernel.org>
  L:    linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-rpi.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
  S:    Maintained
  N:    bcm2835
  
 +BROADCOM BCM33XX MIPS ARCHITECTURE
 +M:    Kevin Cernekee <cernekee@gmail.com>
 +L:    linux-mips@linux-mips.org
 +S:    Maintained
 +F:    arch/mips/bcm3384/*
 +F:    arch/mips/include/asm/mach-bcm3384/*
 +F:    arch/mips/kernel/*bmips*
 +
  BROADCOM BCM5301X ARM ARCHITECTURE
  M:    Hauke Mehrtens <hauke@hauke-m.de>
  L:    linux-arm-kernel@lists.infradead.org
@@@ -2129,34 -2092,13 +2129,34 @@@ S:   Maintaine
  F:    arch/arm/mach-bcm/bcm63xx.c
  F:    arch/arm/include/debug/bcm63xx.S
  
 +BROADCOM BCM63XX/BCM33XX UDC DRIVER
 +M:    Kevin Cernekee <cernekee@gmail.com>
 +L:    linux-usb@vger.kernel.org
 +S:    Maintained
 +F:    drivers/usb/gadget/udc/bcm63xx_udc.*
 +
  BROADCOM BCM7XXX ARM ARCHITECTURE
  M:    Marc Carino <marc.ceeeee@gmail.com>
  M:    Brian Norris <computersforpeace@gmail.com>
 +M:    Gregory Fong <gregory.0xf0@gmail.com>
 +M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/mach-bcm/*brcmstb*
  F:    arch/arm/boot/dts/bcm7*.dts*
 +F:    drivers/bus/brcmstb_gisb.c
 +
 +BROADCOM BMIPS MIPS ARCHITECTURE
 +M:    Kevin Cernekee <cernekee@gmail.com>
 +M:    Florian Fainelli <f.fainelli@gmail.com>
 +L:    linux-mips@linux-mips.org
 +S:    Maintained
 +F:    arch/mips/bmips/*
 +F:    arch/mips/include/asm/mach-bmips/*
 +F:    arch/mips/kernel/*bmips*
 +F:    arch/mips/boot/dts/bcm*.dts*
 +F:    drivers/irqchip/irq-bcm7*
 +F:    drivers/irqchip/irq-brcmstb*
  
  BROADCOM TG3 GIGABIT ETHERNET DRIVER
  M:    Prashant Sreedharan <prashant@broadcom.com>
@@@ -2187,20 -2129,6 +2187,20 @@@ L:    linux-scsi@vger.kernel.or
  S:    Supported
  F:    drivers/scsi/bnx2i/
  
 +BROADCOM CYGNUS/IPROC ARM ARCHITECTURE
 +M:    Ray Jui <rjui@broadcom.com>
 +M:    Scott Branden <sbranden@broadcom.com>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +L:    bcm-kernel-feedback-list@broadcom.com
 +T:    git git://git.github.com/brcm/linux.git
 +S:    Maintained
 +N:    iproc
 +N:    cygnus
 +N:    bcm9113*
 +N:    bcm9583*
 +N:    bcm583*
 +N:    bcm113*
 +
  BROADCOM KONA GPIO DRIVER
  M:    Ray Jui <rjui@broadcom.com>
  L:    bcm-kernel-feedback-list@broadcom.com
@@@ -2268,7 -2196,7 +2268,7 @@@ F:      Documentation/filesystems/btrfs.tx
  F:    fs/btrfs/
  
  BTTV VIDEO4LINUX DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
@@@ -2378,14 -2306,6 +2378,14 @@@ F:    security/capability.
  F:    security/commoncap.c
  F:    kernel/capability.c
  
 +CC2520 IEEE-802.15.4 RADIO DRIVER
 +M:    Varka Bhadram <varkabhadram@gmail.com>
 +L:    linux-wpan@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/ieee802154/cc2520.c
 +F:    include/linux/spi/cc2520.h
 +F:    Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
 +
  CELL BROADBAND ENGINE ARCHITECTURE
  M:    Arnd Bergmann <arnd@arndb.de>
  L:    linuxppc-dev@lists.ozlabs.org
@@@ -2567,13 -2487,6 +2567,13 @@@ F:    fs/coda
  F:    include/linux/coda*.h
  F:    include/uapi/linux/coda*.h
  
 +CODA V4L2 MEM2MEM DRIVER
 +M:    Philipp Zabel <p.zabel@pengutronix.de>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/media/coda.txt
 +F:    drivers/media/platform/coda/
 +
  COMMON CLK FRAMEWORK
  M:    Mike Turquette <mturquette@linaro.org>
  L:    linux-kernel@vger.kernel.org
@@@ -2589,7 -2502,8 +2589,7 @@@ M:      Steve French <sfrench@samba.org
  L:    linux-cifs@vger.kernel.org
  L:    samba-technical@lists.samba.org (moderated for non-subscribers)
  W:    http://linux-cifs.samba.org/
 -Q:    http://patchwork.ozlabs.org/project/linux-cifs-client/list/
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git
 +T:    git git://git.samba.org/sfrench/cifs-2.6.git
  S:    Supported
  F:    Documentation/filesystems/cifs/
  F:    fs/cifs/
@@@ -2666,7 -2580,7 +2666,7 @@@ L:      cgroups@vger.kernel.or
  L:    linux-mm@kvack.org
  S:    Maintained
  F:    mm/memcontrol.c
 -F:    mm/page_cgroup.c
 +F:    mm/swap_cgroup.c
  
  CORETEMP HARDWARE MONITORING DRIVER
  M:    Fenghua Yu <fenghua.yu@intel.com>
@@@ -2716,16 -2630,6 +2716,16 @@@ T:    git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    drivers/cpuidle/cpuidle-big_little.c
  
 +CPUIDLE DRIVER - ARM EXYNOS
 +M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 +M:    Daniel Lezcano <daniel.lezcano@linaro.org>
 +M:    Kukjin Kim <kgene@kernel.org>
 +L:    linux-pm@vger.kernel.org
 +L:    linux-samsung-soc@vger.kernel.org
 +S:    Supported
 +F:    drivers/cpuidle/cpuidle-exynos.c
 +F:    arch/arm/mach-exynos/pm.c
 +
  CPUIDLE DRIVERS
  M:    Rafael J. Wysocki <rjw@rjwysocki.net>
  M:    Daniel Lezcano <daniel.lezcano@linaro.org>
@@@ -2793,7 -2697,7 +2793,7 @@@ F:      drivers/net/wireless/cw1200
  
  CX18 VIDEO4LINUX DRIVER
  M:    Andy Walls <awalls@md.metrocast.net>
 -L:    ivtv-devel@ivtvdriver.org (moderated for non-subscribers)
 +L:    ivtv-devel@ivtvdriver.org (subscribers-only)
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
  W:    http://linuxtv.org
@@@ -2813,7 -2717,7 +2813,7 @@@ F:      drivers/media/common/cx2341x
  F:    include/media/cx2341x*
  
  CX88 VIDEO4LINUX DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
@@@ -2838,13 -2742,6 +2838,13 @@@ W:    http://www.chelsio.co
  S:    Supported
  F:    drivers/net/ethernet/chelsio/cxgb3/
  
 +CXGB3 ISCSI DRIVER (CXGB3I)
 +M:      Karen Xie <kxie@chelsio.com>
 +L:      linux-scsi@vger.kernel.org
 +W:      http://www.chelsio.com
 +S:      Supported
 +F:      drivers/scsi/cxgbi/cxgb3i
 +
  CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
  M:    Steve Wise <swise@chelsio.com>
  L:    linux-rdma@vger.kernel.org
@@@ -2859,13 -2756,6 +2859,13 @@@ W:    http://www.chelsio.co
  S:    Supported
  F:    drivers/net/ethernet/chelsio/cxgb4/
  
 +CXGB4 ISCSI DRIVER (CXGB4I)
 +M:      Karen Xie <kxie@chelsio.com>
 +L:      linux-scsi@vger.kernel.org
 +W:      http://www.chelsio.com
 +S:      Supported
 +F:      drivers/scsi/cxgbi/cxgb4i
 +
  CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
  M:    Steve Wise <swise@chelsio.com>
  L:    linux-rdma@vger.kernel.org
@@@ -2956,10 -2846,11 +2956,10 @@@ F:   Documentation/networking/dmfe.tx
  F:    drivers/net/ethernet/dec/tulip/dmfe.c
  
  DC390/AM53C974 SCSI driver
 -M:    Kurt Garloff <garloff@suse.de>
 -W:    http://www.garloff.de/kurt/linux/dc390/
 -M:    Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 +M:    Hannes Reinecke <hare@suse.de>
 +L:    linux-scsi@vger.kernel.org
  S:    Maintained
 -F:    drivers/scsi/tmscsim.*
 +F:    drivers/scsi/am53c974.c
  
  DC395x SCSI driver
  M:    Oliver Neukum <oliver@neukum.org>
@@@ -3182,8 -3073,7 +3182,8 @@@ Q:      https://patchwork.kernel.org/project
  S:    Maintained
  F:    drivers/dma/
  F:    include/linux/dma*
 -T:    git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma)
 +F:    Documentation/dmaengine/
 +T:    git git://git.infradead.org/users/vkoul/slave-dma.git
  
  DME1737 HARDWARE MONITOR DRIVER
  M:    Juerg Haefliger <juergh@gmail.com>
@@@ -3300,13 -3190,6 +3300,13 @@@ F:    drivers/gpu/drm/exynos
  F:    include/drm/exynos*
  F:    include/uapi/drm/exynos*
  
 +DRM DRIVERS FOR FREESCALE IMX
 +M:    Philipp Zabel <p.zabel@pengutronix.de>
 +L:    dri-devel@lists.freedesktop.org
 +S:    Maintained
 +F:    drivers/gpu/drm/imx/
 +F:    Documentation/devicetree/bindings/drm/imx/
 +
  DRM DRIVERS FOR NVIDIA TEGRA
  M:    Thierry Reding <thierry.reding@gmail.com>
  M:    Terje Bergström <tbergstrom@nvidia.com>
@@@ -3503,7 -3386,7 +3503,7 @@@ F:      fs/ecryptfs
  EDAC-CORE
  M:    Doug Thompson <dougthompson@xmission.com>
  M:    Borislav Petkov <bp@alien8.de>
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-edac@vger.kernel.org
  W:    bluesmoke.sourceforge.net
  S:    Supported
@@@ -3552,7 -3435,7 +3552,7 @@@ S:      Maintaine
  F:    drivers/edac/e7xxx_edac.c
  
  EDAC-GHES
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-edac@vger.kernel.org
  W:    bluesmoke.sourceforge.net
  S:    Maintained
@@@ -3580,21 -3463,21 +3580,21 @@@ S:   Maintaine
  F:    drivers/edac/i5000_edac.c
  
  EDAC-I5400
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-edac@vger.kernel.org
  W:    bluesmoke.sourceforge.net
  S:    Maintained
  F:    drivers/edac/i5400_edac.c
  
  EDAC-I7300
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-edac@vger.kernel.org
  W:    bluesmoke.sourceforge.net
  S:    Maintained
  F:    drivers/edac/i7300_edac.c
  
  EDAC-I7CORE
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-edac@vger.kernel.org
  W:    bluesmoke.sourceforge.net
  S:    Maintained
@@@ -3637,7 -3520,7 +3637,7 @@@ S:      Maintaine
  F:    drivers/edac/r82600_edac.c
  
  EDAC-SBRIDGE
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-edac@vger.kernel.org
  W:    bluesmoke.sourceforge.net
  S:    Maintained
@@@ -3697,7 -3580,7 +3697,7 @@@ S:      Maintaine
  F:    drivers/net/ethernet/ibm/ehea/
  
  EM28XX VIDEO4LINUX DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
@@@ -4065,7 -3948,7 +4065,7 @@@ F:      drivers/tty/serial/ucc_uart.
  FREESCALE SOC SOUND DRIVERS
  M:    Timur Tabi <timur@tabi.org>
  M:    Nicolin Chen <nicoleotsuka@gmail.com>
 -M:    Xiubo Li <Li.Xiubo@freescale.com>
 +M:    Xiubo Li <Xiubo.Lee@gmail.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  L:    linuxppc-dev@lists.ozlabs.org
  S:    Maintained
@@@ -4255,12 -4138,6 +4255,12 @@@ L:    linux-media@vger.kernel.or
  S:    Maintained
  F:    drivers/media/usb/go7007/
  
 +GOODIX TOUCHSCREEN
 +M:    Bastien Nocera <hadess@hadess.net>
 +L:    linux-input@vger.kernel.org
 +S:    Maintained
 +F:    drivers/input/touchscreen/goodix.c
 +
  GPIO SUBSYSTEM
  M:    Linus Walleij <linus.walleij@linaro.org>
  M:    Alexandre Courbot <gnurou@gmail.com>
@@@ -4682,7 -4559,6 +4682,7 @@@ W:      https://i2c.wiki.kernel.org
  Q:    https://patchwork.ozlabs.org/project/linux-i2c/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/
  F:    Documentation/i2c/
  F:    drivers/i2c/
  F:    include/linux/i2c.h
@@@ -4821,21 -4697,8 +4821,21 @@@ S:    Maintaine
  F:    net/ieee802154/
  F:    net/mac802154/
  F:    drivers/net/ieee802154/
 +F:    include/linux/nl802154.h
 +F:    include/linux/ieee802154.h
 +F:    include/net/nl802154.h
 +F:    include/net/mac802154.h
 +F:    include/net/af_ieee802154.h
 +F:    include/net/cfg802154.h
 +F:    include/net/ieee802154_netdev.h
  F:    Documentation/networking/ieee802154.txt
  
 +IGORPLUG-USB IR RECEIVER
 +M:    Sean Young <sean@mess.org>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    drivers/media/rc/igorplugusb.c
 +
  IGUANAWORKS USB IR TRANSCEIVER
  M:    Sean Young <sean@mess.org>
  L:    linux-media@vger.kernel.org
@@@ -4851,7 -4714,6 +4851,7 @@@ L:      linux-iio@vger.kernel.or
  S:    Maintained
  F:    drivers/iio/
  F:    drivers/staging/iio/
 +F:    include/linux/iio/
  
  IKANOS/ADI EAGLE ADSL USB DRIVER
  M:    Matthieu Castet <castet.matthieu@free.fr>
@@@ -4893,11 -4755,6 +4893,11 @@@ L:    linux-security-module@vger.kernel.or
  S:    Supported
  F:    security/integrity/ima/
  
 +IMGTEC IR DECODER DRIVER
 +M:    James Hogan <james.hogan@imgtec.com>
 +S:    Maintained
 +F:    drivers/media/rc/img-ir/
 +
  IMS TWINTURBO FRAMEBUFFER DRIVER
  L:    linux-fbdev@vger.kernel.org
  S:    Orphan
@@@ -5312,7 -5169,7 +5312,7 @@@ F:      drivers/media/tuners/it913x
  
  IVTV VIDEO4LINUX DRIVER
  M:    Andy Walls <awalls@md.metrocast.net>
 -L:    ivtv-devel@ivtvdriver.org (moderated for non-subscribers)
 +L:    ivtv-devel@ivtvdriver.org (subscribers-only)
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
  W:    http://www.ivtvdriver.org
@@@ -5495,15 -5352,6 +5495,6 @@@ S:     Supporte
  F:    arch/powerpc/include/asm/kvm*
  F:    arch/powerpc/kvm/
  
- KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64)
- M:    Xiantao Zhang <xiantao.zhang@intel.com>
- L:    kvm-ia64@vger.kernel.org
- W:    http://kvm.qumranet.com
- S:    Supported
- F:    Documentation/ia64/kvm.txt
- F:    arch/ia64/include/asm/kvm*
- F:    arch/ia64/kvm/
  KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
  M:    Christian Borntraeger <borntraeger@de.ibm.com>
  M:    Cornelia Huck <cornelia.huck@de.ibm.com>
@@@ -6007,11 -5855,6 +5998,11 @@@ M:    Russell King <rmk+kernel@arm.linux.o
  S:    Maintained
  F:    drivers/gpu/drm/armada/
  
 +MARVELL 88E6352 DSA support
 +M:    Guenter Roeck <linux@roeck-us.net>
 +S:    Maintained
 +F:    drivers/net/dsa/mv88e6352.c
 +
  MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
  M:    Mirko Lindner <mlindner@marvell.com>
  M:    Stephen Hemminger <stephen@networkplumber.org>
@@@ -6093,7 -5936,7 +6084,7 @@@ S:      Maintaine
  F:    drivers/media/radio/radio-maxiradio*
  
  MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  P:    LinuxTV.org Project
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
@@@ -6122,13 -5965,10 +6113,13 @@@ W:   http://linuxtv.or
  S:    Odd Fixes
  F:    drivers/media/parport/pms*
  
 -MEGARAID SCSI DRIVERS
 -M:    Neela Syam Kolli <megaraidlinux@lsi.com>
 +MEGARAID SCSI/SAS DRIVERS
 +M:    Kashyap Desai <kashyap.desai@avagotech.com>
 +M:    Sumit Saxena <sumit.saxena@avagotech.com>
 +M:    Uday Lingala <uday.lingala@avagotech.com>
 +L:    megaraidlinux.pdl@avagotech.com
  L:    linux-scsi@vger.kernel.org
 -W:    http://megaraid.lsilogic.com
 +W:    http://www.lsi.com
  S:    Maintained
  F:    Documentation/scsi/megaraid.txt
  F:    drivers/scsi/megaraid.*
@@@ -6251,28 -6091,6 +6242,28 @@@ S:    Supporte
  F:    include/linux/mlx5/
  F:    drivers/infiniband/hw/mlx5/
  
 +MN88472 MEDIA DRIVER
 +M:    Antti Palosaari <crope@iki.fi>
 +L:    linux-media@vger.kernel.org
 +W:    http://linuxtv.org/
 +W:    http://palosaari.fi/linux/
 +Q:    http://patchwork.linuxtv.org/project/linux-media/list/
 +T:    git git://linuxtv.org/anttip/media_tree.git
 +S:    Maintained
 +F:    drivers/staging/media/mn88472/
 +F:    drivers/media/dvb-frontends/mn88472.h
 +
 +MN88473 MEDIA DRIVER
 +M:    Antti Palosaari <crope@iki.fi>
 +L:    linux-media@vger.kernel.org
 +W:    http://linuxtv.org/
 +W:    http://palosaari.fi/linux/
 +Q:    http://patchwork.linuxtv.org/project/linux-media/list/
 +T:    git git://linuxtv.org/anttip/media_tree.git
 +S:    Maintained
 +F:    drivers/staging/media/mn88473/
 +F:    drivers/media/dvb-frontends/mn88473.h
 +
  MODULE SUPPORT
  M:    Rusty Russell <rusty@rustcorp.com.au>
  S:    Maintained
@@@ -6461,6 -6279,7 +6452,6 @@@ F:      drivers/scsi/g_NCR5380.
  F:    drivers/scsi/g_NCR5380_mmio.c
  F:    drivers/scsi/mac_scsi.*
  F:    drivers/scsi/pas16.*
 -F:    drivers/scsi/sun3_NCR5380.c
  F:    drivers/scsi/sun3_scsi.*
  F:    drivers/scsi/sun3_scsi_vme.c
  F:    drivers/scsi/t128.*
@@@ -6716,13 -6535,6 +6707,13 @@@ S:    Maintaine
  F:    Documentation/scsi/NinjaSCSI.txt
  F:    drivers/scsi/nsp32*
  
 +NIOS2 ARCHITECTURE
 +M:    Ley Foon Tan <lftan@altera.com>
 +L:    nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 +T:    git git://git.rocketboards.org/linux-socfpga.git
 +S:    Maintained
 +F:    arch/nios2/
 +
  NTB DRIVER
  M:    Jon Mason <jdmason@kudzu.us>
  M:    Dave Jiang <dave.jiang@intel.com>
@@@ -6763,12 -6575,6 +6754,12 @@@ S:    Supporte
  F:    drivers/gpu/drm/i2c/tda998x_drv.c
  F:    include/drm/i2c/tda998x.h
  
 +NXP TFA9879 DRIVER
 +M:    Peter Rosin <peda@axentia.se>
 +L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    sound/soc/codecs/tfa9879*
 +
  OMAP SUPPORT
  M:    Tony Lindgren <tony@atomide.com>
  L:    linux-omap@vger.kernel.org
@@@ -6779,23 -6585,6 +6770,23 @@@ T:    git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    arch/arm/*omap*/
  F:    drivers/i2c/busses/i2c-omap.c
 +F:    drivers/irqchip/irq-omap-intc.c
 +F:    drivers/mfd/*omap*.c
 +F:    drivers/mfd/menelaus.c
 +F:    drivers/mfd/palmas.c
 +F:    drivers/mfd/tps65217.c
 +F:    drivers/mfd/tps65218.c
 +F:    drivers/mfd/tps65910.c
 +F:    drivers/mfd/twl-core.[ch]
 +F:    drivers/mfd/twl4030*.c
 +F:    drivers/mfd/twl6030*.c
 +F:    drivers/mfd/twl6040*.c
 +F:    drivers/regulator/palmas-regulator*.c
 +F:    drivers/regulator/pbias-regulator.c
 +F:    drivers/regulator/tps65217-regulator.c
 +F:    drivers/regulator/tps65218-regulator.c
 +F:    drivers/regulator/tps65910-regulator.c
 +F:    drivers/regulator/twl-regulator.c
  F:    include/linux/i2c-omap.h
  
  OMAP DEVICE TREE SUPPORT
@@@ -6806,9 -6595,6 +6797,9 @@@ L:      devicetree@vger.kernel.or
  S:    Maintained
  F:    arch/arm/boot/dts/*omap*
  F:    arch/arm/boot/dts/*am3*
 +F:    arch/arm/boot/dts/*am4*
 +F:    arch/arm/boot/dts/*am5*
 +F:    arch/arm/boot/dts/*dra7*
  
  OMAP CLOCK FRAMEWORK SUPPORT
  M:    Paul Walmsley <paul@pwsan.com>
@@@ -6838,14 -6624,6 +6829,14 @@@ L:    linux-omap@vger.kernel.or
  S:    Maintained
  F:    sound/soc/omap/
  
 +OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT
 +M:    Roger Quadros <rogerq@ti.com>
 +M:    Tony Lindgren <tony@atomide.com>
 +L:    linux-omap@vger.kernel.org
 +S:    Maintained
 +F:    drivers/memory/omap-gpmc.c
 +F:    arch/arm/mach-omap2/*gpmc*
 +
  OMAP FRAMEBUFFER SUPPORT
  M:    Tomi Valkeinen <tomi.valkeinen@ti.com>
  L:    linux-fbdev@vger.kernel.org
@@@ -7064,12 -6842,11 +7055,12 @@@ F:   drivers/scsi/osd
  F:    include/scsi/osd_*
  F:    fs/exofs/
  
 -OVERLAYFS FILESYSTEM
 +OVERLAY FILESYSTEM
  M:    Miklos Szeredi <miklos@szeredi.hu>
 -L:    linux-fsdevel@vger.kernel.org
 +L:    linux-unionfs@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
  S:    Supported
 -F:    fs/overlayfs/*
 +F:    fs/overlayfs/
  F:    Documentation/filesystems/overlayfs.txt
  
  P54 WIRELESS DRIVER
@@@ -7223,16 -7000,6 +7214,16 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/pci/xgene-pci.txt
  F:    drivers/pci/host/pci-xgene.c
  
 +PCI DRIVER FOR FREESCALE LAYERSCAPE
 +M:    Minghuan Lian <minghuan.Lian@freescale.com>
 +M:    Mingkai Hu <mingkai.hu@freescale.com>
 +M:    Roy Zang <tie-fei.zang@freescale.com>
 +L:    linuxppc-dev@lists.ozlabs.org
 +L:    linux-pci@vger.kernel.org
 +L:    linux-arm-kernel@lists.infradead.org
 +S:    Maintained
 +F:    drivers/pci/host/*layerscape*
 +
  PCI DRIVER FOR IMX6
  M:    Richard Zhu <r65037@freescale.com>
  M:    Lucas Stach <l.stach@pengutronix.de>
@@@ -7403,7 -7170,6 +7394,7 @@@ F:      drivers/crypto/picoxcell
  
  PIN CONTROL SUBSYSTEM
  M:    Linus Walleij <linus.walleij@linaro.org>
 +L:    linux-gpio@vger.kernel.org
  S:    Maintained
  F:    drivers/pinctrl/
  F:    include/linux/pinctrl/
@@@ -7412,13 -7178,7 +7403,13 @@@ PIN CONTROLLER - ATMEL AT9
  M:    Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 -F:    drivers/pinctrl/pinctrl-at91.c
 +F:    drivers/pinctrl/pinctrl-at91.*
 +
 +PIN CONTROLLER - INTEL
 +M:    Mika Westerberg <mika.westerberg@linux.intel.com>
 +M:    Heikki Krogerus <heikki.krogerus@linux.intel.com>
 +S:    Maintained
 +F:    drivers/pinctrl/intel/
  
  PIN CONTROLLER - RENESAS
  M:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
@@@ -8037,13 -7797,6 +8028,13 @@@ F:    drivers/hid/hid-roccat
  F:    include/linux/hid-roccat*
  F:    Documentation/ABI/*/sysfs-driver-hid-roccat*
  
 +ROCKER DRIVER
 +M:    Jiri Pirko <jiri@resnulli.us>
 +M:    Scott Feldman <sfeldma@gmail.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/ethernet/rocker/
 +
  ROCKETPORT DRIVER
  P:    Comtrol Corp.
  W:    http://www.comtrol.com
@@@ -8051,12 -7804,6 +8042,12 @@@ S:    Maintaine
  F:    Documentation/serial/rocket.txt
  F:    drivers/tty/rocket*
  
 +ROCKETPORT EXPRESS/INFINITY DRIVER
 +M:    Kevin Cernekee <cernekee@gmail.com>
 +L:    linux-serial@vger.kernel.org
 +S:    Odd Fixes
 +F:    drivers/tty/serial/rp2.*
 +
  ROSE NETWORK LAYER
  M:    Ralf Baechle <ralf@linux-mips.org>
  L:    linux-hams@vger.kernel.org
@@@ -8097,10 -7844,11 +8088,10 @@@ S:   Maintaine
  F:    drivers/media/dvb-frontends/rtl2832_sdr*
  
  RTL8180 WIRELESS DRIVER
 -M:    "John W. Linville" <linville@tuxdriver.com>
  L:    linux-wireless@vger.kernel.org
  W:    http://wireless.kernel.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
 -S:    Maintained
 +S:    Orphan
  F:    drivers/net/wireless/rtl818x/rtl8180/
  
  RTL8187 WIRELESS DRIVER
@@@ -8217,7 -7965,7 +8208,7 @@@ S:      Odd Fixe
  F:    drivers/media/i2c/saa6588*
  
  SAA7134 VIDEO4LINUX DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
@@@ -8675,7 -8423,7 +8666,7 @@@ S:      Maintaine
  F:    drivers/media/radio/si4713/radio-usb-si4713.c
  
  SIANO DVB DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
@@@ -8685,14 -8433,6 +8676,14 @@@ F:    drivers/media/usb/siano
  F:    drivers/media/usb/siano/
  F:    drivers/media/mmc/siano/
  
 +SIMPLEFB FB DRIVER
 +M:    Hans de Goede <hdegoede@redhat.com>
 +L:    linux-fbdev@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/video/simple-framebuffer.txt
 +F:    drivers/video/fbdev/simplefb.c
 +F:    include/linux/platform_data/simplefb.h
 +
  SH_VEU V4L2 MEM2MEM DRIVER
  L:    linux-media@vger.kernel.org
  S:    Orphan
@@@ -8734,6 -8474,7 +8725,6 @@@ F:      arch/arm/mach-s3c24xx/bast-irq.
  TI DAVINCI MACHINE SUPPORT
  M:    Sekhar Nori <nsekhar@ti.com>
  M:    Kevin Hilman <khilman@deeprootsystems.com>
 -L:    davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
  T:    git git://gitorious.org/linux-davinci/linux-davinci.git
  Q:    http://patchwork.kernel.org/project/linux-davinci/list/
  S:    Supported
@@@ -8743,6 -8484,7 +8734,6 @@@ F:      drivers/i2c/busses/i2c-davinci.
  TI DAVINCI SERIES MEDIA DRIVER
  M:    Lad, Prabhakar <prabhakar.csengg@gmail.com>
  L:    linux-media@vger.kernel.org
 -L:    davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
  W:    http://linuxtv.org/
  Q:    http://patchwork.linuxtv.org/project/linux-media/list/
  T:    git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
@@@ -8894,9 -8636,7 +8885,9 @@@ S:      Maintaine
  F:    drivers/leds/leds-net48xx.c
  
  SOFTLOGIC 6x10 MPEG CODEC
 -M:    Ismael Luceno <ismael.luceno@corp.bluecherry.net>
 +M:    Bluecherry Maintainers <maintainers@bluecherrydvr.com>
 +M:    Andrey Utkin <andrey.utkin@corp.bluecherry.net>
 +M:    Andrey Utkin <andrey.krieger.utkin@gmail.com>
  L:    linux-media@vger.kernel.org
  S:    Supported
  F:    drivers/media/pci/solo6x10/
@@@ -9253,13 -8993,6 +9244,13 @@@ F:    lib/swiotlb.
  F:    arch/*/kernel/pci-swiotlb.c
  F:    include/linux/swiotlb.h
  
 +SWITCHDEV
 +M:    Jiri Pirko <jiri@resnulli.us>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    net/switchdev/
 +F:    include/net/switchdev.h
 +
  SYNOPSYS ARC ARCHITECTURE
  M:    Vineet Gupta <vgupta@synopsys.com>
  S:    Supported
@@@ -9377,7 -9110,7 +9368,7 @@@ S:      Maintaine
  F:    drivers/media/i2c/tda9840*
  
  TEA5761 TUNER DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
@@@ -9385,7 -9118,7 +9376,7 @@@ S:      Odd fixe
  F:    drivers/media/tuners/tea5761.*
  
  TEA5767 TUNER DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
@@@ -9516,7 -9249,6 +9507,7 @@@ Q:      https://patchwork.kernel.org/project
  S:    Supported
  F:    drivers/thermal/
  F:    include/linux/thermal.h
 +F:    include/uapi/linux/thermal.h
  F:    include/linux/cpu_cooling.h
  F:    Documentation/devicetree/bindings/thermal/
  
@@@ -9621,7 -9353,7 +9612,7 @@@ F:      include/uapi/linux/tipc*.
  F:    net/tipc/
  
  TILE ARCHITECTURE
 -M:    Chris Metcalf <cmetcalf@tilera.com>
 +M:    Chris Metcalf <cmetcalf@ezchip.com>
  W:    http://www.tilera.com/scm/
  S:    Supported
  F:    arch/tile/
@@@ -9698,7 -9430,7 +9689,7 @@@ F:      include/linux/shmem_fs.
  F:    mm/shmem.c
  
  TM6000 VIDEO4LINUX DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
@@@ -9962,6 -9694,11 +9953,6 @@@ S:     Maintaine
  F:    Documentation/hid/hiddev.txt
  F:    drivers/hid/usbhid/
  
 -USB/IP DRIVERS
 -L:    linux-usb@vger.kernel.org
 -S:    Orphan
 -F:    drivers/staging/usbip/
 -
  USB ISP116X DRIVER
  M:    Olav Kongas <ok@artecdesign.ee>
  L:    linux-usb@vger.kernel.org
@@@ -10518,15 -10255,8 +10509,15 @@@ L: linux-edac@vger.kernel.or
  S:    Maintained
  F:    arch/x86/kernel/cpu/mcheck/*
  
 +X86 VDSO
 +M:    Andy Lutomirski <luto@amacapital.net>
 +L:    linux-kernel@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 +S:    Maintained
 +F:    arch/x86/vdso/
 +
  XC2028/3028 TUNER DRIVER
 -M:    Mauro Carvalho Chehab <m.chehab@samsung.com>
 +M:    Mauro Carvalho Chehab <mchehab@osg.samsung.com>
  L:    linux-media@vger.kernel.org
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
diff --combined arch/arm/kvm/mmu.c
@@@ -197,8 -197,7 +197,8 @@@ static void unmap_range(struct kvm *kvm
        pgd = pgdp + pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
 -              unmap_puds(kvm, pgd, addr, next);
 +              if (!pgd_none(*pgd))
 +                      unmap_puds(kvm, pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
  }
  
@@@ -612,6 -611,71 +612,71 @@@ static void unmap_stage2_range(struct k
        unmap_range(kvm, kvm->arch.pgd, start, size);
  }
  
+ static void stage2_unmap_memslot(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot)
+ {
+       hva_t hva = memslot->userspace_addr;
+       phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+       phys_addr_t size = PAGE_SIZE * memslot->npages;
+       hva_t reg_end = hva + size;
+       /*
+        * A memory region could potentially cover multiple VMAs, and any holes
+        * between them, so iterate over all of them to find out if we should
+        * unmap any of them.
+        *
+        *     +--------------------------------------------+
+        * +---------------+----------------+   +----------------+
+        * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+        * +---------------+----------------+   +----------------+
+        *     |               memory region                |
+        *     +--------------------------------------------+
+        */
+       do {
+               struct vm_area_struct *vma = find_vma(current->mm, hva);
+               hva_t vm_start, vm_end;
+               if (!vma || vma->vm_start >= reg_end)
+                       break;
+               /*
+                * Take the intersection of this VMA with the memory region
+                */
+               vm_start = max(hva, vma->vm_start);
+               vm_end = min(reg_end, vma->vm_end);
+               if (!(vma->vm_flags & VM_PFNMAP)) {
+                       gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
+                       unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+               }
+               hva = vm_end;
+       } while (hva < reg_end);
+ }
+ /**
+  * stage2_unmap_vm - Unmap Stage-2 RAM mappings
+  * @kvm: The struct kvm pointer
+  *
+  * Go through the memregions and unmap any regular RAM
+  * backing memory already mapped to the VM.
+  */
+ void stage2_unmap_vm(struct kvm *kvm)
+ {
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+       int idx;
+       idx = srcu_read_lock(&kvm->srcu);
+       spin_lock(&kvm->mmu_lock);
+       slots = kvm_memslots(kvm);
+       kvm_for_each_memslot(memslot, slots)
+               stage2_unmap_memslot(kvm, memslot);
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
+ }
  /**
   * kvm_free_stage2_pgd - free all stage-2 tables
   * @kvm:      The KVM struct pointer for the VM.
@@@ -853,6 -917,7 +918,7 @@@ static int user_mem_abort(struct kvm_vc
        struct vm_area_struct *vma;
        pfn_t pfn;
        pgprot_t mem_type = PAGE_S2;
+       bool fault_ipa_uncached;
  
        write_fault = kvm_is_write_fault(vcpu);
        if (fault_status == FSC_PERM && !write_fault) {
        if (!hugetlb && !force_pte)
                hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
  
+       fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
        if (hugetlb) {
                pmd_t new_pmd = pfn_pmd(pfn, mem_type);
                new_pmd = pmd_mkhuge(new_pmd);
                        kvm_set_s2pmd_writable(&new_pmd);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
+               coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
+                                         fault_ipa_uncached);
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        } else {
                pte_t new_pte = pfn_pte(pfn, mem_type);
                        kvm_set_s2pte_writable(&new_pte);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+               coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
+                                         fault_ipa_uncached);
                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
                        pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
        }
@@@ -1294,11 -1363,12 +1364,12 @@@ int kvm_arch_prepare_memory_region(stru
                hva = vm_end;
        } while (hva < reg_end);
  
-       if (ret) {
-               spin_lock(&kvm->mmu_lock);
+       spin_lock(&kvm->mmu_lock);
+       if (ret)
                unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
-               spin_unlock(&kvm->mmu_lock);
-       }
+       else
+               stage2_flush_memslot(kvm, memslot);
+       spin_unlock(&kvm->mmu_lock);
        return ret;
  }
  
@@@ -1310,6 -1380,15 +1381,15 @@@ void kvm_arch_free_memslot(struct kvm *
  int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
  {
+       /*
+        * Readonly memslots are not incoherent with the caches by definition,
+        * but in practice, they are used mostly to emulate ROMs or NOR flashes
+        * that the guest may consider devices and hence map as uncached.
+        * To prevent incoherency issues in these cases, tag all readonly
+        * regions as incoherent.
+        */
+       if (slot->flags & KVM_MEM_READONLY)
+               slot->flags |= KVM_MEMSLOT_INCOHERENT;
        return 0;
  }
  
diff --combined arch/ia64/Kconfig
@@@ -11,6 -11,7 +11,6 @@@ config IA6
        select PCI if (!IA64_HP_SIM)
        select ACPI if (!IA64_HP_SIM)
        select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
 -      select PM if (!IA64_HP_SIM)
        select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_IDE
        select HAVE_OPROFILE
@@@ -20,7 -21,6 +20,6 @@@
        select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
        select HAVE_FUNCTION_TRACER
        select HAVE_DMA_ATTRS
-       select HAVE_KVM
        select TTY
        select HAVE_ARCH_TRACEHOOK
        select HAVE_DMA_API_DEBUG
@@@ -232,7 -232,6 +231,7 @@@ config IA64_SGI_U
  config IA64_HP_SIM
        bool "Ski-simulator"
        select SWIOTLB
 +      depends on !PM_RUNTIME
  
  endchoice
  
@@@ -640,8 -639,6 +639,6 @@@ source "security/Kconfig
  
  source "crypto/Kconfig"
  
- source "arch/ia64/kvm/Kconfig"
  source "lib/Kconfig"
  
  config IOMMU_HELPER
@@@ -489,7 -489,6 +489,6 @@@ int main(void
        DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
        DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
        DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
-       DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
        DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
        DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
        DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
        DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
        DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
        DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+       DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst));
  #endif
  #ifdef CONFIG_PPC_BOOK3S
        DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
                                        arch.timing_last_enter.tv32.tbl));
  #endif
  
 -#ifdef CONFIG_PPC_POWERNV
 -      DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3));
 -      DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0));
 -      DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1));
 -      DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt));
 -#endif
 -
        return 0;
  }
  #include <linux/export.h>
  #include <linux/sched.h>
  #include <linux/spinlock.h>
 -#include <linux/bootmem.h>
  #include <linux/init.h>
  #include <linux/memblock.h>
  #include <linux/sizes.h>
  #include <linux/cma.h>
+ #include <linux/bitops.h>
  
  #include <asm/cputable.h>
  #include <asm/kvm_ppc.h>
   * By default we reserve 5% of memory for hash pagetable allocation.
   */
  static unsigned long kvm_cma_resv_ratio = 5;
- /*
-  * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
-  * Each RMA has to be physically contiguous and of a size that the
-  * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
-  * and other larger sizes.  Since we are unlikely to be allocate that
-  * much physically contiguous memory after the system is up and running,
-  * we preallocate a set of RMAs in early boot using CMA.
-  * should be power of 2.
-  */
- unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;        /* 128MB */
- EXPORT_SYMBOL_GPL(kvm_rma_pages);
  
  static struct cma *kvm_cma;
  
- /* Work out RMLS (real mode limit selector) field value for a given RMA size.
-    Assumes POWER7 or PPC970. */
- static inline int lpcr_rmls(unsigned long rma_size)
- {
-       switch (rma_size) {
-       case 32ul << 20:        /* 32 MB */
-               if (cpu_has_feature(CPU_FTR_ARCH_206))
-                       return 8;       /* only supported on POWER7 */
-               return -1;
-       case 64ul << 20:        /* 64 MB */
-               return 3;
-       case 128ul << 20:       /* 128 MB */
-               return 7;
-       case 256ul << 20:       /* 256 MB */
-               return 4;
-       case 1ul << 30:         /* 1 GB */
-               return 2;
-       case 16ul << 30:        /* 16 GB */
-               return 1;
-       case 256ul << 30:       /* 256 GB */
-               return 0;
-       default:
-               return -1;
-       }
- }
- static int __init early_parse_rma_size(char *p)
- {
-       unsigned long kvm_rma_size;
-       pr_debug("%s(%s)\n", __func__, p);
-       if (!p)
-               return -EINVAL;
-       kvm_rma_size = memparse(p, &p);
-       /*
-        * Check that the requested size is one supported in hardware
-        */
-       if (lpcr_rmls(kvm_rma_size) < 0) {
-               pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
-               return -EINVAL;
-       }
-       kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
-       return 0;
- }
- early_param("kvm_rma_size", early_parse_rma_size);
- struct kvm_rma_info *kvm_alloc_rma()
- {
-       struct page *page;
-       struct kvm_rma_info *ri;
-       ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
-       if (!ri)
-               return NULL;
-       page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
-       if (!page)
-               goto err_out;
-       atomic_set(&ri->use_count, 1);
-       ri->base_pfn = page_to_pfn(page);
-       return ri;
- err_out:
-       kfree(ri);
-       return NULL;
- }
- EXPORT_SYMBOL_GPL(kvm_alloc_rma);
- void kvm_release_rma(struct kvm_rma_info *ri)
- {
-       if (atomic_dec_and_test(&ri->use_count)) {
-               cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
-               kfree(ri);
-       }
- }
- EXPORT_SYMBOL_GPL(kvm_release_rma);
  static int __init early_parse_kvm_cma_resv(char *p)
  {
        pr_debug("%s(%s)\n", __func__, p);
@@@ -132,14 -48,9 +47,9 @@@ early_param("kvm_cma_resv_ratio", early
  
  struct page *kvm_alloc_hpt(unsigned long nr_pages)
  {
-       unsigned long align_pages = HPT_ALIGN_PAGES;
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
  
-       /* Old CPUs require HPT aligned on a multiple of its size */
-       if (!cpu_has_feature(CPU_FTR_ARCH_206))
-               align_pages = nr_pages;
-       return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
+       return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
  }
  EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
  
@@@ -153,7 -64,7 +63,7 @@@ EXPORT_SYMBOL_GPL(kvm_release_hpt)
   * kvm_cma_reserve() - reserve area for kvm hash pagetable
   *
   * This function reserves memory from early allocator. It should be
 - * called by arch specific code once the early allocator (memblock or bootmem)
 + * called by arch specific code once the memblock allocator
   * has been activated and all other subsystems have already allocated/reserved
   * memory.
   */
@@@ -180,21 -91,43 +90,43 @@@ void __init kvm_cma_reserve(void
        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)selected_size / SZ_1M);
-               /*
-                * Old CPUs require HPT aligned on a multiple of its size. So for them
-                * make the alignment as max size we could request.
-                */
-               if (!cpu_has_feature(CPU_FTR_ARCH_206))
-                       align_size = __rounddown_pow_of_two(selected_size);
-               else
-                       align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
-               align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
+               align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
                cma_declare_contiguous(0, selected_size, 0, align_size,
                        KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
        }
  }
  
+ /*
+  * Real-mode H_CONFER implementation.
+  * We check if we are the only vcpu out of this virtual core
+  * still running in the guest and not ceded.  If so, we pop up
+  * to the virtual-mode implementation; if not, just return to
+  * the guest.
+  */
+ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+                           unsigned int yield_count)
+ {
+       struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       int threads_running;
+       int threads_ceded;
+       int threads_conferring;
+       u64 stop = get_tb() + 10 * tb_ticks_per_usec;
+       int rv = H_SUCCESS; /* => don't yield */
+       set_bit(vcpu->arch.ptid, &vc->conferring_threads);
+       while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
+               threads_running = VCORE_ENTRY_COUNT(vc);
+               threads_ceded = hweight32(vc->napping_threads);
+               threads_conferring = hweight32(vc->conferring_threads);
+               if (threads_ceded + threads_conferring >= threads_running) {
+                       rv = H_TOO_HARD; /* => do yield */
+                       break;
+               }
+       }
+       clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
+       return rv;
+ }
  /*
   * When running HV mode KVM we need to block certain operations while KVM VMs
   * exist in the system. We use a counter of VMs to track this.
@@@ -94,20 -94,12 +94,12 @@@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG
        lwz     r6, HSTATE_PMC + 12(r13)
        lwz     r8, HSTATE_PMC + 16(r13)
        lwz     r9, HSTATE_PMC + 20(r13)
- BEGIN_FTR_SECTION
-       lwz     r10, HSTATE_PMC + 24(r13)
-       lwz     r11, HSTATE_PMC + 28(r13)
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        mtspr   SPRN_PMC1, r3
        mtspr   SPRN_PMC2, r4
        mtspr   SPRN_PMC3, r5
        mtspr   SPRN_PMC4, r6
        mtspr   SPRN_PMC5, r8
        mtspr   SPRN_PMC6, r9
- BEGIN_FTR_SECTION
-       mtspr   SPRN_PMC7, r10
-       mtspr   SPRN_PMC8, r11
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        ld      r3, HSTATE_MMCR(r13)
        ld      r4, HSTATE_MMCR + 8(r13)
        ld      r5, HSTATE_MMCR + 16(r13)
@@@ -153,11 -145,9 +145,9 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S
  
        cmpwi   cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
- BEGIN_FTR_SECTION
        beq     11f
        cmpwi   cr2, r12, BOOK3S_INTERRUPT_HMI
        beq     cr2, 14f                        /* HMI check */
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  
        /* RFI into the highmem handler, or branch to interrupt handler */
        mfmsr   r6
        mtmsrd  r6, 1                   /* Clear RI in MSR */
        mtsrr0  r8
        mtsrr1  r7
-       beqa    0x500                   /* external interrupt (PPC970) */
        beq     cr1, 13f                /* machine check */
        RFI
  
@@@ -201,6 -190,8 +190,6 @@@ kvmppc_primary_no_guest
        bge     kvm_novcpu_exit /* another thread already exiting */
        li      r3, NAPPING_NOVCPU
        stb     r3, HSTATE_NAPPING(r13)
 -      li      r3, 1
 -      stb     r3, HSTATE_HWTHREAD_REQ(r13)
  
        b       kvm_do_nap
  
@@@ -291,8 -282,6 +280,8 @@@ kvm_start_guest
        /* if we have no vcpu to run, go back to sleep */
        beq     kvm_no_guest
  
 +kvm_secondary_got_guest:
 +
        /* Set HSTATE_DSCR(r13) to something sensible */
        ld      r6, PACA_DSCR(r13)
        std     r6, HSTATE_DSCR(r13)
        stwcx.  r3, 0, r4
        bne     51b
  
 +/*
 + * At this point we have finished executing in the guest.
 + * We need to wait for hwthread_req to become zero, since
 + * we may not turn on the MMU while hwthread_req is non-zero.
 + * While waiting we also need to check if we get given a vcpu to run.
 + */
  kvm_no_guest:
 -      li      r0, KVM_HWTHREAD_IN_NAP
 +      lbz     r3, HSTATE_HWTHREAD_REQ(r13)
 +      cmpwi   r3, 0
 +      bne     53f
 +      HMT_MEDIUM
 +      li      r0, KVM_HWTHREAD_IN_KERNEL
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
 -kvm_do_nap:
 -      /* Clear the runlatch bit before napping */
 -      mfspr   r2, SPRN_CTRLF
 -      clrrdi  r2, r2, 1
 -      mtspr   SPRN_CTRLT, r2
 -
 +      /* need to recheck hwthread_req after a barrier, to avoid race */
 +      sync
 +      lbz     r3, HSTATE_HWTHREAD_REQ(r13)
 +      cmpwi   r3, 0
 +      bne     54f
 +/*
 + * We jump to power7_wakeup_loss, which will return to the caller
 + * of power7_nap in the powernv cpu offline loop.  The value we
 + * put in r3 becomes the return value for power7_nap.
 + */
        li      r3, LPCR_PECE0
        mfspr   r4, SPRN_LPCR
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
        mtspr   SPRN_LPCR, r4
 -      isync
 -      std     r0, HSTATE_SCRATCH0(r13)
 -      ptesync
 -      ld      r0, HSTATE_SCRATCH0(r13)
 -1:    cmpd    r0, r0
 -      bne     1b
 -      nap
 -      b       .
 +      li      r3, 0
 +      b       power7_wakeup_loss
 +
 +53:   HMT_LOW
 +      ld      r4, HSTATE_KVM_VCPU(r13)
 +      cmpdi   r4, 0
 +      beq     kvm_no_guest
 +      HMT_MEDIUM
 +      b       kvm_secondary_got_guest
 +
 +54:   li      r0, KVM_HWTHREAD_IN_KVM
 +      stb     r0, HSTATE_HWTHREAD_STATE(r13)
 +      b       kvm_no_guest
  
  /******************************************************************************
   *                                                                            *
@@@ -393,11 -363,8 +382,8 @@@ kvmppc_hv_entry
        slbia
        ptesync
  
- BEGIN_FTR_SECTION
-       b       30f
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        /*
-        * POWER7 host -> guest partition switch code.
+        * POWER7/POWER8 host -> guest partition switch code.
         * We don't have to lock against concurrent tlbies,
         * but we do have to coordinate across hardware threads.
         */
@@@ -505,97 -472,7 +491,7 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S
        cmpwi   r3,512          /* 1 microsecond */
        li      r12,BOOK3S_INTERRUPT_HV_DECREMENTER
        blt     hdec_soon
-       b       31f
-       /*
-        * PPC970 host -> guest partition switch code.
-        * We have to lock against concurrent tlbies,
-        * using native_tlbie_lock to lock against host tlbies
-        * and kvm->arch.tlbie_lock to lock against guest tlbies.
-        * We also have to invalidate the TLB since its
-        * entries aren't tagged with the LPID.
-        */
- 30:   ld      r5,HSTATE_KVM_VCORE(r13)
-       ld      r9,VCORE_KVM(r5)        /* pointer to struct kvm */
-       /* first take native_tlbie_lock */
-       .section ".toc","aw"
- toc_tlbie_lock:
-       .tc     native_tlbie_lock[TC],native_tlbie_lock
-       .previous
-       ld      r3,toc_tlbie_lock@toc(r2)
- #ifdef __BIG_ENDIAN__
-       lwz     r8,PACA_LOCK_TOKEN(r13)
- #else
-       lwz     r8,PACAPACAINDEX(r13)
- #endif
- 24:   lwarx   r0,0,r3
-       cmpwi   r0,0
-       bne     24b
-       stwcx.  r8,0,r3
-       bne     24b
-       isync
-       ld      r5,HSTATE_KVM_VCORE(r13)
-       ld      r7,VCORE_LPCR(r5)       /* use vcore->lpcr to store HID4 */
-       li      r0,0x18f
-       rotldi  r0,r0,HID4_LPID5_SH     /* all lpid bits in HID4 = 1 */
-       or      r0,r7,r0
-       ptesync
-       sync
-       mtspr   SPRN_HID4,r0            /* switch to reserved LPID */
-       isync
-       li      r0,0
-       stw     r0,0(r3)                /* drop native_tlbie_lock */
-       /* invalidate the whole TLB */
-       li      r0,256
-       mtctr   r0
-       li      r6,0
- 25:   tlbiel  r6
-       addi    r6,r6,0x1000
-       bdnz    25b
-       ptesync
  
-       /* Take the guest's tlbie_lock */
-       addi    r3,r9,KVM_TLBIE_LOCK
- 24:   lwarx   r0,0,r3
-       cmpwi   r0,0
-       bne     24b
-       stwcx.  r8,0,r3
-       bne     24b
-       isync
-       ld      r6,KVM_SDR1(r9)
-       mtspr   SPRN_SDR1,r6            /* switch to partition page table */
-       /* Set up HID4 with the guest's LPID etc. */
-       sync
-       mtspr   SPRN_HID4,r7
-       isync
-       /* drop the guest's tlbie_lock */
-       li      r0,0
-       stw     r0,0(r3)
-       /* Check if HDEC expires soon */
-       mfspr   r3,SPRN_HDEC
-       cmpwi   r3,10
-       li      r12,BOOK3S_INTERRUPT_HV_DECREMENTER
-       blt     hdec_soon
-       /* Enable HDEC interrupts */
-       mfspr   r0,SPRN_HID0
-       li      r3,1
-       rldimi  r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
-       sync
-       mtspr   SPRN_HID0,r0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
- 31:
        /* Do we have a guest vcpu to run? */
        cmpdi   r4, 0
        beq     kvmppc_primary_no_guest
@@@ -625,7 -502,6 +521,6 @@@ kvmppc_got_guest
        stb     r6, VCPU_VPA_DIRTY(r4)
  25:
  
- BEGIN_FTR_SECTION
        /* Save purr/spurr */
        mfspr   r5,SPRN_PURR
        mfspr   r6,SPRN_SPURR
        ld      r8,VCPU_SPURR(r4)
        mtspr   SPRN_PURR,r7
        mtspr   SPRN_SPURR,r8
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  
  BEGIN_FTR_SECTION
        /* Set partition DABR */
        ld      r6,VCPU_DABR(r4)
        mtspr   SPRN_DABRX,r5
        mtspr   SPRN_DABR,r6
-  BEGIN_FTR_SECTION_NESTED(89)
        isync
-  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
  END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  
  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@@ -777,20 -650,12 +669,12 @@@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG
        lwz     r7, VCPU_PMC + 12(r4)
        lwz     r8, VCPU_PMC + 16(r4)
        lwz     r9, VCPU_PMC + 20(r4)
- BEGIN_FTR_SECTION
-       lwz     r10, VCPU_PMC + 24(r4)
-       lwz     r11, VCPU_PMC + 28(r4)
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        mtspr   SPRN_PMC1, r3
        mtspr   SPRN_PMC2, r5
        mtspr   SPRN_PMC3, r6
        mtspr   SPRN_PMC4, r7
        mtspr   SPRN_PMC5, r8
        mtspr   SPRN_PMC6, r9
- BEGIN_FTR_SECTION
-       mtspr   SPRN_PMC7, r10
-       mtspr   SPRN_PMC8, r11
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        ld      r3, VCPU_MMCR(r4)
        ld      r5, VCPU_MMCR + 8(r4)
        ld      r6, VCPU_MMCR + 16(r4)
@@@ -837,14 -702,12 +721,12 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S
        ld      r30, VCPU_GPR(R30)(r4)
        ld      r31, VCPU_GPR(R31)(r4)
  
- BEGIN_FTR_SECTION
        /* Switch DSCR to guest value */
        ld      r5, VCPU_DSCR(r4)
        mtspr   SPRN_DSCR, r5
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  
  BEGIN_FTR_SECTION
-       /* Skip next section on POWER7 or PPC970 */
+       /* Skip next section on POWER7 */
        b       8f
  END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
        mtspr   SPRN_DAR, r5
        mtspr   SPRN_DSISR, r6
  
- BEGIN_FTR_SECTION
        /* Restore AMR and UAMOR, set AMOR to all 1s */
        ld      r5,VCPU_AMR(r4)
        ld      r6,VCPU_UAMOR(r4)
        mtspr   SPRN_AMR,r5
        mtspr   SPRN_UAMOR,r6
        mtspr   SPRN_AMOR,r7
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  
        /* Restore state of CTRL run bit; assume 1 on entry */
        lwz     r5,VCPU_CTRL(r4)
@@@ -963,13 -824,11 +843,11 @@@ deliver_guest_interrupt
        rldicl  r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
        cmpdi   cr1, r0, 0
        andi.   r8, r11, MSR_EE
- BEGIN_FTR_SECTION
        mfspr   r8, SPRN_LPCR
        /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
        rldimi  r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
        mtspr   SPRN_LPCR, r8
        isync
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        beq     5f
        li      r0, BOOK3S_INTERRUPT_EXTERNAL
        bne     cr1, 12f
@@@ -1124,15 -983,13 +1002,13 @@@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR
  
        stw     r12,VCPU_TRAP(r9)
  
-       /* Save HEIR (HV emulation assist reg) in last_inst
+       /* Save HEIR (HV emulation assist reg) in emul_inst
           if this is an HEI (HV emulation interrupt, e40) */
        li      r3,KVM_INST_FETCH_FAILED
- BEGIN_FTR_SECTION
        cmpwi   r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
        bne     11f
        mfspr   r3,SPRN_HEIR
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
- 11:   stw     r3,VCPU_LAST_INST(r9)
+ 11:   stw     r3,VCPU_HEIR(r9)
  
        /* these are volatile across C function calls */
        mfctr   r3
        std     r3, VCPU_CTR(r9)
        stw     r4, VCPU_XER(r9)
  
- BEGIN_FTR_SECTION
        /* If this is a page table miss then see if it's theirs or ours */
        cmpwi   r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
        beq     kvmppc_hdsi
        cmpwi   r12, BOOK3S_INTERRUPT_H_INST_STORAGE
        beq     kvmppc_hisi
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  
        /* See if this is a leftover HDEC interrupt */
        cmpwi   r12,BOOK3S_INTERRUPT_HV_DECREMENTER
        cmpwi   r12,BOOK3S_INTERRUPT_SYSCALL
        beq     hcall_try_real_mode
  
-       /* Only handle external interrupts here on arch 206 and later */
- BEGIN_FTR_SECTION
-       b       ext_interrupt_to_host
- END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
        /* External interrupt ? */
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        bne+    ext_interrupt_to_host
@@@ -1193,11 -1043,9 +1062,9 @@@ guest_exit_cont:               /* r9 = vcpu, r12 = t
        mfdsisr r7
        std     r6, VCPU_DAR(r9)
        stw     r7, VCPU_DSISR(r9)
- BEGIN_FTR_SECTION
        /* don't overwrite fault_dar/fault_dsisr if HDSI */
        cmpwi   r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
        beq     6f
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        std     r6, VCPU_FAULT_DAR(r9)
        stw     r7, VCPU_FAULT_DSISR(r9)
  
@@@ -1236,7 -1084,6 +1103,6 @@@ mc_cont
        /*
         * Save the guest PURR/SPURR
         */
- BEGIN_FTR_SECTION
        mfspr   r5,SPRN_PURR
        mfspr   r6,SPRN_SPURR
        ld      r7,VCPU_PURR(r9)
        add     r4,r4,r6
        mtspr   SPRN_PURR,r3
        mtspr   SPRN_SPURR,r4
- END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
  
        /* Save DEC */
        mfspr   r5,SPRN_DEC
@@@ -1306,22 -1152,18 +1171,18 @@@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S
  8:
  
        /* Save and reset AMR and UAMOR before turning on the MMU */
- BEGIN_FTR_SECTION
        mfspr   r5,SPRN_AMR
        mfspr   r6,SPRN_UAMOR
        std     r5,VCPU_AMR(r9)
        std     r6,VCPU_UAMOR(r9)
        li      r6,0
        mtspr   SPRN_AMR,r6
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  
        /* Switch DSCR back to host value */
- BEGIN_FTR_SECTION
        mfspr   r8, SPRN_DSCR
        ld      r7, HSTATE_DSCR(r13)
        std     r8, VCPU_DSCR(r9)
        mtspr   SPRN_DSCR, r7
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  
        /* Save non-volatile GPRs */
        std     r14, VCPU_GPR(R14)(r9)
@@@ -1503,11 -1345,9 +1364,9 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S
        mfspr   r4, SPRN_MMCR0          /* save MMCR0 */
        mtspr   SPRN_MMCR0, r3          /* freeze all counters, disable ints */
        mfspr   r6, SPRN_MMCRA
- BEGIN_FTR_SECTION
-       /* On P7, clear MMCRA in order to disable SDAR updates */
+       /* Clear MMCRA in order to disable SDAR updates */
        li      r7, 0
        mtspr   SPRN_MMCRA, r7
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        isync
        beq     21f                     /* if no VPA, save PMU stuff anyway */
        lbz     r7, LPPACA_PMCINUSE(r8)
@@@ -1532,20 -1372,12 +1391,12 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S
        mfspr   r6, SPRN_PMC4
        mfspr   r7, SPRN_PMC5
        mfspr   r8, SPRN_PMC6
- BEGIN_FTR_SECTION
-       mfspr   r10, SPRN_PMC7
-       mfspr   r11, SPRN_PMC8
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        stw     r3, VCPU_PMC(r9)
        stw     r4, VCPU_PMC + 4(r9)
        stw     r5, VCPU_PMC + 8(r9)
        stw     r6, VCPU_PMC + 12(r9)
        stw     r7, VCPU_PMC + 16(r9)
        stw     r8, VCPU_PMC + 20(r9)
- BEGIN_FTR_SECTION
-       stw     r10, VCPU_PMC + 24(r9)
-       stw     r11, VCPU_PMC + 28(r9)
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  BEGIN_FTR_SECTION
        mfspr   r5, SPRN_SIER
        mfspr   r6, SPRN_SPMC1
@@@ -1566,11 -1398,8 +1417,8 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S
        ptesync
  
  hdec_soon:                    /* r12 = trap, r13 = paca */
- BEGIN_FTR_SECTION
-       b       32f
- END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        /*
-        * POWER7 guest -> host partition switch code.
+        * POWER7/POWER8 guest -> host partition switch code.
         * We don't have to lock against tlbies but we do
         * have to coordinate the hardware threads.
         */
@@@ -1698,87 -1527,9 +1546,9 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S
  16:   ld      r8,KVM_HOST_LPCR(r4)
        mtspr   SPRN_LPCR,r8
        isync
-       b       33f
-       /*
-        * PPC970 guest -> host partition switch code.
-        * We have to lock against concurrent tlbies, and
-        * we have to flush the whole TLB.
-        */
- 32:   ld      r5,HSTATE_KVM_VCORE(r13)
-       ld      r4,VCORE_KVM(r5)        /* pointer to struct kvm */
-       /* Take the guest's tlbie_lock */
- #ifdef __BIG_ENDIAN__
-       lwz     r8,PACA_LOCK_TOKEN(r13)
- #else
-       lwz     r8,PACAPACAINDEX(r13)
- #endif
-       addi    r3,r4,KVM_TLBIE_LOCK
- 24:   lwarx   r0,0,r3
-       cmpwi   r0,0
-       bne     24b
-       stwcx.  r8,0,r3
-       bne     24b
-       isync
-       ld      r7,KVM_HOST_LPCR(r4)    /* use kvm->arch.host_lpcr for HID4 */
-       li      r0,0x18f
-       rotldi  r0,r0,HID4_LPID5_SH     /* all lpid bits in HID4 = 1 */
-       or      r0,r7,r0
-       ptesync
-       sync
-       mtspr   SPRN_HID4,r0            /* switch to reserved LPID */
-       isync
-       li      r0,0
-       stw     r0,0(r3)                /* drop guest tlbie_lock */
-       /* invalidate the whole TLB */
-       li      r0,256
-       mtctr   r0
-       li      r6,0
- 25:   tlbiel  r6
-       addi    r6,r6,0x1000
-       bdnz    25b
-       ptesync
-       /* take native_tlbie_lock */
-       ld      r3,toc_tlbie_lock@toc(2)
- 24:   lwarx   r0,0,r3
-       cmpwi   r0,0
-       bne     24b
-       stwcx.  r8,0,r3
-       bne     24b
-       isync
-       ld      r6,KVM_HOST_SDR1(r4)
-       mtspr   SPRN_SDR1,r6            /* switch to host page table */
-       /* Set up host HID4 value */
-       sync
-       mtspr   SPRN_HID4,r7
-       isync
-       li      r0,0
-       stw     r0,0(r3)                /* drop native_tlbie_lock */
-       lis     r8,0x7fff               /* MAX_INT@h */
-       mtspr   SPRN_HDEC,r8
-       /* Disable HDEC interrupts */
-       mfspr   r0,SPRN_HID0
-       li      r3,0
-       rldimi  r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
-       sync
-       mtspr   SPRN_HID0,r0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
-       mfspr   r0,SPRN_HID0
  
        /* load host SLB entries */
33:   ld      r8,PACA_SLBSHADOWPTR(r13)
      ld      r8,PACA_SLBSHADOWPTR(r13)
  
        .rept   SLB_NUM_BOLTED
        li      r3, SLBSHADOW_SAVEAREA
@@@ -2047,7 -1798,7 +1817,7 @@@ hcall_real_table
        .long   0               /* 0xd8 */
        .long   0               /* 0xdc */
        .long   DOTSYM(kvmppc_h_cede) - hcall_real_table
-       .long   0               /* 0xe4 */
+       .long   DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
        .long   0               /* 0xe8 */
        .long   0               /* 0xec */
        .long   0               /* 0xf0 */
@@@ -2126,9 -1877,6 +1896,6 @@@ _GLOBAL(kvmppc_h_cede
        stw     r0,VCPU_TRAP(r3)
        li      r0,H_SUCCESS
        std     r0,VCPU_GPR(R3)(r3)
- BEGIN_FTR_SECTION
-       b       kvm_cede_exit   /* just send it up to host on 970 */
- END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
  
        /*
         * Set our bit in the bitmask of napping threads unless all the
         * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
         * runlatch bit before napping.
         */
 +kvm_do_nap:
        mfspr   r2, SPRN_CTRLF
        clrrdi  r2, r2, 1
        mtspr   SPRN_CTRLT, r2
@@@ -2455,7 -2202,6 +2222,6 @@@ BEGIN_FTR_SECTIO
  END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  #endif
        mtmsrd  r8
-       isync
        addi    r3,r3,VCPU_FPRS
        bl      store_fp_state
  #ifdef CONFIG_ALTIVEC
@@@ -2491,7 -2237,6 +2257,6 @@@ BEGIN_FTR_SECTIO
  END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  #endif
        mtmsrd  r8
-       isync
        addi    r3,r4,VCPU_FPRS
        bl      load_fp_state
  #ifdef CONFIG_ALTIVEC
diff --combined arch/powerpc/kvm/e500.c
@@@ -76,11 -76,11 +76,11 @@@ static inline int local_sid_setup_one(s
        unsigned long sid;
        int ret = -1;
  
 -      sid = ++(__get_cpu_var(pcpu_last_used_sid));
 +      sid = __this_cpu_inc_return(pcpu_last_used_sid);
        if (sid < NUM_TIDS) {
 -              __get_cpu_var(pcpu_sids).entry[sid] = entry;
 +              __this_cpu_write(pcpu_sids.entry[sid], entry);
                entry->val = sid;
 -              entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
 +              entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
                ret = sid;
        }
  
  static inline int local_sid_lookup(struct id *entry)
  {
        if (entry && entry->val != 0 &&
 -          __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
 -          entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
 +          __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
 +          entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
                return entry->val;
        return -1;
  }
  /* Invalidate all id mappings on local core -- call with preempt disabled */
  static inline void local_sid_destroy_all(void)
  {
 -      __get_cpu_var(pcpu_last_used_sid) = 0;
 -      memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
 +      __this_cpu_write(pcpu_last_used_sid, 0);
 +      memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
  }
  
  static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
@@@ -299,14 -299,6 +299,6 @@@ void kvmppc_mmu_msr_notify(struct kvm_v
        kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
  }
  
- void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
- {
- }
- void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
- {
- }
  static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
  {
        kvmppc_booke_vcpu_load(vcpu, cpu);
@@@ -22,8 -22,11 +22,9 @@@ unsigned long *page_table_alloc(struct 
  void page_table_free(struct mm_struct *, unsigned long *);
  void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
  
 -void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
 -                          bool init_skey);
  int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned long key, bool nq);
+ unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
  
  static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
  {
diff --combined arch/s390/kvm/kvm-s390.c
@@@ -81,10 -81,17 +81,17 @@@ struct kvm_stats_debugfs_item debugfs_e
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
+       { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
+       { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
+       { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
+       { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
+       { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
+       { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
+       { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
@@@ -271,7 -278,7 +278,7 @@@ static int kvm_s390_mem_control(struct 
        case KVM_S390_VM_MEM_CLR_CMMA:
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
 -              page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
 +              s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
@@@ -453,6 -460,7 +460,7 @@@ int kvm_arch_init_vm(struct kvm *kvm, u
        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);
+       mutex_init(&kvm->arch.ipte_mutex);
  
        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");
@@@ -711,7 -719,6 +719,6 @@@ struct kvm_vcpu *kvm_arch_vcpu_create(s
        }
  
        spin_lock_init(&vcpu->arch.local_int.lock);
-       INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
@@@ -1114,13 -1121,15 +1121,15 @@@ static void __kvm_inject_pfault_token(s
                                      unsigned long token)
  {
        struct kvm_s390_interrupt inti;
-       inti.parm64 = token;
+       struct kvm_s390_irq irq;
  
        if (start_token) {
-               inti.type = KVM_S390_INT_PFAULT_INIT;
-               WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
+               irq.u.ext.ext_params2 = token;
+               irq.type = KVM_S390_INT_PFAULT_INIT;
+               WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
+               inti.parm64 = token;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
  }
@@@ -1614,11 -1623,14 +1623,14 @@@ long kvm_arch_vcpu_ioctl(struct file *f
        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;
+               struct kvm_s390_irq s390irq;
  
                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
-               r = kvm_s390_inject_vcpu(vcpu, &s390int);
+               if (s390int_to_s390irq(&s390int, &s390irq))
+                       return -EINVAL;
+               r = kvm_s390_inject_vcpu(vcpu, &s390irq);
                break;
        }
        case KVM_S390_STORE_STATUS:
diff --combined arch/s390/kvm/priv.c
@@@ -156,45 -156,38 +156,42 @@@ static int handle_store_cpu_address(str
        return 0;
  }
  
 -static void __skey_check_enable(struct kvm_vcpu *vcpu)
 +static int __skey_check_enable(struct kvm_vcpu *vcpu)
  {
 +      int rc = 0;
        if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
 -              return;
 +              return rc;
  
 -      s390_enable_skey();
 +      rc = s390_enable_skey();
        trace_kvm_s390_skey_related_inst(vcpu);
        vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
 +      return rc;
  }
  
  
  static int handle_skey(struct kvm_vcpu *vcpu)
  {
 -      __skey_check_enable(vcpu);
 +      int rc = __skey_check_enable(vcpu);
  
 +      if (rc)
 +              return rc;
        vcpu->stat.instruction_storage_key++;
  
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
  
-       vcpu->arch.sie_block->gpsw.addr =
-               __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
+       kvm_s390_rewind_psw(vcpu, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
  }
  
  static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
  {
-       psw_t *psw = &vcpu->arch.sie_block->gpsw;
        vcpu->stat.instruction_ipte_interlock++;
-       if (psw_bits(*psw).p)
+       if (psw_bits(vcpu->arch.sie_block->gpsw).p)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
-       psw->addr = __rewind_psw(*psw, 4);
+       kvm_s390_rewind_psw(vcpu, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
  }
@@@ -650,10 -643,7 +647,7 @@@ static int handle_pfmf(struct kvm_vcpu 
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  
        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
-       if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
-               if (kvm_s390_check_low_addr_protection(vcpu, start))
-                       return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
-       }
+       start = kvm_s390_logical_to_effective(vcpu, start);
  
        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }
+       if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
+               if (kvm_s390_check_low_addr_protection(vcpu, start))
+                       return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+       }
        while (start < end) {
                unsigned long useraddr, abs_addr;
  
                }
  
                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
 -                      __skey_check_enable(vcpu);
 +                      int rc = __skey_check_enable(vcpu);
 +
 +                      if (rc)
 +                              return rc;
                        if (set_guest_storage_key(current->mm, useraddr,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
@@@ -725,8 -718,7 +725,7 @@@ static int handle_essa(struct kvm_vcpu 
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  
        /* Rewind PSW to repeat the ESSA instruction */
-       vcpu->arch.sie_block->gpsw.addr =
-               __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
+       kvm_s390_rewind_psw(vcpu, 4);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        down_read(&gmap->mm->mmap_sem);
@@@ -769,8 -761,8 +768,8 @@@ int kvm_s390_handle_lctl(struct kvm_vcp
  {
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-       u32 val = 0;
-       int reg, rc;
+       int reg, rc, nr_regs;
+       u32 ctl_array[16];
        u64 ga;
  
        vcpu->stat.instruction_lctl++;
        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
  
+       nr_regs = ((reg3 - reg1) & 0xf) + 1;
+       rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+       if (rc)
+               return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
+       nr_regs = 0;
        do {
-               rc = read_guest(vcpu, ga, &val, sizeof(val));
-               if (rc)
-                       return kvm_s390_inject_prog_cond(vcpu, rc);
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
-               vcpu->arch.sie_block->gcr[reg] |= val;
-               ga += 4;
+               vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
+       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
  }
  
@@@ -806,9 -799,9 +806,9 @@@ int kvm_s390_handle_stctl(struct kvm_vc
  {
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+       int reg, rc, nr_regs;
+       u32 ctl_array[16];
        u64 ga;
-       u32 val;
-       int reg, rc;
  
        vcpu->stat.instruction_stctl++;
  
        trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
  
        reg = reg1;
+       nr_regs = 0;
        do {
-               val = vcpu->arch.sie_block->gcr[reg] &  0x00000000fffffffful;
-               rc = write_guest(vcpu, ga, &val, sizeof(val));
-               if (rc)
-                       return kvm_s390_inject_prog_cond(vcpu, rc);
-               ga += 4;
+               ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
-       return 0;
+       rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+       return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
  }
  
  static int handle_lctlg(struct kvm_vcpu *vcpu)
  {
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-       u64 ga, val;
-       int reg, rc;
+       int reg, rc, nr_regs;
+       u64 ctl_array[16];
+       u64 ga;
  
        vcpu->stat.instruction_lctlg++;
  
        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  
-       reg = reg1;
        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
  
+       nr_regs = ((reg3 - reg1) & 0xf) + 1;
+       rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+       if (rc)
+               return kvm_s390_inject_prog_cond(vcpu, rc);
+       reg = reg1;
+       nr_regs = 0;
        do {
-               rc = read_guest(vcpu, ga, &val, sizeof(val));
-               if (rc)
-                       return kvm_s390_inject_prog_cond(vcpu, rc);
-               vcpu->arch.sie_block->gcr[reg] = val;
-               ga += 8;
+               vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
+       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
  }
  
@@@ -878,8 -869,9 +876,9 @@@ static int handle_stctg(struct kvm_vcp
  {
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-       u64 ga, val;
-       int reg, rc;
+       int reg, rc, nr_regs;
+       u64 ctl_array[16];
+       u64 ga;
  
        vcpu->stat.instruction_stctg++;
  
        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  
-       reg = reg1;
        VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
  
+       reg = reg1;
+       nr_regs = 0;
        do {
-               val = vcpu->arch.sie_block->gcr[reg];
-               rc = write_guest(vcpu, ga, &val, sizeof(val));
-               if (rc)
-                       return kvm_s390_inject_prog_cond(vcpu, rc);
-               ga += 8;
+               ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
-       return 0;
+       rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+       return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
  }
  
  static const intercept_handler_t eb_handlers[256] = {
diff --combined arch/s390/mm/pgtable.c
@@@ -18,8 -18,6 +18,8 @@@
  #include <linux/rcupdate.h>
  #include <linux/slab.h>
  #include <linux/swapops.h>
 +#include <linux/ksm.h>
 +#include <linux/mman.h>
  
  #include <asm/pgtable.h>
  #include <asm/pgalloc.h>
@@@ -752,7 -750,8 +752,7 @@@ int gmap_ipte_notify(struct gmap *gmap
                        break;
                /* Walk the process page table, lock and get pte pointer */
                ptep = get_locked_pte(gmap->mm, addr, &ptl);
 -              if (unlikely(!ptep))
 -                      continue;
 +              VM_BUG_ON(!ptep);
                /* Set notification bit in the pgste of the pte */
                entry = *ptep;
                if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
                        gaddr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                }
 -              spin_unlock(ptl);
 +              pte_unmap_unlock(ptep, ptl);
        }
        up_read(&gmap->mm->mmap_sem);
        return rc;
@@@ -835,6 -834,99 +835,6 @@@ static inline void page_table_free_pgst
        __free_page(page);
  }
  
 -static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
 -                      unsigned long addr, unsigned long end, bool init_skey)
 -{
 -      pte_t *start_pte, *pte;
 -      spinlock_t *ptl;
 -      pgste_t pgste;
 -
 -      start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 -      pte = start_pte;
 -      do {
 -              pgste = pgste_get_lock(pte);
 -              pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
 -              if (init_skey) {
 -                      unsigned long address;
 -
 -                      pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
 -                                            PGSTE_GR_BIT | PGSTE_GC_BIT);
 -
 -                      /* skip invalid and not writable pages */
 -                      if (pte_val(*pte) & _PAGE_INVALID ||
 -                          !(pte_val(*pte) & _PAGE_WRITE)) {
 -                              pgste_set_unlock(pte, pgste);
 -                              continue;
 -                      }
 -
 -                      address = pte_val(*pte) & PAGE_MASK;
 -                      page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
 -              }
 -              pgste_set_unlock(pte, pgste);
 -      } while (pte++, addr += PAGE_SIZE, addr != end);
 -      pte_unmap_unlock(start_pte, ptl);
 -
 -      return addr;
 -}
 -
 -static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
 -                      unsigned long addr, unsigned long end, bool init_skey)
 -{
 -      unsigned long next;
 -      pmd_t *pmd;
 -
 -      pmd = pmd_offset(pud, addr);
 -      do {
 -              next = pmd_addr_end(addr, end);
 -              if (pmd_none_or_clear_bad(pmd))
 -                      continue;
 -              next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
 -      } while (pmd++, addr = next, addr != end);
 -
 -      return addr;
 -}
 -
 -static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
 -                      unsigned long addr, unsigned long end, bool init_skey)
 -{
 -      unsigned long next;
 -      pud_t *pud;
 -
 -      pud = pud_offset(pgd, addr);
 -      do {
 -              next = pud_addr_end(addr, end);
 -              if (pud_none_or_clear_bad(pud))
 -                      continue;
 -              next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
 -      } while (pud++, addr = next, addr != end);
 -
 -      return addr;
 -}
 -
 -void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
 -                          unsigned long end, bool init_skey)
 -{
 -      unsigned long addr, next;
 -      pgd_t *pgd;
 -
 -      down_write(&mm->mmap_sem);
 -      if (init_skey && mm_use_skey(mm))
 -              goto out_up;
 -      addr = start;
 -      pgd = pgd_offset(mm, addr);
 -      do {
 -              next = pgd_addr_end(addr, end);
 -              if (pgd_none_or_clear_bad(pgd))
 -                      continue;
 -              next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
 -      } while (pgd++, addr = next, addr != end);
 -      if (init_skey)
 -              current->mm->context.use_skey = 1;
 -out_up:
 -      up_write(&mm->mmap_sem);
 -}
 -EXPORT_SYMBOL(page_table_reset_pgste);
 -
  int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned long key, bool nq)
  {
  
        down_read(&mm->mmap_sem);
  retry:
-       ptep = get_locked_pte(current->mm, addr, &ptl);
+       ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
  }
  EXPORT_SYMBOL(set_guest_storage_key);
  
+ unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
+ {
+       spinlock_t *ptl;
+       pgste_t pgste;
+       pte_t *ptep;
+       uint64_t physaddr;
+       unsigned long key = 0;
+       down_read(&mm->mmap_sem);
+       ptep = get_locked_pte(mm, addr, &ptl);
+       if (unlikely(!ptep)) {
+               up_read(&mm->mmap_sem);
+               return -EFAULT;
+       }
+       pgste = pgste_get_lock(ptep);
+       if (pte_val(*ptep) & _PAGE_INVALID) {
+               key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
+               key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
+               key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
+               key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
+       } else {
+               physaddr = pte_val(*ptep) & PAGE_MASK;
+               key = page_get_storage_key(physaddr);
+               /* Reflect guest's logical view, not physical */
+               if (pgste_val(pgste) & PGSTE_GR_BIT)
+                       key |= _PAGE_REFERENCED;
+               if (pgste_val(pgste) & PGSTE_GC_BIT)
+                       key |= _PAGE_CHANGED;
+       }
+       pgste_set_unlock(ptep, pgste);
+       pte_unmap_unlock(ptep, ptl);
+       up_read(&mm->mmap_sem);
+       return key;
+ }
+ EXPORT_SYMBOL(get_guest_storage_key);
  #else /* CONFIG_PGSTE */
  
  static inline int page_table_with_pgste(struct page *page)
@@@ -900,6 -1031,11 +939,6 @@@ static inline unsigned long *page_table
        return NULL;
  }
  
 -void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
 -                          unsigned long end, bool init_skey)
 -{
 -}
 -
  static inline void page_table_free_pgste(unsigned long *table)
  {
  }
@@@ -1250,88 -1386,12 +1289,88 @@@ EXPORT_SYMBOL_GPL(s390_enable_sie)
   * Enable storage key handling from now on and initialize the storage
   * keys with the default key.
   */
 -void s390_enable_skey(void)
 +static int __s390_enable_skey(pte_t *pte, unsigned long addr,
 +                            unsigned long next, struct mm_walk *walk)
  {
 -      page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
 +      unsigned long ptev;
 +      pgste_t pgste;
 +
 +      pgste = pgste_get_lock(pte);
 +      /*
 +       * Remove all zero page mappings,
 +       * after establishing a policy to forbid zero page mappings
 +       * following faults for that page will get fresh anonymous pages
 +       */
 +      if (is_zero_pfn(pte_pfn(*pte))) {
 +              ptep_flush_direct(walk->mm, addr, pte);
 +              pte_val(*pte) = _PAGE_INVALID;
 +      }
 +      /* Clear storage key */
 +      pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
 +                            PGSTE_GR_BIT | PGSTE_GC_BIT);
 +      ptev = pte_val(*pte);
 +      if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
 +              page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
 +      pgste_set_unlock(pte, pgste);
 +      return 0;
 +}
 +
 +int s390_enable_skey(void)
 +{
 +      struct mm_walk walk = { .pte_entry = __s390_enable_skey };
 +      struct mm_struct *mm = current->mm;
 +      struct vm_area_struct *vma;
 +      int rc = 0;
 +
 +      down_write(&mm->mmap_sem);
 +      if (mm_use_skey(mm))
 +              goto out_up;
 +
 +      mm->context.use_skey = 1;
 +      for (vma = mm->mmap; vma; vma = vma->vm_next) {
 +              if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
 +                              MADV_UNMERGEABLE, &vma->vm_flags)) {
 +                      mm->context.use_skey = 0;
 +                      rc = -ENOMEM;
 +                      goto out_up;
 +              }
 +      }
 +      mm->def_flags &= ~VM_MERGEABLE;
 +
 +      walk.mm = mm;
 +      walk_page_range(0, TASK_SIZE, &walk);
 +
 +out_up:
 +      up_write(&mm->mmap_sem);
 +      return rc;
  }
  EXPORT_SYMBOL_GPL(s390_enable_skey);
  
 +/*
 + * Reset CMMA state, make all pages stable again.
 + */
 +static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
 +                           unsigned long next, struct mm_walk *walk)
 +{
 +      pgste_t pgste;
 +
 +      pgste = pgste_get_lock(pte);
 +      pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
 +      pgste_set_unlock(pte, pgste);
 +      return 0;
 +}
 +
 +void s390_reset_cmma(struct mm_struct *mm)
 +{
 +      struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
 +
 +      down_write(&mm->mmap_sem);
 +      walk.mm = mm;
 +      walk_page_range(0, TASK_SIZE, &walk);
 +      up_write(&mm->mmap_sem);
 +}
 +EXPORT_SYMBOL_GPL(s390_reset_cmma);
 +
  /*
   * Test and reset if a guest page is dirty
   */
diff --combined arch/x86/kvm/emulate.c
  #define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
  #define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
  #define Escape      (5<<15)     /* Escape to coprocessor instruction */
+ #define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
  #define Sse         (1<<18)     /* SSE Vector instruction */
  /* Generic ModRM decode. */
  #define ModRM       (1<<19)
  #define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
  #define NoBigReal   ((u64)1 << 50)  /* No big real mode */
  #define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
+ #define NearBranch  ((u64)1 << 52)  /* Near branches */
+ #define No16      ((u64)1 << 53)  /* No 16 bit operand */
  
  #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
  
@@@ -209,6 -212,7 +212,7 @@@ struct opcode 
                const struct group_dual *gdual;
                const struct gprefix *gprefix;
                const struct escape *esc;
+               const struct instr_dual *idual;
                void (*fastop)(struct fastop *fake);
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
@@@ -231,6 -235,11 +235,11 @@@ struct escape 
        struct opcode high[64];
  };
  
+ struct instr_dual {
+       struct opcode mod012;
+       struct opcode mod3;
+ };
  /* EFLAGS bit definitions. */
  #define EFLG_ID (1<<21)
  #define EFLG_VIP (1<<20)
@@@ -379,6 -388,15 +388,15 @@@ static int fastop(struct x86_emulate_ct
        ON64(FOP2E(op##q, rax, cl)) \
        FOP_END
  
+ /* 2 operand, src and dest are reversed */
+ #define FASTOP2R(op, name) \
+       FOP_START(name) \
+       FOP2E(op##b, dl, al) \
+       FOP2E(op##w, dx, ax) \
+       FOP2E(op##l, edx, eax) \
+       ON64(FOP2E(op##q, rdx, rax)) \
+       FOP_END
  #define FOP3E(op,  dst, src, src2) \
        FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
  
@@@ -477,9 -495,9 +495,9 @@@ address_mask(struct x86_emulate_ctxt *c
  }
  
  static inline unsigned long
- register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
+ register_address(struct x86_emulate_ctxt *ctxt, int reg)
  {
-       return address_mask(ctxt, reg);
+       return address_mask(ctxt, reg_read(ctxt, reg));
  }
  
  static void masked_increment(ulong *reg, ulong mask, int inc)
  }
  
  static inline void
- register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
+ register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
  {
        ulong mask;
  
                mask = ~0UL;
        else
                mask = ad_mask(ctxt);
-       masked_increment(reg, mask, inc);
+       masked_increment(reg_rmw(ctxt, reg), mask, inc);
  }
  
  static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
@@@ -564,40 -582,6 +582,6 @@@ static int emulate_nm(struct x86_emulat
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
  }
  
- static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
-                              int cs_l)
- {
-       switch (ctxt->op_bytes) {
-       case 2:
-               ctxt->_eip = (u16)dst;
-               break;
-       case 4:
-               ctxt->_eip = (u32)dst;
-               break;
- #ifdef CONFIG_X86_64
-       case 8:
-               if ((cs_l && is_noncanonical_address(dst)) ||
-                   (!cs_l && (dst >> 32) != 0))
-                       return emulate_gp(ctxt, 0);
-               ctxt->_eip = dst;
-               break;
- #endif
-       default:
-               WARN(1, "unsupported eip assignment size\n");
-       }
-       return X86EMUL_CONTINUE;
- }
- static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
- {
-       return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
- }
- static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
- {
-       return assign_eip_near(ctxt, ctxt->_eip + rel);
- }
  static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
  {
        u16 selector;
@@@ -641,25 -625,24 +625,24 @@@ static bool insn_aligned(struct x86_emu
                return true;
  }
  
- static int __linearize(struct x86_emulate_ctxt *ctxt,
-                    struct segmented_address addr,
-                    unsigned *max_size, unsigned size,
-                    bool write, bool fetch,
-                    ulong *linear)
+ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+                                      struct segmented_address addr,
+                                      unsigned *max_size, unsigned size,
+                                      bool write, bool fetch,
+                                      enum x86emul_mode mode, ulong *linear)
  {
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;
-       unsigned cpl;
  
        la = seg_base(ctxt, addr.seg) + addr.ea;
        *max_size = 0;
-       switch (ctxt->mode) {
+       switch (mode) {
        case X86EMUL_MODE_PROT64:
-               if (((signed long)la << 16) >> 16 != la)
-                       return emulate_gp(ctxt, 0);
+               if (is_noncanonical_address(la))
+                       goto bad;
  
                *max_size = min_t(u64, ~0u, (1ull << 48) - la);
                if (size > *max_size)
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
-               if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
-                   (ctxt->d & NoBigReal)) {
-                       /* la is between zero and 0xffff */
-                       if (la > 0xffff)
-                               goto bad;
-                       *max_size = 0x10000 - la;
-               } else if ((desc.type & 8) || !(desc.type & 4)) {
-                       /* expand-up segment */
-                       if (addr.ea > lim)
-                               goto bad;
-                       *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
-               } else {
+               if (!(desc.type & 8) && (desc.type & 4)) {
                        /* expand-down segment */
                        if (addr.ea <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
-                       if (addr.ea > lim)
-                               goto bad;
-                       *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
                }
+               if (addr.ea > lim)
+                       goto bad;
+               *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
                if (size > *max_size)
                        goto bad;
-               cpl = ctxt->ops->cpl(ctxt);
-               if (!(desc.type & 8)) {
-                       /* data segment */
-                       if (cpl > desc.dpl)
-                               goto bad;
-               } else if ((desc.type & 8) && !(desc.type & 4)) {
-                       /* nonconforming code segment */
-                       if (cpl != desc.dpl)
-                               goto bad;
-               } else if ((desc.type & 8) && (desc.type & 4)) {
-                       /* conforming code segment */
-                       if (cpl < desc.dpl)
-                               goto bad;
-               }
+               la &= (u32)-1;
                break;
        }
-       if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
-               la &= (u32)-1;
        if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
                return emulate_gp(ctxt, 0);
        *linear = la;
@@@ -735,9 -692,55 +692,55 @@@ static int linearize(struct x86_emulate
                     ulong *linear)
  {
        unsigned max_size;
-       return __linearize(ctxt, addr, &max_size, size, write, false, linear);
+       return __linearize(ctxt, addr, &max_size, size, write, false,
+                          ctxt->mode, linear);
+ }
+ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+                            enum x86emul_mode mode)
+ {
+       ulong linear;
+       int rc;
+       unsigned max_size;
+       struct segmented_address addr = { .seg = VCPU_SREG_CS,
+                                          .ea = dst };
+       if (ctxt->op_bytes != sizeof(unsigned long))
+               addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+       rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
+       if (rc == X86EMUL_CONTINUE)
+               ctxt->_eip = addr.ea;
+       return rc;
+ }
+ static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+       return assign_eip(ctxt, dst, ctxt->mode);
  }
  
+ static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+                         const struct desc_struct *cs_desc)
+ {
+       enum x86emul_mode mode = ctxt->mode;
+ #ifdef CONFIG_X86_64
+       if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
+               u64 efer = 0;
+               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+               if (efer & EFER_LMA)
+                       mode = X86EMUL_MODE_PROT64;
+       }
+ #endif
+       if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
+               mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+       return assign_eip(ctxt, dst, mode);
+ }
+ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+ {
+       return assign_eip_near(ctxt, ctxt->_eip + rel);
+ }
  
  static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
@@@ -776,7 -779,8 +779,8 @@@ static int __do_insn_fetch_bytes(struc
         * boundary check itself.  Instead, we use max_size to check
         * against op_size.
         */
-       rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
+       rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
+                        &linear);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
  
@@@ -911,6 -915,8 +915,8 @@@ FASTOP2W(btc)
  
  FASTOP2(xadd);
  
+ FASTOP2R(cmp, cmp_r);
  static u8 test_cc(unsigned int condition, unsigned long flags)
  {
        u8 rc;
@@@ -1221,6 -1227,7 +1227,7 @@@ static int decode_modrm(struct x86_emul
                        if (index_reg != 4)
                                modrm_ea += reg_read(ctxt, index_reg) << scale;
                } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
+                       modrm_ea += insn_fetch(s32, ctxt);
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                ctxt->rip_relative = 1;
                } else {
                        adjust_modrm_seg(ctxt, base_reg);
                }
                switch (ctxt->modrm_mod) {
-               case 0:
-                       if (ctxt->modrm_rm == 5)
-                               modrm_ea += insn_fetch(s32, ctxt);
-                       break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
@@@ -1284,7 -1287,8 +1287,8 @@@ static void fetch_bit_operand(struct x8
                else
                        sv = (s64)ctxt->src.val & (s64)mask;
  
-               ctxt->dst.addr.mem.ea += (sv >> 3);
+               ctxt->dst.addr.mem.ea = address_mask(ctxt,
+                                          ctxt->dst.addr.mem.ea + (sv >> 3));
        }
  
        /* only subword offset */
@@@ -1610,6 -1614,9 +1614,9 @@@ static int __load_segment_descriptor(st
                                sizeof(base3), &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
+               if (is_noncanonical_address(get_desc_base(&seg_desc) |
+                                            ((u64)base3 << 32)))
+                       return emulate_gp(ctxt, 0);
        }
  load:
        ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
@@@ -1807,6 -1814,10 +1814,10 @@@ static int em_push_sreg(struct x86_emul
        int seg = ctxt->src2.val;
  
        ctxt->src.val = get_segment_selector(ctxt, seg);
+       if (ctxt->op_bytes == 4) {
+               rsp_increment(ctxt, -2);
+               ctxt->op_bytes = 2;
+       }
  
        return em_push(ctxt);
  }
@@@ -1850,7 -1861,7 +1861,7 @@@ static int em_pusha(struct x86_emulate_
  
  static int em_pushf(struct x86_emulate_ctxt *ctxt)
  {
-       ctxt->src.val =  (unsigned long)ctxt->eflags;
+       ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
        return em_push(ctxt);
  }
  
@@@ -2035,7 -2046,7 +2046,7 @@@ static int em_jmp_far(struct x86_emulat
        if (rc != X86EMUL_CONTINUE)
                return rc;
  
-       rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+       rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
        if (rc != X86EMUL_CONTINUE) {
                WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
                /* assigning eip failed; restore the old cs */
        return rc;
  }
  
- static int em_grp45(struct x86_emulate_ctxt *ctxt)
+ static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
  {
-       int rc = X86EMUL_CONTINUE;
+       return assign_eip_near(ctxt, ctxt->src.val);
+ }
  
-       switch (ctxt->modrm_reg) {
-       case 2: /* call near abs */ {
-               long int old_eip;
-               old_eip = ctxt->_eip;
-               rc = assign_eip_near(ctxt, ctxt->src.val);
-               if (rc != X86EMUL_CONTINUE)
-                       break;
-               ctxt->src.val = old_eip;
-               rc = em_push(ctxt);
-               break;
-       }
-       case 4: /* jmp abs */
-               rc = assign_eip_near(ctxt, ctxt->src.val);
-               break;
-       case 5: /* jmp far */
-               rc = em_jmp_far(ctxt);
-               break;
-       case 6: /* push */
-               rc = em_push(ctxt);
-               break;
-       }
+ static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
+ {
+       int rc;
+       long int old_eip;
+       old_eip = ctxt->_eip;
+       rc = assign_eip_near(ctxt, ctxt->src.val);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+       ctxt->src.val = old_eip;
+       rc = em_push(ctxt);
        return rc;
  }
  
@@@ -2128,11 -2130,11 +2130,11 @@@ static int em_ret_far(struct x86_emulat
        /* Outer-privilege level return is not implemented */
        if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
                return X86EMUL_UNHANDLEABLE;
-       rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
+       rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
                                       &new_desc);
        if (rc != X86EMUL_CONTINUE)
                return rc;
-       rc = assign_eip_far(ctxt, eip, new_desc.l);
+       rc = assign_eip_far(ctxt, eip, &new_desc);
        if (rc != X86EMUL_CONTINUE) {
                WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
                ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
@@@ -2316,6 -2318,7 +2318,7 @@@ static int em_syscall(struct x86_emulat
  
                ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
                ctxt->eflags &= ~msr_data;
+               ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
  #endif
        } else {
                /* legacy mode */
@@@ -2349,11 -2352,9 +2352,9 @@@ static int em_sysenter(struct x86_emula
            && !vendor_intel(ctxt))
                return emulate_ud(ctxt);
  
-       /* XXX sysenter/sysexit have not been tested in 64bit mode.
-       * Therefore, we inject an #UD.
-       */
+       /* sysenter/sysexit have not been tested in 64bit mode. */
        if (ctxt->mode == X86EMUL_MODE_PROT64)
-               return emulate_ud(ctxt);
+               return X86EMUL_UNHANDLEABLE;
  
        setup_syscalls_segments(ctxt, &cs, &ss);
  
@@@ -2425,6 -2426,8 +2426,8 @@@ static int em_sysexit(struct x86_emulat
                if ((msr_data & 0xfffc) == 0x0)
                        return emulate_gp(ctxt, 0);
                ss_sel = (u16)(msr_data + 24);
+               rcx = (u32)rcx;
+               rdx = (u32)rdx;
                break;
        case X86EMUL_MODE_PROT64:
                cs_sel = (u16)(msr_data + 32);
@@@ -2599,7 -2602,6 +2602,6 @@@ static int task_switch_16(struct x86_em
        ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
                            &ctxt->exception);
        if (ret != X86EMUL_CONTINUE)
-               /* FIXME: need to provide precise fault address */
                return ret;
  
        save_state_to_tss16(ctxt, &tss_seg);
        ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
                             &ctxt->exception);
        if (ret != X86EMUL_CONTINUE)
-               /* FIXME: need to provide precise fault address */
                return ret;
  
        ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
                            &ctxt->exception);
        if (ret != X86EMUL_CONTINUE)
-               /* FIXME: need to provide precise fault address */
                return ret;
  
        if (old_tss_sel != 0xffff) {
                                     sizeof tss_seg.prev_task_link,
                                     &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
-                       /* FIXME: need to provide precise fault address */
                        return ret;
        }
  
@@@ -2813,7 -2812,8 +2812,8 @@@ static int emulator_do_task_switch(stru
         *
         * 1. jmp/call/int to task gate: Check against DPL of the task gate
         * 2. Exception/IRQ/iret: No check is performed
-        * 3. jmp/call to TSS: Check against DPL of the TSS
+        * 3. jmp/call to TSS/task-gate: No check is performed since the
+        *    hardware checks it before exiting.
         */
        if (reason == TASK_SWITCH_GATE) {
                if (idt_index != -1) {
                        if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
                                return emulate_gp(ctxt, (idt_index << 3) | 0x2);
                }
-       } else if (reason != TASK_SWITCH_IRET) {
-               int dpl = next_tss_desc.dpl;
-               if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
-                       return emulate_gp(ctxt, tss_selector);
        }
  
        desc_limit = desc_limit_scaled(&next_tss_desc);
        if (!next_tss_desc.p ||
            ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
@@@ -2913,8 -2908,8 +2908,8 @@@ static void string_addr_inc(struct x86_
  {
        int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
  
-       register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
-       op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
+       register_address_increment(ctxt, reg, df * op->bytes);
+       op->addr.mem.ea = register_address(ctxt, reg);
  }
  
  static int em_das(struct x86_emulate_ctxt *ctxt)
@@@ -3025,7 -3020,7 +3020,7 @@@ static int em_call_far(struct x86_emula
        if (rc != X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
  
-       rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+       rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
        if (rc != X86EMUL_CONTINUE)
                goto fail;
  
@@@ -3215,6 -3210,8 +3210,8 @@@ static int em_mov_rm_sreg(struct x86_em
                return emulate_ud(ctxt);
  
        ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
+       if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
+               ctxt->dst.bytes = 2;
        return X86EMUL_CONTINUE;
  }
  
@@@ -3317,7 -3314,7 +3314,7 @@@ static int em_sidt(struct x86_emulate_c
        return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
  }
  
- static int em_lgdt(struct x86_emulate_ctxt *ctxt)
+ static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
  {
        struct desc_ptr desc_ptr;
        int rc;
                             ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
-       ctxt->ops->set_gdt(ctxt, &desc_ptr);
+       if (ctxt->mode == X86EMUL_MODE_PROT64 &&
+           is_noncanonical_address(desc_ptr.address))
+               return emulate_gp(ctxt, 0);
+       if (lgdt)
+               ctxt->ops->set_gdt(ctxt, &desc_ptr);
+       else
+               ctxt->ops->set_idt(ctxt, &desc_ptr);
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
        return X86EMUL_CONTINUE;
  }
  
+ static int em_lgdt(struct x86_emulate_ctxt *ctxt)
+ {
+       return em_lgdt_lidt(ctxt, true);
+ }
  static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
  {
        int rc;
  
  static int em_lidt(struct x86_emulate_ctxt *ctxt)
  {
-       struct desc_ptr desc_ptr;
-       int rc;
-       if (ctxt->mode == X86EMUL_MODE_PROT64)
-               ctxt->op_bytes = 8;
-       rc = read_descriptor(ctxt, ctxt->src.addr.mem,
-                            &desc_ptr.size, &desc_ptr.address,
-                            ctxt->op_bytes);
-       if (rc != X86EMUL_CONTINUE)
-               return rc;
-       ctxt->ops->set_idt(ctxt, &desc_ptr);
-       /* Disable writeback. */
-       ctxt->dst.type = OP_NONE;
-       return X86EMUL_CONTINUE;
+       return em_lgdt_lidt(ctxt, false);
  }
  
  static int em_smsw(struct x86_emulate_ctxt *ctxt)
@@@ -3384,7 -3379,7 +3379,7 @@@ static int em_loop(struct x86_emulate_c
  {
        int rc = X86EMUL_CONTINUE;
  
-       register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
+       register_address_increment(ctxt, VCPU_REGS_RCX, -1);
        if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
            (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
                rc = jmp_rel(ctxt, ctxt->src.val);
@@@ -3554,7 -3549,7 +3549,7 @@@ static int check_cr_write(struct x86_em
  
                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                if (efer & EFER_LMA)
-                       rsvd = CR3_L_MODE_RESERVED_BITS;
+                       rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
  
                if (new_val & rsvd)
                        return emulate_gp(ctxt, 0);
@@@ -3596,8 -3591,15 +3591,15 @@@ static int check_dr_read(struct x86_emu
        if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
                return emulate_ud(ctxt);
  
-       if (check_dr7_gd(ctxt))
+       if (check_dr7_gd(ctxt)) {
+               ulong dr6;
+               ctxt->ops->get_dr(ctxt, 6, &dr6);
+               dr6 &= ~15;
+               dr6 |= DR6_BD | DR6_RTM;
+               ctxt->ops->set_dr(ctxt, 6, dr6);
                return emulate_db(ctxt);
+       }
  
        return X86EMUL_CONTINUE;
  }
@@@ -3684,6 -3686,7 +3686,7 @@@ static int check_perm_out(struct x86_em
  #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
  #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
  #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
+ #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
  #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
  #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
  #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
@@@ -3780,11 -3783,11 +3783,11 @@@ static const struct opcode group4[] = 
  static const struct opcode group5[] = {
        F(DstMem | SrcNone | Lock,              em_inc),
        F(DstMem | SrcNone | Lock,              em_dec),
-       I(SrcMem | Stack,                       em_grp45),
+       I(SrcMem | NearBranch,                  em_call_near_abs),
        I(SrcMemFAddr | ImplicitOps | Stack,    em_call_far),
-       I(SrcMem | Stack,                       em_grp45),
-       I(SrcMemFAddr | ImplicitOps,            em_grp45),
-       I(SrcMem | Stack,                       em_grp45), D(Undefined),
+       I(SrcMem | NearBranch,                  em_jmp_abs),
+       I(SrcMemFAddr | ImplicitOps,            em_jmp_far),
+       I(SrcMem | Stack,                       em_push), D(Undefined),
  };
  
  static const struct opcode group6[] = {
@@@ -3845,8 -3848,12 +3848,12 @@@ static const struct gprefix pfx_0f_6f_0
        I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
  };
  
+ static const struct instr_dual instr_dual_0f_2b = {
+       I(0, em_mov), N
+ };
  static const struct gprefix pfx_0f_2b = {
-       I(0, em_mov), I(0, em_mov), N, N,
+       ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
  };
  
  static const struct gprefix pfx_0f_28_0f_29 = {
@@@ -3920,6 -3927,10 +3927,10 @@@ static const struct escape escape_dd = 
        N, N, N, N, N, N, N, N,
  } };
  
+ static const struct instr_dual instr_dual_0f_c3 = {
+       I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
+ };
  static const struct opcode opcode_table[256] = {
        /* 0x00 - 0x07 */
        F6ALU(Lock, em_add),
        I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
        I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
        /* 0x70 - 0x7F */
-       X16(D(SrcImmByte)),
+       X16(D(SrcImmByte | NearBranch)),
        /* 0x80 - 0x87 */
        G(ByteOp | DstMem | SrcImm, group1),
        G(DstMem | SrcImm, group1),
        I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
        I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
        I2bv(SrcSI | DstDI | Mov | String, em_mov),
-       F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
+       F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
        /* 0xA8 - 0xAF */
        F2bv(DstAcc | SrcImm | NoWrite, em_test),
        I2bv(SrcAcc | DstDI | Mov | String, em_mov),
        I2bv(SrcSI | DstAcc | Mov | String, em_mov),
-       F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
+       F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
        /* 0xB0 - 0xB7 */
        X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
        /* 0xB8 - 0xBF */
        X8(I(DstReg | SrcImm64 | Mov, em_mov)),
        /* 0xC0 - 0xC7 */
        G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
-       I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
-       I(ImplicitOps | Stack, em_ret),
+       I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
+       I(ImplicitOps | NearBranch, em_ret),
        I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
        I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
        G(ByteOp, group11), G(0, group11),
        /* 0xD8 - 0xDF */
        N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
        /* 0xE0 - 0xE7 */
-       X3(I(SrcImmByte, em_loop)),
-       I(SrcImmByte, em_jcxz),
+       X3(I(SrcImmByte | NearBranch, em_loop)),
+       I(SrcImmByte | NearBranch, em_jcxz),
        I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
        I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
        /* 0xE8 - 0xEF */
-       I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
-       I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
+       I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
+       I(SrcImmFAddr | No64, em_jmp_far),
+       D(SrcImmByte | ImplicitOps | NearBranch),
        I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
        I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
        /* 0xF0 - 0xF7 */
@@@ -4090,7 -4102,7 +4102,7 @@@ static const struct opcode twobyte_tabl
        N, N, N, N,
        N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
        /* 0x80 - 0x8F */
-       X16(D(SrcImm)),
+       X16(D(SrcImm | NearBranch)),
        /* 0x90 - 0x9F */
        X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
        /* 0xA0 - 0xA7 */
        D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
        /* 0xC0 - 0xC7 */
        F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
-       N, D(DstMem | SrcReg | ModRM | Mov),
+       N, ID(0, &instr_dual_0f_c3),
        N, N, N, GD(0, &group9),
        /* 0xC8 - 0xCF */
        X8(I(DstReg, em_bswap)),
        N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
  };
  
+ static const struct instr_dual instr_dual_0f_38_f0 = {
+       I(DstReg | SrcMem | Mov, em_movbe), N
+ };
+ static const struct instr_dual instr_dual_0f_38_f1 = {
+       I(DstMem | SrcReg | Mov, em_movbe), N
+ };
  static const struct gprefix three_byte_0f_38_f0 = {
-       I(DstReg | SrcMem | Mov, em_movbe), N, N, N
+       ID(0, &instr_dual_0f_38_f0), N, N, N
  };
  
  static const struct gprefix three_byte_0f_38_f1 = {
-       I(DstMem | SrcReg | Mov, em_movbe), N, N, N
+       ID(0, &instr_dual_0f_38_f1), N, N, N
  };
  
  /*
@@@ -4152,8 -4172,8 +4172,8 @@@ static const struct opcode opcode_map_0
        /* 0x80 - 0xef */
        X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
        /* 0xf0 - 0xf1 */
-       GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
-       GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
+       GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
+       GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
        /* 0xf2 - 0xff */
        N, N, X4(N), X8(N)
  };
@@@ -4275,7 -4295,7 +4295,7 @@@ static int decode_operand(struct x86_em
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
-                       register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
+                       register_address(ctxt, VCPU_REGS_RDI);
                op->addr.mem.seg = VCPU_SREG_ES;
                op->val = 0;
                op->count = 1;
                fetch_register_operand(op);
                break;
        case OpCL:
 +              op->type = OP_IMM;
                op->bytes = 1;
                op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
                break;
                rc = decode_imm(ctxt, op, 1, true);
                break;
        case OpOne:
 +              op->type = OP_IMM;
                op->bytes = 1;
                op->val = 1;
                break;
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
-                       register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
+                       register_address(ctxt, VCPU_REGS_RSI);
                op->addr.mem.seg = ctxt->seg_override;
                op->val = 0;
                op->count = 1;
                op->type = OP_MEM;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
-                       register_address(ctxt,
+                       address_mask(ctxt,
                                reg_read(ctxt, VCPU_REGS_RBX) +
                                (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
                op->addr.mem.seg = ctxt->seg_override;
                ctxt->memop.bytes = ctxt->op_bytes + 2;
                goto mem_common;
        case OpES:
 +              op->type = OP_IMM;
                op->val = VCPU_SREG_ES;
                break;
        case OpCS:
 +              op->type = OP_IMM;
                op->val = VCPU_SREG_CS;
                break;
        case OpSS:
 +              op->type = OP_IMM;
                op->val = VCPU_SREG_SS;
                break;
        case OpDS:
 +              op->type = OP_IMM;
                op->val = VCPU_SREG_DS;
                break;
        case OpFS:
 +              op->type = OP_IMM;
                op->val = VCPU_SREG_FS;
                break;
        case OpGS:
 +              op->type = OP_IMM;
                op->val = VCPU_SREG_GS;
                break;
        case OpImplicit:
@@@ -4510,8 -4522,7 +4530,7 @@@ done_prefixes
  
        /* vex-prefix instructions are not implemented */
        if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
-           (mode == X86EMUL_MODE_PROT64 ||
-           (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
+           (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
                ctxt->d = NotImpl;
        }
  
                        else
                                opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
                        break;
+               case InstrDual:
+                       if ((ctxt->modrm >> 6) == 3)
+                               opcode = opcode.u.idual->mod3;
+                       else
+                               opcode = opcode.u.idual->mod012;
+                       break;
                default:
                        return EMULATION_FAILED;
                }
                return EMULATION_FAILED;
  
        if (unlikely(ctxt->d &
-                    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+           (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
+            No16))) {
                /*
                 * These are copied unconditionally here, and checked unconditionally
                 * in x86_emulate_insn.
                if (ctxt->d & NotImpl)
                        return EMULATION_FAILED;
  
-               if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
-                       ctxt->op_bytes = 8;
+               if (mode == X86EMUL_MODE_PROT64) {
+                       if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
+                               ctxt->op_bytes = 8;
+                       else if (ctxt->d & NearBranch)
+                               ctxt->op_bytes = 8;
+               }
  
                if (ctxt->d & Op3264) {
                        if (mode == X86EMUL_MODE_PROT64)
                                ctxt->op_bytes = 4;
                }
  
+               if ((ctxt->d & No16) && ctxt->op_bytes == 2)
+                       ctxt->op_bytes = 4;
                if (ctxt->d & Sse)
                        ctxt->op_bytes = 16;
                else if (ctxt->d & Mmx)
        rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
  
        if (ctxt->rip_relative)
-               ctxt->memopp->addr.mem.ea += ctxt->_eip;
+               ctxt->memopp->addr.mem.ea = address_mask(ctxt,
+                                       ctxt->memopp->addr.mem.ea + ctxt->_eip);
  
  done:
        return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
@@@ -4775,6 -4801,12 +4809,12 @@@ int x86_emulate_insn(struct x86_emulate
                                goto done;
                }
  
+               /* Instruction can only be executed in protected mode */
+               if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
+                       rc = emulate_ud(ctxt);
+                       goto done;
+               }
                /* Privileged instruction can be executed only in CPL=0 */
                if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
                        if (ctxt->d & PrivUD)
                        goto done;
                }
  
-               /* Instruction can only be executed in protected mode */
-               if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
-                       rc = emulate_ud(ctxt);
-                       goto done;
-               }
                /* Do instruction specific permission checks */
                if (ctxt->d & CheckPerm) {
                        rc = ctxt->check_perm(ctxt);
@@@ -4974,8 -5000,7 +5008,7 @@@ writeback
                        count = ctxt->src.count;
                else
                        count = ctxt->dst.count;
-               register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
-                               -count);
+               register_address_increment(ctxt, VCPU_REGS_RCX, -count);
  
                if (!string_insn_completed(ctxt)) {
                        /*
@@@ -5053,11 -5078,6 +5086,6 @@@ twobyte_insn
                ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
                                                        (s16) ctxt->src.val;
                break;
-       case 0xc3:              /* movnti */
-               ctxt->dst.bytes = ctxt->op_bytes;
-               ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
-                                                       (u32) ctxt->src.val;
-               break;
        default:
                goto cannot_emulate;
        }
diff --combined virt/kvm/arm/vgic.c
@@@ -91,6 -91,7 +91,7 @@@
  #define ACCESS_WRITE_VALUE    (3 << 1)
  #define ACCESS_WRITE_MASK(x)  ((x) & (3 << 1))
  
+ static int vgic_init(struct kvm *kvm);
  static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
  static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
  static void vgic_update_state(struct kvm *kvm);
@@@ -1607,7 -1608,7 +1608,7 @@@ static int vgic_validate_injection(stru
        }
  }
  
- static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
                                  unsigned int irq_num, bool level)
  {
        struct vgic_dist *dist = &kvm->arch.vgic;
                        vgic_dist_irq_clear_level(vcpu, irq_num);
                        if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
                                vgic_dist_irq_clear_pending(vcpu, irq_num);
-               } else {
-                       vgic_dist_irq_clear_pending(vcpu, irq_num);
                }
+               ret = false;
+               goto out;
        }
  
        enabled = vgic_irq_is_enabled(vcpu, irq_num);
  out:
        spin_unlock(&dist->lock);
  
-       return ret;
+       return ret ? cpuid : -EINVAL;
  }
  
  /**
  int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
                        bool level)
  {
-       if (likely(vgic_initialized(kvm)) &&
-           vgic_update_irq_pending(kvm, cpuid, irq_num, level))
-               vgic_kick_vcpus(kvm);
+       int ret = 0;
+       int vcpu_id;
  
-       return 0;
+       if (unlikely(!vgic_initialized(kvm))) {
+               mutex_lock(&kvm->lock);
+               ret = vgic_init(kvm);
+               mutex_unlock(&kvm->lock);
+               if (ret)
+                       goto out;
+       }
+       vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+       if (vcpu_id >= 0) {
+               /* kick the specified vcpu */
+               kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
+       }
+ out:
+       return ret;
  }
  
  static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@@ -1726,39 -1743,14 +1743,14 @@@ static int vgic_vcpu_init_maps(struct k
  
        int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
        vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
-       vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+       vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
  
        if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
                kvm_vgic_vcpu_destroy(vcpu);
                return -ENOMEM;
        }
  
-       return 0;
- }
- /**
-  * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
-  * @vcpu: pointer to the vcpu struct
-  *
-  * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
-  * this vcpu and enable the VGIC for this VCPU
-  */
- static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
- {
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       int i;
-       for (i = 0; i < dist->nr_irqs; i++) {
-               if (i < VGIC_NR_PPIS)
-                       vgic_bitmap_set_irq_val(&dist->irq_enabled,
-                                               vcpu->vcpu_id, i, 1);
-               if (i < VGIC_NR_PRIVATE_IRQS)
-                       vgic_bitmap_set_irq_val(&dist->irq_cfg,
-                                               vcpu->vcpu_id, i, VGIC_CFG_EDGE);
-               vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
-       }
+       memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
  
        /*
         * Store the number of LRs per vcpu, so we don't have to go
         */
        vgic_cpu->nr_lr = vgic->nr_lr;
  
-       vgic_enable(vcpu);
+       return 0;
  }
  
  void kvm_vgic_destroy(struct kvm *kvm)
        dist->irq_spi_cpu = NULL;
        dist->irq_spi_target = NULL;
        dist->irq_pending_on_cpu = NULL;
+       dist->nr_cpus = 0;
  }
  
  /*
   * Allocate and initialize the various data structures. Must be called
   * with kvm->lock held!
   */
- static int vgic_init_maps(struct kvm *kvm)
+ static int vgic_init(struct kvm *kvm)
  {
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int nr_cpus, nr_irqs;
-       int ret, i;
+       int ret, i, vcpu_id;
  
-       if (dist->nr_cpus)      /* Already allocated */
+       if (vgic_initialized(kvm))
                return 0;
  
        nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
        if (ret)
                goto out;
  
-       kvm_for_each_vcpu(i, vcpu, kvm) {
+       for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+               vgic_set_target_reg(kvm, 0, i);
+       kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
                ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
                if (ret) {
                        kvm_err("VGIC: Failed to allocate vcpu memory\n");
                        break;
                }
-       }
  
-       for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
-               vgic_set_target_reg(kvm, 0, i);
+               for (i = 0; i < dist->nr_irqs; i++) {
+                       if (i < VGIC_NR_PPIS)
+                               vgic_bitmap_set_irq_val(&dist->irq_enabled,
+                                                       vcpu->vcpu_id, i, 1);
+                       if (i < VGIC_NR_PRIVATE_IRQS)
+                               vgic_bitmap_set_irq_val(&dist->irq_cfg,
+                                                       vcpu->vcpu_id, i,
+                                                       VGIC_CFG_EDGE);
+               }
+               vgic_enable(vcpu);
+       }
  
  out:
        if (ret)
  }
  
  /**
-  * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
+  * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
   * @kvm: pointer to the kvm struct
   *
   * Map the virtual CPU interface into the VM before running any VCPUs.  We
   * can't do this at creation time, because user space must first set the
-  * virtual CPU interface address in the guest physical address space.  Also
-  * initialize the ITARGETSRn regs to 0 on the emulated distributor.
+  * virtual CPU interface address in the guest physical address space.
   */
- int kvm_vgic_init(struct kvm *kvm)
+ int kvm_vgic_map_resources(struct kvm *kvm)
  {
-       struct kvm_vcpu *vcpu;
-       int ret = 0, i;
+       int ret = 0;
  
        if (!irqchip_in_kernel(kvm))
                return 0;
  
        mutex_lock(&kvm->lock);
  
-       if (vgic_initialized(kvm))
+       if (vgic_ready(kvm))
                goto out;
  
        if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
                goto out;
        }
  
-       ret = vgic_init_maps(kvm);
+       /*
+        * Initialize the vgic if this hasn't already been done on demand by
+        * accessing the vgic state from userspace.
+        */
+       ret = vgic_init(kvm);
        if (ret) {
                kvm_err("Unable to allocate maps\n");
                goto out;
                goto out;
        }
  
-       kvm_for_each_vcpu(i, vcpu, kvm)
-               kvm_vgic_vcpu_init(vcpu);
        kvm->arch.vgic.ready = true;
  out:
        if (ret)
  
  int kvm_vgic_create(struct kvm *kvm)
  {
 -      int i, vcpu_lock_idx = -1, ret = 0;
 +      int i, vcpu_lock_idx = -1, ret;
        struct kvm_vcpu *vcpu;
  
        mutex_lock(&kvm->lock);
         * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run while we create the vgic.
         */
 +      ret = -EBUSY;
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!mutex_trylock(&vcpu->mutex))
                        goto out_unlock;
        }
  
        kvm_for_each_vcpu(i, vcpu, kvm) {
 -              if (vcpu->arch.has_run_once) {
 -                      ret = -EBUSY;
 +              if (vcpu->arch.has_run_once)
                        goto out_unlock;
 -              }
        }
 +      ret = 0;
  
        spin_lock_init(&kvm->arch.vgic.lock);
        kvm->arch.vgic.in_kernel = true;
@@@ -2167,7 -2171,7 +2171,7 @@@ static int vgic_attr_regs_access(struc
  
        mutex_lock(&dev->kvm->lock);
  
-       ret = vgic_init_maps(dev->kvm);
+       ret = vgic_init(dev->kvm);
        if (ret)
                goto out;
  
@@@ -2289,7 -2293,7 +2293,7 @@@ static int vgic_set_attr(struct kvm_dev
  
                mutex_lock(&dev->kvm->lock);
  
-               if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
+               if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
                        ret = -EBUSY;
                else
                        dev->kvm->arch.vgic.nr_irqs = val;