Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 2 Jan 2014 20:45:07 +0000 (12:45 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 2 Jan 2014 20:45:07 +0000 (12:45 -0800)
Pull s390 fixes from Martin Schwidefsky:
 "Two small bug fixes and a follow-up to the CONFIG_NR_CPUS change.

  A kernel compiled with CONFIG_NR_CPUS=256 will waste quite a bit of
  memory for the per-cpu arrays.  Under z/VM the maximum number of CPUs
   is 64; the code now limits the possible cpu mask to 64 if running
  under z/VM"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/pci: obtain function handle in hotplug notifier
  s390/3270: fix allocation of tty3270_screen structure
  s390/smp: improve setup of possible cpu mask

395 files changed:
Documentation/block/null_blk.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
Documentation/module-signing.txt [new file with mode: 0644]
Documentation/networking/ip-sysctl.txt
MAINTAINERS
Makefile
arch/arc/include/uapi/asm/unistd.h
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/mach-omap2/board-ldp.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
arch/arm/mach-pxa/include/mach/lubbock.h
arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-bockw.c
arch/arm/mach-shmobile/board-lager.c
arch/arm/xen/enlighten.c
arch/arm64/include/asm/xen/page-coherent.h
arch/arm64/kernel/ptrace.c
arch/powerpc/boot/dts/mpc5125twr.dts
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_asm.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/include/asm/unaligned.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/crash_dump.c
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/smp.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_interrupts.S
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/book3s_rmhandlers.S
arch/powerpc/kvm/booke.c
arch/powerpc/lib/copyuser_64.S
arch/powerpc/platforms/powernv/eeh-ioda.c
arch/powerpc/platforms/powernv/opal-lpc.c
arch/powerpc/platforms/powernv/opal-xscom.c
arch/powerpc/platforms/powernv/pci.h
arch/powerpc/platforms/pseries/lparcfg.c
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/platforms/pseries/nvram.c
arch/powerpc/platforms/pseries/pci.c
arch/sh/lib/Makefile
arch/sparc/include/asm/pgtable_64.h
arch/x86/Kconfig
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/preempt.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/mm/gup.c
block/blk-mq-sysfs.c
drivers/acpi/Kconfig
drivers/acpi/acpi_lpss.c
drivers/acpi/apei/Kconfig
drivers/acpi/apei/erst.c
drivers/ata/ahci.c
drivers/ata/ahci_imx.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/block/null_blk.c
drivers/block/skd_main.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/clk/clk-s2mps11.c
drivers/clocksource/Kconfig
drivers/clocksource/clksrc-of.c
drivers/clocksource/dw_apb_timer_of.c
drivers/clocksource/sun4i_timer.c
drivers/clocksource/time-armada-370-xp.c
drivers/cpufreq/cpufreq.c
drivers/crypto/ixp4xx_crypto.c
drivers/dma/Kconfig
drivers/dma/at_hdmac_regs.h
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/fsldma.c
drivers/dma/mv_xor.c
drivers/dma/pl330.c
drivers/dma/ppc4xx/adma.c
drivers/dma/txx9dmac.c
drivers/firewire/sbp2.c
drivers/firmware/Makefile
drivers/firmware/efi/Kconfig
drivers/firmware/efi/Makefile
drivers/firmware/efi/efi-pstore.c
drivers/gpio/gpio-msm-v2.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpio-twl4030.c
drivers/gpu/drm/armada/armada_drm.h
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/armada/armada_fbdev.c
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/qxl/Kconfig
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv770_dpm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/idle/intel_idle.c
drivers/iio/adc/ad7887.c
drivers/iio/imu/adis16400_core.c
drivers/iio/light/cm36651.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/input/input.c
drivers/input/touchscreen/zforce_ts.c
drivers/irqchip/irq-renesas-intc-irqpin.c
drivers/md/bcache/alloc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/movinggc.c
drivers/md/bcache/super.c
drivers/md/bcache/sysfs.c
drivers/md/bcache/util.c
drivers/md/bcache/util.h
drivers/md/bcache/writeback.c
drivers/net/bonding/bond_3ad.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/l2t.h
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/e1000e/80003es2lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/hamradio/yam.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/phy/phy.c
drivers/net/usb/Kconfig
drivers/net/usb/dm9601.c
drivers/net/wireless/ath/ath9k/ar9002_mac.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/of/Kconfig
drivers/of/address.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/phy/Kconfig
drivers/phy/phy-core.c
drivers/pinctrl/pinctrl-baytrail.c
drivers/pinctrl/sh-pfc/sh_pfc.h
drivers/powercap/intel_rapl.c
drivers/regulator/s2mps11.c
drivers/scsi/qla2xxx/qla_target.c
drivers/staging/comedi/drivers.c
drivers/staging/comedi/drivers/8255_pci.c
drivers/staging/iio/magnetometer/hmc5843.c
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/imx-tve.c
drivers/staging/imx-drm/ipu-v3/ipu-common.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_file.h
drivers/target/target_core_tpg.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/tty_ldsem.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/host.c
drivers/usb/chipidea/udc.c
drivers/usb/class/cdc-wdm.c
drivers/usb/dwc3/core.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/xhci-pci.c
drivers/usb/phy/Kconfig
drivers/usb/phy/phy-tegra-usb.c
drivers/usb/phy/phy-twl6030-usb.c
drivers/usb/serial/option.c
drivers/usb/serial/zte_ev.c
drivers/virtio/virtio_balloon.c
drivers/xen/balloon.c
drivers/xen/grant-table.c
drivers/xen/privcmd.c
fs/aio.c
fs/ceph/addr.c
fs/ceph/inode.c
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/dir.c
fs/cifs/inode.c
fs/cifs/link.c
fs/ext2/super.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/jbd2/journal.c
fs/jbd2/recovery.c
fs/jbd2/transaction.c
fs/pstore/platform.c
fs/sysfs/file.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_dir2_node.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_trans_buf.c
include/asm-generic/pgtable.h
include/asm-generic/preempt.h
include/drm/drm_pciids.h
include/linux/auxvec.h
include/linux/libata.h
include/linux/lockref.h
include/linux/math64.h
include/linux/migrate.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/percpu-defs.h
include/linux/pstore.h
include/linux/reboot.h
include/linux/rtnetlink.h
include/linux/sched.h
include/linux/skbuff.h
include/rdma/ib_verbs.h
include/target/target_core_base.h
include/uapi/drm/radeon_drm.h
include/uapi/drm/vmwgfx_drm.h
include/uapi/linux/input.h
include/uapi/linux/perf_event.h
include/xen/interface/io/blkif.h
init/Kconfig
kernel/Makefile
kernel/bounds.c
kernel/cgroup.c
kernel/events/core.c
kernel/fork.c
kernel/freezer.c
kernel/kexec.c
kernel/power/console.c
kernel/reboot.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/trace/ftrace.c
kernel/user.c
mm/Kconfig
mm/compaction.c
mm/huge_memory.c
mm/memory-failure.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mprotect.c
mm/page_alloc.c
mm/pgtable-generic.c
mm/rmap.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/icmp_socket.c
net/batman-adv/main.c
net/batman-adv/network-coding.c
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/bluetooth/hci_sock.c
net/core/dev.c
net/core/neighbour.c
net/dccp/probe.c
net/ipv4/inet_diag.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/udp.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/route.c
net/ipv6/sit.c
net/netfilter/ipvs/ip_vs_nfct.c
net/netfilter/nf_conntrack_seqadj.c
net/netfilter/nf_conntrack_timestamp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nft_exthdr.c
net/rds/ib.c
net/rose/af_rose.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ipt.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sctp/probe.c
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/unix/af_unix.c
net/wireless/radiotap.c
net/wireless/sme.c
scripts/link-vmlinux.sh
security/selinux/hooks.c
sound/core/pcm_lib.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/atmel/atmel_ssc_dai.c
sound/soc/atmel/sam9x5_wm8731.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/imx-wm8962.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/soc-generic-dmaengine-pcm.c
sound/soc/soc-pcm.c
sound/soc/tegra/tegra20_i2s.c
sound/soc/tegra/tegra20_spdif.c
sound/soc/tegra/tegra30_i2s.c
tools/power/cpupower/utils/cpupower-set.c

diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
new file mode 100644 (file)
index 0000000..b2830b4
--- /dev/null
@@ -0,0 +1,72 @@
+Null block device driver
+================================================================================
+
+I. Overview
+
+The null block device (/dev/nullb*) is used for benchmarking the various
+block-layer implementations. It emulates a block device of X gigabytes in size.
+The following instances are possible:
+
+  Single-queue block-layer
+    - Request-based.
+    - Single submission queue per device.
+    - Implements IO scheduling algorithms (CFQ, Deadline, noop).
+  Multi-queue block-layer
+    - Request-based.
+    - Configurable submission queues per device.
+  No block-layer (Known as bio-based)
+    - Bio-based. IO requests are submitted directly to the device driver.
+    - Directly accepts bio data structures and returns them.
+
+All of them have a completion queue for each core in the system.
+
+II. Module parameters applicable for all instances:
+
+queue_mode=[0-2]: Default: 2-Multi-queue
+  Selects which block-layer the module should instantiate with.
+
+  0: Bio-based.
+  1: Single-queue.
+  2: Multi-queue.
+
+home_node=[0..nr_nodes]: Default: NUMA_NO_NODE
+  Selects what CPU node the data structures are allocated from.
+
+gb=[Size in GB]: Default: 250GB
+  The size of the device reported to the system.
+
+bs=[Block size (in bytes)]: Default: 512 bytes
+  The block size reported to the system.
+
+nr_devices=[Number of devices]: Default: 2
+  Number of block devices instantiated. They are instantiated as /dev/nullb0,
+  etc.
+
+irq_mode=[0-2]: Default: 1-Soft-irq
+  The completion mode used for completing IOs to the block-layer.
+
+  0: None.
+  1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
+     when IOs are issued from a CPU node other than the home node the device is
+     connected to.
+  2: Timer: Waits a specific period (completion_nsec) for each IO before
+     completion.
+
+completion_nsec=[ns]: Default: 10,000ns
+  Combined with irq_mode=2 (timer). The time each completion event must wait.
+
+submit_queues=[0..nr_cpus]:
+  The number of submission queues attached to the device driver. If unset, it
+  defaults to 1 on single-queue and bio-based instances. For multi-queue,
+  it is ignored when the use_per_node_hctx module parameter is 1.
+
+hw_queue_depth=[0..qdepth]: Default: 64
+  The hardware queue depth of the device.
+
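+  As an illustration only (the values here are arbitrary, not recommendations),
+  a 4GB multi-queue instance with a 4096-byte block size and four submission
+  queues could be loaded with:
+
+    modprobe null_blk queue_mode=2 gb=4 bs=4096 nr_devices=1 submit_queues=4
+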
+III. Multi-queue specific parameters
+
+use_per_node_hctx=[0/1]: Default: 0
+  0: The number of submit queues is set to the value of the submit_queues
+     parameter.
+  1: The multi-queue block layer is instantiated with a hardware dispatch
+     queue for each CPU node in the system.
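+
+  Again as a sketch only, a per-node hardware-queue setup would then be:
+
+    modprobe null_blk queue_mode=2 use_per_node_hctx=1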
index 50680a5..b9e9bd8 100644 (file)
@@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
                        * atapi_dmadir: Enable ATAPI DMADIR bridge support
 
+                       * disable: Disable this device.
+
                        If there are multiple matching configurations changing
                        the same attribute, the last one is used.
 
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
new file mode 100644 (file)
index 0000000..2b40e04
--- /dev/null
@@ -0,0 +1,240 @@
+                       ==============================
+                       KERNEL MODULE SIGNING FACILITY
+                       ==============================
+
+CONTENTS
+
+ - Overview.
+ - Configuring module signing.
+ - Generating signing keys.
+ - Public keys in the kernel.
+ - Manually signing modules.
+ - Signed modules and stripping.
+ - Loading signed modules.
+ - Non-valid signatures and unsigned modules.
+ - Administering/protecting the private key.
+
+
+========
+OVERVIEW
+========
+
+The kernel module signing facility cryptographically signs modules during
+installation and then checks the signature upon loading the module.  This
+allows increased kernel security by disallowing the loading of unsigned modules
+or modules signed with an invalid key.  Module signing increases security by
+making it harder to load a malicious module into the kernel.  The module
+signature checking is done by the kernel so that it is not necessary to have
+trusted userspace bits.
+
+This facility uses X.509 ITU-T standard certificates to encode the public keys
+involved.  The signatures are not themselves encoded in any industrial standard
+type.  The facility currently only supports the RSA public key encryption
+standard (though it is pluggable and permits others to be used).  The possible
+hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
+SHA-512 (the algorithm is selected by data in the signature).
+
+
+==========================
+CONFIGURING MODULE SIGNING
+==========================
+
+The module signing facility is enabled by going to the "Enable Loadable Module
+Support" section of the kernel configuration and turning on
+
+       CONFIG_MODULE_SIG       "Module signature verification"
+
+This has a number of options available:
+
+ (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)
+
+     This specifies how the kernel should deal with a module that has a
+     signature for which the key is not known or a module that is unsigned.
+
+     If this is off (ie. "permissive"), then modules for which the key is not
+     available and modules that are unsigned are permitted, but the kernel will
+     be marked as being tainted.
+
+     If this is on (ie. "restrictive"), only modules that have a valid
+     signature that can be verified by a public key in the kernel's possession
+     will be loaded.  All other modules will generate an error.
+
+     Irrespective of the setting here, if the module has a signature block that
+     cannot be parsed, it will be rejected out of hand.
+
+
+ (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)
+
+     If this is on then modules will be automatically signed during the
+     modules_install phase of a build.  If this is off, then the modules must
+     be signed manually using:
+
+       scripts/sign-file
+
+
+ (3) "Which hash algorithm should modules be signed with?"
+
+     This presents a choice of which hash algorithm the installation phase will
+     sign the modules with:
+
+       CONFIG_MODULE_SIG_SHA1          "Sign modules with SHA-1"
+       CONFIG_MODULE_SIG_SHA224        "Sign modules with SHA-224"
+       CONFIG_MODULE_SIG_SHA256        "Sign modules with SHA-256"
+       CONFIG_MODULE_SIG_SHA384        "Sign modules with SHA-384"
+       CONFIG_MODULE_SIG_SHA512        "Sign modules with SHA-512"
+
+     The algorithm selected here will also be built into the kernel (rather
+     than being a module) so that modules signed with that algorithm can have
+     their signatures checked without causing a dependency loop.
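+
+     For illustration only (assuming SHA-256 is the desired algorithm), the
+     resulting fragment of the kernel configuration might read:
+
+       CONFIG_MODULE_SIG=y
+       CONFIG_MODULE_SIG_ALL=y
+       CONFIG_MODULE_SIG_SHA256=y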
+
+
+=======================
+GENERATING SIGNING KEYS
+=======================
+
+Cryptographic keypairs are required to generate and check signatures.  A
+private key is used to generate a signature and the corresponding public key is
+used to check it.  The private key is only needed during the build, after which
+it can be deleted or stored securely.  The public key gets built into the
+kernel so that it can be used to check the signatures as the modules are
+loaded.
+
+Under normal conditions, the kernel build will automatically generate a new
+keypair using openssl if one does not exist in the files:
+
+       signing_key.priv
+       signing_key.x509
+
+during the building of vmlinux (the public part of the key needs to be built
+into vmlinux) using parameters in the:
+
+       x509.genkey
+
+file (which is also generated if it does not already exist).
+
+It is strongly recommended that you provide your own x509.genkey file.
+
+Most notably, in the x509.genkey file, the req_distinguished_name section
+should be altered from the default:
+
+       [ req_distinguished_name ]
+       O = Magrathea
+       CN = Glacier signing key
+       emailAddress = slartibartfast@magrathea.h2g2
+
+The generated RSA key size can also be set with:
+
+       [ req ]
+       default_bits = 4096
+
+
+It is also possible to manually generate the private/public key files using the
+x509.genkey key generation configuration file in the root node of the Linux
+kernel source tree and the openssl command.  The following is an example to
+generate the public/private key files:
+
+       openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
+          -config x509.genkey -outform DER -out signing_key.x509 \
+          -keyout signing_key.priv
+
+
+=========================
+PUBLIC KEYS IN THE KERNEL
+=========================
+
+The kernel contains a ring of public keys that can be viewed by root.  They're
+in a keyring called ".system_keyring" that can be seen by:
+
+       [root@deneb ~]# cat /proc/keys
+       ...
+       223c7853 I------     1 perm 1f030000     0     0 keyring   .system_keyring: 1
+       302d2d52 I------     1 perm 1f010000     0     0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
+       ...
+
+Beyond the public key generated specifically for module signing, any file
+placed in the kernel source root directory or the kernel build root directory
+whose name is suffixed with ".x509" will be assumed to be an X.509 public key
+and will be added to the keyring.
+
+Further, the architecture code may take public keys from a hardware store and
+add those in also (e.g. from the UEFI key database).
+
+Finally, it is possible to add additional public keys by doing:
+
+       keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]
+
+e.g.:
+
+       keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509
+
+Note, however, that the kernel will only permit keys to be added to
+.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
+that is already resident in the .system_keyring at the time the key was added.
+
+
+=========================
+MANUALLY SIGNING MODULES
+=========================
+
+To manually sign a module, use the scripts/sign-file tool available in
+the Linux kernel source tree.  The script requires 4 arguments:
+
+       1.  The hash algorithm (e.g., sha256)
+       2.  The private key filename
+       3.  The public key filename
+       4.  The kernel module to be signed
+
+The following is an example to sign a kernel module:
+
+       scripts/sign-file sha512 kernel-signkey.priv \
+               kernel-signkey.x509 module.ko
+
+The hash algorithm used does not have to match the one configured, but if it
+doesn't, you should make sure that hash algorithm is either built into the
+kernel or can be loaded without requiring itself.
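+
+For instance (the module name here is illustrative and depends on the kernel
+configuration), the generic SHA-512 implementation could be preloaded with:
+
+       modprobe sha512_generic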
+
+
+============================
+SIGNED MODULES AND STRIPPING
+============================
+
+A signed module has a digital signature simply appended at the end.  The string
+"~Module signature appended~." at the end of the module's file confirms that a
+signature is present but it does not confirm that the signature is valid!
+
+Signed modules are BRITTLE as the signature is outside of the defined ELF
+container.  Thus they MAY NOT be stripped once the signature is computed and
+attached.  Note the entire module is the signed payload, including any and all
+debug information present at the time of signing.
+
+
+======================
+LOADING SIGNED MODULES
+======================
+
+Modules are loaded with insmod, modprobe, init_module() or finit_module(),
+exactly as for unsigned modules as no processing is done in userspace.  The
+signature checking is all done within the kernel.
+
+
+=========================================
+NON-VALID SIGNATURES AND UNSIGNED MODULES
+=========================================
+
+If CONFIG_MODULE_SIG_FORCE is enabled or module.sig_enforce=1 is supplied on
+the kernel command line, the kernel will only load validly signed modules
+for which it has a public key.  Otherwise, it will also load modules that are
+unsigned.  Any module for which the kernel has a key, but which proves to have
+a signature mismatch, will not be permitted to load.
+
+Any module that has an unparseable signature will be rejected.
+
+
+=========================================
+ADMINISTERING/PROTECTING THE PRIVATE KEY
+=========================================
+
+Since the private key is used to sign modules, viruses and malware could use
+the private key to sign modules and compromise the operating system.  The
+private key must be either destroyed or moved to a secure location and not kept
+in the root node of the kernel source tree.
index 3c12d9a..8a984e9 100644 (file)
@@ -16,8 +16,12 @@ ip_default_ttl - INTEGER
        Default: 64 (as recommended by RFC1700)
 
 ip_no_pmtu_disc - BOOLEAN
-       Disable Path MTU Discovery.
-       default FALSE
+       Disable Path MTU Discovery. If enabled and a
+       fragmentation-required ICMP is received, the PMTU to this
+       destination will be set to min_pmtu (see below). You will need
+       to raise min_pmtu to the smallest interface MTU on your system
+       manually if you want to avoid locally generated fragments.
+       Default: FALSE
 
 min_pmtu - INTEGER
        default 552 - minimum discovered Path MTU
index 1344816..21c038f 100644 (file)
@@ -783,7 +783,7 @@ F:  arch/arm/boot/dts/sama*.dts
 F:     arch/arm/boot/dts/sama*.dtsi
 
 ARM/CALXEDA HIGHBANK ARCHITECTURE
-M:     Rob Herring <rob.herring@calxeda.com>
+M:     Rob Herring <robh@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-highbank/
@@ -1008,6 +1008,8 @@ M:        Santosh Shilimkar <santosh.shilimkar@ti.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-keystone/
+F:     drivers/clk/keystone/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
@@ -3761,9 +3763,11 @@ F:       include/uapi/linux/gigaset_dev.h
 
 GPIO SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
-S:     Maintained
+M:     Alexandre Courbot <gnurou@gmail.com>
 L:     linux-gpio@vger.kernel.org
-F:     Documentation/gpio.txt
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+S:     Maintained
+F:     Documentation/gpio/
 F:     drivers/gpio/
 F:     include/linux/gpio*
 F:     include/asm-generic/gpio.h
@@ -3831,6 +3835,12 @@ T:       git git://linuxtv.org/media_tree.git
 S:     Maintained
 F:     drivers/media/usb/gspca/
 
+GUID PARTITION TABLE (GPT)
+M:     Davidlohr Bueso <davidlohr@hp.com>
+L:     linux-efi@vger.kernel.org
+S:     Maintained
+F:     block/partitions/efi.*
+
 STK1160 USB VIDEO CAPTURE DRIVER
 M:     Ezequiel Garcia <elezegarcia@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -5911,12 +5921,21 @@ M:      Steffen Klassert <steffen.klassert@secunet.com>
 M:     Herbert Xu <herbert@gondor.apana.org.au>
 M:     "David S. Miller" <davem@davemloft.net>
 L:     netdev@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
 S:     Maintained
 F:     net/xfrm/
 F:     net/key/
 F:     net/ipv4/xfrm*
+F:     net/ipv4/esp4.c
+F:     net/ipv4/ah4.c
+F:     net/ipv4/ipcomp.c
+F:     net/ipv4/ip_vti.c
 F:     net/ipv6/xfrm*
+F:     net/ipv6/esp6.c
+F:     net/ipv6/ah6.c
+F:     net/ipv6/ipcomp6.c
+F:     net/ipv6/ip6_vti.c
 F:     include/uapi/linux/xfrm.h
 F:     include/net/xfrm.h
 
@@ -6237,7 +6256,7 @@ F:        drivers/i2c/busses/i2c-ocores.c
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:     Grant Likely <grant.likely@linaro.org>
-M:     Rob Herring <rob.herring@calxeda.com>
+M:     Rob Herring <robh+dt@kernel.org>
 L:     devicetree@vger.kernel.org
 W:     http://fdt.secretlab.ca
 T:     git git://git.secretlab.ca/git/linux-2.6.git
@@ -6249,7 +6268,7 @@ K:        of_get_property
 K:     of_match_table
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
-M:     Rob Herring <rob.herring@calxeda.com>
+M:     Rob Herring <robh+dt@kernel.org>
 M:     Pawel Moll <pawel.moll@arm.com>
 M:     Mark Rutland <mark.rutland@arm.com>
 M:     Ian Campbell <ijc+devicetree@hellion.org.uk>
@@ -9571,7 +9590,7 @@ F:        drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
 P:     Silicon Graphics Inc
-M:     Dave Chinner <dchinner@fromorbit.com>
+M:     Dave Chinner <david@fromorbit.com>
 M:     Ben Myers <bpm@sgi.com>
 M:     xfs@oss.sgi.com
 L:     xfs@oss.sgi.com
index 858a147..ab80be7 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
 # Select initial ramdisk compression format, default is gzip(1).
 # This shall be used by the dracut(8) tool while creating an initramfs image.
 #
-INITRD_COMPRESS=gzip
-ifeq ($(CONFIG_RD_BZIP2), y)
-        INITRD_COMPRESS=bzip2
-else ifeq ($(CONFIG_RD_LZMA), y)
-        INITRD_COMPRESS=lzma
-else ifeq ($(CONFIG_RD_XZ), y)
-        INITRD_COMPRESS=xz
-else ifeq ($(CONFIG_RD_LZO), y)
-        INITRD_COMPRESS=lzo
-else ifeq ($(CONFIG_RD_LZ4), y)
-        INITRD_COMPRESS=lz4
-endif
-export INITRD_COMPRESS
+INITRD_COMPRESS-y                  := gzip
+INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
+INITRD_COMPRESS-$(CONFIG_RD_LZMA)  := lzma
+INITRD_COMPRESS-$(CONFIG_RD_XZ)    := xz
+INITRD_COMPRESS-$(CONFIG_RD_LZO)   := lzo
+INITRD_COMPRESS-$(CONFIG_RD_LZ4)   := lz4
+# do not export INITRD_COMPRESS, since we didn't actually
+# choose a sane default compression above.
+# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
 
 ifdef CONFIG_MODULE_SIG_ALL
 MODSECKEY = ./signing_key.priv
index 68125dd..39e58d1 100644 (file)
@@ -8,7 +8,11 @@
 
 /******** no-legacy-syscalls-ABI *******/
 
-#ifndef _UAPI_ASM_ARC_UNISTD_H
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_ASM_ARC_UNISTD_H
 
 #define __ARCH_WANT_SYS_EXECVE
@@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
 #define __NR_sysfs             (__NR_arch_specific_syscall + 3)
 __SYSCALL(__NR_sysfs, sys_sysfs)
 
+#undef __SYSCALL
+
 #endif
index ee845fa..9987dd0 100644 (file)
@@ -87,9 +87,9 @@
                interrupts = <1 9 0xf04>;
        };
 
-       gpio0: gpio@ffc40000 {
+       gpio0: gpio@e6050000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-               reg = <0 0xffc40000 0 0x2c>;
+               reg = <0 0xe6050000 0 0x50>;
                interrupt-parent = <&gic>;
                interrupts = <0 4 0x4>;
                #gpio-cells = <2>;
@@ -99,9 +99,9 @@
                interrupt-controller;
        };
 
-       gpio1: gpio@ffc41000 {
+       gpio1: gpio@e6051000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-               reg = <0 0xffc41000 0 0x2c>;
+               reg = <0 0xe6051000 0 0x50>;
                interrupt-parent = <&gic>;
                interrupts = <0 5 0x4>;
                #gpio-cells = <2>;
                interrupt-controller;
        };
 
-       gpio2: gpio@ffc42000 {
+       gpio2: gpio@e6052000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-               reg = <0 0xffc42000 0 0x2c>;
+               reg = <0 0xe6052000 0 0x50>;
                interrupt-parent = <&gic>;
                interrupts = <0 6 0x4>;
                #gpio-cells = <2>;
                interrupt-controller;
        };
 
-       gpio3: gpio@ffc43000 {
+       gpio3: gpio@e6053000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-               reg = <0 0xffc43000 0 0x2c>;
+               reg = <0 0xe6053000 0 0x50>;
                interrupt-parent = <&gic>;
                interrupts = <0 7 0x4>;
                #gpio-cells = <2>;
                interrupt-controller;
        };
 
-       gpio4: gpio@ffc44000 {
+       gpio4: gpio@e6054000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-               reg = <0 0xffc44000 0 0x2c>;
+               reg = <0 0xe6054000 0 0x50>;
                interrupt-parent = <&gic>;
                interrupts = <0 8 0x4>;
                #gpio-cells = <2>;
                interrupt-controller;
        };
 
-       gpio5: gpio@ffc45000 {
+       gpio5: gpio@e6055000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-               reg = <0 0xffc45000 0 0x2c>;
+               reg = <0 0xe6055000 0 0x50>;
                interrupt-parent = <&gic>;
                interrupts = <0 9 0x4>;
                #gpio-cells = <2>;
 
        sdhi0: sdhi@ee100000 {
                compatible = "renesas,sdhi-r8a7790";
-               reg = <0 0xee100000 0 0x100>;
+               reg = <0 0xee100000 0 0x200>;
                interrupt-parent = <&gic>;
                interrupts = <0 165 4>;
                cap-sd-highspeed;
 
        sdhi1: sdhi@ee120000 {
                compatible = "renesas,sdhi-r8a7790";
-               reg = <0 0xee120000 0 0x100>;
+               reg = <0 0xee120000 0 0x200>;
                interrupt-parent = <&gic>;
                interrupts = <0 166 4>;
                cap-sd-highspeed;
index 4ec8d82..44a59c3 100644 (file)
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)
 
 static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
 {
+       int res;
+
        /* LCD enable GPIO */
        ldp_lcd_pdata.enable_gpio = gpio + 7;
 
        /* Backlight enable GPIO */
        ldp_lcd_pdata.backlight_gpio = gpio + 15;
 
+       res = platform_device_register(&ldp_lcd_device);
+       if (res)
+               pr_err("Unable to register LCD: %d\n", res);
+
        return 0;
 }
 
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
 
 static struct platform_device *ldp_devices[] __initdata = {
        &ldp_gpio_keys_device,
-       &ldp_lcd_device,
 };
 
 #ifdef CONFIG_OMAP_MUX
index 58347bb..4cf1655 100644 (file)
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
        { "dss_hdmi", "omapdss_hdmi", -1 },
 };
 
+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+       u32 enable_mask, enable_shift;
+       u32 pipd_mask, pipd_shift;
+       u32 reg;
+
+       if (dsi_id == 0) {
+               enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+               enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+               pipd_mask = OMAP4_DSI1_PIPD_MASK;
+               pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+       } else if (dsi_id == 1) {
+               enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+               enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+               pipd_mask = OMAP4_DSI2_PIPD_MASK;
+               pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+       } else {
+               return -ENODEV;
+       }
+
+       reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+       reg &= ~enable_mask;
+       reg &= ~pipd_mask;
+
+       reg |= (lanes << enable_shift) & enable_mask;
+       reg |= (lanes << pipd_shift) & pipd_mask;
+
+       omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+       return 0;
+}
+
 static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
 {
+       if (cpu_is_omap44xx())
+               return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
        return 0;
 }
 
 static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
 {
+       if (cpu_is_omap44xx())
+               omap4_dsi_mux_pads(dsi_id, 0);
 }
 
 static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
index 56cebb0..d23c77f 100644 (file)
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
 
 /* gpmc */
 static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
-       { .irq = 20 },
+       { .irq = 20 + OMAP_INTC_START, },
        { .irq = -1 }
 };
 
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
-       { .irq = 52 },
+       { .irq = 52 + OMAP_INTC_START, },
        { .irq = -1 }
 };
 
index d337429..4c3b1e6 100644 (file)
@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-       { .irq = 20 },
+       { .irq = 20 + OMAP_INTC_START, },
        { .irq = -1 }
 };
 
@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
-       { .irq = 24 },
+       { .irq = 24 + OMAP_INTC_START, },
        { .irq = -1 }
 };
 
@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
-       { .irq = 28 },
+       { .irq = 28 + OMAP_INTC_START, },
        { .irq = -1 }
 };
 
index db32d53..18f333c 100644 (file)
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
        .class          = &dra7xx_uart_hwmod_class,
        .clkdm_name     = "l4per_clkdm",
        .main_clk       = "uart1_gfclk_mux",
-       .flags          = HWMOD_SWSUP_SIDLE_ACT,
+       .flags          = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
index 2a086e8..958cd6a 100644 (file)
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <mach/irqs.h>
+
 #define LUBBOCK_ETH_PHYS       PXA_CS3_PHYS
 
 #define LUBBOCK_FPGA_PHYS      PXA_CS2_PHYS
index 7eb9a10..2fddf38 100644 (file)
@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
 */
 
-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
 #include <linux/of_platform.h>
 
 #include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
                panic("SoC is not S3C64xx!");
 }
 
-static void __init s3c64xx_dt_init_irq(void)
-{
-       of_clk_init(NULL);
-       samsung_wdt_reset_of_init();
-       irqchip_init();
-};
-
 static void __init s3c64xx_dt_init_machine(void)
 {
+       samsung_wdt_reset_of_init();
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
        /* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
        .dt_compat      = s3c64xx_dt_compat,
        .map_io         = s3c64xx_dt_map_io,
-       .init_irq       = s3c64xx_dt_init_irq,
        .init_machine   = s3c64xx_dt_init_machine,
        .restart        = s3c64xx_dt_restart,
 MACHINE_END
index 958e3cb..c186891 100644 (file)
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
        REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
 };
 
+/* Fixed 3.3V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+       REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
 /* Fixed 3.3V regulator to be used by SDHI0 */
 static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
        REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)
 
        regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
                                     ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+       regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+                                    ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
 
        pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
        pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
index 3861152..3c4995a 100644 (file)
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
                        .id             = i,
                        .data           = &rsnd_card_info[i],
                        .size_data      = sizeof(struct asoc_simple_card_info),
-                       .dma_mask       = ~0,
+                       .dma_mask       = DMA_BIT_MASK(32),
                };
 
                platform_device_register_full(&cardinfo);
index a8d3ce6..e0406fd 100644 (file)
@@ -245,7 +245,9 @@ static void __init lager_init(void)
 {
        lager_add_standard_devices();
 
-       phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+       if (IS_ENABLED(CONFIG_PHYLIB))
+               phy_register_fixup_for_id("r8a7790-ether-ff:01",
+                                         lager_ksz8041_fixup);
 }
 
 static const char * const lager_boards_compat_dt[] __initconst = {
index 83e4f95..8550123 100644 (file)
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
        struct remap_data *info = data;
        struct page *page = info->pages[info->index++];
        unsigned long pfn = page_to_pfn(page);
-       pte_t pte = pfn_pte(pfn, info->prot);
+       pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
        if (map_foreign_page(pfn, info->fgmfn, info->domid))
                return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
        }
        if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
                return 0;
-       xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+       xen_hvm_resume_frames = res.start;
        xen_events_irq = irq_of_parse_and_map(node, 0);
        pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-                       version, xen_events_irq, xen_hvm_resume_frames);
+                       version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
        xen_domain_type = XEN_HVM_DOMAIN;
 
        xen_setup_features();
index 2820f1a..dde3fc9 100644 (file)
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
 {
-       __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
 {
-       __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-       __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-       __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
index 6777a21..6a8928b 100644 (file)
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
        int err, len, type, disabled = !ctrl.enabled;
 
-       if (disabled) {
-               len = 0;
-               type = HW_BREAKPOINT_EMPTY;
-       } else {
-               err = arch_bp_generic_fields(ctrl, &len, &type);
-               if (err)
-                       return err;
-
-               switch (note_type) {
-               case NT_ARM_HW_BREAK:
-                       if ((type & HW_BREAKPOINT_X) != type)
-                               return -EINVAL;
-                       break;
-               case NT_ARM_HW_WATCH:
-                       if ((type & HW_BREAKPOINT_RW) != type)
-                               return -EINVAL;
-                       break;
-               default:
+       attr->disabled = disabled;
+       if (disabled)
+               return 0;
+
+       err = arch_bp_generic_fields(ctrl, &len, &type);
+       if (err)
+               return err;
+
+       switch (note_type) {
+       case NT_ARM_HW_BREAK:
+               if ((type & HW_BREAKPOINT_X) != type)
                        return -EINVAL;
-               }
+               break;
+       case NT_ARM_HW_WATCH:
+               if ((type & HW_BREAKPOINT_RW) != type)
+                       return -EINVAL;
+               break;
+       default:
+               return -EINVAL;
        }
 
        attr->bp_len    = len;
        attr->bp_type   = type;
-       attr->disabled  = disabled;
 
        return 0;
 }
index 4177b62..a618dfc 100644 (file)
@@ -58,7 +58,6 @@
                compatible = "fsl,mpc5121-immr";
                #address-cells = <1>;
                #size-cells = <1>;
-               #interrupt-cells = <2>;
                ranges = <0x0 0x80000000 0x400000>;
                reg = <0x80000000 0x400000>;
                bus-frequency = <66000000>;     // 66 MHz ips bus
                        reg = <0xA000 0x1000>;
                };
 
+               // disable USB1 port
+               // TODO:
+               // correct pinmux config and fix USB3320 ulpi dependency
+               // before re-enabling it
                usb@3000 {
                        compatible = "fsl,mpc5121-usb2-dr";
                        reg = <0x3000 0x400>;
                        interrupts = <43 0x8>;
                        dr_mode = "host";
                        phy_type = "ulpi";
+                       status = "disabled";
                };
 
                // 5125 PSCs are not 52xx or 5121 PSC compatible
index 894662a..243ce69 100644 (file)
@@ -284,7 +284,7 @@ do_kvm_##n:                                                         \
        subi    r1,r1,INT_FRAME_SIZE;   /* alloc frame on kernel stack  */ \
        beq-    1f;                                                        \
        ld      r1,PACAKSAVE(r13);      /* kernel stack to use          */ \
-1:     cmpdi   cr1,r1,0;               /* check if r1 is in userspace  */ \
+1:     cmpdi   cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace  */ \
        blt+    cr1,3f;                 /* abort if it is               */ \
        li      r1,(n);                 /* will be reloaded later       */ \
        sth     r1,PACA_TRAP_SAVE(r13);                                    \
index 4a594b7..bc23b1b 100644 (file)
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+                                struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+                                  struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
index 0bd9348..192917d 100644 (file)
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
        ulong vmhandler;
        ulong scratch0;
        ulong scratch1;
+       ulong scratch2;
        u8 in_guest;
        u8 restore_hid5;
        u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+       bool in_use;
        ulong gpr[14];
        u32 cr;
        u32 xer;
index 033c06b..7bdcf34 100644 (file)
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
 int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
 
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
                       uint32_t addr, uint32_t data, uint32_t sz);
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
-                     uint32_t addr, uint32_t *data, uint32_t sz);
+                     uint32_t addr, __be32 *data, uint32_t sz);
 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
index 9ee1261..aace905 100644 (file)
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
index 5f1b1e3..8296381 100644 (file)
@@ -4,13 +4,18 @@
 #ifdef __KERNEL__
 
 /*
- * The PowerPC can do unaligned accesses itself in big endian mode.
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
  */
 #include <linux/unaligned/access_ok.h>
 #include <linux/unaligned/generic.h>
 
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned  __get_unaligned_le
+#define put_unaligned  __put_unaligned_le
+#else
 #define get_unaligned  __get_unaligned_be
 #define put_unaligned  __put_unaligned_be
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_UNALIGNED_H */
index 2ea5cc0..d3de010 100644 (file)
@@ -576,6 +576,7 @@ int main(void)
        HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
        HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
        HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+       HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
        HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
        HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
        HSTATE_FIELD(HSTATE_NAPPING, napping);
index 779a78c..11c1d06 100644 (file)
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 {
        unsigned long addr;
-       const u32 *basep, *sizep;
+       const __be32 *basep, *sizep;
        unsigned int rtas_start = 0, rtas_end = 0;
 
        basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
        sizep = of_get_property(rtas.dev, "rtas-size", NULL);
 
        if (basep && sizep) {
-               rtas_start = *basep;
-               rtas_end = *basep + *sizep;
+               rtas_start = be32_to_cpup(basep);
+               rtas_end = rtas_start + be32_to_cpup(sizep);
        }
 
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
index 2ae41ab..4f0946d 100644 (file)
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
         * of the function that the cpu should jump to to continue
         * initialization.
         */
+       .balign 8
        .globl  __secondary_hold_spinloop
 __secondary_hold_spinloop:
        .llong  0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
        mtctr   r8
        bctr
 
+.balign 8
 p_end: .llong  _end - _stext
 
 4:     /* Now copy the rest of the kernel up to _end */
index 3386d8a..4a96556 100644 (file)
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
        /*
         * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
         */
        mtmsr(mfmsr() & ~MSR_DE);
 
-       mtspr(SPRN_IAC1, thread->debug.iac1);
-       mtspr(SPRN_IAC2, thread->debug.iac2);
+       mtspr(SPRN_IAC1, debug->iac1);
+       mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-       mtspr(SPRN_IAC3, thread->debug.iac3);
-       mtspr(SPRN_IAC4, thread->debug.iac4);
+       mtspr(SPRN_IAC3, debug->iac3);
+       mtspr(SPRN_IAC4, debug->iac4);
 #endif
-       mtspr(SPRN_DAC1, thread->debug.dac1);
-       mtspr(SPRN_DAC2, thread->debug.dac2);
+       mtspr(SPRN_DAC1, debug->dac1);
+       mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-       mtspr(SPRN_DVC1, thread->debug.dvc1);
-       mtspr(SPRN_DVC2, thread->debug.dvc2);
+       mtspr(SPRN_DVC1, debug->dvc1);
+       mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-       mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-       mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+       mtspr(SPRN_DBCR0, debug->dbcr0);
+       mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-       mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+       mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
        if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-               || (new_thread->debug.dbcr0 & DBCR0_IDM))
-                       prime_debug_regs(new_thread);
+               || (new_debug->dbcr0 & DBCR0_IDM))
+                       prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else  /* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-       switch_booke_debug_regs(&new->thread);
+       switch_booke_debug_regs(&new->thread.debug);
 #else
 /*
  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
index 75fb404..2e3d2bf 100644 (file)
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
                        flush_fp_to_thread(child);
                        if (fpidx < (PT_FPSCR - PT_FPR0))
-                               memcpy(&tmp, &child->thread.fp_state.fpr,
+                               memcpy(&tmp, &child->thread.TS_FPR(fpidx),
                                       sizeof(long));
                        else
                                tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
                        flush_fp_to_thread(child);
                        if (fpidx < (PT_FPSCR - PT_FPR0))
-                               memcpy(&child->thread.fp_state.fpr, &data,
+                               memcpy(&child->thread.TS_FPR(fpidx), &data,
                                       sizeof(long));
                        else
                                child->thread.fp_state.fpscr = data;
index febc804..bc76cc6 100644 (file)
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
        if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
            (dn = of_find_node_by_path("/rtas"))) {
                int num_addr_cell, num_size_cell, maxcpus;
-               const unsigned int *ireg;
+               const __be32 *ireg;
 
                num_addr_cell = of_n_addr_cells(dn);
                num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
                if (!ireg)
                        goto out;
 
-               maxcpus = ireg[num_addr_cell + num_size_cell];
+               maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
 
                /* Double maxcpus for processors which have SMT capability */
                if (cpu_has_feature(CPU_FTR_SMT))
index a3b64f3..c1cf4a1 100644 (file)
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 int cpu_to_core_id(int cpu)
 {
        struct device_node *np;
-       const int *reg;
+       const __be32 *reg;
        int id = -1;
 
        np = of_get_cpu_node(cpu, NULL);
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu)
        if (!reg)
                goto out;
 
-       id = *reg;
+       id = be32_to_cpup(reg);
 out:
        of_node_put(np);
        return id;
index f3ff587..c5d1484 100644 (file)
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                slb_v = vcpu->kvm->arch.vrma_slb_v;
        }
 
+       preempt_disable();
        /* Find the HPTE in the hash table */
        index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
                                         HPTE_V_VALID | HPTE_V_ABSENT);
-       if (index < 0)
+       if (index < 0) {
+               preempt_enable();
                return -ENOENT;
+       }
        hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hptep[0] & ~HPTE_V_HVLOCK;
        gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        /* Unlock the HPTE */
        asm volatile("lwsync" : : : "memory");
        hptep[0] = v;
+       preempt_enable();
 
        gpte->eaddr = eaddr;
        gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        return -EFAULT;
        } else {
                page = pages[0];
+               pfn = page_to_pfn(page);
                if (PageHuge(page)) {
                        page = compound_head(page);
                        pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        }
                        rcu_read_unlock_sched();
                }
-               pfn = page_to_pfn(page);
        }
 
        ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
        }
 
-       /* Set the HPTE to point to pfn */
-       r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+       /*
+        * Set the HPTE to point to pfn.
+        * Since the pfn is at PAGE_SIZE granularity, make sure we
+        * don't mask out lower-order bits if psize < PAGE_SIZE.
+        */
+       if (psize < PAGE_SIZE)
+               psize = PAGE_SIZE;
+       r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
        if (hpte_is_writable(r) && !write_ok)
                r = hpte_make_readonly(r);
        ret = RESUME_GUEST;
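
The clamp above widens psize to the host PAGE_SIZE before the mask is built, so bits of r below PAGE_SIZE survive and the PAGE_SIZE-granular pfn is OR'd in above them. A sketch of just the mask arithmetic, assuming a 64K host page and a 4K guest page purely for illustration:

    #include <inttypes.h>
    #include <stdio.h>

    #define HPTE_R_PP0  0x8000000000000000ULL

    int main(void)
    {
        uint64_t page_size = 1ULL << 16;    /* assumed 64K host PAGE_SIZE */
        uint64_t psize = 1ULL << 12;        /* 4K guest page */

        if (psize < page_size)              /* the clamp added above */
            psize = page_size;

        /* Bits below psize in r survive; the pfn lands above them. */
        printf("keep-mask: 0x%016" PRIx64 "\n",
               (uint64_t)~(HPTE_R_PP0 - psize));
        printf("pfn-mask:  0x%016" PRIx64 "\n", (uint64_t)~(psize - 1));
        return 0;
    }

With the clamp the keep-mask is 0x800000000000ffff: the low 16 bits already in r carry the finer-grained part of the address instead of being wiped and replaced with the zeros of pfn << PAGE_SHIFT.
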
index 072287f..b51d5db 100644 (file)
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       unsigned long flags;
 
-       spin_lock(&vcpu->arch.tbacct_lock);
+       spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
        if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
            vc->preempt_tb != TB_NIL) {
                vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
                vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
                vcpu->arch.busy_preempt = TB_NIL;
        }
-       spin_unlock(&vcpu->arch.tbacct_lock);
+       spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       unsigned long flags;
 
-       spin_lock(&vcpu->arch.tbacct_lock);
+       spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
        if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
                vc->preempt_tb = mftb();
        if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
                vcpu->arch.busy_preempt = mftb();
-       spin_unlock(&vcpu->arch.tbacct_lock);
+       spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
         */
        if (vc->vcore_state != VCORE_INACTIVE &&
            vc->runner->arch.run_task != current) {
-               spin_lock(&vc->runner->arch.tbacct_lock);
+               spin_lock_irq(&vc->runner->arch.tbacct_lock);
                p = vc->stolen_tb;
                if (vc->preempt_tb != TB_NIL)
                        p += now - vc->preempt_tb;
-               spin_unlock(&vc->runner->arch.tbacct_lock);
+               spin_unlock_irq(&vc->runner->arch.tbacct_lock);
        } else {
                p = vc->stolen_tb;
        }
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
        core_stolen = vcore_stolen_time(vc, now);
        stolen = core_stolen - vcpu->arch.stolen_logged;
        vcpu->arch.stolen_logged = core_stolen;
-       spin_lock(&vcpu->arch.tbacct_lock);
+       spin_lock_irq(&vcpu->arch.tbacct_lock);
        stolen += vcpu->arch.busy_stolen;
        vcpu->arch.busy_stolen = 0;
-       spin_unlock(&vcpu->arch.tbacct_lock);
+       spin_unlock_irq(&vcpu->arch.tbacct_lock);
        if (!dt || !vpa)
                return;
        memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                if (list_empty(&vcpu->kvm->arch.rtas_tokens))
                        return RESUME_HOST;
 
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
                rc = kvmppc_rtas_hcall(vcpu);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
                if (rc == -ENOENT)
                        return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
        if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
                return;
-       spin_lock(&vcpu->arch.tbacct_lock);
+       spin_lock_irq(&vcpu->arch.tbacct_lock);
        now = mftb();
        vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
                vcpu->arch.stolen_logged;
        vcpu->arch.busy_preempt = now;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-       spin_unlock(&vcpu->arch.tbacct_lock);
+       spin_unlock_irq(&vcpu->arch.tbacct_lock);
        --vc->n_runnable;
        list_del(&vcpu->arch.run_list);
 }
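
The tbacct_lock conversions in this file move from plain spin_lock() to the irq-disabling variants: if a lock can also be taken from a context that interrupts the holder, the holder must block that delivery first or it can self-deadlock on a single CPU. A toy single-threaded model of the irqsave pattern, with atomic_flag standing in for a spinlock and a plain flag for the interrupt-enable state; nothing here is kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag lock = ATOMIC_FLAG_INIT;
    static int irqs_enabled = 1;

    /* Stand-in for an interrupt handler that also wants the lock. */
    static void irq_handler(void)
    {
        while (atomic_flag_test_and_set(&lock))
            ;   /* would spin forever if the interrupted code held it */
        atomic_flag_clear(&lock);
        puts("irq ran safely");
    }

    static void maybe_deliver_irq(void)
    {
        if (irqs_enabled)
            irq_handler();
    }

    int main(void)
    {
        /*
         * spin_lock_irqsave() pattern: disable delivery, then take
         * the lock, so no interrupt can arrive while we hold it.
         */
        irqs_enabled = 0;
        while (atomic_flag_test_and_set(&lock))
            ;
        maybe_deliver_irq();    /* suppressed: no self-deadlock */
        atomic_flag_clear(&lock);
        irqs_enabled = 1;       /* spin_unlock_irqrestore() */
        maybe_deliver_irq();    /* safe again once the lock is dropped */
        return 0;
    }
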
index 9c51544..8689e2e 100644 (file)
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                is_io = pa & (HPTE_R_I | HPTE_R_W);
                pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
                pa &= PAGE_MASK;
+               pa |= gpa & ~PAGE_MASK;
        } else {
                /* Translate to host virtual address */
                hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                                ptel = hpte_make_readonly(ptel);
                        is_io = hpte_cache_bits(pte_val(pte));
                        pa = pte_pfn(pte) << PAGE_SHIFT;
+                       pa |= hva & (pte_size - 1);
+                       pa |= gpa & ~PAGE_MASK;
                }
        }
 
        if (pte_size < psize)
                return H_PARAMETER;
-       if (pa && pte_size > psize)
-               pa |= gpa & (pte_size - 1);
 
        ptel &= ~(HPTE_R_PP0 - psize);
        ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
        20,     /* 1M, unsupported */
 };
 
+/* When called from virtmode, this function should be protected by
+ * preempt_disable(); otherwise, holding HPTE_V_HVLOCK while
+ * preempted can trigger a deadlock.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                              unsigned long valid)
 {
index bc8de75..be4fa04 100644 (file)
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13:    b       machine_check_fwnmi
 
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
        /* Clear our vcpu pointer so we don't come back in early */
        li      r0, 0
        std     r0, HSTATE_KVM_VCPU(r13)
+       /*
+        * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+        * the nap_count, because once the increment to nap_count is
+        * visible we could be given another vcpu.
+        */
        lwsync
        /* Clear any pending IPI - we're an offline thread */
        ld      r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
        /* increment the nap count and then go to nap mode */
        ld      r4, HSTATE_KVM_VCORE(r13)
        addi    r4, r4, VCORE_NAP_COUNT
-       lwsync                          /* make previous updates visible */
 51:    lwarx   r3, 0, r4
        addi    r3, r3, 1
        stwcx.  r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
         * guest CR, R12 saved in shadow VCPU SCRATCH1/0
         * guest R13 saved in SPRN_SCRATCH0
         */
-       /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-       std     r9, HSTATE_HOST_R2(r13)
+       std     r9, HSTATE_SCRATCH2(r13)
 
        lbz     r9, HSTATE_IN_GUEST(r13)
        cmpwi   r9, KVM_GUEST_MODE_HOST_HV
        beq     kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        cmpwi   r9, KVM_GUEST_MODE_GUEST
-       ld      r9, HSTATE_HOST_R2(r13)
+       ld      r9, HSTATE_SCRATCH2(r13)
        beq     kvmppc_interrupt_pr
 #endif
        /* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
        std     r6, VCPU_GPR(R6)(r9)
        std     r7, VCPU_GPR(R7)(r9)
        std     r8, VCPU_GPR(R8)(r9)
-       ld      r0, HSTATE_HOST_R2(r13)
+       ld      r0, HSTATE_SCRATCH2(r13)
        std     r0, VCPU_GPR(R9)(r9)
        std     r10, VCPU_GPR(R10)(r9)
        std     r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
         */
        /* Increment the threads-exiting-guest count in the 0xff00
           bits of vcore->entry_exit_count */
-       lwsync
        ld      r5,HSTATE_KVM_VCORE(r13)
        addi    r6,r5,VCORE_ENTRY_EXIT
 41:    lwarx   r3,0,r6
        addi    r0,r3,0x100
        stwcx.  r0,0,r6
        bne     41b
-       lwsync
+       isync           /* order stwcx. vs. reading napping_threads */
 
        /*
         * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
+       /* Order entry/exit update vs. IPIs */
+       sync
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
        subf    r6,r4,r13
 42:    andi.   r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
        bge     kvm_cede_exit
        stwcx.  r4,0,r6
        bne     31b
+       /* order napping_threads update vs testing entry_exit_count */
+       isync
        li      r0,1
        stb     r0,HSTATE_NAPPING(r13)
-       /* order napping_threads update vs testing entry_exit_count */
-       lwsync
        mr      r4,r3
        lwz     r7,VCORE_ENTRY_EXIT(r5)
        cmpwi   r7,0x100
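
The barrier changes above enforce a release/acquire-style invariant: a thread that observes the bumped nap_count (or entry_exit_count) must also observe the stores made before the bump, such as the cleared HSTATE_KVM_VCPU pointer. A C11 model of the same pairing, assuming a toolchain that ships <threads.h>; the names mirror the assembly but nothing here is kernel API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    static _Atomic(void *) vcpu;
    static atomic_int nap_count;

    static int secondary_thread(void *arg)
    {
        (void)arg;
        /* Clear the vcpu pointer first... */
        atomic_store_explicit(&vcpu, NULL, memory_order_relaxed);
        /* ...then publish the count with release semantics (the lwsync). */
        atomic_fetch_add_explicit(&nap_count, 1, memory_order_release);
        return 0;
    }

    int main(void)
    {
        static int dummy;
        thrd_t t;

        atomic_store(&vcpu, &dummy);
        thrd_create(&t, secondary_thread, NULL);

        /* Acquire pairs with release: seeing the count implies NULL vcpu. */
        while (atomic_load_explicit(&nap_count, memory_order_acquire) == 0)
            ;
        printf("vcpu once napping: %p\n",
               atomic_load_explicit(&vcpu, memory_order_relaxed));
        thrd_join(t, NULL);
        return 0;
    }
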
index f4dd041..f779450 100644 (file)
@@ -129,29 +129,32 @@ kvm_start_lightweight:
         * R12      = exit handler id
         * R13      = PACA
         * SVCPU.*  = guest *
+        * MSR.EE   = 1
         *
         */
 
+       PPC_LL  r3, GPR4(r1)            /* vcpu pointer */
+
+       /*
+        * kvmppc_copy_from_svcpu can clobber volatile registers, so save
+        * the exit handler id to the vcpu and restore it from there later.
+        */
+       stw     r12, VCPU_TRAP(r3)
+
        /* Transfer reg values from shadow vcpu back to vcpu struct */
        /* On 64-bit, interrupts are still off at this point */
-       PPC_LL  r3, GPR4(r1)            /* vcpu pointer */
+
        GET_SHADOW_VCPU(r4)
        bl      FUNC(kvmppc_copy_from_svcpu)
        nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
-       /* Re-enable interrupts */
-       ld      r3, HSTATE_HOST_MSR(r13)
-       ori     r3, r3, MSR_EE
-       MTMSR_EERI(r3)
-
        /*
         * Reload kernel SPRG3 value.
         * No need to save guest value as usermode can't modify SPRG3.
         */
        ld      r3, PACA_SPRG3(r13)
        mtspr   SPRN_SPRG3, r3
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
        /* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
        PPC_STL r31, VCPU_GPR(R31)(r7)
 
        /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-       mr      r5, r12
+       lwz     r5, VCPU_TRAP(r7)
 
        /* Restore r3 (kvm_run) and r4 (vcpu) */
        REST_2GPRS(3, r1)
index fe14ca3..5b9e906 100644 (file)
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+       svcpu->in_use = 0;
        svcpu_put(svcpu);
 #endif
        vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+       if (svcpu->in_use) {
+               kvmppc_copy_from_svcpu(vcpu, svcpu);
+       }
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr  = vcpu->arch.lr;
        svcpu->pc  = vcpu->arch.pc;
+       svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+       /*
+        * vcpu_put would just call us again because in_use hasn't
+        * been updated yet.
+        */
+       preempt_disable();
+
+       /*
+        * Maybe we were already preempted and synced the svcpu from
+        * our preempt notifiers. Don't bother touching this svcpu then.
+        */
+       if (!svcpu->in_use)
+               goto out;
+
        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
+       svcpu->in_use = false;
+
+out:
+       preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
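
The in_use flag added above makes the copy-back from the shadow vcpu idempotent: whichever of the normal exit path or a preempt notifier runs first does the work, and the other finds in_use already clear. A reduced sketch of that idea, with the preemption control elided and field names invented:

    #include <stdbool.h>
    #include <stdio.h>

    struct svcpu { int scratch; bool in_use; };
    struct vcpu  { int scratch; };

    static void copy_from_svcpu(struct vcpu *v, struct svcpu *s)
    {
        /* Already synced (e.g. by a preempt notifier): nothing to do. */
        if (!s->in_use)
            return;
        v->scratch = s->scratch;
        s->in_use = false;          /* make a second call a no-op */
    }

    int main(void)
    {
        struct svcpu s = { .scratch = 7, .in_use = true };
        struct vcpu v = { 0 };

        copy_from_svcpu(&v, &s);    /* does the work */
        s.scratch = 99;
        copy_from_svcpu(&v, &s);    /* idempotent: skipped */
        printf("vcpu scratch = %d\n", v.scratch);   /* prints 7 */
        return 0;
    }
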
index a38c4c9..c3c5231 100644 (file)
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
        li      r6, MSR_IR | MSR_DR
        andc    r6, r5, r6      /* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
        /*
         * Set EE in HOST_MSR so that it's enabled when we get into our
-        * C exit handler function.  On 64-bit we delay enabling
-        * interrupts until we have finished transferring stuff
-        * to or from the PACA.
+        * C exit handler function.
         */
        ori     r5, r5, MSR_EE
-#endif
        mtsrr0  r7
        mtsrr1  r6
        RFI
index 53e65a2..0591e05 100644 (file)
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int ret, s;
-       struct thread_struct thread;
+       struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
        struct thread_fp_state fp;
        int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
        /* Switch to guest debug context */
-       thread.debug = vcpu->arch.shadow_dbg_reg;
-       switch_booke_debug_regs(&thread);
-       thread.debug = current->thread.debug;
+       debug = vcpu->arch.shadow_dbg_reg;
+       switch_booke_debug_regs(&debug);
+       debug = current->thread.debug;
        current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
        kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
           We also get here with interrupts enabled. */
 
        /* Switch back to user space debug context */
-       switch_booke_debug_regs(&thread);
-       current->thread.debug = thread.debug;
+       switch_booke_debug_regs(&debug);
+       current->thread.debug = debug;
 
 #ifdef CONFIG_PPC_FPU
        kvmppc_save_guest_fp(vcpu);
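
Note how the hunk above now swaps only a struct debug_reg around guest entry instead of a whole thread_struct, matching the new switch_booke_debug_regs() signature and avoiding a large on-stack copy. A compressed model of the save/switch/restore sequence, with a print standing in for priming the hardware registers:

    #include <stdio.h>

    struct debug_reg { unsigned long dbcr0; };
    struct thread { struct debug_reg debug; };

    static struct thread current_thread = { .debug = { .dbcr0 = 0x1 } };

    static void switch_booke_debug_regs(struct debug_reg *new_debug)
    {
        /* stand-in: would load the hardware registers from *new_debug */
        printf("priming dbcr0=%#lx\n", new_debug->dbcr0);
    }

    int main(void)
    {
        struct debug_reg shadow = { .dbcr0 = 0xff };    /* guest values */
        struct debug_reg debug;

        debug = shadow;                     /* enter guest context */
        switch_booke_debug_regs(&debug);
        debug = current_thread.debug;       /* remember host values */
        current_thread.debug = shadow;

        /* ... guest runs here ... */

        switch_booke_debug_regs(&debug);    /* back to host context */
        current_thread.debug = debug;
        return 0;
    }
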
index d73a590..596a285 100644 (file)
@@ -9,6 +9,14 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 
+#ifdef __BIG_ENDIAN__
+#define sLd sld                /* Shift towards low-numbered address. */
+#define sHd srd                /* Shift towards high-numbered address. */
+#else
+#define sLd srd                /* Shift towards low-numbered address. */
+#define sHd sld                /* Shift towards high-numbered address. */
+#endif
+
        .align  7
 _GLOBAL(__copy_tofrom_user)
 BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 24:    ld      r9,0(r4)        /* 3+2n loads, 2+2n stores */
 25:    ld      r0,8(r4)
-       sld     r6,r9,r10
+       sLd     r6,r9,r10
 26:    ldu     r9,16(r4)
-       srd     r7,r0,r11
-       sld     r8,r0,r10
+       sHd     r7,r0,r11
+       sLd     r8,r0,r10
        or      r7,r7,r6
        blt     cr6,79f
 27:    ld      r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 28:    ld      r0,0(r4)        /* 4+2n loads, 3+2n stores */
 29:    ldu     r9,8(r4)
-       sld     r8,r0,r10
+       sLd     r8,r0,r10
        addi    r3,r3,-8
        blt     cr6,5f
 30:    ld      r0,8(r4)
-       srd     r12,r9,r11
-       sld     r6,r9,r10
+       sHd     r12,r9,r11
+       sLd     r6,r9,r10
 31:    ldu     r9,16(r4)
        or      r12,r8,r12
-       srd     r7,r0,r11
-       sld     r8,r0,r10
+       sHd     r7,r0,r11
+       sLd     r8,r0,r10
        addi    r3,r3,16
        beq     cr6,78f
 
 1:     or      r7,r7,r6
 32:    ld      r0,8(r4)
 76:    std     r12,8(r3)
-2:     srd     r12,r9,r11
-       sld     r6,r9,r10
+2:     sHd     r12,r9,r11
+       sLd     r6,r9,r10
 33:    ldu     r9,16(r4)
        or      r12,r8,r12
 77:    stdu    r7,16(r3)
-       srd     r7,r0,r11
-       sld     r8,r0,r10
+       sHd     r7,r0,r11
+       sLd     r8,r0,r10
        bdnz    1b
 
 78:    std     r12,8(r3)
        or      r7,r7,r6
 79:    std     r7,16(r3)
-5:     srd     r12,r9,r11
+5:     sHd     r12,r9,r11
        or      r12,r8,r12
 80:    std     r12,24(r3)
        bne     6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
        blr
 6:     cmpwi   cr1,r5,8
        addi    r3,r3,32
-       sld     r9,r9,r10
+       sLd     r9,r9,r10
        ble     cr1,7f
 34:    ld      r0,8(r4)
-       srd     r7,r0,r11
+       sHd     r7,r0,r11
        or      r9,r7,r9
 7:
        bf      cr7*4+1,1f
+#ifdef __BIG_ENDIAN__
        rotldi  r9,r9,32
+#endif
 94:    stw     r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+       rotrdi  r9,r9,32
+#endif
        addi    r3,r3,4
 1:     bf      cr7*4+2,2f
+#ifdef __BIG_ENDIAN__
        rotldi  r9,r9,16
+#endif
 95:    sth     r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+       rotrdi  r9,r9,16
+#endif
        addi    r3,r3,2
 2:     bf      cr7*4+3,3f
+#ifdef __BIG_ENDIAN__
        rotldi  r9,r9,8
+#endif
 96:    stb     r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+       rotrdi  r9,r9,8
+#endif
 3:     li      r3,0
        blr
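
The sLd/sHd macros above capture that an unaligned copy assembled from two aligned doubleword loads must shift one half toward lower addresses and the other toward higher addresses, and that those directions map to opposite machine shifts on big- and little-endian. A user-space sketch of the same recombination; the byte-order test assumes the GCC/Clang predefined macros:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Combine two aligned 64-bit loads into one misaligned doubleword. */
    static uint64_t load_unaligned(const uint8_t *p)
    {
        unsigned int off = (uintptr_t)p & 7;
        const uint64_t *a = (const uint64_t *)((uintptr_t)p & ~(uintptr_t)7);
        uint64_t lo, hi;

        memcpy(&lo, &a[0], 8);
        memcpy(&hi, &a[1], 8);
        if (!off)
            return lo;
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        /* sLd == srd, sHd == sld on little-endian */
        return (lo >> (8 * off)) | (hi << (64 - 8 * off));
    #else
        /* sLd == sld, sHd == srd on big-endian */
        return (lo << (8 * off)) | (hi >> (64 - 8 * off));
    #endif
    }

    int main(void)
    {
        uint8_t buf[24];

        for (int i = 0; i < 24; i++)
            buf[i] = (uint8_t)i;
        /* Align to 8, then read at offset 3 across two doublewords. */
        const uint8_t *base =
            (const uint8_t *)(((uintptr_t)buf + 7) & ~(uintptr_t)7);
        printf("%016" PRIx64 "\n", load_unaligned(base + 3));
        return 0;
    }
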
 
index 02245ce..d7ddcee 100644 (file)
@@ -36,7 +36,6 @@
 #include "powernv.h"
 #include "pci.h"
 
-static char *hub_diag = NULL;
 static int ioda_eeh_nb_init = 0;
 
 static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
                ioda_eeh_nb_init = 1;
        }
 
-       /* We needn't HUB diag-data on PHB3 */
-       if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
-               hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-               if (!hub_diag) {
-                       pr_err("%s: Out of memory !\n", __func__);
-                       return -ENOMEM;
-               }
-       }
-
 #ifdef CONFIG_DEBUG_FS
        if (phb->dbgfs) {
                debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
 static void ioda_eeh_hub_diag(struct pci_controller *hose)
 {
        struct pnv_phb *phb = hose->private_data;
-       struct OpalIoP7IOCErrorData *data;
+       struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
        long rc;
 
-       data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
-       rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
+       rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
                           __func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
        struct OpalIoPhbErrorCommon *common;
        long rc;
 
-       common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
-       rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
+       rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+                                        PNV_PCI_DIAG_BUF_SIZE);
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
                            __func__, hose->global_number, rc);
                return;
        }
 
+       common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
        switch (common->ioType) {
        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
                ioda_eeh_p7ioc_phb_diag(hose, common);
index e7e59e4..79d83ca 100644 (file)
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1;
 static u8 opal_lpc_inb(unsigned long port)
 {
        int64_t rc;
-       uint32_t data;
+       __be32 data;
 
        if (opal_lpc_chip_id < 0 || port > 0xffff)
                return 0xff;
        rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
-       return rc ? 0xff : data;
+       return rc ? 0xff : be32_to_cpu(data);
 }
 
 static __le16 __opal_lpc_inw(unsigned long port)
 {
        int64_t rc;
-       uint32_t data;
+       __be32 data;
 
        if (opal_lpc_chip_id < 0 || port > 0xfffe)
                return 0xffff;
        if (port & 1)
                return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
        rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
-       return rc ? 0xffff : data;
+       return rc ? 0xffff : be32_to_cpu(data);
 }
 static u16 opal_lpc_inw(unsigned long port)
 {
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port)
 static __le32 __opal_lpc_inl(unsigned long port)
 {
        int64_t rc;
-       uint32_t data;
+       __be32 data;
 
        if (opal_lpc_chip_id < 0 || port > 0xfffc)
                return 0xffffffff;
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port)
                       (__le32)opal_lpc_inb(port + 2) <<  8 |
                               opal_lpc_inb(port + 3);
        rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
-       return rc ? 0xffffffff : data;
+       return rc ? 0xffffffff : be32_to_cpu(data);
 }
 
 static u32 opal_lpc_inl(unsigned long port)
index 4d99a8f..4fbf276 100644 (file)
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
 {
        struct opal_scom_map *m = map;
        int64_t rc;
+       __be64 v;
 
        reg = opal_scom_unmangle(reg);
-       rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+       rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+       *value = be64_to_cpu(v);
        return opal_xscom_err_xlate(rc);
 }
 
index 911c24e..1ed8d5f 100644 (file)
@@ -172,11 +172,13 @@ struct pnv_phb {
                } ioda;
        };
 
-       /* PHB status structure */
+       /* PHB and hub status structure */
        union {
                unsigned char                   blob[PNV_PCI_DIAG_BUF_SIZE];
                struct OpalIoP7IOCPhbErrorData  p7ioc;
+               struct OpalIoP7IOCErrorData     hub_diag;
        } diag;
+
 };
 
 extern struct pci_ops pnv_pci_ops;
index e738007..c9fecf0 100644 (file)
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m)
 {
        struct hvcall_ppp_data ppp_data;
        struct device_node *root;
-       const int *perf_level;
+       const __be32 *perf_level;
        int rc;
 
        rc = h_get_ppp(&ppp_data);
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m)
                perf_level = of_get_property(root,
                                "ibm,partition-performance-parameters-level",
                                             NULL);
-               if (perf_level && (*perf_level >= 1)) {
+               if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
                        seq_printf(m,
                            "physical_procs_allocated_to_virtualization=%d\n",
                                   ppp_data.phys_platform_procs);
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
        int partition_potential_processors;
        int partition_active_processors;
        struct device_node *rtas_node;
-       const int *lrdrp = NULL;
+       const __be32 *lrdrp = NULL;
 
        rtas_node = of_find_node_by_path("/rtas");
        if (rtas_node)
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
        if (lrdrp == NULL) {
                partition_potential_processors = vdso_data->processorCount;
        } else {
-               partition_potential_processors = *(lrdrp + 4);
+               partition_potential_processors = be32_to_cpup(lrdrp + 4);
        }
        of_node_put(rtas_node);
 
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
        const char *model = "";
        const char *system_id = "";
        const char *tmp;
-       const unsigned int *lp_index_ptr;
+       const __be32 *lp_index_ptr;
        unsigned int lp_index = 0;
 
        seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
                lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
                                        NULL);
                if (lp_index_ptr)
-                       lp_index = *lp_index_ptr;
+                       lp_index = be32_to_cpup(lp_index_ptr);
                of_node_put(rootdn);
        }
        seq_printf(m, "serial_number=%s\n", system_id);
index 6d2f0ab..0c882e8 100644 (file)
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 {
        struct device_node *dn;
        struct pci_dn *pdn;
-       const u32 *req_msi;
+       const __be32 *p;
+       u32 req_msi;
 
        pdn = pci_get_pdn(pdev);
        if (!pdn)
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 
        dn = pdn->node;
 
-       req_msi = of_get_property(dn, prop_name, NULL);
-       if (!req_msi) {
+       p = of_get_property(dn, prop_name, NULL);
+       if (!p) {
                pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
                return -ENOENT;
        }
 
-       if (*req_msi < nvec) {
+       req_msi = be32_to_cpup(p);
+       if (req_msi < nvec) {
                pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
 
-               if (*req_msi == 0) /* Be paranoid */
+               if (req_msi == 0) /* Be paranoid */
                        return -ENOSPC;
 
-               return *req_msi;
+               return req_msi;
        }
 
        return 0;
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
 static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 {
        struct device_node *dn;
-       const u32 *p;
+       const __be32 *p;
 
        dn = of_node_get(pci_device_to_OF_node(dev));
        while (dn) {
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
                if (p) {
                        pr_debug("rtas_msi: found prop on dn %s\n",
                                dn->full_name);
-                       *total = *p;
+                       *total = be32_to_cpup(p);
                        return dn;
                }
 
@@ -232,13 +234,13 @@ struct msi_counts {
 static void *count_non_bridge_devices(struct device_node *dn, void *data)
 {
        struct msi_counts *counts = data;
-       const u32 *p;
+       const __be32 *p;
        u32 class;
 
        pr_debug("rtas_msi: counting %s\n", dn->full_name);
 
        p = of_get_property(dn, "class-code", NULL);
-       class = p ? *p : 0;
+       class = p ? be32_to_cpup(p) : 0;
 
        if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
                counts->num_devices++;
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
 static void *count_spare_msis(struct device_node *dn, void *data)
 {
        struct msi_counts *counts = data;
-       const u32 *p;
+       const __be32 *p;
        int req;
 
        if (dn == counts->requestor)
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
                req = 0;
                p = of_get_property(dn, "ibm,req#msi", NULL);
                if (p)
-                       req = *p;
+                       req = be32_to_cpup(p);
 
                p = of_get_property(dn, "ibm,req#msi-x", NULL);
                if (p)
-                       req = max(req, (int)*p);
+                       req = max(req, (int)be32_to_cpup(p));
        }
 
        if (req < counts->quota)
index 7bfaf58..d7096f2 100644 (file)
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT];      /* assume this is in the first 4GB */
 static DEFINE_SPINLOCK(nvram_lock);
 
 struct err_log_info {
-       int error_type;
-       unsigned int seq_num;
+       __be32 error_type;
+       __be32 seq_num;
 };
 
 struct nvram_os_partition {
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 
 struct oops_log_info {
-       u16 version;
-       u16 report_length;
-       u64 timestamp;
+       __be16 version;
+       __be16 report_length;
+       __be64 timestamp;
 } __attribute__((packed));
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
                length = part->size;
        }
 
-       info.error_type = err_type;
-       info.seq_num = error_log_cnt;
+       info.error_type = cpu_to_be32(err_type);
+       info.seq_num = cpu_to_be32(error_log_cnt);
 
        tmp_index = part->index;
 
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
        }
 
        if (part->os_partition) {
-               *error_log_cnt = info.seq_num;
-               *err_type = info.error_type;
+               *error_log_cnt = be32_to_cpu(info.seq_num);
+               *err_type = be32_to_cpu(info.error_type);
        }
 
        return 0;
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len)
                pr_err("nvram: logging uncompressed oops/panic report\n");
                return -1;
        }
-       oops_hdr->version = OOPS_HDR_VERSION;
-       oops_hdr->report_length = (u16) zipped_len;
-       oops_hdr->timestamp = get_seconds();
+       oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+       oops_hdr->report_length = cpu_to_be16(zipped_len);
+       oops_hdr->timestamp = cpu_to_be64(get_seconds());
        return 0;
 }
 
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type,
                                clobbering_unread_rtas_event())
                return -1;
 
-       oops_hdr->version = OOPS_HDR_VERSION;
-       oops_hdr->report_length = (u16) size;
-       oops_hdr->timestamp = get_seconds();
+       oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+       oops_hdr->report_length = cpu_to_be16(size);
+       oops_hdr->timestamp = cpu_to_be64(get_seconds());
 
        if (compressed)
                err_type = ERR_TYPE_KERNEL_PANIC_GZ;
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
                size_t length, hdr_size;
 
                oops_hdr = (struct oops_log_info *)buff;
-               if (oops_hdr->version < OOPS_HDR_VERSION) {
+               if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
                        /* Old format oops header had 2-byte record size */
                        hdr_size = sizeof(u16);
-                       length = oops_hdr->version;
+                       length = be16_to_cpu(oops_hdr->version);
                        time->tv_sec = 0;
                        time->tv_nsec = 0;
                } else {
                        hdr_size = sizeof(*oops_hdr);
-                       length = oops_hdr->report_length;
-                       time->tv_sec = oops_hdr->timestamp;
+                       length = be16_to_cpu(oops_hdr->report_length);
+                       time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
                        time->tv_nsec = 0;
                }
                *buf = kmalloc(length, GFP_KERNEL);
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
                kmsg_dump_get_buffer(dumper, false,
                                     oops_data, oops_data_sz, &text_len);
                err_type = ERR_TYPE_KERNEL_PANIC;
-               oops_hdr->version = OOPS_HDR_VERSION;
-               oops_hdr->report_length = (u16) text_len;
-               oops_hdr->timestamp = get_seconds();
+               oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+               oops_hdr->report_length = cpu_to_be16(text_len);
+               oops_hdr->timestamp = cpu_to_be64(get_seconds());
        }
 
        (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
-               (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
+               (int) (sizeof(*oops_hdr) + text_len), err_type,
                ++oops_count);
 
        spin_unlock_irqrestore(&lock, flags);
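
The nvram hunks above give the persistent structures explicit __be16/__be32/__be64 field types and convert on every access, so the NVRAM byte layout is identical whichever endianness the kernel runs. A user-space sketch of the idea; cpu_to_be16 here is a local stand-in for the kernel helper, and since a byte swap is its own inverse it doubles as the decoder:

    #include <stdint.h>
    #include <stdio.h>

    /* Stored big-endian on the medium, independent of host byte order. */
    struct oops_log_info {
        uint16_t version;           /* __be16 in the patch above */
        uint16_t report_length;     /* __be16 */
        uint64_t timestamp;         /* __be64; left unset in this sketch */
    } __attribute__((packed));

    static uint16_t cpu_to_be16(uint16_t v)
    {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return __builtin_bswap16(v);
    #else
        return v;
    #endif
    }
    #define be16_to_cpu cpu_to_be16

    int main(void)
    {
        struct oops_log_info hdr;

        hdr.version = cpu_to_be16(5);
        hdr.report_length = cpu_to_be16(1024);

        /* A reader on any host decodes the same values. */
        printf("version=%u len=%u\n",
               be16_to_cpu(hdr.version), be16_to_cpu(hdr.report_length));
        return 0;
    }
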
index 5f93856..70670a2 100644 (file)
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
        struct device_node *dn, *pdn;
        struct pci_bus *bus;
-       const uint32_t *pcie_link_speed_stats;
+       const __be32 *pcie_link_speed_stats;
 
        bus = bridge->bus;
 
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
                return 0;
 
        for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
-               pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+               pcie_link_speed_stats = of_get_property(pdn,
                        "ibm,pcie-link-speed-stats", NULL);
                if (pcie_link_speed_stats)
                        break;
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
                return 0;
        }
 
-       switch (pcie_link_speed_stats[0]) {
+       switch (be32_to_cpup(pcie_link_speed_stats)) {
        case 0x01:
                bus->max_bus_speed = PCIE_SPEED_2_5GT;
                break;
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
                break;
        }
 
-       switch (pcie_link_speed_stats[1]) {
+       switch (be32_to_cpup(pcie_link_speed_stats + 1)) {
        case 0x01:
                bus->cur_bus_speed = PCIE_SPEED_2_5GT;
                break;
index 7b95f29..3baff31 100644 (file)
@@ -6,7 +6,7 @@ lib-y  = delay.o memmove.o memchr.o \
         checksum.o strlen.o div64.o div64-generic.o
 
 # Extracted from libgcc
-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
         ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
         udiv_qrnnd.o
 
index 8358dc1..0f9e945 100644 (file)
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
 }
 
 #define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
        return pte_val(a) & _PAGE_VALID;
 }
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
         * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
         *             and SUN4V pte layout, so this inline test is fine.
         */
-       if (likely(mm != &init_mm) && pte_accessible(orig))
+       if (likely(mm != &init_mm) && pte_accessible(mm, orig))
                tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
index e903c71..0952ecd 100644 (file)
@@ -26,6 +26,7 @@ config X86
        select HAVE_AOUT if X86_32
        select HAVE_UNSTABLE_SCHED_CLOCK
        select ARCH_SUPPORTS_NUMA_BALANCING
+       select ARCH_SUPPORTS_INT128 if X86_64
        select ARCH_WANTS_PROT_NUMA_PROT_NONE
        select HAVE_IDE
        select HAVE_OPROFILE
index 3d19994..bbc8b12 100644 (file)
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
 }
 
 #define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {
-       return pte_flags(a) & _PAGE_PRESENT;
+       if (pte_flags(a) & _PAGE_PRESENT)
+               return true;
+
+       if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+                       mm_tlb_flush_pending(mm))
+               return true;
+
+       return false;
 }
 
 static inline int pte_hidden(pte_t pte)
index 8729723..c8b0519 100644 (file)
@@ -7,6 +7,12 @@
 
 DECLARE_PER_CPU(int, __preempt_count);
 
+/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED        (0 + PREEMPT_NEED_RESCHED)
+
 /*
  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
  * that think a non-zero value indicates we cannot preempt.
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
        __this_cpu_add_4(__preempt_count, -val);
 }
 
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
        GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
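
The new comments describe the trick: NEED_RESCHED is folded into the per-cpu preempt count inverted, so PREEMPT_ENABLED has the bit set and a single decrement-hits-zero test means both "count dropped to zero" and "reschedule wanted". A sketch of the bit arithmetic with plain ints, no per-cpu storage or atomicity:

    #include <stdio.h>

    #define PREEMPT_NEED_RESCHED    0x80000000u  /* inverted bit, as on x86 */
    #define PREEMPT_ENABLED         (0 + PREEMPT_NEED_RESCHED)

    static unsigned int preempt_count = PREEMPT_ENABLED;

    static void set_need_resched(void)   { preempt_count &= ~PREEMPT_NEED_RESCHED; }
    static void clear_need_resched(void) { preempt_count |= PREEMPT_NEED_RESCHED; }

    /* The decl-and-test: zero means count==0 *and* resched needed. */
    static int preempt_count_dec_and_test(void)
    {
        return --preempt_count == 0;
    }

    int main(void)
    {
        preempt_count += 1;                 /* preempt_disable() */
        set_need_resched();
        if (preempt_count_dec_and_test())   /* preempt_enable() */
            puts("rescheduling");
        clear_need_resched();
        return 0;
    }
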
index dc1ec0d..ea04b34 100644 (file)
@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }
 
-       if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+       if (c->x86 == 6 && cpu_has_clflush &&
+           (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
                set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
 
 #ifdef CONFIG_X86_64
index fd00bb2..c1a8618 100644 (file)
@@ -262,11 +262,20 @@ struct cpu_hw_events {
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
 
-#define EVENT_CONSTRAINT_END           \
-       EVENT_CONSTRAINT(0, 0, 0)
+/*
+ * We define the end marker as having a weight of -1
+ * to enable blacklisting of events using a counter bitmask
+ * of zero and thus a weight of zero.
+ * The end marker has a weight that cannot possibly be
+ * obtained from counting the bits in the bitmask.
+ */
+#define EVENT_CONSTRAINT_END { .weight = -1 }
 
+/*
+ * Check for end marker with weight == -1
+ */
 #define for_each_event_constraint(e, c)        \
-       for ((e) = (c); (e)->weight; (e)++)
+       for ((e) = (c); (e)->weight != -1; (e)++)
 
 /*
  * Extra registers for specific events.
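
With the end marker redefined as weight == -1, a constraint whose counter bitmask is empty (and whose computed weight is therefore 0) can sit in a table as a blacklist entry without terminating iteration early. A sketch with an invented three-entry table; __builtin_popcountl is the GCC/Clang builtin:

    #include <stdio.h>

    struct event_constraint { unsigned long idxmsk; int weight; };

    #define EVENT_CONSTRAINT(mask) \
        { .idxmsk = (mask), .weight = __builtin_popcountl(mask) }
    #define EVENT_CONSTRAINT_END    { .weight = -1 }

    #define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight != -1; (e)++)

    static const struct event_constraint table[] = {
        EVENT_CONSTRAINT(0x3),  /* counters 0-1 */
        EVENT_CONSTRAINT(0x0),  /* blacklisted: no counters, weight 0 */
        EVENT_CONSTRAINT(0xc),  /* counters 2-3 */
        EVENT_CONSTRAINT_END,
    };

    int main(void)
    {
        const struct event_constraint *e;

        /* The old "(e)->weight" test would have stopped at the 0x0 entry. */
        for_each_event_constraint(e, table)
            printf("mask=%#lx weight=%d\n", e->idxmsk, e->weight);
        return 0;
    }
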
index dd74e46..0596e8e 100644 (file)
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                pte_t pte = gup_get_pte(ptep);
                struct page *page;
 
+               /* Similar to the PMD case, NUMA hinting must take slow path */
+               if (pte_numa(pte)) {
+                       pte_unmap(ptep);
+                       return 0;
+               }
+
                if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
                        pte_unmap(ptep);
                        return 0;
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                        return 0;
                if (unlikely(pmd_large(pmd))) {
+                       /*
+                        * NUMA hinting faults need to be handled in the GUP
+                        * slowpath for accounting purposes and so that they
+                        * can be serialised against THP migration.
+                        */
+                       if (pmd_numa(pmd))
+                               return 0;
                        if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
                                return 0;
                } else {
index ba6cf8e..b91ce75 100644 (file)
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
 void blk_mq_unregister_disk(struct gendisk *disk)
 {
        struct request_queue *q = disk->queue;
+       struct blk_mq_hw_ctx *hctx;
+       struct blk_mq_ctx *ctx;
+       int i, j;
+
+       queue_for_each_hw_ctx(q, hctx, i) {
+               hctx_for_each_ctx(hctx, ctx, j) {
+                       kobject_del(&ctx->kobj);
+                       kobject_put(&ctx->kobj);
+               }
+               kobject_del(&hctx->kobj);
+               kobject_put(&hctx->kobj);
+       }
 
        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
+       kobject_put(&q->mq_kobj);
 
        kobject_put(&disk_to_dev(disk)->kobj);
 }
index 5d92485..4770de5 100644 (file)
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
 config ACPI_EXTLOG
        tristate "Extended Error Log support"
        depends on X86_MCE && X86_LOCAL_APIC
-       select EFI
        select UEFI_CPER
        default n
        help
index 6745fe1..e603905 100644 (file)
@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
        { "80860F14", (unsigned long)&byt_sdio_dev_desc },
        { "80860F41", (unsigned long)&byt_i2c_dev_desc },
        { "INT33B2", },
+       { "INT33FC", },
 
        { "INT3430", (unsigned long)&lpt_dev_desc },
        { "INT3431", (unsigned long)&lpt_dev_desc },
index 786294b..3650b21 100644 (file)
@@ -2,7 +2,6 @@ config ACPI_APEI
        bool "ACPI Platform Error Interface (APEI)"
        select MISC_FILESYSTEMS
        select PSTORE
-       select EFI
        select UEFI_CPER
        depends on X86
        help
index 26311f2..cb1d557 100644 (file)
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info erst_info = {
        .owner          = THIS_MODULE,
        .name           = "erst",
+       .flags          = PSTORE_FLAGS_FRAGILE,
        .open           = erst_open_pstore,
        .close          = erst_close_pstore,
        .read           = erst_reader,
index 14f1e95..c0ed4f2 100644 (file)
@@ -1238,15 +1238,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                return rc;
 
-       /* AHCI controllers often implement SFF compatible interface.
-        * Grab all PCI BARs just in case.
-        */
-       rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
-       if (rc == -EBUSY)
-               pcim_pin_device(pdev);
-       if (rc)
-               return rc;
-
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
            (pdev->device == 0x2652 || pdev->device == 0x2653)) {
                u8 map;
@@ -1263,6 +1254,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
+       /* AHCI controllers often implement SFF compatible interface.
+        * Grab all PCI BARs just in case.
+        */
+       rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+       if (rc == -EBUSY)
+               pcim_pin_device(pdev);
+       if (rc)
+               return rc;
+
        hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv)
                return -ENOMEM;
index ae2d73f..3e23e99 100644 (file)
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
        /*
         * set PHY Parameters, two steps to configure the GPR13,
         * one write for the rest of the parameters, mask of first write
-        * is 0x07fffffd, and the other one write for setting
+        * is 0x07ffffff, and the other one write for setting
         * the mpll_clk_en.
         */
        regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
                        | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
                        | IMX6Q_GPR13_SATA_TX_BOOST_MASK
                        | IMX6Q_GPR13_SATA_TX_LVL_MASK
+                       | IMX6Q_GPR13_SATA_MPLL_CLK_EN
                        | IMX6Q_GPR13_SATA_TX_EDGE_RATE
                        , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
                        | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
index 75b9367..1393a58 100644 (file)
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
                                    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
                                    err_mask);
                } else {
+                       u8 *cmds = dev->ncq_send_recv_cmds;
+
                        dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
-                       memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
-                               ATA_LOG_NCQ_SEND_RECV_SIZE);
+                       memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+                       if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+                               ata_dev_dbg(dev, "disabling queued TRIM support\n");
+                               cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+                                       ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+                       }
                }
        }
 
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST3320[68]13AS",     "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
 
+       /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+       { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
        /* Blacklist entries taken from Silicon Image 3124/3132
           Windows driver .inf file - also several Linux problem reports */
        { "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "PIONEER DVD-RW  DVR-212D",   NULL,   ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
+       /* devices that don't properly handle queued TRIM commands */
+       { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Crucial_CT???M500SSD1",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+
        /* End Marker */
        { }
 };
@@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
                { "norst",      .lflags         = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
                { "rstonce",    .lflags         = ATA_LFLAG_RST_ONCE },
                { "atapi_dmadir", .horkage_on   = ATA_HORKAGE_ATAPI_DMADIR },
+               { "disable",    .horkage_on     = ATA_HORKAGE_DISABLE },
        };
        char *start = *cur, *p = *cur;
        char *id, *val, *endp;
index ab58556..377eb88 100644 (file)
@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
                return;
        }
 
+       /*
+        * XXX - UGLY HACK
+        *
+        * The block layer suspend/resume path is fundamentally broken due
+        * to freezable kthreads and workqueue and may deadlock if a block
+        * device gets removed while resume is in progress.  I don't know
+        * what the solution is short of removing freezable kthreads and
+        * workqueues altogether.
+        *
+        * The following is an ugly hack to avoid kicking off device
+        * removal while freezer is active.  This is a joke but does avoid
+        * this particular deadlock scenario.
+        *
+        * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+        * http://marc.info/?l=linux-kernel&m=138695698516487
+        */
+#ifdef CONFIG_FREEZER
+       while (pm_freezing)
+               msleep(10);
+#endif
+
        DPRINTK("ENTER\n");
        mutex_lock(&ap->scsi_scan_mutex);
 
index f370fc1..a2e69d2 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
        NULL_Q_MQ               = 2,
 };
 
-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
 
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
 module_param(hw_queue_depth, int, S_IRUGO);
 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
 
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
 module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
 
 static void put_tag(struct nullb_queue *nq, unsigned int tag)
 {
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
 {
-       return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
-                               hctx_index);
+       int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+       int tip = (reg->nr_hw_queues % nr_online_nodes);
+       int node = 0, i, n;
+
+       /*
+        * Split submit queues evenly across the number of nodes. If uneven,
+        * fill the first buckets with one extra queue each, until the rest
+        * is filled with no extra.
+        */
+       for (i = 0, n = 1; i < hctx_index; i++, n++) {
+               if (n % b_size == 0) {
+                       n = 0;
+                       node++;
+
+                       tip--;
+                       if (!tip)
+                               b_size = reg->nr_hw_queues / nr_online_nodes;
+               }
+       }
+
+       /*
+        * A node might not be online, so map the relative node id to the
+        * real node id.
+        */
+       for_each_online_node(n) {
+               if (!node)
+                       break;
+               node--;
+       }
+
+       return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
 }
 
 static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
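
The loop above walks hctx_index through bucket boundaries so that nr_hw_queues is spread across nr_online_nodes, with the leading buckets taking one extra queue when the division is uneven. The same arithmetic as a standalone function, kernel types and helpers replaced with plain ints:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Map a hardware-context index to a (relative) node id. */
    static int hctx_to_node(int hctx_index, int nr_hw_queues, int nr_nodes)
    {
        int b_size = DIV_ROUND_UP(nr_hw_queues, nr_nodes);
        int tip = nr_hw_queues % nr_nodes;
        int node = 0, i, n;

        for (i = 0, n = 1; i < hctx_index; i++, n++) {
            if (n % b_size == 0) {
                n = 0;
                node++;
                tip--;
                if (!tip)
                    b_size = nr_hw_queues / nr_nodes;
            }
        }
        return node;
    }

    int main(void)
    {
        /* 7 queues over 3 nodes: buckets of 3, 2 and 2. */
        for (int i = 0; i < 7; i++)
            printf("hctx %d -> node %d\n", i, hctx_to_node(i, 7, 3));
        return 0;
    }
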
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
        kfree(hctx);
 }
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+       BUG_ON(!nullb);
+       BUG_ON(!nq);
+
+       init_waitqueue_head(&nq->wait);
+       nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int index)
 {
        struct nullb *nullb = data;
        struct nullb_queue *nq = &nullb->queues[index];
 
-       init_waitqueue_head(&nq->wait);
-       nq->queue_depth = nullb->queue_depth;
-       nullb->nr_queues++;
        hctx->driver_data = nq;
+       null_init_queue(nullb, nq);
+       nullb->nr_queues++;
 
        return 0;
 }
@@ -417,13 +455,13 @@ static int setup_commands(struct nullb_queue *nq)
 
        nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
        if (!nq->cmds)
-               return 1;
+               return -ENOMEM;
 
        tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
        nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
        if (!nq->tag_map) {
                kfree(nq->cmds);
-               return 1;
+               return -ENOMEM;
        }
 
        for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +492,37 @@ static void cleanup_queues(struct nullb *nullb)
 
 static int setup_queues(struct nullb *nullb)
 {
-       struct nullb_queue *nq;
-       int i;
-
-       nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+       nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+                                                               GFP_KERNEL);
        if (!nullb->queues)
-               return 1;
+               return -ENOMEM;
 
        nullb->nr_queues = 0;
        nullb->queue_depth = hw_queue_depth;
 
-       if (queue_mode == NULL_Q_MQ)
-               return 0;
+       return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+       struct nullb_queue *nq;
+       int i, ret = 0;
 
        for (i = 0; i < submit_queues; i++) {
                nq = &nullb->queues[i];
-               init_waitqueue_head(&nq->wait);
-               nq->queue_depth = hw_queue_depth;
-               if (setup_commands(nq))
-                       break;
+
+               null_init_queue(nullb, nq);
+
+               ret = setup_commands(nq);
+               if (ret)
+                       goto err_queue;
                nullb->nr_queues++;
        }
 
-       if (i == submit_queues)
-               return 0;
-
+       return 0;
+err_queue:
        cleanup_queues(nullb);
-       return 1;
+       return ret;
 }
 
 static int null_add_dev(void)
@@ -518,11 +560,13 @@ static int null_add_dev(void)
        } else if (queue_mode == NULL_Q_BIO) {
                nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
                blk_queue_make_request(nullb->q, null_queue_bio);
+               init_driver_queues(nullb);
        } else {
                nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
                blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                if (nullb->q)
                        blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+               init_driver_queues(nullb);
        }
 
        if (!nullb->q)
@@ -579,7 +623,13 @@ static int __init null_init(void)
        }
 #endif
 
-       if (submit_queues > nr_cpu_ids)
+       if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+               if (submit_queues < nr_online_nodes) {
+                       pr_warn("null_blk: submit_queues param raised to %u\n",
+                                                       nr_online_nodes);
+                       submit_queues = nr_online_nodes;
+               }
+       } else if (submit_queues > nr_cpu_ids)
                submit_queues = nr_cpu_ids;
        else if (!submit_queues)
                submit_queues = 1;
index 9199c93..eb6e1e0 100644 (file)
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
        }
 }
 
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 {
        switch (state) {
        case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
        }
 }
 
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
 {
        switch (state) {
        case SKD_REQ_STATE_IDLE:
index 6bfc1bb..dceb85f 100644 (file)
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x0CF3, 0xE004) },
        { USB_DEVICE(0x0CF3, 0xE005) },
        { USB_DEVICE(0x0930, 0x0219) },
+       { USB_DEVICE(0x0930, 0x0220) },
        { USB_DEVICE(0x0489, 0xe057) },
        { USB_DEVICE(0x13d3, 0x3393) },
        { USB_DEVICE(0x0489, 0xe04e) },
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
index c0ff34f..3980fd1 100644 (file)
@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
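
The ath3k table and the btusb blacklist above gain the same 0x0930:0x0220 entry, so the AR3012-based device is matched for firmware loading and flagged for the ATH3012 handling path. A hedged sketch of how such a match table is declared and consumed (example_table and the probe fragment are illustrative):

    static const struct usb_device_id example_table[] = {
            { USB_DEVICE(0x0930, 0x0220),           /* VID:PID match */
              .driver_info = BTUSB_ATH3012 },       /* per-device flag */
            { }                                     /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_table);

    /* in probe(): */
    if (id->driver_info & BTUSB_ATH3012)
            /* take the AR3012-specific setup path */;
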
index 7be41e6..00a3abe 100644 (file)
@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
        struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
        int ret;
 
-       ret = regmap_update_bits(s2mps11->iodev->regmap,
+       ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
                                S2MPS11_REG_RTC_CTRL,
                                 s2mps11->mask, s2mps11->mask);
        if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
        struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
        int ret;
 
-       ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+       ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
                           s2mps11->mask, ~s2mps11->mask);
 
        if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
                s2mps11_clk->hw.init = &s2mps11_clks_init[i];
                s2mps11_clk->mask = 1 << i;
 
-               ret = regmap_read(s2mps11_clk->iodev->regmap,
+               ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
                                  S2MPS11_REG_RTC_CTRL, &val);
                if (ret < 0)
                        goto err_reg;
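
All three hunks above are the same mechanical fix: after the sec-core MFD driver split its register maps, the clock control register lives behind regmap_pmic rather than the old combined regmap. regmap_update_bits(map, reg, mask, val) does a read-modify-write that touches only the bits in mask, and val is masked internally, so passing ~mask (as the unprepare path does) and passing 0 clear the same bits. A short sketch:

    /* set the clock-enable bit(s) selected by mask ... */
    ret = regmap_update_bits(map, S2MPS11_REG_RTC_CTRL, mask, mask);
    /* ... and clear them again; (val & mask) == 0 either way */
    ret = regmap_update_bits(map, S2MPS11_REG_RTC_CTRL, mask, 0);
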
index 5c07a56..634c4d6 100644 (file)
@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
 config CLKSRC_EFM32
        bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
        depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+       select CLKSRC_MMIO
        default ARCH_EFM32
        help
          Support to use the timers of EFM32 SoCs as clock source and clock
index 35639cf..b9ddd9e 100644 (file)
@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)
 
                init_func = match->data;
                init_func(np);
-               of_node_put(np);
        }
 }
index 45ba8ae..2a2ea27 100644 (file)
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)
 
 static u64 read_sched_clock(void)
 {
-       return __raw_readl(sched_io_base);
+       return ~__raw_readl(sched_io_base);
 }
 
 static const struct of_device_id sptimer_ids[] __initconst = {
        { .compatible = "picochip,pc3x2-rtc" },
-       { .compatible = "snps,dw-apb-timer-sp" },
        { /* Sentinel */ },
 };
 
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
        num_called++;
 }
 CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
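
Two fixes above: the sched_clock read is inverted because this DW APB timer is a down-counter, and the "snps,dw-apb-timer-sp" compatible now goes through full dw_apb_timer_init() rather than only the sptimer match list, alongside a new generic "snps,dw-apb-timer" compatible. A sketch of the down-counter idiom, assuming the usual sched_io_base mapping:

    static u64 read_sched_clock(void)
    {
            /* hardware counts down; invert so the value increases */
            return ~__raw_readl(sched_io_base);
    }
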
index 2fb4695..a4f6119 100644 (file)
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
        writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
               timer_base + TIMER_CTL_REG(0));
 
+       /* Make sure timer is stopped before playing with interrupts */
+       sun4i_clkevt_time_stop(0);
+
        ret = setup_irq(irq, &sun4i_timer_irq);
        if (ret)
                pr_warn("failed to setup irq %d\n", irq);
index d8e47e5..4e7f680 100644 (file)
@@ -255,11 +255,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
 
        ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
 
-       /*
-        * Set scale and timer for sched_clock.
-        */
-       sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
-
        /*
         * Setup free-running clocksource timer (interrupts
         * disabled).
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
        timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
                             TIMER0_DIV(TIMER_DIVIDER_SHIFT));
 
+       /*
+        * Set scale and timer for sched_clock.
+        */
+       sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
+
        clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
                              "armada_370_xp_clocksource",
                              timer_clk, 300, 32, clocksource_mmio_readl_down);
index 02d534d..16d7b4a 100644 (file)
@@ -828,6 +828,12 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
        int ret = 0;
 
        memcpy(&new_policy, policy, sizeof(*policy));
+
+       /* Use the default policy if it's valid. */
+       if (cpufreq_driver->setpolicy)
+               cpufreq_parse_governor(policy->governor->name,
+                                       &new_policy.policy, NULL);
+
        /* assure that the starting sequence is run in cpufreq_set_policy */
        policy->governor = NULL;
 
@@ -845,8 +851,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-                                 unsigned int cpu, struct device *dev,
-                                 bool frozen)
+                                 unsigned int cpu, struct device *dev)
 {
        int ret = 0;
        unsigned long flags;
@@ -877,11 +882,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                }
        }
 
-       /* Don't touch sysfs links during light-weight init */
-       if (!frozen)
-               ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-
-       return ret;
+       return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 }
 #endif
 
@@ -926,6 +927,27 @@ err_free_policy:
        return NULL;
 }
 
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+       struct kobject *kobj;
+       struct completion *cmp;
+
+       down_read(&policy->rwsem);
+       kobj = &policy->kobj;
+       cmp = &policy->kobj_unregister;
+       up_read(&policy->rwsem);
+       kobject_put(kobj);
+
+       /*
+        * We need to make sure that the underlying kobj is
+        * actually not referenced anymore by anybody before we
+        * proceed with unloading.
+        */
+       pr_debug("waiting for dropping of refcount\n");
+       wait_for_completion(cmp);
+       pr_debug("wait complete\n");
+}
+
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
        free_cpumask_var(policy->related_cpus);
@@ -986,7 +1008,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
        list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
                if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-                       ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+                       ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
                        up_read(&cpufreq_rwsem);
                        return ret;
                }
@@ -1096,7 +1118,10 @@ err_get_freq:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 err_set_policy_cpu:
+       if (frozen)
+               cpufreq_policy_put_kobj(policy);
        cpufreq_policy_free(policy);
+
 nomem_out:
        up_read(&cpufreq_rwsem);
 
@@ -1118,7 +1143,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
-                                          unsigned int old_cpu, bool frozen)
+                                          unsigned int old_cpu)
 {
        struct device *cpu_dev;
        int ret;
@@ -1126,10 +1151,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
        /* first sibling now owns the new sysfs dir */
        cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
 
-       /* Don't touch sysfs files during light-weight tear-down */
-       if (frozen)
-               return cpu_dev->id;
-
        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
@@ -1196,7 +1217,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                if (!frozen)
                        sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
-               new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+               new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
                if (new_cpu >= 0) {
                        update_policy_cpu(policy, new_cpu);
 
@@ -1218,8 +1239,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
        int ret;
        unsigned long flags;
        struct cpufreq_policy *policy;
-       struct kobject *kobj;
-       struct completion *cmp;
 
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1268,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                        }
                }
 
-               if (!frozen) {
-                       down_read(&policy->rwsem);
-                       kobj = &policy->kobj;
-                       cmp = &policy->kobj_unregister;
-                       up_read(&policy->rwsem);
-                       kobject_put(kobj);
-
-                       /*
-                        * We need to make sure that the underlying kobj is
-                        * actually not referenced anymore by anybody before we
-                        * proceed with unloading.
-                        */
-                       pr_debug("waiting for dropping of refcount\n");
-                       wait_for_completion(cmp);
-                       pr_debug("wait complete\n");
-               }
+               if (!frozen)
+                       cpufreq_policy_put_kobj(policy);
 
                /*
                 * Perform the ->exit() even during light-weight tear-down,
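
The teardown that used to be open-coded in the remove path is factored into cpufreq_policy_put_kobj(), which the error path of __cpufreq_add_dev() can now also reach in the frozen case. The underlying pattern, a hedged sketch assuming the kobject's release callback calls complete() on kobj_unregister:

    kobject_put(&policy->kobj);     /* drops what may be the last reference */
    /* release() fires only when every reference is gone, then completes: */
    wait_for_completion(&policy->kobj_unregister);
    /* only now is it safe to free the structure embedding the kobject */
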
index 9dd6e01..f757a0f 100644 (file)
@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
 static int __init ixp_module_init(void)
 {
        int num = ARRAY_SIZE(ixp4xx_algos);
-       int i, err ;
+       int i, err;
 
        pdev = platform_device_register_full(&ixp_dev_info);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);
 
-       dev = &pdev->dev;
-
        spin_lock_init(&desc_lock);
        spin_lock_init(&emerg_lock);
 
index 446687c..c823daa 100644 (file)
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
        tristate "Intel I/OAT DMA support"
        depends on PCI && X86
        select DMA_ENGINE
+       select DMA_ENGINE_RAID
        select DCA
        help
          Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
        bool "Marvell XOR engine support"
        depends on PLAT_ORION
        select DMA_ENGINE
+       select DMA_ENGINE_RAID
        select ASYNC_TX_ENABLE_CHANNEL_SWITCH
        ---help---
          Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
        tristate "AMCC PPC440SPe ADMA support"
        depends on 440SPe || 440SP
        select DMA_ENGINE
+       select DMA_ENGINE_RAID
        select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
        select ASYNC_TX_ENABLE_CHANNEL_SWITCH
        help
@@ -352,6 +355,7 @@ config NET_DMA
        bool "Network: TCP receive copy offload"
        depends on DMA_ENGINE && NET
        default (INTEL_IOATDMA || FSL_DMA)
+       depends on BROKEN
        help
          This enables the use of DMA engines in the network stack to
          offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
          Simple DMA test client. Say N unless you're debugging a
          DMA Device driver.
 
+config DMA_ENGINE_RAID
+       bool
+
 endif
index f31d647..2787aba 100644 (file)
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
        return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-       return chan->dev->device.parent;
-}
 
 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)
index ea806bd..ef63b90 100644 (file)
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
        __UNMAP_POOL(2),
-       #if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+       #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
        __UNMAP_POOL(16),
        __UNMAP_POOL(128),
        __UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        dma_cookie_t cookie;
        unsigned long flags;
 
-       unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+       unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
        if (!unmap)
                return -ENOMEM;
 
index 20f9a3a..9dfcaf5 100644 (file)
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
 
                um->len = params->buf_size;
                for (i = 0; i < src_cnt; i++) {
-                       unsigned long buf = (unsigned long) thread->srcs[i];
+                       void *buf = thread->srcs[i];
                        struct page *pg = virt_to_page(buf);
-                       unsigned pg_off = buf & ~PAGE_MASK;
+                       unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
                        um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
                                                   um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
                /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
                dsts = &um->addr[src_cnt];
                for (i = 0; i < dst_cnt; i++) {
-                       unsigned long buf = (unsigned long) thread->dsts[i];
+                       void *buf = thread->dsts[i];
                        struct page *pg = virt_to_page(buf);
-                       unsigned pg_off = buf & ~PAGE_MASK;
+                       unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
                        dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
                                               DMA_BIDIRECTIONAL);
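
The dmatest fix keeps the buffer typed as void * and casts to unsigned long only for the offset arithmetic, instead of storing the pointer in an integer. The page-plus-offset decomposition fed to dma_map_page() looks like this (a sketch under the usual lowmem assumption for virt_to_page()):

    void *buf = thread->srcs[i];    /* kmalloc'd lowmem buffer, as above */
    struct page *pg = virt_to_page(buf);
    unsigned long off = (unsigned long)buf & ~PAGE_MASK; /* within the page */

    dma_addr_t addr = dma_map_page(dev, pg, off, len, DMA_TO_DEVICE);
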
index 7086a16..f157c6f 100644 (file)
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
        hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-       return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
                         struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
        hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-                              struct fsl_desc_sw *desc)
-{
-       u64 snoop_bits;
-
-       snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-               ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-       return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
                         struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
        hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-                              struct fsl_desc_sw *desc)
-{
-       u64 snoop_bits;
-
-       snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-               ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-       return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_next(struct fsldma_chan *chan,
                          struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
        struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
        struct fsl_desc_sw *child;
        unsigned long flags;
-       dma_cookie_t cookie;
+       dma_cookie_t cookie = -EINVAL;
 
        spin_lock_irqsave(&chan->desc_lock, flags);
 
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
                                      struct fsl_desc_sw *desc)
 {
        struct dma_async_tx_descriptor *txd = &desc->async_tx;
-       struct device *dev = chan->common.device->dev;
-       dma_addr_t src = get_desc_src(chan, desc);
-       dma_addr_t dst = get_desc_dst(chan, desc);
-       u32 len = get_desc_cnt(chan, desc);
 
        /* Run the link descriptor callback function */
        if (txd->callback) {
index 7807f0e..53fb0c8 100644 (file)
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
        hw_desc->desc_command = (1 << 31);
 }
 
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
-       struct mv_xor_desc *hw_desc = desc->hw_desc;
-       return hw_desc->phy_dest_addr;
-}
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
                                   u32 byte_count)
 {
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        struct dma_async_tx_descriptor *tx;
+       struct dmaengine_unmap_data *unmap;
        int err = 0;
 
-       src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+       src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
 
-       dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+       dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }
 
        /* Fill in src buffer */
-       for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+       for (i = 0; i < PAGE_SIZE; i++)
                ((u8 *) src)[i] = (u8)i;
 
        dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
                goto out;
        }
 
-       dest_dma = dma_map_single(dma_chan->device->dev, dest,
-                                 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+       unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+       if (!unmap) {
+               err = -ENOMEM;
+               goto free_resources;
+       }
+
+       src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+                                PAGE_SIZE, DMA_TO_DEVICE);
+       unmap->to_cnt = 1;
+       unmap->addr[0] = src_dma;
 
-       src_dma = dma_map_single(dma_chan->device->dev, src,
-                                MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+       dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+                                 PAGE_SIZE, DMA_FROM_DEVICE);
+       unmap->from_cnt = 1;
+       unmap->addr[1] = dest_dma;
+
+       unmap->len = PAGE_SIZE;
 
        tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-                                   MV_XOR_TEST_SIZE, 0);
+                                   PAGE_SIZE, 0);
        cookie = mv_xor_tx_submit(tx);
        mv_xor_issue_pending(dma_chan);
        async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
        }
 
        dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-                               MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-       if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+                               PAGE_SIZE, DMA_FROM_DEVICE);
+       if (memcmp(src, dest, PAGE_SIZE)) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
        }
 
 free_resources:
+       dmaengine_unmap_put(unmap);
        mv_xor_free_chan_resources(dma_chan);
 out:
        kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
        dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
        dma_addr_t dest_dma;
        struct dma_async_tx_descriptor *tx;
+       struct dmaengine_unmap_data *unmap;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        int err = 0;
+       int src_count = MV_XOR_NUM_SRC_TEST;
 
-       for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+       for (src_idx = 0; src_idx < src_count; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
        }
 
        /* Fill in src buffers */
-       for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+       for (src_idx = 0; src_idx < src_count; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);
                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }
 
-       for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+       for (src_idx = 0; src_idx < src_count; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);
 
        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
                goto out;
        }
 
+       unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+                                        GFP_KERNEL);
+       if (!unmap) {
+               err = -ENOMEM;
+               goto free_resources;
+       }
+
        /* test xor */
-       dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-                               DMA_FROM_DEVICE);
+       for (i = 0; i < src_count; i++) {
+               unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+                                             0, PAGE_SIZE, DMA_TO_DEVICE);
+               dma_srcs[i] = unmap->addr[i];
+               unmap->to_cnt++;
+       }
 
-       for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-               dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-                                          0, PAGE_SIZE, DMA_TO_DEVICE);
+       unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+                                     DMA_FROM_DEVICE);
+       dest_dma = unmap->addr[src_count];
+       unmap->from_cnt = 1;
+       unmap->len = PAGE_SIZE;
 
        tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-                                MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+                                src_count, PAGE_SIZE, 0);
 
        cookie = mv_xor_tx_submit(tx);
        mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
        }
 
 free_resources:
+       dmaengine_unmap_put(unmap);
        mv_xor_free_chan_resources(dma_chan);
 out:
-       src_idx = MV_XOR_NUM_SRC_TEST;
+       src_idx = src_count;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
                int i = 0;
 
                for_each_child_of_node(pdev->dev.of_node, np) {
+                       struct mv_xor_chan *chan;
                        dma_cap_mask_t cap_mask;
                        int irq;
 
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
                                goto err_channel_add;
                        }
 
-                       xordev->channels[i] =
-                               mv_xor_channel_add(xordev, pdev, i,
-                                                  cap_mask, irq);
-                       if (IS_ERR(xordev->channels[i])) {
-                               ret = PTR_ERR(xordev->channels[i]);
-                               xordev->channels[i] = NULL;
+                       chan = mv_xor_channel_add(xordev, pdev, i,
+                                                 cap_mask, irq);
+                       if (IS_ERR(chan)) {
+                               ret = PTR_ERR(chan);
                                irq_dispose_mapping(irq);
                                goto err_channel_add;
                        }
 
+                       xordev->channels[i] = chan;
                        i++;
                }
        } else if (pdata && pdata->channels) {
                for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
                        struct mv_xor_channel_data *cd;
+                       struct mv_xor_chan *chan;
                        int irq;
 
                        cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
                                goto err_channel_add;
                        }
 
-                       xordev->channels[i] =
-                               mv_xor_channel_add(xordev, pdev, i,
-                                                  cd->cap_mask, irq);
-                       if (IS_ERR(xordev->channels[i])) {
-                               ret = PTR_ERR(xordev->channels[i]);
+                       chan = mv_xor_channel_add(xordev, pdev, i,
+                                                 cd->cap_mask, irq);
+                       if (IS_ERR(chan)) {
+                               ret = PTR_ERR(chan);
                                goto err_channel_add;
                        }
+
+                       xordev->channels[i] = chan;
                }
        }
 
index cdf0483..536632f 100644 (file)
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static inline void _init_desc(struct dma_pl330_desc *desc)
 {
-       desc->pchan = NULL;
        desc->req.x = &desc->px;
        desc->req.token = desc;
        desc->rqcfg.swap = SWAP_NO;
-       desc->rqcfg.privileged = 0;
-       desc->rqcfg.insnaccess = 0;
        desc->rqcfg.scctl = SCCTRL0;
        desc->rqcfg.dcctl = DCCTRL0;
        desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
        if (!pdmac)
                return 0;
 
-       desc = kmalloc(count * sizeof(*desc), flg);
+       desc = kcalloc(count, sizeof(*desc), flg);
        if (!desc)
                return 0;
 
index 8da48c6..8bba298 100644 (file)
@@ -532,29 +532,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
        hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
 }
 
-/**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
-                                       int value, unsigned long flags)
-{
-       struct dma_cdb *hw_desc = desc->hw_desc;
-
-       memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
-       desc->hw_next = NULL;
-       desc->src_cnt = 1;
-       desc->dst_cnt = 1;
-
-       if (flags & DMA_PREP_INTERRUPT)
-               set_bit(PPC440SPE_DESC_INT, &desc->flags);
-       else
-               clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
-       hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
-       hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
-       hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
 /**
  * ppc440spe_desc_set_src_addr - set source address into the descriptor
  */
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
                struct ppc440spe_adma_chan *chan,
                dma_cookie_t cookie)
 {
-       int i;
-
        BUG_ON(desc->async_tx.cookie < 0);
        if (desc->async_tx.cookie > 0) {
                cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
                        ppc440spe_adma_prep_dma_interrupt;
        }
        pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
-         "( %s%s%s%s%s%s%s)\n",
+         "( %s%s%s%s%s%s)\n",
          dev_name(adev->dev),
          dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
          dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
index bae6c29..17686ca 100644 (file)
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
        dma_async_tx_callback callback;
        void *param;
        struct dma_async_tx_descriptor *txd = &desc->txd;
-       struct txx9dmac_slave *ds = dc->chan.private;
 
        dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
                 txd->cookie, desc);
index b0bb056..281029d 100644 (file)
@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
        .cmd_per_lun            = 1,
        .can_queue              = 1,
        .sdev_attrs             = sbp2_scsi_sysfs_attrs,
-       .no_write_same          = 1,
 };
 
 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
index 299fad6..5373dc5 100644 (file)
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
 
 obj-$(CONFIG_GOOGLE_FIRMWARE)  += google/
 obj-$(CONFIG_EFI)              += efi/
+obj-$(CONFIG_UEFI_CPER)                += efi/
index 3150aa4..6aecbc8 100644 (file)
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
          backend for pstore by default. This setting can be overridden
          using the efivars module's pstore_disable parameter.
 
-config UEFI_CPER
-       def_bool n
-
 endmenu
+
+config UEFI_CPER
+       bool
index 9ba156d..6c2a41e 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Makefile for linux kernel
 #
-obj-y                                  += efi.o vars.o
+obj-$(CONFIG_EFI)                      += efi.o vars.o
 obj-$(CONFIG_EFI_VARS)                 += efivars.o
 obj-$(CONFIG_EFI_VARS_PSTORE)          += efi-pstore.o
 obj-$(CONFIG_UEFI_CPER)                        += cper.o
index 743fd42..4b9dc83 100644 (file)
@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info efi_pstore_info = {
        .owner          = THIS_MODULE,
        .name           = "efi",
+       .flags          = PSTORE_FLAGS_FRAGILE,
        .open           = efi_pstore_open,
        .close          = efi_pstore_close,
        .read           = efi_pstore_read,
index 7b37300..2baf0dd 100644 (file)
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
 
        spin_lock_irqsave(&tlmm_lock, irq_flags);
        writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
-       clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+       clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
        __clear_bit(gpio, msm_gpio.enabled_irqs);
        spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
 
        spin_lock_irqsave(&tlmm_lock, irq_flags);
        __set_bit(gpio, msm_gpio.enabled_irqs);
-       set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+       set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
        writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
        spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }
index fe088a3..8b7e719 100644 (file)
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
        u32 pending;
        unsigned int offset, irqs_handled = 0;
 
-       while ((pending = gpio_rcar_read(p, INTDT))) {
+       while ((pending = gpio_rcar_read(p, INTDT) &
+                         gpio_rcar_read(p, INTMSK))) {
                offset = __ffs(pending);
                gpio_rcar_write(p, INTCLR, BIT(offset));
                generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
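
The handler now ANDs the latched status (INTDT) with the mask register (INTMSK), so a source that was latched and then masked is no longer dispatched; the surviving bits are walked lowest-first. Annotated, the loop reads:

    /* service only sources that are both pending and still enabled */
    while ((pending = gpio_rcar_read(p, INTDT) &
                      gpio_rcar_read(p, INTMSK))) {
            offset = __ffs(pending);                  /* lowest set bit */
            gpio_rcar_write(p, INTCLR, BIT(offset));  /* ack at the controller */
            generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
            irqs_handled++;
    }
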
index b97d6a6..f999689 100644 (file)
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
        if (offset < TWL4030_GPIO_MAX)
                ret = twl4030_set_gpio_direction(offset, 1);
        else
-               ret = -EINVAL;
+               ret = -EINVAL;  /* LED outputs can't be set as input */
 
        if (!ret)
                priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
 static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
 {
        struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
-       int ret = -EINVAL;
+       int ret = 0;
 
        mutex_lock(&priv->mutex);
-       if (offset < TWL4030_GPIO_MAX)
+       if (offset < TWL4030_GPIO_MAX) {
                ret = twl4030_set_gpio_direction(offset, 0);
+               if (ret) {
+                       mutex_unlock(&priv->mutex);
+                       return ret;
+               }
+       }
+
+       /*
+        * LED GPIOs, i.e. offset >= TWL4030_GPIO_MAX, are always output
+        */
 
        priv->direction |= BIT(offset);
        mutex_unlock(&priv->mutex);
index eef09ec..a72cae0 100644 (file)
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
 extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
 
 int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
 void armada_fbdev_fini(struct drm_device *);
 
 int armada_overlay_plane_create(struct drm_device *, unsigned long);
index 4f2b283..62d0ff3 100644 (file)
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
                DRM_UNLOCKED),
 };
 
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+       armada_fbdev_lastclose(dev);
+}
+
 static const struct file_operations armada_drm_fops = {
        .owner                  = THIS_MODULE,
        .llseek                 = no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
        .open                   = NULL,
        .preclose               = NULL,
        .postclose              = NULL,
-       .lastclose              = NULL,
+       .lastclose              = armada_drm_lastclose,
        .unload                 = armada_drm_unload,
        .get_vblank_counter     = drm_vblank_count,
        .enable_vblank          = armada_drm_enable_vblank,
index dd5ea77..948cb14 100644 (file)
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
        drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
        drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
 
-       DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
-               dfb->fb.width, dfb->fb.height,
-               dfb->fb.bits_per_pixel, obj->phys_addr);
+       DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+               dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+               (unsigned long long)obj->phys_addr);
 
        return 0;
 
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
        return ret;
 }
 
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+       struct armada_private *priv = dev->dev_private;
+
+       drm_modeset_lock_all(dev);
+       if (priv->fbdev)
+               drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+       drm_modeset_unlock_all(dev);
+}
+
 void armada_fbdev_fini(struct drm_device *dev)
 {
        struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
                        framebuffer_release(info);
                }
 
+               drm_fb_helper_fini(fbh);
+
                if (fbh->fb)
                        fbh->fb->funcs->destroy(fbh->fb);
 
-               drm_fb_helper_fini(fbh);
-
                priv->fbdev = NULL;
        }
 }
index 9f2356b..887816f 100644 (file)
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
                obj->dev_addr = obj->linear->start;
        }
 
-       DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
-                        obj, obj->phys_addr, obj->dev_addr);
+       DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+                        (unsigned long long)obj->phys_addr,
+                        (unsigned long long)obj->dev_addr);
 
        return 0;
 }
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
                         * refcount on the gem object itself.
                         */
                        drm_gem_object_reference(obj);
-                       dma_buf_put(buf);
                        return obj;
                }
        }
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
        }
 
        dobj->obj.import_attach = attach;
+       get_dma_buf(buf);
 
        /*
         * Don't call dma_buf_map_attachment() here - it maps the
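
Two reference-count fixes in one import path: handing back an already-imported object must not drop the caller's dma_buf reference (hence the removed dma_buf_put()), and storing the dma_buf pointer in a fresh attachment requires taking a reference of our own. A hedged sketch of the keep-a-pointer rule:

    attach = dma_buf_attach(buf, dev->dev);
    if (IS_ERR(attach))
            return ERR_CAST(attach);

    dobj->obj.import_attach = attach;
    get_dma_buf(buf);       /* we keep 'buf'; balanced by dma_buf_put()
                             * when the gem object is destroyed */
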
index 0a1e4a5..8835dcd 100644 (file)
@@ -68,6 +68,8 @@
 #define EDID_QUIRK_DETAILED_SYNC_PP            (1 << 6)
 /* Force reduced-blanking timings for detailed modes */
 #define EDID_QUIRK_FORCE_REDUCED_BLANKING      (1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC                  (1 << 8)
 
 struct detailed_mode_closure {
        struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
 
        /* Medion MD 30217 PG */
        { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+       /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+       { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
        drm_add_display_info(edid, &connector->display_info);
 
+       if (quirks & EDID_QUIRK_FORCE_8BPC)
+               connector->display_info.bpc = 8;
+
        return num_modes;
 }
 EXPORT_SYMBOL(drm_add_edid_modes);
index f53d524..66dd3a0 100644 (file)
@@ -566,11 +566,11 @@ err_unload:
        if (dev->driver->unload)
                dev->driver->unload(dev);
 err_primary_node:
-       drm_put_minor(dev->primary);
+       drm_unplug_minor(dev->primary);
 err_render_node:
-       drm_put_minor(dev->render);
+       drm_unplug_minor(dev->render);
 err_control_node:
-       drm_put_minor(dev->control);
+       drm_unplug_minor(dev->control);
 err_agp:
        if (dev->driver->bus->agp_destroy)
                dev->driver->bus->agp_destroy(dev);
index 0cab2d0..5c64842 100644 (file)
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
 
+       /*
+        * The dri breadcrumb update races against the drm master disappearing.
+        * Instead of trying to fix this (this is by far not the only ums issue)
+        * just don't do the update in kms mode.
+        */
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
        if (dev->primary->master) {
                master_priv = dev->primary->master->driver_priv;
                if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        mutex_init(&dev_priv->dpio_lock);
-       mutex_init(&dev_priv->rps.hw_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
 
-       mutex_init(&dev_priv->pc8.lock);
-       dev_priv->pc8.requirements_met = false;
-       dev_priv->pc8.gpu_idle = false;
-       dev_priv->pc8.irqs_disabled = false;
-       dev_priv->pc8.enabled = false;
-       dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
-       INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+       intel_pm_setup(dev);
 
        intel_display_crc_init(dev);
 
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        }
 
        intel_irq_init(dev);
-       intel_pm_init(dev);
        intel_uncore_sanitize(dev);
 
        /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+       mutex_lock(&dev->struct_mutex);
        i915_gem_context_close(dev, file_priv);
        i915_gem_release(dev, file_priv);
+       mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
index 2e367a1..5b7b7e0 100644 (file)
@@ -651,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
                intel_modeset_init_hw(dev);
 
                drm_modeset_lock_all(dev);
+               drm_mode_config_reset(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);
 
index ccdbecc..90fcccb 100644 (file)
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev)            (IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev)                (IS_BROADWELL(dev) && \
+                                (((dev)->pdev->device & 0xf) == 0x2  || \
+                                ((dev)->pdev->device & 0xf) == 0x6 || \
+                                ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev)                (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev)            (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)                (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0x00F0) == 0x0020)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
index 621c7c6..76d3d1a 100644 (file)
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
        kfree(request);
 }
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
-                                     struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+                                      struct intel_ring_buffer *ring)
 {
-       u32 completed_seqno;
-       u32 acthd;
+       u32 completed_seqno = ring->get_seqno(ring, false);
+       u32 acthd = intel_ring_get_active_head(ring);
+       struct drm_i915_gem_request *request;
+
+       list_for_each_entry(request, &ring->request_list, list) {
+               if (i915_seqno_passed(completed_seqno, request->seqno))
+                       continue;
 
-       acthd = intel_ring_get_active_head(ring);
-       completed_seqno = ring->get_seqno(ring, false);
+               i915_set_reset_status(ring, request, acthd);
+       }
+}
 
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+                                       struct intel_ring_buffer *ring)
+{
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                           struct drm_i915_gem_request,
                                           list);
 
-               if (request->seqno > completed_seqno)
-                       i915_set_reset_status(ring, request, acthd);
-
                i915_gem_free_request(request);
        }
 
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
        struct intel_ring_buffer *ring;
        int i;
 
+       /*
+        * Before we free the objects from the requests, we need to inspect
+        * them for finding the guilty party. As the requests only borrow
+        * their reference to the objects, the inspection must be done first.
+        */
+       for_each_ring(ring, dev_priv, i)
+               i915_gem_reset_ring_status(dev_priv, ring);
+
        for_each_ring(ring, dev_priv, i)
-               i915_gem_reset_ring_lists(dev_priv, ring);
+               i915_gem_reset_ring_cleanup(dev_priv, ring);
 
        i915_gem_cleanup_ringbuffer(dev);
 
index 72a3df3..b0f42b9 100644 (file)
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
-       mutex_lock(&dev->struct_mutex);
        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
        idr_destroy(&file_priv->context_idr);
-       mutex_unlock(&dev->struct_mutex);
 }
 
 static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
        if (ret)
                return ret;
 
-       /* Clear this page out of any CPU caches for coherent swap-in/out. Note
+       /*
+        * Pin can switch back to the default context if we end up calling into
+        * evict_everything - as a last ditch gtt defrag effort that also
+        * switches to the default context. Hence we need to reload from here.
+        */
+       from = ring->last_context;
+
+       /*
+        * Clear this page out of any CPU caches for coherent swap-in/out. Note
         * that thanks to write = false in this call and us not setting any gpu
         * write domains when putting a context object onto the active list
         * (when switching away from it), this won't block.
-        * XXX: We need a real interface to do this instead of trickery. */
+        *
+        * XXX: We need a real interface to do this instead of trickery.
+        */
        ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
        if (ret) {
                i915_gem_object_unpin(to->obj);
index b737653..8f3adc7 100644 (file)
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
        } else
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
+search_again:
        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(vma, &vm->inactive_list, mm_list) {
                if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
                list_del_init(&vma->exec_list);
        }
 
-       /* We expect the caller to unpin, evict all and try again, or give up.
-        * So calling i915_gem_evict_vm() is unnecessary.
+       /* Can we unpin some objects such as idle hw contexts,
+        * or pending flips?
         */
-       return -ENOSPC;
+       ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+       if (ret)
+               return ret;
+
+       /* Only idle the GPU and repeat the search once */
+       i915_gem_retire_requests(dev);
+       nonblocking = true;
+       goto search_again;
 
 found:
        /* drm_mm doesn't allow any other other operations while
index b7e787f..a3ba9a8 100644 (file)
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 {
        struct drm_i915_gem_object *obj;
        struct list_head objects;
-       int i, ret = 0;
+       int i, ret;
 
        INIT_LIST_HEAD(&objects);
        spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
                        DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        ret = -ENOENT;
-                       goto out;
+                       goto err;
                }
 
                if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                                   obj, exec[i].handle, i);
                        ret = -EINVAL;
-                       goto out;
+                       goto err;
                }
 
                drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
        spin_unlock(&file->table_lock);
 
        i = 0;
-       list_for_each_entry(obj, &objects, obj_exec_link) {
+       while (!list_empty(&objects)) {
                struct i915_vma *vma;
 
+               obj = list_first_entry(&objects,
+                                      struct drm_i915_gem_object,
+                                      obj_exec_link);
+
                /*
                 * NOTE: We can leak any vmas created here when something fails
                 * later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
                if (IS_ERR(vma)) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
-                       goto out;
+                       goto err;
                }
 
+               /* Transfer ownership from the objects list to the vmas list. */
                list_add_tail(&vma->exec_list, &eb->vmas);
+               list_del_init(&obj->obj_exec_link);
 
                vma->exec_entry = &exec[i];
                if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
                ++i;
        }
 
+       return 0;
+
 
-out:
+err:
        while (!list_empty(&objects)) {
                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       obj_exec_link);
                list_del_init(&obj->obj_exec_link);
-               if (ret)
-                       drm_gem_object_unreference(&obj->base);
+               drm_gem_object_unreference(&obj->base);
        }
+       /*
+        * Objects already transferred to the vmas list will be unreferenced by
+        * eb_destroy.
+        */
+
        return ret;
 }
 
index 38cb8d4..c79dd2b 100644 (file)
@@ -337,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
                kfree(ppgtt->gen8_pt_dma_addr[i]);
        }
 
-       __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
-       __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+       __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+       __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 }
 
 /**
@@ -1241,6 +1241,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
        bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
        if (bdw_gmch_ctl)
                bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+       if (bdw_gmch_ctl > 4) {
+               WARN_ON(!i915_preliminary_hw_support);
+               return 4<<20;
+       }
+
        return bdw_gmch_ctl << 20;
 }
 
index 080f6fd..54e82a8 100644 (file)
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
        uint32_t val;
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
-               WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+               WARN(crtc->active, "CRTC for pipe %c enabled\n",
                     pipe_name(crtc->pipe));
 
        WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -9135,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);
 
-       if (!IS_HASWELL(dev)) {
+       if (!HAS_DDI(dev)) {
                PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
                PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
        }
@@ -11036,8 +11036,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
        }
 
        intel_modeset_check_state(dev);
-
-       drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11044,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
        intel_setup_overlay(dev);
 
+       drm_modeset_lock_all(dev);
+       drm_mode_config_reset(dev);
        intel_modeset_setup_hw_state(dev, false);
+       drm_modeset_unlock_all(dev);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -11125,14 +11126,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
        u16 gmch_ctrl;
 
-       pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+       pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
        if (state)
                gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
        else
                gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-       pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+       pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
        return 0;
 }
 
index a18e88b..79f91f2 100644 (file)
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
                                    uint32_t sprite_width, int pixel_size,
                                    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
 bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
index f161ac0..e6f782d 100644 (file)
@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
 
        spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
-       if (HAS_PCH_SPLIT(dev)) {
+       if (IS_BROADWELL(dev)) {
+               val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+       } else if (HAS_PCH_SPLIT(dev)) {
                val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
        } else {
                if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
        return val;
 }
 
+static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+       I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
 static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
        DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
        level = intel_panel_compute_brightness(dev, pipe, level);
 
-       if (HAS_PCH_SPLIT(dev))
+       if (IS_BROADWELL(dev))
+               return intel_bdw_panel_set_backlight(dev, level);
+       else if (HAS_PCH_SPLIT(dev))
                return intel_pch_panel_set_backlight(dev, level);
 
        if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
                POSTING_READ(reg);
                I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
 
-               if (HAS_PCH_SPLIT(dev) &&
+               if (IS_BROADWELL(dev)) {
+                       /*
+                        * Broadwell requires PCH override to drive the PCH
+                        * backlight pin. The above will configure the CPU
+                        * backlight pin, which we don't plan to use.
+                        */
+                       tmp = I915_READ(BLC_PWM_PCH_CTL1);
+                       tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
+                       I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+               } else if (HAS_PCH_SPLIT(dev) &&
                    !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
                        tmp = I915_READ(BLC_PWM_PCH_CTL1);
                        tmp |= BLM_PCH_PWM_ENABLE;
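
For reference, both Broadwell paths above are read-modify-write sequences: only the duty-cycle field changes while the enable/override bits are preserved. A minimal sketch of that pattern (the register image and the 16-bit field mask are assumptions for illustration, mirroring BACKLIGHT_DUTY_CYCLE_MASK, not the i915 API):

    #include <stdint.h>

    #define DUTY_CYCLE_MASK 0xffffu  /* assumed low 16-bit duty-cycle field */

    /* Update only the duty-cycle field of a PWM control register image,
     * leaving the enable/override bits in the upper half untouched. */
    static uint32_t pwm_set_duty(uint32_t reg, uint32_t level)
    {
        return (reg & ~DUTY_CYCLE_MASK) | (level & DUTY_CYCLE_MASK);
    }
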
index 6e0d5e0..26c29c1 100644 (file)
@@ -5685,8 +5685,11 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool is_enabled, enable_requested;
+       unsigned long irqflags;
        uint32_t tmp;
 
+       WARN_ON(dev_priv->pc8.enabled);
+
        tmp = I915_READ(HSW_PWR_WELL_DRIVER);
        is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
        enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5702,9 +5705,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
                                      HSW_PWR_WELL_STATE_ENABLED), 20))
                                DRM_ERROR("Timeout enabling power well\n");
                }
+
+               if (IS_BROADWELL(dev)) {
+                       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+                       I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+                                  dev_priv->de_irq_mask[PIPE_B]);
+                       I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+                                  ~dev_priv->de_irq_mask[PIPE_B] |
+                                  GEN8_PIPE_VBLANK);
+                       I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+                                  dev_priv->de_irq_mask[PIPE_C]);
+                       I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+                                  ~dev_priv->de_irq_mask[PIPE_C] |
+                                  GEN8_PIPE_VBLANK);
+                       POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+                       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               }
        } else {
                if (enable_requested) {
-                       unsigned long irqflags;
                        enum pipe p;
 
                        I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -5731,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 static void __intel_power_well_get(struct drm_device *dev,
                                   struct i915_power_well *power_well)
 {
-       if (!power_well->count++)
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!power_well->count++) {
+               hsw_disable_package_c8(dev_priv);
                __intel_set_power_well(dev, true);
+       }
 }
 
 static void __intel_power_well_put(struct drm_device *dev,
                                   struct i915_power_well *power_well)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
        WARN_ON(!power_well->count);
-       if (!--power_well->count && i915_disable_power_well)
+       if (!--power_well->count && i915_disable_power_well) {
                __intel_set_power_well(dev, false);
+               hsw_enable_package_c8(dev_priv);
+       }
 }
 
 void intel_display_power_get(struct drm_device *dev,
@@ -6130,10 +6156,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
        return val;
 }
 
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       mutex_init(&dev_priv->rps.hw_lock);
+
+       mutex_init(&dev_priv->pc8.lock);
+       dev_priv->pc8.requirements_met = false;
+       dev_priv->pc8.gpu_idle = false;
+       dev_priv->pc8.irqs_disabled = false;
+       dev_priv->pc8.enabled = false;
+       dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+       INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
        INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                          intel_gen6_powersave_work);
 }
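
The pc8 state initialized above is reference-count style gating: package C8 is only permitted once every registered blocker has been released, which is why disable_count starts at 2 (one each for requirements_met and gpu_idle). A hedged sketch of the counting scheme, independent of the driver's locking:

    #include <stdbool.h>

    struct c8_state {
        int disable_count;   /* outstanding blockers */
        bool enabled;        /* package C8 currently allowed */
    };

    /* The first blocker disables C8 ... */
    static void c8_get_blocker(struct c8_state *s)
    {
        if (s->disable_count++ == 0)
            s->enabled = false;
    }

    /* ... and the last one to go away re-enables it. */
    static void c8_put_blocker(struct c8_state *s)
    {
        if (--s->disable_count == 0)
            s->enabled = true;
    }

This is also why the power-well get/put hunk above brackets __intel_set_power_well() with hsw_disable_package_c8()/hsw_enable_package_c8(): a held power well acts as one such blocker.
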
index b620337..c2f09d4 100644 (file)
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
        } else if (IS_GEN6(ring->dev)) {
                mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
        } else {
+               /* XXX: gen8 returns to sanity */
                mmio = RING_HWS_PGA(ring->mmio_base);
        }
 
index 0b02078..25cbe07 100644 (file)
@@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
 int intel_gpu_reset(struct drm_device *dev)
 {
        switch (INTEL_INFO(dev)->gen) {
+       case 8:
        case 7:
        case 6: return gen6_do_reset(dev);
        case 5: return ironlake_do_reset(dev);
index 7a3759f..98a22e6 100644 (file)
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
        if (nouveau_runtime_pm == 0)
                return -EINVAL;
 
+       /* are we optimus enabled? */
+       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+               return -EINVAL;
+       }
+
        nv_debug_level(SILENT);
        drm_kms_helper_poll_disable(drm_dev);
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
index 037d324..66ac0ff 100644 (file)
@@ -8,5 +8,6 @@ config DRM_QXL
         select DRM_KMS_HELPER
        select DRM_KMS_FB_HELPER
         select DRM_TTM
+       select CRC32
        help
                QXL virtual GPU for Spice virtualization desktop integration. Do not
                enable this driver unless your distro ships a corresponding X.org QXL
                driver that can handle kernel modesetting.
index 5e827c2..d70aafb 100644 (file)
@@ -24,7 +24,7 @@
  */
 
 
-#include "linux/crc32.h"
+#include <linux/crc32.h>
 
 #include "qxl_drv.h"
 #include "qxl_object.h"
index 80a2012..0b9621c 100644 (file)
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        }
 
        if (tiling_flags & RADEON_TILING_MACRO) {
-               if (rdev->family >= CHIP_BONAIRE)
-                       tmp = rdev->config.cik.tile_config;
-               else if (rdev->family >= CHIP_TAHITI)
-                       tmp = rdev->config.si.tile_config;
-               else if (rdev->family >= CHIP_CAYMAN)
-                       tmp = rdev->config.cayman.tile_config;
-               else
-                       tmp = rdev->config.evergreen.tile_config;
+               evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
 
-               switch ((tmp & 0xf0) >> 4) {
-               case 0: /* 4 banks */
-                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
-                       break;
-               case 1: /* 8 banks */
-               default:
-                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
-                       break;
-               case 2: /* 16 banks */
-                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
-                       break;
+               /* Set NUM_BANKS. */
+               if (rdev->family >= CHIP_BONAIRE) {
+                       unsigned tileb, index, num_banks, tile_split_bytes;
+
+                       /* Calculate the macrotile mode index. */
+                       tile_split_bytes = 64 << tile_split;
+                       tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+                       tileb = min(tile_split_bytes, tileb);
+
+                       for (index = 0; tileb > 64; index++) {
+                               tileb >>= 1;
+                       }
+
+                       if (index >= 16) {
+                               DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+                                         target_fb->bits_per_pixel, tile_split);
+                               return -EINVAL;
+                       }
+
+                       num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+               } else {
+                       /* SI and older. */
+                       if (rdev->family >= CHIP_TAHITI)
+                               tmp = rdev->config.si.tile_config;
+                       else if (rdev->family >= CHIP_CAYMAN)
+                               tmp = rdev->config.cayman.tile_config;
+                       else
+                               tmp = rdev->config.evergreen.tile_config;
+
+                       switch ((tmp & 0xf0) >> 4) {
+                       case 0: /* 4 banks */
+                               fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+                               break;
+                       case 1: /* 8 banks */
+                       default:
+                               fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+                               break;
+                       case 2: /* 16 banks */
+                               fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+                               break;
+                       }
                }
 
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-
-               evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
                fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
                fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
                fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
@@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
        if (rdev->family >= CHIP_BONAIRE) {
-               u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
-               u32 num_rb = rdev->config.cik.max_backends_per_se;
-               if (num_pipe_configs > 8)
-                       num_pipe_configs = 8;
-               if (num_pipe_configs == 8)
-                       fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
-               else if (num_pipe_configs == 4) {
-                       if (num_rb == 4)
-                               fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
-                       else if (num_rb < 4)
-                               fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
-               } else if (num_pipe_configs == 2)
-                       fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
+               /* Read the pipe config from the 2D TILED SCANOUT mode.
+                * It should be the same for the other modes too, but not all
+                * modes set the pipe config field. */
+               u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
+
+               fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
        } else if ((rdev->family == CHIP_TAHITI) ||
                   (rdev->family == CHIP_PITCAIRN))
                fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
-       else if (rdev->family == CHIP_VERDE)
+       else if ((rdev->family == CHIP_VERDE) ||
+                (rdev->family == CHIP_OLAND) ||
+                (rdev->family == CHIP_HAINAN)) /* for completeness.  HAINAN has no display hw */
                fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
 
        switch (radeon_crtc->crtc_id) {
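
The macrotile index computed above reduces to log2(min(64 << tile_split, bytes per 8x8 microtile) / 64). A standalone restatement with one worked value (illustrative only, not driver code):

    #include <stdio.h>

    /* index = log2(min(64 << tile_split, 8*8*bpp/8) / 64); caller rejects >= 16 */
    static int macrotile_index(unsigned bpp, unsigned tile_split)
    {
        unsigned tile_split_bytes = 64u << tile_split;
        unsigned tileb = 8 * 8 * bpp / 8;  /* bytes in one 8x8 microtile */
        int index = 0;

        if (tileb > tile_split_bytes)
            tileb = tile_split_bytes;
        while (tileb > 64) {
            tileb >>= 1;
            index++;
        }
        return index;
    }

    int main(void)
    {
        printf("%d\n", macrotile_index(32, 2));  /* 32bpp, 256B split -> 2 */
        return 0;
    }
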
index b43a3a3..e950fab 100644 (file)
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
  * Returns the disabled RB bitmask.
  */
 static u32 cik_get_rb_disabled(struct radeon_device *rdev,
-                             u32 max_rb_num, u32 se_num,
+                             u32 max_rb_num_per_se,
                              u32 sh_per_se)
 {
        u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
 
        data >>= BACKEND_DISABLE_SHIFT;
 
-       mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
+       mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
 
        return data & mask;
 }
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
  */
 static void cik_setup_rb(struct radeon_device *rdev,
                         u32 se_num, u32 sh_per_se,
-                        u32 max_rb_num)
+                        u32 max_rb_num_per_se)
 {
        int i, j;
        u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
        for (i = 0; i < se_num; i++) {
                for (j = 0; j < sh_per_se; j++) {
                        cik_select_se_sh(rdev, i, j);
-                       data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+                       data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
                        if (rdev->family == CHIP_HAWAII)
                                disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
                        else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
        cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
        mask = 1;
-       for (i = 0; i < max_rb_num; i++) {
+       for (i = 0; i < max_rb_num_per_se * se_num; i++) {
                if (!(disabled_rbs & mask))
                        enabled_rbs |= mask;
                mask <<= 1;
        }
 
+       rdev->config.cik.backend_enable_mask = enabled_rbs;
+
        for (i = 0; i < se_num; i++) {
                cik_select_se_sh(rdev, i, 0xffffffff);
                data = 0;
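
The bug being fixed is a unit mismatch: the value passed in was already a per-SE backend count, so dividing by se_num again under-sized the per-SH bitmask, and looping only that many times scanned too few bits on multi-SE parts. The corrected enumeration, as a hedged standalone sketch:

    #include <stdint.h>

    /* Invert a packed per-(SE,SH) disable bitmap into a global enable mask.
     * The total number of bits scanned is rbs_per_se * num_se, matching
     * the new loop bound in cik_setup_rb()/si_setup_rb(). */
    static uint32_t enabled_rb_mask(uint32_t disabled_rbs,
                                    unsigned rbs_per_se, unsigned num_se)
    {
        uint32_t enabled = 0, mask = 1;
        unsigned i;

        for (i = 0; i < rbs_per_se * num_se; i++, mask <<= 1)
            if (!(disabled_rbs & mask))
                enabled |= mask;
        return enabled;
    }
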
index 0300727..d08b83c 100644 (file)
@@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
                radeon_ring_write(ring, 0); /* src/dst endian swap */
                radeon_ring_write(ring, src_offset & 0xffffffff);
                radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
-               radeon_ring_write(ring, dst_offset & 0xfffffffc);
+               radeon_ring_write(ring, dst_offset & 0xffffffff);
                radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
index de86493..713a5d3 100644 (file)
@@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        }
 
        sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
-       if (sad_count < 0) {
+       if (sad_count <= 0) {
                DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
                return;
        }
@@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
        }
 
        sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-       if (sad_count < 0) {
+       if (sad_count <= 0) {
                DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
                return;
        }
@@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
        rdev->audio.enabled = true;
 
        if (ASIC_IS_DCE8(rdev))
-               rdev->audio.num_pins = 7;
+               rdev->audio.num_pins = 6;
+       else if (ASIC_IS_DCE61(rdev))
+               rdev->audio.num_pins = 4;
        else
                rdev->audio.num_pins = 6;
 
index aa695c4..0c6d5ce 100644 (file)
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        }
 
        sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
-       if (sad_count < 0) {
+       if (sad_count <= 0) {
                DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
                return;
        }
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
        }
 
        sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-       if (sad_count < 0) {
+       if (sad_count <= 0) {
                DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
                return;
        }
index 11aab2a..f59a9e9 100644 (file)
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                    (rdev->pdev->device == 0x999C)) {
                        rdev->config.cayman.max_simds_per_se = 6;
                        rdev->config.cayman.max_backends_per_se = 2;
+                       rdev->config.cayman.max_hw_contexts = 8;
+                       rdev->config.cayman.sx_max_export_size = 256;
+                       rdev->config.cayman.sx_max_export_pos_size = 64;
+                       rdev->config.cayman.sx_max_export_smx_size = 192;
                } else if ((rdev->pdev->device == 0x9903) ||
                           (rdev->pdev->device == 0x9904) ||
                           (rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                           (rdev->pdev->device == 0x999D)) {
                        rdev->config.cayman.max_simds_per_se = 4;
                        rdev->config.cayman.max_backends_per_se = 2;
+                       rdev->config.cayman.max_hw_contexts = 8;
+                       rdev->config.cayman.sx_max_export_size = 256;
+                       rdev->config.cayman.sx_max_export_pos_size = 64;
+                       rdev->config.cayman.sx_max_export_smx_size = 192;
                } else if ((rdev->pdev->device == 0x9919) ||
                           (rdev->pdev->device == 0x9990) ||
                           (rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                           (rdev->pdev->device == 0x99A0)) {
                        rdev->config.cayman.max_simds_per_se = 3;
                        rdev->config.cayman.max_backends_per_se = 1;
+                       rdev->config.cayman.max_hw_contexts = 4;
+                       rdev->config.cayman.sx_max_export_size = 128;
+                       rdev->config.cayman.sx_max_export_pos_size = 32;
+                       rdev->config.cayman.sx_max_export_smx_size = 96;
                } else {
                        rdev->config.cayman.max_simds_per_se = 2;
                        rdev->config.cayman.max_backends_per_se = 1;
+                       rdev->config.cayman.max_hw_contexts = 4;
+                       rdev->config.cayman.sx_max_export_size = 128;
+                       rdev->config.cayman.sx_max_export_pos_size = 32;
+                       rdev->config.cayman.sx_max_export_smx_size = 96;
                }
                rdev->config.cayman.max_texture_channel_caches = 2;
                rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                rdev->config.cayman.max_gs_threads = 32;
                rdev->config.cayman.max_stack_entries = 512;
                rdev->config.cayman.sx_num_of_sets = 8;
-               rdev->config.cayman.sx_max_export_size = 256;
-               rdev->config.cayman.sx_max_export_pos_size = 64;
-               rdev->config.cayman.sx_max_export_smx_size = 192;
-               rdev->config.cayman.max_hw_contexts = 8;
                rdev->config.cayman.sq_num_cf_insts = 2;
 
                rdev->config.cayman.sc_prim_fifo_size = 0x40;
index b1f990d..45e1f44 100644 (file)
@@ -1940,7 +1940,7 @@ struct si_asic {
        unsigned sc_earlyz_tile_fifo_size;
 
        unsigned num_tile_pipes;
-       unsigned num_backends_per_se;
+       unsigned backend_enable_mask;
        unsigned backend_disable_mask_per_asic;
        unsigned backend_map;
        unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
        unsigned sc_earlyz_tile_fifo_size;
 
        unsigned num_tile_pipes;
-       unsigned num_backends_per_se;
+       unsigned backend_enable_mask;
        unsigned backend_disable_mask_per_asic;
        unsigned backend_map;
        unsigned num_texture_channel_caches;
index e354ce9..c0425bb 100644 (file)
@@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = {
                .hdmi_setmode = &evergreen_hdmi_setmode,
        },
        .copy = {
-               .blit = NULL,
+               .blit = &cik_copy_cpdma,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &cik_copy_dma,
                .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = {
                .hdmi_setmode = &evergreen_hdmi_setmode,
        },
        .copy = {
-               .blit = NULL,
+               .blit = &cik_copy_cpdma,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &cik_copy_dma,
                .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
index 9f5ff28..db39ea3 100644 (file)
  *   2.33.0 - Add SI tiling mode array query
  *   2.34.0 - Add CIK tiling mode array query
  *   2.35.0 - Add CIK macrotile mode array query
+ *   2.36.0 - Fix CIK DCE tiling setup
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       35
+#define KMS_DRIVER_MINOR       36
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -508,15 +509,6 @@ static const struct file_operations radeon_driver_kms_fops = {
 #endif
 };
 
-
-static void
-radeon_pci_shutdown(struct pci_dev *pdev)
-{
-       struct drm_device *dev = pci_get_drvdata(pdev);
-
-       radeon_driver_unload_kms(dev);
-}
-
 static struct drm_driver kms_driver = {
        .driver_features =
            DRIVER_USE_AGP |
@@ -586,7 +578,6 @@ static struct pci_driver radeon_kms_pci_driver = {
        .probe = radeon_pci_probe,
        .remove = radeon_pci_remove,
        .driver.pm = &radeon_pm_ops,
-       .shutdown = radeon_pci_shutdown,
 };
 
 static int __init radeon_init(void)
index 55d0b47..21d593c 100644 (file)
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        case RADEON_INFO_SI_CP_DMA_COMPUTE:
                *value = 1;
                break;
+       case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
+               if (rdev->family >= CHIP_BONAIRE) {
+                       *value = rdev->config.cik.backend_enable_mask;
+               } else if (rdev->family >= CHIP_TAHITI) {
+                       *value = rdev->config.si.backend_enable_mask;
+               } else {
+                       DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+               }
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
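
A plausible userspace consumer of the new query, sketched against libdrm (drmCommandWriteRead() and struct drm_radeon_info are real libdrm/radeon_drm.h interfaces; treating the result as a single u32 copy-out is an assumption based on the hunk):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    /* Ask the kernel which render backends are enabled (SI and newer). */
    static int query_backend_mask(int fd, uint32_t *mask)
    {
        struct drm_radeon_info info;
        uint32_t value = 0;

        memset(&info, 0, sizeof(info));
        info.request = RADEON_INFO_SI_BACKEND_ENABLED_MASK;
        info.value = (uintptr_t)&value;  /* kernel copies the mask here */

        if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)))
            return -1;  /* older kernels reject the request */
        *mask = value;
        return 0;
    }
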
index 373d088..b9c0529 100644 (file)
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                return -EINVAL;
        }
 
-       if ((start >> 28) != (end >> 28)) {
+       if ((start >> 28) != ((end - 1) >> 28)) {
                DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
                          start, end);
                return -EINVAL;
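
The off-by-one matters for buffers that end exactly on a 256MB boundary: end is exclusive, so the last addressed byte is end - 1 and the old comparison rejected perfectly valid relocations. A small demonstration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t start = 0x0ff00000, end = 0x10000000;  /* ends on the boundary */

        /* old: flags this valid reloc as crossing the 256MB window */
        printf("old: %s\n", (start >> 28) != (end >> 28) ? "reject" : "ok");
        /* new: the last byte, end - 1, is still in the same window */
        printf("new: %s\n", (start >> 28) != ((end - 1) >> 28) ? "reject" : "ok");
        return 0;
    }
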
index 1c56062..e7dab06 100644 (file)
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
        base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
        base = G_000100_MC_FB_START(base) << 16;
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+       /* Some boards seem to be configured for 128MB of sideport memory,
+        * but really only have 64MB.  Just skip the sideport and use
+        * UMA memory.
+        */
+       if (rdev->mc.igp_sideport_enabled &&
+           (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+               base += 128 * 1024 * 1024;
+               rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+               rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+       }
 
        /* Use K8 direct mapping for fast fb access. */ 
        rdev->fastfb_working = false;
index 913b025..374499d 100644 (file)
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
        pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
                                                       ASIC_INTERNAL_MEMORY_SS, 0);
 
+       /* disable ss, causes hangs on some cayman boards */
+       if (rdev->family == CHIP_CAYMAN) {
+               pi->sclk_ss = false;
+               pi->mclk_ss = false;
+       }
+
        if (pi->sclk_ss || pi->mclk_ss)
                pi->dynamic_ss = true;
        else
index a36736d..85e1edf 100644 (file)
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
 }
 
 static u32 si_get_rb_disabled(struct radeon_device *rdev,
-                             u32 max_rb_num, u32 se_num,
+                             u32 max_rb_num_per_se,
                              u32 sh_per_se)
 {
        u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
 
        data >>= BACKEND_DISABLE_SHIFT;
 
-       mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+       mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
 
        return data & mask;
 }
 
 static void si_setup_rb(struct radeon_device *rdev,
                        u32 se_num, u32 sh_per_se,
-                       u32 max_rb_num)
+                       u32 max_rb_num_per_se)
 {
        int i, j;
        u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
        for (i = 0; i < se_num; i++) {
                for (j = 0; j < sh_per_se; j++) {
                        si_select_se_sh(rdev, i, j);
-                       data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+                       data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
                        disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
                }
        }
        si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
        mask = 1;
-       for (i = 0; i < max_rb_num; i++) {
+       for (i = 0; i < max_rb_num_per_se * se_num; i++) {
                if (!(disabled_rbs & mask))
                        enabled_rbs |= mask;
                mask <<= 1;
        }
 
+       rdev->config.si.backend_enable_mask = enabled_rbs;
+
        for (i = 0; i < se_num; i++) {
                si_select_se_sh(rdev, i, 0xffffffff);
                data = 0;
index 15b86a9..4061521 100644 (file)
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
-           (ttm == NULL || ttm->state == tt_unpopulated)) {
+           (ttm == NULL || (ttm->state == tt_unpopulated &&
+                            !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }
index b249ab9..6440eea 100644 (file)
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
-       page_last = vma_pages(vma) +
-           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+               vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+       page_last = vma_pages(vma) + vma->vm_pgoff -
+               drm_vma_node_start(&bo->vma_node);
 
        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
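
The sign of the correction term is the whole fix: vm_pgoff is the mmap offset in pages (the node start plus the offset into the object for partial mappings), so the page inside the BO is fault page + (vm_pgoff - node_start); the old code applied the delta with the opposite sign. The two versions agree only when the whole object is mapped from its start (vm_pgoff == node_start). A worked illustration with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long fault_page = 3;     /* faulting page index within the VMA */
        unsigned long node_start = 1000;  /* BO base offset in the vma manager */
        unsigned long vm_pgoff   = 1004;  /* mmap started 4 pages into the BO */

        printf("old: %lu\n", fault_page + node_start - vm_pgoff);  /* underflows */
        printf("new: %lu\n", fault_page + vm_pgoff - node_start);  /* 7, correct */
        return 0;
    }
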
index a51f48e..45d5b5a 100644 (file)
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                                  SVGA_FIFO_3D_HWVERSION));
                break;
        }
+       case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+               param->value = dev_priv->memory_size;
+               break;
        default:
                DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                          param->param);
index 92d1206..f80b700 100644 (file)
@@ -377,6 +377,9 @@ static int intel_idle(struct cpuidle_device *dev,
 
        if (!current_set_polling_and_test()) {
 
+               if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+                       clflush((void *)&current_thread_info()->flags);
+
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
index acb7f90..749a6ca 100644 (file)
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
                        .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                        .address = 1,
                        .scan_index = 1,
-                       .scan_type = IIO_ST('u', 12, 16, 0),
+                       .scan_type = {
+                               .sign = 'u',
+                               .realbits = 12,
+                               .storagebits = 16,
+                               .shift = 0,
+                               .endianness = IIO_BE,
+                       },
                },
                .channel[1] = {
                        .type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
                        .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                        .address = 0,
                        .scan_index = 0,
-                       .scan_type = IIO_ST('u', 12, 16, 0),
+                       .scan_type = {
+                               .sign = 'u',
+                               .realbits = 12,
+                               .storagebits = 16,
+                               .shift = 0,
+                               .endianness = IIO_BE,
+                       },
                },
                .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
                .int_vref_mv = 2500,
index 3fb7757..368660d 100644 (file)
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
                .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .address = ADIS16448_BARO_OUT,
                .scan_index = ADIS16400_SCAN_BARO,
-               .scan_type = IIO_ST('s', 16, 16, 0),
+               .scan_type = {
+                       .sign = 's',
+                       .realbits = 16,
+                       .storagebits = 16,
+                       .endianness = IIO_BE,
+               },
        },
        ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
        IIO_CHAN_SOFT_TIMESTAMP(11)
index 21df571..0922e39 100644 (file)
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
                return -EINVAL;
        }
 
-       return IIO_VAL_INT_PLUS_MICRO;
+       return IIO_VAL_INT;
 }
 
 static int cm36651_write_int_time(struct cm36651_data *cm36651,
index c47c203..0717940 100644 (file)
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
 static void rem_ref(struct iw_cm_id *cm_id)
 {
        struct iwcm_id_private *cm_id_priv;
+       int cb_destroy;
+
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
-       if (iwcm_deref_id(cm_id_priv) &&
-           test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+
+       /*
+        * Test bit before deref in case the cm_id gets freed on another
+        * thread.
+        */
+       cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+       if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                free_cm_id(cm_id_priv);
        }
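
The race fixed here is a general refcounting rule: any flag stored inside the refcounted object must be read before the final deref, because once iwcm_deref_id() drops the last reference another thread may already be freeing the object. A minimal sketch of the safe ordering with C11 atomics (illustrative types, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcount;
        atomic_bool callback_destroy;
    };

    static void obj_put(struct obj *o)
    {
        /* Read the flag while we still hold a reference ... */
        bool cb_destroy = atomic_load(&o->callback_destroy);

        /* ... only then drop the reference; past this point *o may
         * be gone unless we were the last holder. */
        if (atomic_fetch_sub(&o->refcount, 1) == 1 && cb_destroy)
            free(o);
    }
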
index bdc842e..a283274 100644 (file)
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)                      \
        do {                                                            \
-               (udata)->inbuf  = (void __user *) (ibuf);               \
+               (udata)->inbuf  = (const void __user *) (ibuf);         \
                (udata)->outbuf = (void __user *) (obuf);               \
                (udata)->inlen  = (ilen);                               \
                (udata)->outlen = (olen);                               \
        } while (0)
 
+#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen)                  \
+       do {                                                                    \
+               (udata)->inbuf  = (ilen) ? (const void __user *) (ibuf) : NULL; \
+               (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL;       \
+               (udata)->inlen  = (ilen);                                       \
+               (udata)->outlen = (olen);                                       \
+       } while (0)
+
 /*
  * Our lifetime rules for these structs are the following:
  *
index 65f6e7d..f1cc838 100644 (file)
@@ -2593,6 +2593,9 @@ out_put:
 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
                                union ib_flow_spec *ib_spec)
 {
+       if (kern_spec->reserved)
+               return -EINVAL;
+
        ib_spec->type = kern_spec->type;
 
        switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
        void *ib_spec;
        int i;
 
+       if (ucore->inlen < sizeof(cmd))
+               return -EINVAL;
+
        if (ucore->outlen < sizeof(resp))
                return -ENOSPC;
 
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
            (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
                return -EINVAL;
 
+       if (cmd.flow_attr.reserved[0] ||
+           cmd.flow_attr.reserved[1])
+               return -EINVAL;
+
        if (cmd.flow_attr.num_of_specs) {
                kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
                                         GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
        if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
                pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
                        i, cmd.flow_attr.size);
+               err = -EINVAL;
                goto err_free;
        }
        flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
        struct ib_uobject               *uobj;
        int                             ret;
 
+       if (ucore->inlen < sizeof(cmd))
+               return -EINVAL;
+
        ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
        if (ret)
                return ret;
 
+       if (cmd.comp_mask)
+               return -EINVAL;
+
        uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
                              file->ucontext);
        if (!uobj)
index 3438694..08219fb 100644 (file)
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
                        return -EINVAL;
 
+               if (ex_hdr.cmd_hdr_reserved)
+                       return -EINVAL;
+
                if (ex_hdr.response) {
                        if (!hdr.out_words && !ex_hdr.provider_out_words)
                                return -EINVAL;
+
+                       if (!access_ok(VERIFY_WRITE,
+                                      (void __user *) (unsigned long) ex_hdr.response,
+                                      (hdr.out_words + ex_hdr.provider_out_words) * 8))
+                               return -EFAULT;
                } else {
                        if (hdr.out_words || ex_hdr.provider_out_words)
                                return -EINVAL;
                }
 
-               INIT_UDATA(&ucore,
-                          (hdr.in_words) ? buf : 0,
-                          (unsigned long)ex_hdr.response,
-                          hdr.in_words * 8,
-                          hdr.out_words * 8);
-
-               INIT_UDATA(&uhw,
-                          (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
-                          (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
-                          ex_hdr.provider_in_words * 8,
-                          ex_hdr.provider_out_words * 8);
+               INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
+                                      hdr.in_words * 8, hdr.out_words * 8);
+
+               INIT_UDATA_BUF_OR_NULL(&uhw,
+                                      buf + ucore.inlen,
+                                      (unsigned long) ex_hdr.response + ucore.outlen,
+                                      ex_hdr.provider_in_words * 8,
+                                      ex_hdr.provider_out_words * 8);
 
                err = uverbs_ex_cmd_table[command](file,
                                                   &ucore,
index 12fef76..4512687 100644 (file)
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
-       (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
-       (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
-                                 struct l2t_entry *l2t)
-{
-       unsigned int ntuple = 0;
-       u32 viid;
-
-       switch (dev->rdev.lldi.filt_mode) {
-
-       /* default filter mode */
-       case HW_TPL_FR_MT_PR_IV_P_FC:
-               if (l2t->vlan == VLAN_NONE)
-                       ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-               else {
-                       ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-                       ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
-               }
-               ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-                         FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-               break;
-       case HW_TPL_FR_MT_PR_OV_P_FC: {
-               viid = cxgb4_port_viid(l2t->neigh->dev);
-
-               ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
-               ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-               ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-               ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-                         FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-               break;
-       }
-       default:
-               break;
-       }
-       return ntuple;
-}
-
 static int send_connect(struct c4iw_ep *ep)
 {
        struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
                        req->local_ip = la->sin_addr.s_addr;
                        req->peer_ip = ra->sin_addr.s_addr;
                        req->opt0 = cpu_to_be64(opt0);
-                       req->params = cpu_to_be32(select_ntuple(ep->com.dev,
-                                               ep->dst, ep->l2t));
+                       req->params = cpu_to_be32(cxgb4_select_ntuple(
+                                               ep->com.dev->rdev.lldi.ports[0],
+                                               ep->l2t));
                        req->opt2 = cpu_to_be32(opt2);
                } else {
                        req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
                        req6->peer_ip_lo = *((__be64 *)
                                                (ra6->sin6_addr.s6_addr + 8));
                        req6->opt0 = cpu_to_be64(opt0);
-                       req6->params = cpu_to_be32(
-                                       select_ntuple(ep->com.dev, ep->dst,
-                                                     ep->l2t));
+                       req6->params = cpu_to_be32(cxgb4_select_ntuple(
+                                               ep->com.dev->rdev.lldi.ports[0],
+                                               ep->l2t));
                        req6->opt2 = cpu_to_be32(opt2);
                }
        } else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
                        t5_req->peer_ip = ra->sin_addr.s_addr;
                        t5_req->opt0 = cpu_to_be64(opt0);
                        t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
-                                               select_ntuple(ep->com.dev,
-                                               ep->dst, ep->l2t)));
+                                                    cxgb4_select_ntuple(
+                                            ep->com.dev->rdev.lldi.ports[0],
+                                            ep->l2t)));
                        t5_req->opt2 = cpu_to_be32(opt2);
                } else {
                        t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
                                                (ra6->sin6_addr.s6_addr + 8));
                        t5_req6->opt0 = cpu_to_be64(opt0);
                        t5_req6->params = (__force __be64)cpu_to_be32(
-                               select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+                                                       cxgb4_select_ntuple(
+                                               ep->com.dev->rdev.lldi.ports[0],
+                                               ep->l2t));
                        t5_req6->opt2 = cpu_to_be32(opt2);
                }
        }
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        memset(req, 0, sizeof(*req));
        req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
        req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
-       req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+       req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+                                    ep->com.dev->rdev.lldi.ports[0],
                                     ep->l2t));
        sin = (struct sockaddr_in *)&ep->com.local_addr;
        req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        /*
         * Allocate a server TID.
         */
-       if (dev->rdev.lldi.enable_fw_ofld_conn)
+       if (dev->rdev.lldi.enable_fw_ofld_conn &&
+           ep->com.local_addr.ss_family == AF_INET)
                ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
                                             cm_id->local_addr.ss_family, ep);
        else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        /*
         * Calculate the server tid from filter hit index from cpl_rx_pkt.
         */
-       stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
-                                         - dev->rdev.lldi.tids->sftid_base
-                                         + dev->rdev.lldi.tids->nstids;
+       stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
 
        lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
        if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        window = (__force u16) htons((__force u16)tcph->window);
 
        /* Calculate filter portion for LE region. */
-       filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+       filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+                                                   dev->rdev.lldi.ports[0],
+                                                   e));
 
        /*
         * Synthesize the cpl_pass_accept_req. We have everything except the
index 4cb8eb2..84e4500 100644 (file)
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
        return ret;
 }
 
-int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
 {
        u32 remain = len;
        u32 dmalen;
index 6be57c3..9804fca 100644 (file)
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
        isert_conn->conn_rx_descs = NULL;
 }
 
+static void isert_cq_tx_work(struct work_struct *);
 static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
 static void isert_cq_rx_callback(struct ib_cq *, void *);
 
 static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
                cq_desc[i].device = device;
                cq_desc[i].cq_index = i;
 
+               INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
                device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_RX_CQ_LEN, i);
-               if (IS_ERR(device->dev_rx_cq[i]))
+               if (IS_ERR(device->dev_rx_cq[i])) {
+                       ret = PTR_ERR(device->dev_rx_cq[i]);
+                       device->dev_rx_cq[i] = NULL;
                        goto out_cq;
+               }
 
+               INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
                device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_TX_CQ_LEN, i);
-               if (IS_ERR(device->dev_tx_cq[i]))
+               if (IS_ERR(device->dev_tx_cq[i])) {
+                       ret = PTR_ERR(device->dev_tx_cq[i]);
+                       device->dev_tx_cq[i] = NULL;
                        goto out_cq;
+               }
 
-               if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+               ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+               if (ret)
                        goto out_cq;
 
-               if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+               ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+               if (ret)
                        goto out_cq;
        }
 
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
 {
        struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-       INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
        queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
 }
 
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
 {
        struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-       INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
        queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
 }
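
The underlying rule: a work item must be initialized before anything that can queue it exists. Here a completion could fire, and queue the work, the moment ib_create_cq() returned, racing with the INIT_WORK previously done inside the callback itself. A kernel-style sketch of the corrected ordering (names illustrative):

    #include <linux/workqueue.h>

    struct cq_ctx {
        struct work_struct work;
        /* ... per-CQ state ... */
    };

    static void cq_work_fn(struct work_struct *w)
    {
        /* drain and process completions */
    }

    static void cq_ctx_setup(struct cq_ctx *ctx)
    {
        /* Initialize exactly once, before the CQ (and hence the
         * callback) can exist -- never from the callback itself. */
        INIT_WORK(&ctx->work, cq_work_fn);
    }

    static void cq_callback(void *context)
    {
        struct cq_ctx *ctx = context;

        queue_work(system_wq, &ctx->work);  /* safe: work already initialized */
    }
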
 
index 846ccdd..d2965e4 100644 (file)
@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
                break;
 
        case EV_ABS:
+               input_alloc_absinfo(dev);
+               if (!dev->absinfo)
+                       return;
+
                __set_bit(code, dev->absbit);
                break;
 
index 75762d6..aa127ba 100644 (file)
@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
        }
 }
 
-static irqreturn_t zforce_interrupt(int irq, void *dev_id)
+static irqreturn_t zforce_irq(int irq, void *dev_id)
+{
+       struct zforce_ts *ts = dev_id;
+       struct i2c_client *client = ts->client;
+
+       if (ts->suspended && device_may_wakeup(&client->dev))
+               pm_wakeup_event(&client->dev, 500);
+
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
 {
        struct zforce_ts *ts = dev_id;
        struct i2c_client *client = ts->client;
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
        u8 *payload;
 
        /*
-        * When suspended, emit a wakeup signal if necessary and return.
+        * When still suspended, return.
         * Due to the level-interrupt we will get re-triggered later.
         */
        if (ts->suspended) {
-               if (device_may_wakeup(&client->dev))
-                       pm_wakeup_event(&client->dev, 500);
                msleep(20);
                return IRQ_HANDLED;
        }
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client,
         * Therefore we can trigger the interrupt anytime it is low and do
         * not need to limit it to the interrupt edge.
         */
-       ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
-                                       zforce_interrupt,
+       ret = devm_request_threaded_irq(&client->dev, client->irq,
+                                       zforce_irq, zforce_irq_thread,
                                        IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                        input_dev->name, ts);
        if (ret) {
index 82cec63..3ee78f0 100644 (file)
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
 static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
                                         int irq, int do_mask)
 {
-       int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */
-       int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */
+       /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+       int bitfield_width = 4;
+       int shift = 32 - (irq + 1) * bitfield_width;
 
        intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
                                      shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
 
 static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
 {
+       /* The SENSE register is assumed to be 32-bit. */
        int bitfield_width = p->config.sense_bitfield_width;
-       int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */
+       int shift = 32 - (irq + 1) * bitfield_width;
 
        dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
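
The old shift was only accidentally right: (7 - irq) * width equals 32 - (irq + 1) * width exactly when width is 4 (eight fields in 32 bits). With the configurable sense_bitfield_width the two diverge, as a quick check shows:

    #include <stdio.h>

    int main(void)
    {
        int width, irq;

        for (width = 2; width <= 4; width += 2)
            for (irq = 0; irq < 2; irq++)
                printf("width=%d irq=%d old=%d new=%d\n",
                       width, irq,
                       (7 - irq) * width,        /* assumes exactly 8 fields */
                       32 - (irq + 1) * width);  /* MSB-first, any width */
        return 0;
    }

For width 2, irq 0 this prints old=14 new=30: the old formula addressed the middle of the register instead of its top field.
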
 
index 2b46bf1..4c9852d 100644 (file)
@@ -421,9 +421,11 @@ out:
 
        if (watermark <= WATERMARK_METADATA) {
                SET_GC_MARK(b, GC_MARK_METADATA);
+               SET_GC_MOVE(b, 0);
                b->prio = BTREE_PRIO;
        } else {
                SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+               SET_GC_MOVE(b, 0);
                b->prio = INITIAL_PRIO;
        }
 
index 4beb55a..754f431 100644 (file)
@@ -197,7 +197,7 @@ struct bucket {
        uint8_t         disk_gen;
        uint8_t         last_gc; /* Most out of date gen in the btree */
        uint8_t         gc_gen;
-       uint16_t        gc_mark;
+       uint16_t        gc_mark; /* Bitfield used by GC. See below for field layout. */
 };
 
 /*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK,     struct bucket, gc_mark, 0, 2);
 #define GC_MARK_RECLAIMABLE    0
 #define GC_MARK_DIRTY          1
 #define GC_MARK_METADATA       2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 
 #include "journal.h"
 #include "stats.h"
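
With the new GC_MOVE bit, the 16-bit gc_mark packs three fields: bits 0-1 the mark, bits 2-14 the sector count (shrunk from 14 to 13 bits to make room), and bit 15 the move flag. Spelled out as plain shift/mask accessors (a sketch of what the BITMASK() macro generates; these names are invented here):

    #include <stdint.h>

    /* gc_mark layout: [15] move | [14:2] sectors_used | [1:0] mark */
    #define GC_MARK_OF(m)         ((m) & 0x3)
    #define GC_SECTORS_USED_OF(m) (((m) >> 2) & 0x1fff)  /* 13 bits */
    #define GC_MOVE_OF(m)         (((m) >> 15) & 0x1)

    static inline uint16_t gc_set_move(uint16_t m, unsigned v)
    {
        return (uint16_t)((m & ~(1u << 15)) | ((v & 1u) << 15));
    }
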
@@ -372,14 +373,14 @@ struct cached_dev {
        unsigned char           writeback_percent;
        unsigned                writeback_delay;
 
-       int                     writeback_rate_change;
-       int64_t                 writeback_rate_derivative;
        uint64_t                writeback_rate_target;
+       int64_t                 writeback_rate_proportional;
+       int64_t                 writeback_rate_derivative;
+       int64_t                 writeback_rate_change;
 
        unsigned                writeback_rate_update_seconds;
        unsigned                writeback_rate_d_term;
        unsigned                writeback_rate_p_term_inverse;
-       unsigned                writeback_rate_d_smooth;
 };
 
 enum alloc_watermarks {
@@ -445,7 +446,6 @@ struct cache {
         * call prio_write() to keep gens from wrapping.
         */
        uint8_t                 need_save_prio;
-       unsigned                gc_move_threshold;
 
        /*
         * If nonzero, we know we aren't going to find any buckets to invalidate
index 5e2765a..31bb53f 100644 (file)
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
                SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
                            GC_MARK_METADATA);
 
+       /* don't reclaim buckets to which writeback keys point */
+       rcu_read_lock();
+       for (i = 0; i < c->nr_uuids; i++) {
+               struct bcache_device *d = c->devices[i];
+               struct cached_dev *dc;
+               struct keybuf_key *w, *n;
+               unsigned j;
+
+               if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+                       continue;
+               dc = container_of(d, struct cached_dev, disk);
+
+               spin_lock(&dc->writeback_keys.lock);
+               rbtree_postorder_for_each_entry_safe(w, n,
+                                       &dc->writeback_keys.keys, node)
+                       for (j = 0; j < KEY_PTRS(&w->key); j++)
+                               SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+                                           GC_MARK_DIRTY);
+               spin_unlock(&dc->writeback_keys.lock);
+       }
+       rcu_read_unlock();
+
        for_each_cache(ca, c, i) {
                uint64_t *i;
 
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
                        if (KEY_START(k) > KEY_START(insert) + sectors_found)
                                goto check_failed;
 
-                       if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+                       if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+                           KEY_DIRTY(k) != KEY_DIRTY(replace_key))
                                goto check_failed;
 
                        /* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
        struct bkey     *replace_key;
 };
 
-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
 {
        struct btree_insert_op *op = container_of(b_op,
                                        struct btree_insert_op, op);
index 7c1275e..f2f0998 100644 (file)
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
        unsigned i;
 
        for (i = 0; i < KEY_PTRS(k); i++) {
-               struct cache *ca = PTR_CACHE(c, k, i);
                struct bucket *g = PTR_BUCKET(c, k, i);
 
-               if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+               if (GC_MOVE(g))
                        return true;
        }
 
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
 
 static void read_moving_endio(struct bio *bio, int error)
 {
+       struct bbio *b = container_of(bio, struct bbio, bio);
        struct moving_io *io = container_of(bio->bi_private,
                                            struct moving_io, cl);
 
        if (error)
                io->op.error = error;
+       else if (!KEY_DIRTY(&b->key) &&
+                ptr_stale(io->op.c, &b->key, 0)) {
+               io->op.error = -EINTR;
+       }
 
        bch_bbio_endio(io->op.c, bio, error, "reading data to move");
 }
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
                if (!w)
                        break;
 
+               if (ptr_stale(c, &w->key, 0)) {
+                       bch_keybuf_del(&c->moving_gc_keys, w);
+                       continue;
+               }
+
                io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                             GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
 
 static unsigned bucket_heap_top(struct cache *ca)
 {
-       return GC_SECTORS_USED(heap_peek(&ca->heap));
+       struct bucket *b;
+       return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
 }
 
 void bch_moving_gc(struct cache_set *c)
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
                        sectors_to_move -= GC_SECTORS_USED(b);
                }
 
-               ca->gc_move_threshold = bucket_heap_top(ca);
-
-               pr_debug("threshold %u", ca->gc_move_threshold);
+               while (heap_pop(&ca->heap, b, bucket_cmp))
+                       SET_GC_MOVE(b, 1);
        }
 
        mutex_unlock(&c->bucket_lock);
index dec15cd..c57bfa0 100644 (file)
@@ -1676,7 +1676,7 @@ err:
 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
 {
        return ca->sb.block_size        == c->sb.block_size &&
-               ca->sb.bucket_size      == c->sb.block_size &&
+               ca->sb.bucket_size      == c->sb.bucket_size &&
                ca->sb.nr_in_set        == c->sb.nr_in_set;
 }
 
index 80d4c2b..a1f8561 100644 (file)
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
 rw_attribute(writeback_rate_update_seconds);
 rw_attribute(writeback_rate_d_term);
 rw_attribute(writeback_rate_p_term_inverse);
-rw_attribute(writeback_rate_d_smooth);
 read_attribute(writeback_rate_debug);
 
 read_attribute(stripe_size);
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev)
        var_printf(writeback_running,   "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
-       sysfs_print(writeback_rate,     dc->writeback_rate.rate);
+       sysfs_hprint(writeback_rate,    dc->writeback_rate.rate << 9);
 
        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_d_term);
        var_print(writeback_rate_p_term_inverse);
-       var_print(writeback_rate_d_smooth);
 
        if (attr == &sysfs_writeback_rate_debug) {
+               char rate[20];
                char dirty[20];
-               char derivative[20];
                char target[20];
-               bch_hprint(dirty,
-                          bcache_dev_sectors_dirty(&dc->disk) << 9);
-               bch_hprint(derivative,  dc->writeback_rate_derivative << 9);
+               char proportional[20];
+               char derivative[20];
+               char change[20];
+               s64 next_io;
+
+               bch_hprint(rate,        dc->writeback_rate.rate << 9);
+               bch_hprint(dirty,       bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target,      dc->writeback_rate_target << 9);
+               bch_hprint(proportional,dc->writeback_rate_proportional << 9);
+               bch_hprint(derivative,  dc->writeback_rate_derivative << 9);
+               bch_hprint(change,      dc->writeback_rate_change << 9);
+
+               next_io = div64_s64(dc->writeback_rate.next - local_clock(),
+                                   NSEC_PER_MSEC);
 
                return sprintf(buf,
-                              "rate:\t\t%u\n"
-                              "change:\t\t%i\n"
+                              "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
+                              "target:\t\t%s\n"
+                              "proportional:\t%s\n"
                               "derivative:\t%s\n"
-                              "target:\t\t%s\n",
-                              dc->writeback_rate.rate,
-                              dc->writeback_rate_change,
-                              dirty, derivative, target);
+                              "change:\t\t%s/sec\n"
+                              "next io:\t%llims\n",
+                              rate, dirty, target, proportional,
+                              derivative, change, next_io);
        }
 
        sysfs_hprint(dirty_data,
@@ -189,6 +198,7 @@ STORE(__cached_dev)
        struct kobj_uevent_env *env;
 
 #define d_strtoul(var)         sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
 #define d_strtoi_h(var)                sysfs_hatoi(var, dc->var)
 
        sysfs_strtoul(data_csum,        dc->disk.data_csum);
@@ -197,16 +207,15 @@ STORE(__cached_dev)
        d_strtoul(writeback_metadata);
        d_strtoul(writeback_running);
        d_strtoul(writeback_delay);
-       sysfs_strtoul_clamp(writeback_rate,
-                           dc->writeback_rate.rate, 1, 1000000);
+
        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
 
-       d_strtoul(writeback_rate_update_seconds);
+       sysfs_strtoul_clamp(writeback_rate,
+                           dc->writeback_rate.rate, 1, INT_MAX);
+
+       d_strtoul_nonzero(writeback_rate_update_seconds);
        d_strtoul(writeback_rate_d_term);
-       d_strtoul(writeback_rate_p_term_inverse);
-       sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
-                           dc->writeback_rate_p_term_inverse, 1, INT_MAX);
-       d_strtoul(writeback_rate_d_smooth);
+       d_strtoul_nonzero(writeback_rate_p_term_inverse);
 
        d_strtoi_h(sequential_cutoff);
        d_strtoi_h(readahead);
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = {
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_d_term,
        &sysfs_writeback_rate_p_term_inverse,
-       &sysfs_writeback_rate_d_smooth,
        &sysfs_writeback_rate_debug,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
index 462214e..bb37618 100644 (file)
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 {
        uint64_t now = local_clock();
 
-       d->next += div_u64(done, d->rate);
+       d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+       if (time_before64(now + NSEC_PER_SEC, d->next))
+               d->next = now + NSEC_PER_SEC;
+
+       if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+               d->next = now - NSEC_PER_SEC * 2;
 
        return time_after64(d->next, now)
                ? div_u64(d->next - now, NSEC_PER_SEC / HZ)
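
The delay computation now works in nanoseconds and pins d->next into a
window around the current time; a hedged standalone sketch of the same
arithmetic (hypothetical helper name):

    /* Advance the deadline by done/rate seconds of work, then cap it at
     * 1s in the future (bounds the longest stall) and floor it at 2s in
     * the past (bounds how much idle credit can be banked).  Returns the
     * delay in jiffies. */
    static u64 next_delay(u64 *next, u64 now, u64 done, u64 rate)
    {
            *next += div_u64(done * NSEC_PER_SEC, rate);

            if (time_before64(now + NSEC_PER_SEC, *next))
                    *next = now + NSEC_PER_SEC;
            if (time_after64(now - NSEC_PER_SEC * 2, *next))
                    *next = now - NSEC_PER_SEC * 2;

            return time_after64(*next, now)
                    ? div_u64(*next - now, NSEC_PER_SEC / HZ)
                    : 0;
    }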
index 362c4b3..1030c60 100644 (file)
@@ -110,7 +110,7 @@ do {                                                                        \
        _r;                                                             \
 })
 
-#define heap_peek(h)   ((h)->size ? (h)->data[0] : NULL)
+#define heap_peek(h)   ((h)->used ? (h)->data[0] : NULL)
 
 #define heap_full(h)   ((h)->used == (h)->size)
 
index 99053b1..6c44fe0 100644 (file)
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
 
        /* PD controller */
 
-       int change = 0;
-       int64_t error;
        int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
        int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+       int64_t proportional = dirty - target;
+       int64_t change;
 
        dc->disk.sectors_dirty_last = dirty;
 
-       derivative *= dc->writeback_rate_d_term;
-       derivative = clamp(derivative, -dirty, dirty);
+       /* Scale to sectors per second */
 
-       derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
-                             dc->writeback_rate_d_smooth, 0);
+       proportional *= dc->writeback_rate_update_seconds;
+       proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
 
-       /* Avoid divide by zero */
-       if (!target)
-               goto out;
+       derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
 
-       error = div64_s64((dirty + derivative - target) << 8, target);
+       derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+                             (dc->writeback_rate_d_term /
+                              dc->writeback_rate_update_seconds) ?: 1, 0);
+
+       derivative *= dc->writeback_rate_d_term;
+       derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
 
-       change = div_s64((dc->writeback_rate.rate * error) >> 8,
-                        dc->writeback_rate_p_term_inverse);
+       change = proportional + derivative;
 
        /* Don't increase writeback rate if the device isn't keeping up */
        if (change > 0 &&
            time_after64(local_clock(),
-                        dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+                        dc->writeback_rate.next + NSEC_PER_MSEC))
                change = 0;
 
        dc->writeback_rate.rate =
-               clamp_t(int64_t, dc->writeback_rate.rate + change,
+               clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
                        1, NSEC_PER_MSEC);
-out:
+
+       dc->writeback_rate_proportional = proportional;
        dc->writeback_rate_derivative = derivative;
        dc->writeback_rate_change = change;
        dc->writeback_rate_target = target;
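
The rewritten controller is a plain PD loop on the dirty-sector count; a
hedged sketch of the core arithmetic (standalone form, hypothetical
parameter names, the EWMA smoothing of the derivative omitted):

    /* P term: distance of dirty from target, scaled by the update period
     * and 1/p_inverse.  D term: rate of change of dirty, scaled by
     * d_term/p_inverse.  Their sum is the writeback rate adjustment. */
    static s64 pd_change(s64 dirty, s64 target, s64 dirty_last,
                         s64 period, s64 p_inverse, s64 d_term)
    {
            s64 proportional = div_s64((dirty - target) * period, p_inverse);
            s64 derivative = div_s64(dirty - dirty_last, period);

            derivative = div_s64(derivative * d_term, p_inverse);
            return proportional + derivative;
    }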
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
-       uint64_t ret;
-
        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            !dc->writeback_percent)
                return 0;
 
-       ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
-
-       return min_t(uint64_t, ret, HZ);
+       return bch_next_delay(&dc->writeback_rate, sectors);
 }
 
 struct dirty_io {
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc)
                if (KEY_START(&w->key) != dc->last_read ||
                    jiffies_to_msecs(delay) > 50)
                        while (!kthread_should_stop() && delay)
-                               delay = schedule_timeout_interruptible(delay);
+                               delay = schedule_timeout_uninterruptible(delay);
 
                dc->last_read   = KEY_OFFSET(&w->key);
 
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg)
                        while (delay &&
                               !kthread_should_stop() &&
                               !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
-                               delay = schedule_timeout_interruptible(delay);
+                               delay = schedule_timeout_uninterruptible(delay);
                }
        }
 
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 
        bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
                           sectors_dirty_init_fn, 0);
+
+       dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
 }
 
 int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
        dc->writeback_delay             = 30;
        dc->writeback_rate.rate         = 1024;
 
-       dc->writeback_rate_update_seconds = 30;
-       dc->writeback_rate_d_term       = 16;
-       dc->writeback_rate_p_term_inverse = 64;
-       dc->writeback_rate_d_smooth     = 8;
+       dc->writeback_rate_update_seconds = 5;
+       dc->writeback_rate_d_term       = 30;
+       dc->writeback_rate_p_term_inverse = 6000;
 
        dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
                                              "bcache_writeback");
        if (IS_ERR(dc->writeback_thread))
                return PTR_ERR(dc->writeback_thread);
 
-       set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
-
        INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
index 187b1b7..4ced594 100644 (file)
@@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
        port = &(SLAVE_AD_INFO(slave).port);
 
-       // if slave is null, the whole port is not initialized
+       /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
                pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
                           slave->bond->dev->name, slave->dev->name);
                return;
        }
 
+       __get_state_machine_lock(port);
+
        port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
        port->actor_oper_port_key = port->actor_admin_port_key |=
                (__get_link_speed(port) << 1);
        pr_debug("Port %d changed speed\n", port->actor_port_number);
-       // there is no need to reselect a new aggregator, just signal the
-       // state machines to reinitialize
+       /* there is no need to reselect a new aggregator, just signal the
+        * state machines to reinitialize
+        */
        port->sm_vars |= AD_PORT_BEGIN;
+
+       __release_state_machine_lock(port);
 }
 
 /**
@@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
        port = &(SLAVE_AD_INFO(slave).port);
 
-       // if slave is null, the whole port is not initialized
+       /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
                pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
                           slave->bond->dev->name, slave->dev->name);
                return;
        }
 
+       __get_state_machine_lock(port);
+
        port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
        port->actor_oper_port_key = port->actor_admin_port_key |=
                __get_duplex(port);
        pr_debug("Port %d changed duplex\n", port->actor_port_number);
-       // there is no need to reselect a new aggregator, just signal the
-       // state machines to reinitialize
+       /* there is no need to reselect a new aggregator, just signal the
+        * state machines to reinitialize
+        */
        port->sm_vars |= AD_PORT_BEGIN;
+
+       __release_state_machine_lock(port);
 }
 
 /**
@@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
        port = &(SLAVE_AD_INFO(slave).port);
 
-       // if slave is null, the whole port is not initialized
+       /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
                pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
                           slave->bond->dev->name, slave->dev->name);
                return;
        }
 
-       // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
-       // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
+       __get_state_machine_lock(port);
+       /* on link down we are zeroing duplex and speed since
+        * some of the adaptors (ce1000.lan) report full duplex/speed
+        * instead of N/A(duplex) / 0(speed).
+        *
+        * on link up we are forcing a recheck of the duplex and speed since
+        * some of the adaptors (ce1000.lan) report stale values.
+        */
        if (link == BOND_LINK_UP) {
                port->is_enabled = true;
                port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
                port->actor_oper_port_key = (port->actor_admin_port_key &=
                                             ~AD_SPEED_KEY_BITS);
        }
-       //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
-       // there is no need to reselect a new aggregator, just signal the
-       // state machines to reinitialize
+       pr_debug("Port %d changed link status to %s",
+               port->actor_port_number,
+               (link == BOND_LINK_UP) ? "UP" : "DOWN");
+       /* there is no need to reselect a new aggregator, just signal the
+        * state machines to reinitialize
+        */
        port->sm_vars |= AD_PORT_BEGIN;
+
+       __release_state_machine_lock(port);
 }
 
 /*
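
All three event handlers above now share one pattern, roughly (using the
existing bond_3ad helpers):

    __get_state_machine_lock(port);     /* serialize against the periodic
                                         * state machine handler */
    /* ... update the actor admin/oper port keys ... */
    port->sm_vars |= AD_PORT_BEGIN;     /* ask the machines to re-init */
    __release_state_machine_lock(port);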
index 5f9a7ad..8aeec0b 100644 (file)
@@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev)
                        usb_unanchor_urb(urb);
                        usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
                                          urb->transfer_dma);
+                       usb_free_urb(urb);
                        break;
                }
 
@@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
         * allowed (MAX_TX_URBS).
         */
        if (!context) {
-               usb_unanchor_urb(urb);
                usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
+               usb_free_urb(urb);
 
                netdev_warn(netdev, "couldn't find free context\n");
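
Both hunks pair every allocation with a release on each exit path; a
hedged sketch of the pattern in a hypothetical driver context (URB setup
via usb_fill_bulk_urb() omitted for brevity):

    #include <linux/usb.h>

    static int demo_tx(struct usb_device *udev, size_t size)
    {
            struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
            void *buf;

            if (!urb)
                    return -ENOMEM;
            buf = usb_alloc_coherent(udev, size, GFP_ATOMIC,
                                     &urb->transfer_dma);
            if (!buf) {
                    usb_free_urb(urb);
                    return -ENOMEM;
            }
            if (usb_submit_urb(urb, GFP_ATOMIC)) {
                    usb_free_coherent(udev, size, buf, urb->transfer_dma);
                    usb_free_urb(urb);  /* the reference that was leaked */
                    return -EIO;
            }
            return 0;   /* the completion handler frees on success */
    }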
 
index 8ee9d15..263dd92 100644 (file)
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
        /* set LED in default state (end of init phase) */
        pcan_usb_pro_set_led(dev, 0, 1);
 
+       kfree(bi);
+       kfree(fi);
+
        return 0;
 
  err_out:
index b2ffad1..248baf6 100644 (file)
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
        /* Make sure pointer to data buffer is set */
        wmb();
 
+       skb_tx_timestamp(skb);
+
        *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
 
        /* Increment index to point to the next BD */
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
        arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
-       skb_tx_timestamp(skb);
-
        return NETDEV_TX_OK;
 }
 
index a36a760..2980175 100644 (file)
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
         * Mask some pcie error bits
         */
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
-       pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
-       data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
-       pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+       if (pos) {
+               pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+               data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+               pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+       }
        /* clear error status */
        pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
                        PCI_EXP_DEVSTA_NFED |
index efa8a15..3dc2537 100644 (file)
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
                return -EINVAL;
        }
 
-       BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+       DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
 
        *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
 
index f3dd93b..15a66e4 100644 (file)
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 {
        u32 base = (u32) mapping & 0xffffffff;
 
-       return (base > 0xffffdcc0) && (base + len + 8 < base);
+       return base + len + 8 < base;
 }
 
 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
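
The simplified test catches exactly the 32-bit wraparound case.  Worked
example: base = 0xffffff00 with len = 0x200 gives base + len + 8 = 0x108
in 32-bit arithmetic, which is less than base, so the buffer crosses a
4 GB boundary.  The old base > 0xffffdcc0 pre-filter missed wraps caused
by large (e.g. jumbo-frame sized) fragments starting below that address.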
index 6c93088..56e0415 100644 (file)
@@ -228,6 +228,25 @@ struct tp_params {
 
        uint32_t dack_re;            /* DACK timer resolution */
        unsigned short tx_modq[NCHAN];  /* channel to modulation queue map */
+
+       u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
+       u32 ingress_config;             /* cached TP_INGRESS_CONFIG */
+
+       /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
+        * subset of the set of fields which may be present in the Compressed
+        * Filter Tuple portion of filters and TCP TCB connections.  The
+        * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+        * Since a variable number of fields may or may not be present, their
+        * shifted field positions within the Compressed Filter Tuple may
+        * vary, or not even be present if the field isn't selected in
+        * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
+        * places we store their offsets here, or a -1 if the field isn't
+        * present.
+        */
+       int vlan_shift;
+       int vnic_shift;
+       int port_shift;
+       int protocol_shift;
 };
 
 struct vpd_params {
@@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
               const u8 *fw_data, unsigned int fw_size,
               struct fw_hdr *card_fw, enum dev_state state, int *reset);
 int t4_prep_adapter(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
index d6b12e0..fff02ed 100644 (file)
@@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
                stid += t->stid_base;
-               t->stids_in_use++;
+               /* IPv6 requires a max of 520 bits, or 16 cells, in the TCAM.
+                * This is equivalent to 4 TIDs. With CLIP enabled it
+                * needs 2 TIDs.
+                */
+               if (family == PF_INET)
+                       t->stids_in_use++;
+               else
+                       t->stids_in_use += 4;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
@@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
        }
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
-               stid += t->stid_base;
+               stid -= t->nstids;
+               stid += t->sftid_base;
                t->stids_in_use++;
        }
        spin_unlock_bh(&t->stid_lock);
@@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
  */
 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
 {
-       stid -= t->stid_base;
+       /* Is it a server filter TID? */
+       if (t->nsftids && (stid >= t->sftid_base)) {
+               stid -= t->sftid_base;
+               stid += t->nstids;
+       } else {
+               stid -= t->stid_base;
+       }
+
        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET)
                __clear_bit(stid, t->stid_bmap);
        else
                bitmap_release_region(t->stid_bmap, stid, 2);
        t->stid_tab[stid].data = NULL;
-       t->stids_in_use--;
+       if (family == PF_INET)
+               t->stids_in_use--;
+       else
+               t->stids_in_use -= 4;
        spin_unlock_bh(&t->stid_lock);
 }
 EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
        size_t size;
        unsigned int stid_bmap_size;
        unsigned int natids = t->natids;
+       struct adapter *adap = container_of(t, struct adapter, tids);
 
        stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
        size = t->ntids * sizeof(*t->tid_tab) +
@@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
                t->afree = t->atid_tab;
        }
        bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+       /* Reserve stid 0 for T4/T5 adapters */
+       if (!t->stid_base &&
+           (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+               __set_bit(0, t->stid_bmap);
+
        return 0;
 }
 
@@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
                        t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
                        (adap->fn * 4));
-       lli.filt_mode = adap->filter_mode;
+       lli.filt_mode = adap->params.tp.vlan_pri_map;
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
                lli.tx_modq[i] = i;
@@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
        adap = netdev2adap(dev);
 
        /* Adjust stid to correct filter index */
-       stid -= adap->tids.nstids;
+       stid -= adap->tids.sftid_base;
        stid += adap->tids.nftids;
 
        /* Check to make sure the filter requested is writable ...
@@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                        f->fs.val.lip[i] = val[i];
                        f->fs.mask.lip[i] = ~0;
                }
-               if (adap->filter_mode & F_PORT) {
+               if (adap->params.tp.vlan_pri_map & F_PORT) {
                        f->fs.val.iport = port;
                        f->fs.mask.iport = mask;
                }
        }
 
+       if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+               f->fs.val.proto = IPPROTO_TCP;
+               f->fs.mask.proto = ~0;
+       }
+
        f->fs.dirsteer = 1;
        f->fs.iq = queue;
        /* Mark filter as locked */
@@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
        adap = netdev2adap(dev);
 
        /* Adjust stid to correct filter index */
-       stid -= adap->tids.nstids;
+       stid -= adap->tids.sftid_base;
        stid += adap->tids.nftids;
 
        f = &adap->tids.ftid_tab[stid];
@@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
        enum dev_state state;
        u32 params[7], val[7];
        struct fw_caps_config_cmd caps_cmd;
-       int reset = 1, j;
+       int reset = 1;
 
        /*
         * Contact FW, advertising Master capability (and potentially forcing
@@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
        /*
         * These are finalized by FW initialization, load their values now.
         */
-       v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
-       adap->params.tp.tre = TIMERRESOLUTION_GET(v);
-       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                     adap->params.b_wnd);
 
-       /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
-       for (j = 0; j < NCHAN; j++)
-               adap->params.tp.tx_modq[j] = j;
-
-       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-                        &adap->filter_mode, 1,
-                        TP_VLAN_PRI_MAP);
-
+       t4_init_tp_params(adap);
        adap->flags |= FW_OK;
        return 0;
 
index 6f21f24..4dd0a82 100644 (file)
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
 
 static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
 {
-       stid -= t->stid_base;
+       /* Is it a server filter TID? */
+       if (t->nsftids && (stid >= t->sftid_base)) {
+               stid -= t->sftid_base;
+               stid += t->nstids;
+       } else {
+               stid -= t->stid_base;
+       }
+
        return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
 }
 
index 2987809..cb05be9 100644 (file)
@@ -45,6 +45,7 @@
 #include "l2t.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
+#include "t4_regs.h"
 
 #define VLAN_NONE 0xfff
 
@@ -411,6 +412,40 @@ done:
 }
 EXPORT_SYMBOL(cxgb4_l2t_get);
 
+u64 cxgb4_select_ntuple(struct net_device *dev,
+                       const struct l2t_entry *l2t)
+{
+       struct adapter *adap = netdev2adap(dev);
+       struct tp_params *tp = &adap->params.tp;
+       u64 ntuple = 0;
+
+       /* Initialize each of the fields we care about that are present
+        * in the Compressed Filter Tuple.
+        */
+       if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
+               ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+
+       if (tp->port_shift >= 0)
+               ntuple |= (u64)l2t->lport << tp->port_shift;
+
+       if (tp->protocol_shift >= 0)
+               ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
+
+       if (tp->vnic_shift >= 0) {
+               u32 viid = cxgb4_port_viid(dev);
+               u32 vf = FW_VIID_VIN_GET(viid);
+               u32 pf = FW_VIID_PFN_GET(viid);
+               u32 vld = FW_VIID_VIVLD_GET(viid);
+
+               ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
+                               V_FT_VNID_ID_PF(pf) |
+                               V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+       }
+
+       return ntuple;
+}
+EXPORT_SYMBOL(cxgb4_select_ntuple);
+
 /*
  * Called when address resolution fails for an L2T entry to handle packets
  * on the arpq head.  If a packet specifies a failure handler it is invoked,
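
A hedged usage sketch (hypothetical caller, illustrative field name): an
offload driver building a connection request folds the result into the
compressed-filter word of the request:

    u64 ntuple = cxgb4_select_ntuple(netdev, e);    /* e: struct l2t_entry * */
    req->opt2 = cpu_to_be32((u32)ntuple);           /* illustrative field */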
index 108c0f1..85eb5c7 100644 (file)
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
 struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
                                const struct net_device *physdev,
                                unsigned int priority);
-
+u64 cxgb4_select_ntuple(struct net_device *dev,
+                       const struct l2t_entry *l2t);
 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
 int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
index 74a6fce..e1413ea 100644 (file)
@@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
        return 0;
 }
 
+/**
+ *      t4_init_tp_params - initialize adap->params.tp
+ *      @adap: the adapter
+ *
+ *      Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+       int chan;
+       u32 v;
+
+       v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+       adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+
+       /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+       for (chan = 0; chan < NCHAN; chan++)
+               adap->params.tp.tx_modq[chan] = chan;
+
+       /* Cache the adapter's Compressed Filter Mode and global Ingress
+        * Configuration.
+        */
+       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+                        &adap->params.tp.vlan_pri_map, 1,
+                        TP_VLAN_PRI_MAP);
+       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+                        &adap->params.tp.ingress_config, 1,
+                        TP_INGRESS_CONFIG);
+
+       /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+        * shift positions of several elements of the Compressed Filter Tuple
+        * for this adapter which we need frequently ...
+        */
+       adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+       adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+       adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+       adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+                                                              F_PROTOCOL);
+
+       /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+        * represents the presence of an Outer VLAN instead of a VNIC ID.
+        */
+       if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+               adap->params.tp.vnic_shift = -1;
+
+       return 0;
+}
+
+/**
+ *      t4_filter_field_shift - calculate filter field shift
+ *      @adap: the adapter
+ *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ *      Return the shift position of a filter field within the Compressed
+ *      Filter Tuple.  The filter field is specified via its selection bit
+ *      within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+       unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+       unsigned int sel;
+       int field_shift;
+
+       if ((filter_mode & filter_sel) == 0)
+               return -1;
+
+       for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+               switch (filter_mode & sel) {
+               case F_FCOE:
+                       field_shift += W_FT_FCOE;
+                       break;
+               case F_PORT:
+                       field_shift += W_FT_PORT;
+                       break;
+               case F_VNIC_ID:
+                       field_shift += W_FT_VNIC_ID;
+                       break;
+               case F_VLAN:
+                       field_shift += W_FT_VLAN;
+                       break;
+               case F_TOS:
+                       field_shift += W_FT_TOS;
+                       break;
+               case F_PROTOCOL:
+                       field_shift += W_FT_PROTOCOL;
+                       break;
+               case F_ETHERTYPE:
+                       field_shift += W_FT_ETHERTYPE;
+                       break;
+               case F_MACMATCH:
+                       field_shift += W_FT_MACMATCH;
+                       break;
+               case F_MPSHITTYPE:
+                       field_shift += W_FT_MPSHITTYPE;
+                       break;
+               case F_FRAGMENTATION:
+                       field_shift += W_FT_FRAGMENTATION;
+                       break;
+               }
+       }
+       return field_shift;
+}
+
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
 {
        u8 addr[6];
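
A worked example against the W_FT_* widths defined below: if the filter
mode selects only F_PORT, F_VLAN and F_PROTOCOL, then

    t4_filter_field_shift(adap, F_PORT)     == 0
    t4_filter_field_shift(adap, F_VLAN)     == W_FT_PORT               /*  3 */
    t4_filter_field_shift(adap, F_PROTOCOL) == W_FT_PORT + W_FT_VLAN   /* 20 */

and any field not selected in the mode yields -1.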
index 0a8205d..4082522 100644 (file)
 
 #define A_TP_TX_SCHED_PCMD 0x25
 
+#define S_VNIC    11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC    V_VNIC(1U)
+
+#define S_FRAGMENTATION    9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION    V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE    8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE    V_MPSHITTYPE(1U)
+
+#define S_MACMATCH    7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH    V_MACMATCH(1U)
+
+#define S_ETHERTYPE    6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE    V_ETHERTYPE(1U)
+
+#define S_PROTOCOL    5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL    V_PROTOCOL(1U)
+
+#define S_TOS    4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS    V_TOS(1U)
+
+#define S_VLAN    3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN    V_VLAN(1U)
+
+#define S_VNIC_ID    2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID    V_VNIC_ID(1U)
+
 #define S_PORT    1
 #define V_PORT(x) ((x) << S_PORT)
 #define F_PORT    V_PORT(1U)
 
+#define S_FCOE    0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE    V_FCOE(1U)
+
 #define NUM_MPS_CLS_SRAM_L_INSTANCES 336
 #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
 
 #define V_CHIPID(x) ((x) << S_CHIPID)
 #define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
 
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters.  Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present.  These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE                       1
+#define W_FT_PORT                       3
+#define W_FT_VNIC_ID                    17
+#define W_FT_VLAN                       17
+#define W_FT_TOS                        8
+#define W_FT_PROTOCOL                   8
+#define W_FT_ETHERTYPE                  16
+#define W_FT_MACMATCH                   9
+#define W_FT_MPSHITTYPE                 3
+#define W_FT_FRAGMENTATION              1
+
+/* Some of the Compressed Filter Tuple fields have internal structure.  These
+ * bit shifts/masks describe those structures.  All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple.
+ */
+#define S_FT_VLAN_VLD                   16
+#define V_FT_VLAN_VLD(x)                ((x) << S_FT_VLAN_VLD)
+#define F_FT_VLAN_VLD                   V_FT_VLAN_VLD(1U)
+
+#define S_FT_VNID_ID_VF                 0
+#define V_FT_VNID_ID_VF(x)              ((x) << S_FT_VNID_ID_VF)
+
+#define S_FT_VNID_ID_PF                 7
+#define V_FT_VNID_ID_PF(x)              ((x) << S_FT_VNID_ID_PF)
+
+#define S_FT_VNID_ID_VLD                16
+#define V_FT_VNID_ID_VLD(x)             ((x) << S_FT_VNID_ID_VLD)
+
 #endif /* __T4_REGS_H */
index e7c8b74..45b8b22 100644 (file)
@@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(bdp, fep);
 
+       skb_tx_timestamp(skb);
+
        fep->cur_tx = bdp;
 
        if (fep->cur_tx == fep->dirty_tx)
@@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        /* Trigger transmission start */
        writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
-       skb_tx_timestamp(skb);
-
        return NETDEV_TX_OK;
 }
 
@@ -2049,6 +2049,8 @@ static void fec_reset_phy(struct platform_device *pdev)
        int err, phy_reset;
        int msec = 1;
        struct device_node *np = pdev->dev.of_node;
+       enum of_gpio_flags flags;
+       bool port;
 
        if (!np)
                return;
@@ -2058,18 +2060,22 @@ static void fec_reset_phy(struct platform_device *pdev)
        if (msec > 1000)
                msec = 1;
 
-       phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+       phy_reset = of_get_named_gpio_flags(np, "phy-reset-gpios", 0, &flags);
        if (!gpio_is_valid(phy_reset))
                return;
 
-       err = devm_gpio_request_one(&pdev->dev, phy_reset,
-                                   GPIOF_OUT_INIT_LOW, "phy-reset");
+       if (flags & OF_GPIO_ACTIVE_LOW)
+               port = GPIOF_OUT_INIT_LOW;
+       else
+               port = GPIOF_OUT_INIT_HIGH;
+
+       err = devm_gpio_request_one(&pdev->dev, phy_reset, port, "phy-reset");
        if (err) {
                dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
                return;
        }
        msleep(msec);
-       gpio_set_value(phy_reset, 1);
+       gpio_set_value(phy_reset, !port);
 }
 #else /* CONFIG_OF */
 static void fec_reset_phy(struct platform_device *pdev)
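
A hedged sketch of the polarity handling (device-tree property as in the
hunk, surrounding variables hypothetical): request the reset line at its
asserted level, wait, then deassert:

    enum of_gpio_flags flags;
    int gpio = of_get_named_gpio_flags(np, "phy-reset-gpios", 0, &flags);
    bool low = flags & OF_GPIO_ACTIVE_LOW;

    devm_gpio_request_one(dev, gpio,
                          low ? GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
                          "phy-reset");              /* drive asserted level */
    msleep(msec);
    gpio_set_value(gpio, low ? 1 : 0);               /* deassert reset */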
index 895450e..ff2d806 100644 (file)
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
        e1000_release_phy_80003es2lan(hw);
 
        /* Disable IBIST slave mode (far-end loopback) */
-       e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
-                                       &kum_reg_data);
+       ret_val =
+           e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+                                           &kum_reg_data);
+       if (ret_val)
+               return ret_val;
        kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
        e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
                                         kum_reg_data);
index 8d3945a..c30d41d 100644 (file)
@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 static int e1000_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
 
        return __e1000_resume(pdev);
 }
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 static int e1000_runtime_suspend(struct device *dev)
index da2be59..20e71f4 100644 (file)
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
                 * it across the board.
                 */
                ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
-               if (ret_val)
+               if (ret_val) {
                        /* If the first read fails, another entity may have
                         * ownership of the resources, wait and try again to
                         * see if they have relinquished the resources yet.
                         */
-                       udelay(usec_interval);
+                       if (usec_interval >= 1000)
+                               msleep(usec_interval / 1000);
+                       else
+                               udelay(usec_interval);
+               }
                ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
                if (ret_val)
                        break;
                if (phy_status & BMSR_LSTATUS)
                        break;
                if (usec_interval >= 1000)
-                       mdelay(usec_interval / 1000);
+                       msleep(usec_interval / 1000);
                else
                        udelay(usec_interval);
        }
index d6f0c0d..72084f7 100644 (file)
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
 {
        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
        int err;
+#ifdef CONFIG_PCI_IOV
        u32 current_flags = adapter->flags;
+#endif
 
        err = ixgbe_disable_sriov(adapter);
 
index 7354960..c4eeb69 100644 (file)
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
                        if (time_is_before_jiffies(end))
                                ++timedout;
                } else {
+                       /* wait_event_timeout does not guarantee a delay of at
+                        * least one whole jiffy, so the timeout must be no less
+                        * than two.
+                        */
+                       if (timeout < 2)
+                               timeout = 2;
                        wait_event_timeout(dev->smi_busy_wait,
                                           orion_mdio_smi_is_done(dev),
                                           timeout);
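
The same rounding hazard applies anywhere a millisecond timeout is
converted to jiffies; a hedged illustration (hypothetical variable):

    /* With HZ=100 one jiffy is 10ms and the first jiffy may expire almost
     * immediately, so a computed timeout of 0 or 1 jiffies guarantees no
     * real waiting time at all. */
    unsigned long t = msecs_to_jiffies(poll_ms);
    if (t < 2)
            t = 2;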
index 7692dfd..cc68657 100644 (file)
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
        u32 seq_number;
        u8 vhdr_len = 0;
 
-       if (unlikely(ring > adapter->max_rds_rings))
+       if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;
 
        rds_ring = &recv_ctx->rds_rings[ring];
 
        index = netxen_get_lro_sts_refhandle(sts_data0);
-       if (unlikely(index > rds_ring->num_desc))
+       if (unlikely(index >= rds_ring->num_desc))
                return NULL;
 
        buffer = &rds_ring->rx_buf_arr[index];
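
Both checks are classic off-by-one fixes: an array of num_desc entries has
valid indices 0 .. num_desc - 1, so the reject test must be

    if (unlikely(index >= rds_ring->num_desc))  /* == num_desc is past the end */
            return NULL;

and likewise ring >= adapter->max_rds_rings above.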
index b1cb0ff..6055d39 100644 (file)
@@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data)
 
        qlcnic_83xx_poll_process_aen(adapter);
 
-       if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
-               ahw->diag_cnt++;
+       if (ahw->diag_test) {
+               if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
+                       ahw->diag_cnt++;
                qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
                return IRQ_HANDLED;
        }
@@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
        }
 
        if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
-               /* disable and free mailbox interrupt */
-               if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
-                       qlcnic_83xx_enable_mbx_poll(adapter);
-                       qlcnic_83xx_free_mbx_intr(adapter);
-               }
                adapter->ahw->loopback_state = 0;
                adapter->ahw->hw_ops->setup_link_event(adapter, 1);
        }
@@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_host_sds_ring *sds_ring;
-       int ring, err;
+       int ring;
 
        clear_bit(__QLCNIC_DEV_UP, &adapter->state);
        if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
                for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
                        sds_ring = &adapter->recv_ctx->sds_rings[ring];
-                       qlcnic_83xx_disable_intr(adapter, sds_ring);
-                       if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-                               qlcnic_83xx_enable_mbx_poll(adapter);
+                       if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                               qlcnic_83xx_disable_intr(adapter, sds_ring);
                }
        }
 
        qlcnic_fw_destroy_ctx(adapter);
        qlcnic_detach(adapter);
 
-       if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
-               if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
-                       err = qlcnic_83xx_setup_mbx_intr(adapter);
-                       qlcnic_83xx_disable_mbx_poll(adapter);
-                       if (err) {
-                               dev_err(&adapter->pdev->dev,
-                                       "%s: failed to setup mbx interrupt\n",
-                                       __func__);
-                               goto out;
-                       }
-               }
-       }
        adapter->ahw->diag_test = 0;
        adapter->drv_sds_rings = drv_sds_rings;
 
@@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
        if (netif_running(netdev))
                __qlcnic_up(adapter, netdev);
 
-       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
-           !(adapter->flags & QLCNIC_MSIX_ENABLED))
-               qlcnic_83xx_disable_mbx_poll(adapter);
 out:
        netif_device_attach(netdev);
 }
@@ -3754,6 +3734,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
        return;
 }
 
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 offset;
+
+       offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+       dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
+                readl(ahw->pci_base0 + offset),
+                QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+                QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+                QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
 static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
 {
        struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
@@ -3798,6 +3791,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
                                __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
                                ahw->op_mode);
                        clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+                       qlcnic_dump_mailbox_registers(adapter);
+                       qlcnic_83xx_get_mbx_data(adapter, cmd);
                        qlcnic_dump_mbx(adapter, cmd);
                        qlcnic_83xx_idc_request_reset(adapter,
                                                      QLCNIC_FORCE_FW_DUMP_KEY);
index 4cae6ca..a6a3350 100644 (file)
@@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
                                               pci_channel_state_t);
 pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
 void qlcnic_83xx_io_resume(struct pci_dev *);
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
 #endif
index 89208e5..918e18d 100644 (file)
@@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
        adapter->ahw->idc.err_code = -EIO;
        dev_err(&adapter->pdev->dev,
                "%s: Device in unknown state\n", __func__);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
        return 0;
 }
 
@@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_mailbox *mbx = ahw->mailbox;
        int ret = 0;
-       u32 owner;
        u32 val;
 
        /* Perform NIC configuration based ready state entry actions */
@@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
                        set_bit(__QLCNIC_RESETTING, &adapter->state);
                        qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
                }  else {
-                       owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
-                       if (ahw->pci_func == owner)
-                               qlcnic_dump_fw(adapter);
+                       netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+                                   __func__);
+                       qlcnic_83xx_idc_enter_failed_state(adapter, 1);
                }
                return -EIO;
        }
@@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
        return 0;
 }
 
-static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
 {
-       dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 val, owner;
+
+       val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+       if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+               owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+               if (ahw->pci_func == owner) {
+                       qlcnic_83xx_stop_hw(adapter);
+                       qlcnic_dump_fw(adapter);
+               }
+       }
+
+       netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
+                   __func__);
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
-       adapter->ahw->idc.err_code = -EIO;
+       ahw->idc.err_code = -EIO;
 
-       return 0;
+       return;
 }
 
 static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
@@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
        adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
        qlcnic_83xx_periodic_tasks(adapter);
 
-       /* Do not reschedule if firmaware is in hanged state and auto
-        * recovery is disabled
-        */
-       if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
-               return;
-
        /* Re-schedule the function */
        if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
                qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
        }
 
        val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
-       if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) ||
-           !qlcnic_auto_fw_reset) {
-               dev_err(&adapter->pdev->dev,
-                       "%s:failed, device in non reset mode\n", __func__);
+       if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+               netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+                           __func__);
+               qlcnic_83xx_idc_enter_failed_state(adapter, 0);
                qlcnic_83xx_unlock_driver(adapter);
                return;
        }
@@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
        if (size & 0xF)
                size = (size + 16) & ~0xF;
 
-       p_cache = kzalloc(size, GFP_KERNEL);
+       p_cache = vzalloc(size);
        if (p_cache == NULL)
                return -ENOMEM;
 
        ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
                                                size / sizeof(u32));
        if (ret) {
-               kfree(p_cache);
+               vfree(p_cache);
                return ret;
        }
        /* 16 byte write to MS memory */
        ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
                                          size / 16);
        if (ret) {
-               kfree(p_cache);
+               vfree(p_cache);
                return ret;
        }
-       kfree(p_cache);
+       vfree(p_cache);
 
        return ret;
 }
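
The allocator switch matters because the bootloader image can be large; a
hedged sketch of the pattern:

    /* kmalloc needs physically contiguous pages and can fail for big
     * buffers under fragmentation; vzalloc only needs the range to be
     * virtually contiguous.  The free routine must match the allocator. */
    void *p = vzalloc(size);
    if (!p)
            return -ENOMEM;
    /* ... fill and consume p ... */
    vfree(p);       /* not kfree() */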
@@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
        p_dev->ahw->reset.seq_index = index;
 }
 
-static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
 {
        p_dev->ahw->reset.seq_index = 0;
 
@@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
        val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
        if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
                qlcnic_dump_fw(adapter);
+
+       if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+               netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+                           __func__);
+               qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+               return err;
+       }
+
        qlcnic_83xx_init_hw(adapter);
 
        if (qlcnic_83xx_copy_bootloader(adapter))
@@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
                ahw->nic_mode = QLCNIC_DEFAULT_MODE;
                adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
                ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
-               adapter->max_sds_rings = ahw->max_rx_ques;
-               adapter->max_tx_rings = ahw->max_tx_ques;
+               adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+               adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
        } else {
                return -EIO;
        }
index b36c02f..e3be276 100644 (file)
@@ -667,30 +667,25 @@ qlcnic_set_ringparam(struct net_device *dev,
 static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
                                      u8 rx_ring, u8 tx_ring)
 {
+       if (rx_ring == 0 || tx_ring == 0)
+               return -EINVAL;
+
        if (rx_ring != 0) {
                if (rx_ring > adapter->max_sds_rings) {
-                       netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+                       netdev_err(adapter->netdev,
+                                  "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
                                   rx_ring, adapter->max_sds_rings);
                        return -EINVAL;
                }
        }
 
         if (tx_ring != 0) {
-               if (qlcnic_82xx_check(adapter) &&
-                   (tx_ring > adapter->max_tx_rings)) {
+               if (tx_ring > adapter->max_tx_rings) {
                        netdev_err(adapter->netdev,
                                   "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
                                   tx_ring, adapter->max_tx_rings);
                        return -EINVAL;
                }
-
-               if (qlcnic_83xx_check(adapter) &&
-                   (tx_ring > QLCNIC_SINGLE_RING)) {
-                       netdev_err(adapter->netdev,
-                                  "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
-                                  tx_ring, QLCNIC_SINGLE_RING);
-                        return -EINVAL;
-               }
        }
 
        return 0;
@@ -948,6 +943,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_cmd_args cmd;
        int ret, drv_sds_rings = adapter->drv_sds_rings;
+       int drv_tx_rings = adapter->drv_tx_rings;
 
        if (qlcnic_83xx_check(adapter))
                return qlcnic_83xx_interrupt_test(netdev);
@@ -980,6 +976,7 @@ free_diag_res:
 
 clear_diag_irq:
        adapter->drv_sds_rings = drv_sds_rings;
+       adapter->drv_tx_rings = drv_tx_rings;
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
 
        return ret;
index 0149c94..eda6c69 100644 (file)
@@ -687,17 +687,11 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
        if (adapter->ahw->linkup && !linkup) {
                netdev_info(netdev, "NIC Link is down\n");
                adapter->ahw->linkup = 0;
-               if (netif_running(netdev)) {
-                       netif_carrier_off(netdev);
-                       netif_tx_stop_all_queues(netdev);
-               }
+               netif_carrier_off(netdev);
        } else if (!adapter->ahw->linkup && linkup) {
                netdev_info(netdev, "NIC Link is up\n");
                adapter->ahw->linkup = 1;
-               if (netif_running(netdev)) {
-                       netif_carrier_on(netdev);
-                       netif_wake_queue(netdev);
-               }
+               netif_carrier_on(netdev);
        }
 }
 
index 05c1eef..2c8cac0 100644 (file)
@@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
        } else {
                adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
                adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
+               adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
                adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
        }
 
@@ -1940,7 +1941,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
        qlcnic_detach(adapter);
 
        adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
-       adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
        adapter->ahw->diag_test = test;
        adapter->ahw->linkup = 0;
 
index 8a7a23a..797b56a 100644 (file)
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;
 
-       if (netif_msg_hw(priv)) {
-               if (priv->dma_cap.time_stamp) {
-                       pr_debug("IEEE 1588-2002 Time Stamp supported\n");
-                       priv->adv_ts = 0;
-               }
-               if (priv->dma_cap.atime_stamp && priv->extend_desc) {
-                       pr_debug
-                           ("IEEE 1588-2008 Advanced Time Stamp supported\n");
-                       priv->adv_ts = 1;
-               }
-       }
+       priv->adv_ts = 0;
+       if (priv->dma_cap.atime_stamp && priv->extend_desc)
+               priv->adv_ts = 1;
+
+       if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
+               pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+
+       if (netif_msg_hw(priv) && priv->adv_ts)
+               pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
 
        priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
index b8b0eee..7680581 100644 (file)
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
 
        priv->hw->ptp->config_addend(priv->ioaddr, addend);
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
        return 0;
 }
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
 
        priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
        return 0;
 }
index 5120d9c..5330fd2 100644 (file)
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
                /* set speed_in input in case RMII mode is used in 100Mbps */
                if (phy->speed == 100)
                        mac_control |= BIT(15);
+               else if (phy->speed == 10)
+                       mac_control |= BIT(18); /* In Band mode */
 
                *link = true;
        } else {
@@ -2106,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev)
        while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
                for (i = res->start; i <= res->end; i++) {
                        if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
-                                            dev_name(priv->dev), priv)) {
+                                            dev_name(&pdev->dev), priv)) {
                                dev_err(priv->dev, "error attaching irq\n");
                                goto clean_ale_ret;
                        }
index 3169252..5d78c1d 100644 (file)
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case HDLCDRVCTL_CALIBRATE:
                if(!capable(CAP_SYS_RAWIO))
                        return -EPERM;
+               if (bi.data.calibrate > INT_MAX / s->par.bitrate)
+                       return -EINVAL;
                s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
                return 0;
 
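The guard added to HDLCDRVCTL_CALIBRATE above rejects user input before the 32-bit multiplication can overflow. A minimal standalone sketch of the same pattern (function and parameter names are illustrative, not the driver's):

#include <errno.h>
#include <limits.h>
#include <stdio.h>

/* Reject the multiplication before it can overflow: for positive b,
 * a * b overflows int exactly when a > INT_MAX / b. */
static int scale_calibrate(unsigned int calibrate, int bitrate, int *out)
{
	if (bitrate <= 0)
		return -EINVAL;
	if (calibrate > (unsigned int)(INT_MAX / bitrate))
		return -EINVAL;
	*out = calibrate * bitrate / 16;
	return 0;
}

int main(void)
{
	int v;

	printf("%d\n", scale_calibrate(100, 9600, &v));	/* 0, v = 60000 */
	printf("%d\n", scale_calibrate(4000000, 9600, &v));	/* -EINVAL */
	return 0;
}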
index 1971411..61dd244 100644 (file)
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                break;
 
        case SIOCYAMGCFG:
+               memset(&yi, 0, sizeof(yi));
                yi.cfg.mask = 0xffffffff;
                yi.cfg.iobase = yp->iobase;
                yi.cfg.irq = yp->irq;
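The memset() added to SIOCYAMGCFG above matters because the struct is later copied to user space; padding bytes and any fields the code does not set would otherwise leak stale kernel stack contents. A hedged sketch of the rule (struct layout and values are illustrative):

#include <stdio.h>
#include <string.h>

struct cfg_reply {
	unsigned char cmd;
	/* the compiler may insert 3 padding bytes here */
	unsigned int mask;
	unsigned int iobase;
	unsigned int irq;
};

static void fill_reply(struct cfg_reply *r)
{
	/* Zero the whole object first: padding bytes and any field the
	 * code below happens not to set would otherwise be copied out
	 * wholesale with stale stack contents. */
	memset(r, 0, sizeof(*r));
	r->mask = 0xffffffff;
	r->iobase = 0x3f8;	/* illustrative values */
	r->irq = 4;
}

int main(void)
{
	struct cfg_reply r;

	fill_reply(&r);
	printf("mask=%#x iobase=%#x irq=%u cmd=%u\n",
	       r.mask, r.iobase, r.irq, r.cmd);
	return 0;
}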
index 524f713..71baeb3 100644 (file)
@@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        struct sk_buff *skb;
 
        net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
-       if (!net) {
-               netdev_err(net, "got receive callback but net device"
-                       " not initialized yet\n");
+       if (!net || net->reg_state != NETREG_REGISTERED) {
                packet->status = NVSP_STAT_FAIL;
                return 0;
        }
@@ -327,7 +325,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
                return -EINVAL;
 
        nvdev->start_remove = true;
-       cancel_delayed_work_sync(&ndevctx->dwork);
        cancel_work_sync(&ndevctx->work);
        netif_tx_disable(ndev);
        rndis_filter_device_remove(hdev);
@@ -436,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev,
        SET_ETHTOOL_OPS(net, &ethtool_ops);
        SET_NETDEV_DEV(net, &dev->device);
 
-       ret = register_netdev(net);
-       if (ret != 0) {
-               pr_err("Unable to register netdev.\n");
-               free_netdev(net);
-               goto out;
-       }
-
        /* Notify the netvsc driver of the new device */
        device_info.ring_size = ring_size;
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-               unregister_netdev(net);
                free_netdev(net);
                hv_set_drvdata(dev, NULL);
                return ret;
@@ -457,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev,
 
        netif_carrier_on(net);
 
-out:
+       ret = register_netdev(net);
+       if (ret != 0) {
+               pr_err("Unable to register netdev.\n");
+               rndis_filter_device_remove(dev);
+               free_netdev(net);
+       }
+
        return ret;
 }
 
index acf9379..60406b0 100644 (file)
@@ -690,8 +690,19 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
                                              netdev_features_t features)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
+       netdev_features_t mask;
 
-       return features & (vlan->set_features | ~MACVLAN_FEATURES);
+       features |= NETIF_F_ALL_FOR_ALL;
+       features &= (vlan->set_features | ~MACVLAN_FEATURES);
+       mask = features;
+
+       features = netdev_increment_features(vlan->lowerdev->features,
+                                            features,
+                                            mask);
+       if (!vlan->fwd_priv)
+               features |= NETIF_F_LLTX;
+
+       return features;
 }
 
 static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused,
                break;
        case NETDEV_FEAT_CHANGE:
                list_for_each_entry(vlan, &port->vlans, list) {
-                       vlan->dev->features = dev->features & MACVLAN_FEATURES;
                        vlan->dev->gso_max_size = dev->gso_max_size;
-                       netdev_features_change(vlan->dev);
+                       netdev_update_features(vlan->dev);
                }
                break;
        case NETDEV_UNREGISTER:
index 36c6994..98434b8 100644 (file)
@@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev)
        int err = 0;
 
        atomic_set(&phydev->irq_disable, 0);
-       if (request_irq(phydev->irq, phy_interrupt,
-                               IRQF_SHARED,
-                               "phy_interrupt",
-                               phydev) < 0) {
+       if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+                       phydev) < 0) {
                pr_warn("%s: Can't get IRQ %d (PHY)\n",
                        phydev->bus->name, phydev->irq);
                phydev->irq = PHY_POLL;
index 85e4a01..47b0f73 100644 (file)
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
          module will be called cdc_mbim.
 
 config USB_NET_DM9601
-       tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
+       tristate "Davicom DM96xx based USB 10/100 ethernet devices"
        depends on USB_USBNET
        select CRC32
        help
-         This option adds support for Davicom DM9601 based USB 1.1
-         10/100 Ethernet adapters.
+         This option adds support for Davicom DM9601/DM9620/DM9621A
+         based USB 10/100 Ethernet adapters.
 
 config USB_NET_SR9700
        tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
index c6867f9..14aa48f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices
+ * Davicom DM96xx USB 10/100Mbps ethernet devices
  *
  * Peter Korsgaard <jacmet@sunsite.dk>
  *
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->ethtool_ops = &dm9601_ethtool_ops;
        dev->net->hard_header_len += DM_TX_OVERHEAD;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-       dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
+
+       /* dm9620/21a require room for 4 byte padding, even in dm9601
+        * mode, so we need +1 to be able to receive full size
+        * ethernet frames.
+        */
+       dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
 
        dev->mii.dev = dev->net;
        dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                                       gfp_t flags)
 {
-       int len;
+       int len, pad;
 
        /* format:
           b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
           b3..n: packet data
        */
 
-       len = skb->len;
+       len = skb->len + DM_TX_OVERHEAD;
+
+       /* workaround for dm962x errata with tx fifo getting out of
+        * sync if a USB bulk transfer retry happens right after a
+        * packet with odd / maxpacket length by adding up to 3 bytes
+        * padding.
+        */
+       while ((len & 1) || !(len % dev->maxpacket))
+               len++;
 
-       if (skb_headroom(skb) < DM_TX_OVERHEAD) {
+       len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
+       pad = len - skb->len;
+
+       if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
                struct sk_buff *skb2;
 
-               skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
+               skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
                dev_kfree_skb_any(skb);
                skb = skb2;
                if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 
        __skb_push(skb, DM_TX_OVERHEAD);
 
-       /* usbnet adds padding if length is a multiple of packet size
-          if so, adjust length value in header */
-       if ((skb->len % dev->maxpacket) == 0)
-               len++;
+       if (pad) {
+               memset(skb->data + skb->len, 0, pad);
+               __skb_put(skb, pad);
+       }
 
        skb->data[0] = len;
        skb->data[1] = len >> 8;
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
 }
 
 static const struct driver_info dm9601_info = {
-       .description    = "Davicom DM9601 USB Ethernet",
+       .description    = "Davicom DM96xx USB 10/100 Ethernet",
        .flags          = FLAG_ETHER | FLAG_LINK_INTR,
        .bind           = dm9601_bind,
        .rx_fixup       = dm9601_rx_fixup,
@@ -594,6 +610,10 @@ static const struct usb_device_id products[] = {
         USB_DEVICE(0x0a46, 0x9620),    /* DM9620 USB to Fast Ethernet Adapter */
         .driver_info = (unsigned long)&dm9601_info,
         },
+       {
+        USB_DEVICE(0x0a46, 0x9621),    /* DM9621A USB to Fast Ethernet Adapter */
+        .driver_info = (unsigned long)&dm9601_info,
+       },
        {},                     // END
 };
 
@@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = {
 module_usb_driver(dm9601_driver);
 
 MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
-MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
+MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
 MODULE_LICENSE("GPL");
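The tx_fixup change above computes how much zero padding to append so the total transfer is even in length and never an exact multiple of the USB packet size. A standalone sketch of that length calculation (the DM_TX_OVERHEAD and maxpacket values are illustrative):

#include <stdio.h>

#define DM_TX_OVERHEAD 2	/* 2-byte hardware length header */

/* Returns the number of padding bytes to append to a frame of
 * 'skb_len' bytes so that the full transfer (header + frame + pad)
 * has an even length that is not a multiple of 'maxpacket'. */
static int tx_pad_bytes(int skb_len, int maxpacket)
{
	int len = skb_len + DM_TX_OVERHEAD;

	while ((len & 1) || !(len % maxpacket))
		len++;

	return len - DM_TX_OVERHEAD - skb_len;
}

int main(void)
{
	/* 62-byte frame, 64-byte bulk packets: 62 + 2 = 64 is a
	 * multiple of 64, so one byte is added, then one more to keep
	 * the length even -> 2 bytes of padding. */
	printf("%d\n", tx_pad_bytes(62, 64));
	return 0;
}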
index 8d78253..a366d6b 100644 (file)
@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
                                mask2 |= ATH9K_INT_CST;
                        if (isr2 & AR_ISR_S2_TSFOOR)
                                mask2 |= ATH9K_INT_TSFOOR;
+
+                       if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+                               REG_WRITE(ah, AR_ISR_S2, isr2);
+                               isr &= ~AR_ISR_BCNMISC;
+                       }
                }
 
-               isr = REG_READ(ah, AR_ISR_RAC);
+               if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+                       isr = REG_READ(ah, AR_ISR_RAC);
+
                if (isr == 0xffffffff) {
                        *masked = 0;
                        return false;
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 
                        *masked |= ATH9K_INT_TX;
 
-                       s0_s = REG_READ(ah, AR_ISR_S0_S);
+                       if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+                               s0_s = REG_READ(ah, AR_ISR_S0_S);
+                               s1_s = REG_READ(ah, AR_ISR_S1_S);
+                       } else {
+                               s0_s = REG_READ(ah, AR_ISR_S0);
+                               REG_WRITE(ah, AR_ISR_S0, s0_s);
+                               s1_s = REG_READ(ah, AR_ISR_S1);
+                               REG_WRITE(ah, AR_ISR_S1, s1_s);
+
+                               isr &= ~(AR_ISR_TXOK |
+                                        AR_ISR_TXDESC |
+                                        AR_ISR_TXERR |
+                                        AR_ISR_TXEOL);
+                       }
+
                        ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
                        ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
-                       s1_s = REG_READ(ah, AR_ISR_S1_S);
                        ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
                        ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
                }
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
                *masked |= mask2;
        }
 
-       if (AR_SREV_9100(ah))
-               return true;
-
-       if (isr & AR_ISR_GENTMR) {
+       if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
                u32 s5_s;
 
-               s5_s = REG_READ(ah, AR_ISR_S5_S);
+               if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+                       s5_s = REG_READ(ah, AR_ISR_S5_S);
+               } else {
+                       s5_s = REG_READ(ah, AR_ISR_S5);
+               }
+
                ah->intr_gen_timer_trigger =
                                MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
 
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
                if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
                    !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
                        *masked |= ATH9K_INT_TIM_TIMER;
+
+               if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+                       REG_WRITE(ah, AR_ISR_S5, s5_s);
+                       isr &= ~AR_ISR_GENTMR;
+               }
        }
 
+       if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+               REG_WRITE(ah, AR_ISR, isr);
+               REG_READ(ah, AR_ISR);
+       }
+
+       if (AR_SREV_9100(ah))
+               return true;
+
        if (sync_cause) {
                ath9k_debug_sync_cause(common, sync_cause);
                fatal_int =
index 9a2657f..608d739 100644 (file)
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
        struct ath9k_vif_iter_data *iter_data = data;
        int i;
 
-       for (i = 0; i < ETH_ALEN; i++)
-               iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+       if (iter_data->hw_macaddr != NULL) {
+               for (i = 0; i < ETH_ALEN; i++)
+                       iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+       } else {
+               iter_data->hw_macaddr = mac;
+       }
 }
 
-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
                                     struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
        struct ath9k_vif_iter_data iter_data;
 
        /*
-        * Use the hardware MAC address as reference, the hardware uses it
-        * together with the BSSID mask when matching addresses.
+        * Pick the MAC address of the first interface as the new hardware
+        * MAC address. The hardware will use it together with the BSSID mask
+        * when matching addresses.
         */
-       iter_data.hw_macaddr = common->macaddr;
+       iter_data.hw_macaddr = NULL;
        memset(&iter_data.mask, 0xff, ETH_ALEN);
 
        if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
                ath9k_htc_bssid_iter, &iter_data);
 
        memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+
+       if (iter_data.hw_macaddr)
+               memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
+
        ath_hw_setbssidmask(common);
 }
 
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
                goto out;
        }
 
-       ath9k_htc_set_bssid_mask(priv, vif);
+       ath9k_htc_set_mac_bssid_mask(priv, vif);
 
        priv->vif_slot |= (1 << avp->index);
        priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
 
        ath9k_htc_set_opmode(priv);
 
-       ath9k_htc_set_bssid_mask(priv, vif);
+       ath9k_htc_set_mac_bssid_mask(priv, vif);
 
        /*
         * Stop ANI only if there are no associated station interfaces.
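Both hunks above derive the hardware BSSID mask by clearing every bit position in which any interface MAC differs from the reference address; the hardware then ignores those bits when matching. A minimal sketch of that derivation, using plain arrays instead of the driver's iterator:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Compute a BSSID mask: a bit stays 1 only where all MACs agree with
 * the first (reference) address, so the hardware can match any of
 * them by comparing just the masked bits. */
static void compute_bssid_mask(const unsigned char macs[][ETH_ALEN],
			       int n, unsigned char mask[ETH_ALEN])
{
	int i, j;

	memset(mask, 0xff, ETH_ALEN);
	for (i = 1; i < n; i++)
		for (j = 0; j < ETH_ALEN; j++)
			mask[j] &= ~(macs[0][j] ^ macs[i][j]);
}

int main(void)
{
	const unsigned char macs[2][ETH_ALEN] = {
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x56 },
	};
	unsigned char mask[ETH_ALEN];
	int j;

	compute_bssid_mask(macs, 2, mask);
	for (j = 0; j < ETH_ALEN; j++)
		printf("%02x%s", mask[j], j < ETH_ALEN - 1 ? ":" : "\n");
	return 0;	/* prints ff:ff:ff:ff:ff:fc */
}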
index 74f452c..21aa09e 100644 (file)
@@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
        struct ath_common *common = ath9k_hw_common(ah);
 
        /*
-        * Use the hardware MAC address as reference, the hardware uses it
-        * together with the BSSID mask when matching addresses.
+        * Pick the MAC address of the first interface as the new hardware
+        * MAC address. The hardware will use it together with the BSSID mask
+        * when matching addresses.
         */
        memset(iter_data, 0, sizeof(*iter_data));
        memset(&iter_data->mask, 0xff, ETH_ALEN);
index 0f49444..5a53195 100644 (file)
@@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
        };
        int index = rtlpci->rx_ring[rx_queue_idx].idx;
 
+       if (rtlpci->driver_is_goingto_unload)
+               return;
        /*RX NORMAL PKT */
        while (count--) {
                /*rx descriptor */
@@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
         */
        set_hal_stop(rtlhal);
 
+       rtlpci->driver_is_goingto_unload = true;
        rtlpriv->cfg->ops->disable_interrupt(hw);
        cancel_work_sync(&rtlpriv->works.lps_change_work);
 
@@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
        ppsc->rfchange_inprogress = true;
        spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
 
-       rtlpci->driver_is_goingto_unload = true;
        rtlpriv->cfg->ops->hw_disable(hw);
        /* some things are not needed if firmware not available */
        if (!rtlpriv->max_fw_size)
index 08ae01b..c47794b 100644 (file)
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
 struct xenvif {
        /* Unique identifier for this interface. */
        domid_t          domid;
@@ -143,13 +150,13 @@ struct xenvif {
         */
        RING_IDX rx_req_cons_peek;
 
-       /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
-        * head/fragment page uses 2 copy operations because it
-        * straddles two buffers in the frontend.
-        */
-       struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
-       struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+       /* This array is allocated seperately as it is large */
+       /* This array is allocated separately as it is large */
+       struct gnttab_copy *grant_copy_op;
 
+       /* We create one meta structure per ring request we consume, so
+        * the maximum number is the same as the ring size.
+        */
+       struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
 
        u8               fe_dev_addr[6];
 
index 870f1fa..34ca4e5 100644 (file)
@@ -307,6 +307,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        SET_NETDEV_DEV(dev, parent);
 
        vif = netdev_priv(dev);
+
+       vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+                                    MAX_GRANT_COPY_OPS);
+       if (vif->grant_copy_op == NULL) {
+               pr_warn("Could not allocate grant copy space for %s\n", name);
+               free_netdev(dev);
+               return ERR_PTR(-ENOMEM);
+       }
+
        vif->domid  = domid;
        vif->handle = handle;
        vif->can_sg = 1;
@@ -487,6 +496,7 @@ void xenvif_free(struct xenvif *vif)
 
        unregister_netdev(vif->dev);
 
+       vfree(vif->grant_copy_op);
        free_netdev(vif->dev);
 
        module_put(THIS_MODULE);
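The sizing change above replaces the old "2 copies per slot" bound with MAX_SKB_FRAGS copies per ring slot, which is why the array is now vmalloc()ed rather than embedded in struct xenvif. A back-of-the-envelope check under illustrative values (MAX_SKB_FRAGS = 17, ring size = 256; the real constants come from the kernel headers at build time):

#include <stdio.h>

int main(void)
{
	const int max_skb_frags = 17;	/* illustrative */
	const int ring_size = 256;	/* illustrative */
	const int copy_op_size = 56;	/* approx sizeof(struct gnttab_copy) */

	long ops = (long)max_skb_frags * ring_size;

	/* ~17 * 256 = 4352 entries, a couple hundred KiB per vif --
	 * large enough to justify a separate vmalloc() allocation. */
	printf("%ld ops, ~%ld KiB\n", ops, ops * copy_op_size / 1024);
	return 0;
}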
index e884ee1..7842555 100644 (file)
@@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif)
        if (!npo.copy_prod)
                return;
 
-       BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+       BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
        gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -1197,6 +1197,9 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 
        err = -EPROTO;
 
+       if (fragment)
+               goto out;
+
        switch (ip_hdr(skb)->protocol) {
        case IPPROTO_TCP:
                err = maybe_pull_tail(skb,
@@ -1206,8 +1209,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct tcphdr, check)))
+                                         offsetof(struct tcphdr, check))) {
+                       err = -EPROTO;
                        goto out;
+               }
 
                if (recalculate_partial_csum)
                        tcp_hdr(skb)->check =
@@ -1224,8 +1229,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct udphdr, check)))
+                                         offsetof(struct udphdr, check))) {
+                       err = -EPROTO;
                        goto out;
+               }
 
                if (recalculate_partial_csum)
                        udp_hdr(skb)->check =
@@ -1347,8 +1354,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct tcphdr, check)))
+                                         offsetof(struct tcphdr, check))) {
+                       err = -EPROTO;
                        goto out;
+               }
 
                if (recalculate_partial_csum)
                        tcp_hdr(skb)->check =
@@ -1365,8 +1374,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct udphdr, check)))
+                                         offsetof(struct udphdr, check))) {
+                       err = -EPROTO;
                        goto out;
+               }
 
                if (recalculate_partial_csum)
                        udp_hdr(skb)->check =
index de6f899..c6973f1 100644 (file)
@@ -20,7 +20,7 @@ config OF_SELFTEST
        depends on OF_IRQ
        help
          This option builds in test cases for the device tree infrastructure
-         that are executed one at boot time, and the results dumped to the
+         that are executed once at boot time, and the results dumped to the
          console.
 
          If unsure, say N here, but this option is safe to enable.
index 4b9317b..d3dd41c 100644 (file)
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
                 (unsigned long long)cp, (unsigned long long)s,
                 (unsigned long long)da);
 
-       /*
-        * If the number of address cells is larger than 2 we assume the
-        * mapping doesn't specify a physical address. Rather, the address
-        * specifies an identifier that must match exactly.
-        */
-       if (na > 2 && memcmp(range, addr, na * 4) != 0)
-               return OF_BAD_ADDR;
-
        if (da < cp || da >= (cp + s))
                return OF_BAD_ADDR;
        return da - cp;
index 2fa024b..758b4f8 100644 (file)
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
  */
 void __init unflatten_and_copy_device_tree(void)
 {
-       int size = __be32_to_cpu(initial_boot_params->totalsize);
-       void *dt = early_init_dt_alloc_memory_arch(size,
+       int size;
+       void *dt;
+
+       if (!initial_boot_params) {
+               pr_warn("No valid device tree found, continuing without\n");
+               return;
+       }
+
+       size = __be32_to_cpu(initial_boot_params->totalsize);
+       dt = early_init_dt_alloc_memory_arch(size,
                __alignof__(struct boot_param_header));
 
        if (dt) {
index 786b0b4..2721240 100644 (file)
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
                if (of_get_property(ipar, "interrupt-controller", NULL) !=
                                NULL) {
                        pr_debug(" -> got it !\n");
-                       of_node_put(old);
                        return 0;
                }
 
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
                 * Successfully parsed an interrupt-map translation; copy new
                 * interrupt specifier into the out_irq structure
                 */
-               of_node_put(out_irq->np);
-               out_irq->np = of_node_get(newpar);
+               out_irq->np = newpar;
 
                match_array = imap - newaddrsize - newintsize;
                for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
        }
  fail:
        of_node_put(ipar);
-       of_node_put(out_irq->np);
        of_node_put(newpar);
 
        return -EINVAL;
index a344f3d..330ef2d 100644 (file)
@@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO
 config OMAP_USB2
        tristate "OMAP USB2 PHY Driver"
        depends on ARCH_OMAP2PLUS
+       depends on USB_PHY
        select GENERIC_PHY
-       select USB_PHY
        select OMAP_CONTROL_USB
        help
          Enable this to support the transceiver that is part of SOC. This
@@ -36,8 +36,8 @@ config OMAP_USB2
 config TWL4030_USB
        tristate "TWL4030 USB Transceiver Driver"
        depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
+       depends on USB_PHY
        select GENERIC_PHY
-       select USB_PHY
        help
          Enable this to support the USB OTG transceiver on TWL4030
          family chips (including the TWL5030 and TPS659x0 devices).
index 03cf8fb..58e0e97 100644 (file)
@@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
        int id;
        struct phy *phy;
 
-       if (!dev) {
-               dev_WARN(dev, "no device provided for PHY\n");
-               ret = -EINVAL;
-               goto err0;
-       }
+       if (WARN_ON(!dev))
+               return ERR_PTR(-EINVAL);
 
        phy = kzalloc(sizeof(*phy), GFP_KERNEL);
-       if (!phy) {
-               ret = -ENOMEM;
-               goto err0;
-       }
+       if (!phy)
+               return ERR_PTR(-ENOMEM);
 
        id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
        if (id < 0) {
                dev_err(dev, "unable to get id\n");
                ret = id;
-               goto err0;
+               goto free_phy;
        }
 
        device_initialize(&phy->dev);
@@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
 
        ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
        if (ret)
-               goto err1;
+               goto put_dev;
 
        ret = device_add(&phy->dev);
        if (ret)
-               goto err1;
+               goto put_dev;
 
        if (pm_runtime_enabled(dev)) {
                pm_runtime_enable(&phy->dev);
@@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
 
        return phy;
 
-err1:
-       ida_remove(&phy_ida, phy->id);
+put_dev:
        put_device(&phy->dev);
+       ida_remove(&phy_ida, phy->id);
+free_phy:
        kfree(phy);
-
-err0:
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(phy_create);
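The reworked error path above follows the usual kernel unwinding convention: each failure jumps to a label that releases only what was already acquired, in reverse order, and labels are named for what they undo (put_dev, free_phy) rather than numbered err0/err1. A generic hedged sketch of that shape, with stand-in resources:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct widget { int id; void *buf; };

static int fake_id_get(void) { return 42; }	/* negative errno on failure */

/* Each label releases exactly what was acquired before the jump,
 * in reverse order of acquisition. */
static int widget_create(struct widget **out)
{
	struct widget *w;
	int ret;

	w = calloc(1, sizeof(*w));
	if (!w)
		return -ENOMEM;

	w->buf = malloc(4096);
	if (!w->buf) {
		ret = -ENOMEM;
		goto free_widget;
	}

	w->id = fake_id_get();
	if (w->id < 0) {
		ret = w->id;
		goto free_buf;
	}

	*out = w;
	return 0;

free_buf:
	free(w->buf);
free_widget:
	free(w);
	return ret;
}

int main(void)
{
	struct widget *w;

	printf("%d\n", widget_create(&w));	/* 0 on success */
	free(w->buf);
	free(w);
	return 0;
}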
index 2832576..114f5ef 100644 (file)
@@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
 
 static const struct acpi_device_id byt_gpio_acpi_match[] = {
        { "INT33B2", 0 },
+       { "INT33FC", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
index 11bd0d9..e214295 100644 (file)
@@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
 #define PINMUX_GPIO(_pin)                                              \
        [GPIO_##_pin] = {                                               \
                .pin = (u16)-1,                                         \
-               .name = __stringify(name),                              \
+               .name = __stringify(GPIO_##_pin),                       \
                .enum_id = _pin##_DATA,                                 \
        }
 
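The bug fixed above is easy to hit: inside a macro, __stringify() must be applied to the pasted token (GPIO_##_pin), not to the literal parameter name, or every entry ends up with the same string. A tiny standalone sketch of the two-level stringification idiom:

#include <stdio.h>

/* Two-level expansion: __stringify_1 sees the already-expanded
 * argument, so __stringify(GPIO_##_pin) yields "GPIO_PA3" rather
 * than the literal text of the macro parameter. */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define PINMUX_GPIO_NAME(_pin)	__stringify(GPIO_##_pin)

int main(void)
{
	puts(PINMUX_GPIO_NAME(PA3));	/* prints "GPIO_PA3" */
	return 0;
}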
index 2a786c5..3c67683 100644 (file)
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
        return 0;
 }
 
+static const struct x86_cpu_id energy_unit_quirk_ids[] = {
+       { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+       {}
+};
+
 static int rapl_check_unit(struct rapl_package *rp, int cpu)
 {
        u64 msr_val;
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
         * time unit: 1/time_unit_divisor Seconds
         */
        value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
-       rp->energy_unit_divisor = 1 << value;
-
+       /* some CPUs have a different way to calculate the energy unit */
+       if (x86_match_cpu(energy_unit_quirk_ids))
+               rp->energy_unit_divisor = 1000000 / (1 << value);
+       else
+               rp->energy_unit_divisor = 1 << value;
 
        value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
        rp->power_unit_divisor = 1 << value;
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id)
 static const struct x86_cpu_id rapl_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
        { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+       { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
        { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
        { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
        /* TODO: Add more CPU IDs after testing */
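The fix above uses the standard x86 quirk-table idiom: describe the affected models declaratively and let x86_match_cpu() select the divisor formula at runtime. A stripped-down userspace sketch of the same table-matching approach (struct layout and values are illustrative, not the kernel's):

#include <stdio.h>

struct cpu_id { int vendor, family, model; };

/* Models whose energy-unit field is encoded differently; an empty
 * terminator ends the table, as in the kernel. */
static const struct cpu_id energy_unit_quirk_ids[] = {
	{ 1 /* INTEL */, 6, 0x37 },	/* VLV */
	{ 0, 0, 0 }
};

static int cpu_matches(const struct cpu_id *tbl, struct cpu_id cpu)
{
	for (; tbl->vendor; tbl++)
		if (tbl->vendor == cpu.vendor &&
		    tbl->family == cpu.family && tbl->model == cpu.model)
			return 1;
	return 0;
}

int main(void)
{
	struct cpu_id cpu = { 1, 6, 0x37 };
	unsigned int value = 5;	/* ENERGY_UNIT field read from the MSR */
	unsigned long divisor;

	if (cpu_matches(energy_unit_quirk_ids, cpu))
		divisor = 1000000 / (1u << value);	/* quirky encoding */
	else
		divisor = 1u << value;			/* common case */

	printf("energy unit divisor: %lu\n", divisor);
	return 0;
}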
index 333677d..9e61922 100644 (file)
@@ -438,7 +438,7 @@ common_reg:
        platform_set_drvdata(pdev, s2mps11);
 
        config.dev = &pdev->dev;
-       config.regmap = iodev->regmap;
+       config.regmap = iodev->regmap_pmic;
        config.driver_data = s2mps11;
        for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
                if (!reg_np) {
index 5964800..38a1257 100644 (file)
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
                schedule_delayed_work(&tgt->sess_del_work, 0);
        else
                schedule_delayed_work(&tgt->sess_del_work,
-                   jiffies - sess->expires);
+                   sess->expires - jiffies);
 }
 
 /* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess;
-       unsigned long flags;
+       unsigned long flags, elapsed;
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        while (!list_empty(&tgt->del_sess_list)) {
                sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
                    del_list_entry);
-               if (time_after_eq(jiffies, sess->expires)) {
+               elapsed = jiffies;
+               if (time_after_eq(elapsed, sess->expires)) {
                        qlt_undelete_sess(sess);
 
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
                        ha->tgt.tgt_ops->put_sess(sess);
                } else {
                        schedule_delayed_work(&tgt->sess_del_work,
-                           jiffies - sess->expires);
+                           sess->expires - elapsed);
                        break;
                }
        }
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
                if (rc != 0) {
                        ha->tgt.tgt_ops = NULL;
                        ha->tgt.target_lport_ptr = NULL;
+                       scsi_host_put(host);
                }
                mutex_unlock(&qla_tgt_mutex);
                return rc;
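The qla2xxx fix above corrects an inverted subtraction: the remaining delay is expires - jiffies, while expiry itself is tested with time_after_eq(), which stays correct across counter wrap by relying on signed reinterpretation of the unsigned difference. A hedged sketch of that comparison trick, using a 32-bit counter so the wrap is easy to demonstrate:

#include <stdint.h>
#include <stdio.h>

/* time_after_eq(a, b): true when a is at or past b even across a
 * counter wrap, because the unsigned difference is reinterpreted as
 * signed (valid while the stamps are less than 2^31 ticks apart). */
#define time_after_eq(a, b)	((int32_t)((a) - (b)) >= 0)

int main(void)
{
	uint32_t jiffies = 5;			/* counter just wrapped */
	uint32_t expires = UINT32_MAX - 4;	/* stamp taken before wrap */

	if (time_after_eq(jiffies, expires))
		printf("expired\n");		/* correct: 10 ticks past */
	else
		printf("reschedule in %u ticks\n", expires - jiffies);
	return 0;
}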
index 8f02bf6..4964d2a 100644 (file)
@@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev,
                release_firmware(fw);
        }
 
-       return ret;
+       return ret < 0 ? ret : 0;
 }
 EXPORT_SYMBOL_GPL(comedi_load_firmware);
 
index 432e3f9..c55f234 100644 (file)
@@ -63,7 +63,8 @@ enum pci_8255_boardid {
        BOARD_ADLINK_PCI7296,
        BOARD_CB_PCIDIO24,
        BOARD_CB_PCIDIO24H,
-       BOARD_CB_PCIDIO48H,
+       BOARD_CB_PCIDIO48H_OLD,
+       BOARD_CB_PCIDIO48H_NEW,
        BOARD_CB_PCIDIO96H,
        BOARD_NI_PCIDIO96,
        BOARD_NI_PCIDIO96B,
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
                .dio_badr       = 2,
                .n_8255         = 1,
        },
-       [BOARD_CB_PCIDIO48H] = {
+       [BOARD_CB_PCIDIO48H_OLD] = {
                .name           = "cb_pci-dio48h",
                .dio_badr       = 1,
                .n_8255         = 2,
        },
+       [BOARD_CB_PCIDIO48H_NEW] = {
+               .name           = "cb_pci-dio48h",
+               .dio_badr       = 2,
+               .n_8255         = 2,
+       },
        [BOARD_CB_PCIDIO96H] = {
                .name           = "cb_pci-dio96h",
                .dio_badr       = 2,
@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
        { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
        { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
        { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
-       { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
+         .driver_data = BOARD_CB_PCIDIO48H_OLD },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
+         .driver_data = BOARD_CB_PCIDIO48H_NEW },
        { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
        { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
        { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
index 99421f9..0485d7f 100644 (file)
@@ -451,7 +451,12 @@ done:
                .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |  \
                        BIT(IIO_CHAN_INFO_SAMP_FREQ),                   \
                .scan_index = idx,                                      \
-               .scan_type = IIO_ST('s', 16, 16, IIO_BE),               \
+               .scan_type = {                                          \
+                       .sign = 's',                                    \
+                       .realbits = 16,                                 \
+                       .storagebits = 16,                              \
+                       .endianness = IIO_BE,                           \
+               },                                                      \
        }
 
 static const struct iio_chan_spec hmc5843_channels[] = {
index 6bd015a..96e4eee 100644 (file)
@@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
 
        imx_drm_device_put();
 
-       drm_mode_config_cleanup(imxdrm->drm);
+       drm_vblank_cleanup(imxdrm->drm);
        drm_kms_helper_poll_fini(imxdrm->drm);
+       drm_mode_config_cleanup(imxdrm->drm);
 
        return 0;
 }
@@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
        if (!file->is_master)
                return;
 
-       for (i = 0; i < 4; i++)
-               imx_drm_disable_vblank(drm , i);
+       for (i = 0; i < MAX_CRTC; i++)
+               imx_drm_disable_vblank(drm, i);
 }
 
 static const struct file_operations imx_drm_driver_fops = {
@@ -376,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
        struct imx_drm_device *imxdrm = __imx_drm_device();
        int ret;
 
-       drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
-                       imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
        ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
        if (ret)
                return ret;
@@ -385,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
        drm_crtc_helper_add(imx_drm_crtc->crtc,
                        imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
+       drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
+                       imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
        drm_mode_group_reinit(imxdrm->drm);
 
        return 0;
@@ -428,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
        ret = drm_mode_group_init_legacy_group(imxdrm->drm,
                        &imxdrm->drm->primary->mode_group);
        if (ret)
-               goto err_init;
+               goto err_kms;
 
        ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
        if (ret)
-               goto err_init;
+               goto err_kms;
 
        /*
         * with vblank_disable_allowed = true, vblank interrupt will be disabled
@@ -441,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
         */
        imxdrm->drm->vblank_disable_allowed = true;
 
-       if (!imx_drm_device_get())
+       if (!imx_drm_device_get()) {
                ret = -EINVAL;
+               goto err_vblank;
+       }
 
-       ret = 0;
+       mutex_unlock(&imxdrm->mutex);
+       return 0;
 
-err_init:
+err_vblank:
+       drm_vblank_cleanup(drm);
+err_kms:
+       drm_kms_helper_poll_fini(drm);
+       drm_mode_config_cleanup(drm);
        mutex_unlock(&imxdrm->mutex);
 
        return ret;
@@ -492,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
 
        mutex_lock(&imxdrm->mutex);
 
+       /*
+        * The vblank arrays are dimensioned by MAX_CRTC - we can't
+        * pass IDs greater than this to those functions.
+        */
+       if (imxdrm->pipes >= MAX_CRTC) {
+               ret = -EINVAL;
+               goto err_busy;
+       }
+
        if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
@@ -528,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
        return 0;
 
 err_register:
+       list_del(&imx_drm_crtc->list);
        kfree(imx_drm_crtc);
 err_alloc:
 err_busy:
index 680f4c8..2c44fef 100644 (file)
@@ -114,7 +114,6 @@ struct imx_tve {
        struct drm_encoder encoder;
        struct imx_drm_encoder *imx_drm_encoder;
        struct device *dev;
-       spinlock_t enable_lock; /* serializes tve_enable/disable */
        spinlock_t lock;        /* register lock */
        bool enabled;
        int mode;
@@ -146,10 +145,8 @@ __releases(&tve->lock)
 
 static void tve_enable(struct imx_tve *tve)
 {
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&tve->enable_lock, flags);
        if (!tve->enabled) {
                tve->enabled = true;
                clk_prepare_enable(tve->clk);
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve)
                             TVE_CD_SM_IEN |
                             TVE_CD_LM_IEN |
                             TVE_CD_MON_END_IEN);
-
-       spin_unlock_irqrestore(&tve->enable_lock, flags);
 }
 
 static void tve_disable(struct imx_tve *tve)
 {
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&tve->enable_lock, flags);
        if (tve->enabled) {
                tve->enabled = false;
                ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
                                         TVE_IPU_CLK_EN | TVE_EN, 0);
                clk_disable_unprepare(tve->clk);
        }
-       spin_unlock_irqrestore(&tve->enable_lock, flags);
 }
 
 static int tve_setup_tvout(struct imx_tve *tve)
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev)
 
        tve->dev = &pdev->dev;
        spin_lock_init(&tve->lock);
-       spin_lock_init(&tve->enable_lock);
 
        ddc_node = of_parse_phandle(np, "ddc", 0);
        if (ddc_node) {
index 7a22ce6..97ca692 100644 (file)
@@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = {
        },
 };
 
+static DEFINE_MUTEX(ipu_client_id_mutex);
 static int ipu_client_id;
 
-static int ipu_add_subdevice_pdata(struct device *dev,
-               const struct ipu_platform_reg *reg)
-{
-       struct platform_device *pdev;
-
-       pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
-                       &reg->pdata, sizeof(struct ipu_platform_reg));
-
-       return PTR_ERR_OR_ZERO(pdev);
-}
-
 static int ipu_add_client_devices(struct ipu_soc *ipu)
 {
-       int ret;
-       int i;
+       struct device *dev = ipu->dev;
+       unsigned i;
+       int id, ret;
+
+       mutex_lock(&ipu_client_id_mutex);
+       id = ipu_client_id;
+       ipu_client_id += ARRAY_SIZE(client_reg);
+       mutex_unlock(&ipu_client_id_mutex);
 
        for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
                const struct ipu_platform_reg *reg = &client_reg[i];
-               ret = ipu_add_subdevice_pdata(ipu->dev, reg);
-               if (ret)
+               struct platform_device *pdev;
+
+               pdev = platform_device_register_data(dev, reg->name,
+                       id++, &reg->pdata, sizeof(reg->pdata));
+
+               if (IS_ERR(pdev))
                        goto err_register;
        }
 
        return 0;
 
 err_register:
-       platform_device_unregister_children(to_platform_device(ipu->dev));
+       platform_device_unregister_children(to_platform_device(dev));
 
        return ret;
 }
index d70e911..0086719 100644 (file)
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
                 */
                send_sig(SIGINT, np->np_thread, 1);
                kthread_stop(np->np_thread);
+               np->np_thread = NULL;
        }
 
        np->np_transport->iscsit_free_np(np);
@@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
             (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
                /*
-                * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
-                * that adds support for RESERVE/RELEASE.  There is a bug
-                * add with this new functionality that sets R/W bits when
-                * neither CDB carries any READ or WRITE datapayloads.
+                * From RFC-3720 Section 10.3.1:
+                *
+                * "Either or both of R and W MAY be 1 when either the
+                *  Expected Data Transfer Length and/or Bidirectional Read
+                *  Expected Data Transfer Length are 0"
+                *
+                * For this case, go ahead and clear the unnecessary bits
+                * to avoid any confusion with ->data_direction.
                 */
-               if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
-                       hdr->flags &= ~ISCSI_FLAG_CMD_READ;
-                       hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
-                       goto done;
-               }
+               hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+               hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
 
-               pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+               pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
                        " set when Expected Data Transfer Length is 0 for"
-                       " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
-               return iscsit_add_reject_cmd(cmd,
-                                            ISCSI_REASON_BOOKMARK_INVALID, buf);
+                       " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
        }
-done:
 
        if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
            !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
index e3318ed..1c0088f 100644 (file)
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name(                             \
                                                                        \
        if (!capable(CAP_SYS_ADMIN))                                    \
                return -EPERM;                                          \
-                                                                       \
+       if (count >= sizeof(auth->name))                                \
+               return -EINVAL;                                         \
        snprintf(auth->name, sizeof(auth->name), "%s", page);           \
        if (!strncmp("NULL", auth->name, 4))                            \
                auth->naf_flags &= ~flags;                              \
index 4eb93b2..e29279e 100644 (file)
@@ -1403,11 +1403,6 @@ old_sess_out:
 
 out:
        stop = kthread_should_stop();
-       if (!stop && signal_pending(current)) {
-               spin_lock_bh(&np->np_thread_lock);
-               stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
-               spin_unlock_bh(&np->np_thread_lock);
-       }
        /* Wait for another socket.. */
        if (!stop)
                return 1;
@@ -1415,7 +1410,6 @@ exit:
        iscsi_stop_login_thread_timer(np);
        spin_lock_bh(&np->np_thread_lock);
        np->np_thread_state = ISCSI_NP_THREAD_EXIT;
-       np->np_thread = NULL;
        spin_unlock_bh(&np->np_thread_lock);
 
        return 0;
index 207b340..d06de84 100644 (file)
@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
        dev->dev_attrib.block_size = block_size;
        pr_debug("dev[%p]: SE Device block_size changed to %u\n",
                        dev, block_size);
+
+       if (dev->dev_attrib.max_bytes_per_io)
+               dev->dev_attrib.hw_max_sectors =
+                       dev->dev_attrib.max_bytes_per_io / block_size;
+
        return 0;
 }
 
index 0e34cda..78241a5 100644 (file)
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
        pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
                " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
                TARGET_CORE_MOD_VERSION);
-       pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
-               " MaxSectors: %u\n",
-               hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+       pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+               hba->hba_id, fd_host->fd_host_id);
 
        return 0;
 }
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
        }
 
        dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
-       dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+       dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+       dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
        dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
index 37ffc5b..d7772c1 100644 (file)
@@ -7,7 +7,10 @@
 #define FD_DEVICE_QUEUE_DEPTH  32
 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
 #define FD_BLOCKSIZE           512
-#define FD_MAX_SECTORS         2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES           8388608
 
 #define RRF_EMULATE_CDB                0x01
 #define RRF_GOT_LBA            0x02
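The new FD_MAX_BYTES limit above derives from the patch's stated ceiling of 2048 iovecs per vfs_writev()/vfs_readv() call: at one 4 KiB page per iovec that is 8 MiB, and hw_max_sectors is then rederived whenever the block size changes. The arithmetic, checked with the values stated in the patch:

#include <stdio.h>

int main(void)
{
	const long max_iovecs = 2048;	/* per vfs_[writev,readv] call */
	const long page_size = 4096;	/* one page per iovec */
	const long fd_max_bytes = max_iovecs * page_size;

	/* 2048 * 4096 = 8388608, the FD_MAX_BYTES value above */
	printf("FD_MAX_BYTES = %ld\n", fd_max_bytes);

	/* hw_max_sectors is rederived from the active block size: */
	printf("512B sectors: %ld\n", fd_max_bytes / 512);	/* 16384 */
	printf("4KiB sectors: %ld\n", fd_max_bytes / 4096);	/* 2048 */
	return 0;
}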
index f697f8b..2a573de 100644 (file)
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-       spin_lock_init(&acl->stats_lock);
        acl->dynamic_node_acl = 1;
 
        tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-       spin_lock_init(&acl->stats_lock);
 
        tpg->se_tpg_tfo->set_default_node_attributes(acl);
 
@@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
        spin_lock_init(&lun->lun_sep_lock);
        init_completion(&lun->lun_ref_comp);
 
-       ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
-       if (ret < 0)
-               return ret;
-
        ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
-       if (ret < 0) {
-               percpu_ref_cancel_init(&lun->lun_ref);
+       if (ret < 0)
                return ret;
-       }
 
        return 0;
 }
index 268b627..34aacaa 100644 (file)
@@ -93,6 +93,7 @@ struct n_tty_data {
        size_t canon_head;
        size_t echo_head;
        size_t echo_commit;
+       size_t echo_mark;
        DECLARE_BITMAP(char_map, 256);
 
        /* private to n_tty_receive_overrun (single-threaded) */
@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
 {
        ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
        ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
+       ldata->echo_mark = 0;
        ldata->line_start = 0;
 
        ldata->erasing = 0;
@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
        size_t head;
 
        head = ldata->echo_head;
+       ldata->echo_mark = head;
        old = ldata->echo_commit - ldata->echo_tail;
 
        /* Process committed echoes if the accumulated # of bytes
@@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty)
        size_t echoed;
 
        if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
-           ldata->echo_commit == ldata->echo_tail)
+           ldata->echo_mark == ldata->echo_tail)
                return;
 
        mutex_lock(&ldata->output_lock);
+       ldata->echo_commit = ldata->echo_mark;
        echoed = __process_echoes(tty);
        mutex_unlock(&ldata->output_lock);
 
@@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty)
                tty->ops->flush_chars(tty);
 }
 
+/* NB: echo_mark and echo_head should be equivalent here */
 static void flush_echoes(struct tty_struct *tty)
 {
        struct n_tty_data *ldata = tty->disc_data;
index 4658e3e..06525f1 100644 (file)
@@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
        if (offset == UART_LCR) {
                int tries = 1000;
                while (tries--) {
-                       if (value == p->serial_in(p, UART_LCR))
+                       unsigned int lcr = p->serial_in(p, UART_LCR);
+                       if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
                                return;
                        dw8250_force_idle(p);
                        writeb(value, p->membase + (UART_LCR << p->regshift));
@@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
        if (offset == UART_LCR) {
                int tries = 1000;
                while (tries--) {
-                       if (value == p->serial_in(p, UART_LCR))
+                       unsigned int lcr = p->serial_in(p, UART_LCR);
+                       if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
                                return;
                        dw8250_force_idle(p);
                        writel(value, p->membase + (UART_LCR << p->regshift));
@@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
 static const struct acpi_device_id dw8250_acpi_match[] = {
        { "INT33C4", 0 },
        { "INT33C5", 0 },
+       { "INT3434", 0 },
+       { "INT3435", 0 },
        { "80860F0A", 0 },
        { },
 };
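The 8250_dw change above is a verify-after-write loop for a register that may not latch the value immediately; the fix masks out UART_LCR_SPAR (a don't-care bit here) on both sides before comparing, so a mismatch in that bit alone no longer exhausts the retries. A generic sketch of masked write-verify, with stand-ins for the MMIO accessors:

#include <stdio.h>

#define DONT_CARE_MASK	0x20	/* e.g. the stick-parity bit */

/* Fake register that drops the don't-care bit on write, mimicking a
 * hardware quirk; a real driver would use its register helpers. */
static unsigned int reg;
static unsigned int reg_read(void) { return reg; }
static void reg_write(unsigned int v) { reg = v & ~DONT_CARE_MASK; }

static int write_verified(unsigned int value, int tries)
{
	while (tries--) {
		unsigned int cur;

		reg_write(value);
		cur = reg_read();
		/* Compare only the bits we actually care about. */
		if ((value & ~DONT_CARE_MASK) == (cur & ~DONT_CARE_MASK))
			return 0;
	}
	return -1;
}

int main(void)
{
	printf("%s\n", write_verified(0x3f, 1000) ? "failed" : "ok");
	return 0;
}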
index e46e9f3..f619ad5 100644 (file)
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
                                        continue;
                        }
 
+#ifdef SUPPORT_SYSRQ
                        /*
                         * uart_handle_sysrq_char() doesn't work if
                         * spinlocked, for some reason
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
                                }
                                spin_lock(&port->lock);
                        }
+#endif
 
                        port->icount.rx++;
 
index 22fad8a..d8a55e8 100644 (file)
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
        return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }
 
+/*
+ * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
+ * Returns 1 if count was successfully changed; @*old will have @new value.
+ * Returns 0 if count was not changed; @*old will have most recent sem->count
+ */
 static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
 {
-       long tmp = *old;
-       *old = atomic_long_cmpxchg(&sem->count, *old, new);
-       return *old == tmp;
+       long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
+       if (tmp == *old) {
+               *old = new;
+               return 1;
+       } else {
+               *old = tmp;
+               return 0;
+       }
 }
 
 /*
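The rewritten ldsem_cmpxchg() above returns the freshest counter value through *old on failure, so callers can retry without an extra re-read. C11 atomics expose exactly this update-on-failure behaviour; a hedged standalone sketch of the same helper shape (not the kernel primitive itself):

#include <stdatomic.h>
#include <stdio.h>

/* Like the patched ldsem_cmpxchg(): on success *old holds the new
 * value; on failure *old is refreshed with the current count so the
 * caller's retry loop needs no separate load. */
static int count_cmpxchg(atomic_long *count, long *old, long new)
{
	/* compare_exchange_strong already writes the observed value
	 * back into *old when the comparison fails. */
	if (atomic_compare_exchange_strong(count, old, new)) {
		*old = new;
		return 1;
	}
	return 0;
}

int main(void)
{
	atomic_long count = 4;
	long old = 4;

	/* Retry loop: attempt to add 3 until no one races with us. */
	while (!count_cmpxchg(&count, &old, old + 3))
		;	/* 'old' was refreshed; just try again */

	printf("count = %ld\n", atomic_load(&count));	/* 7 */
	return 0;
}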
index 5d8981c..6e73f8c 100644 (file)
@@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
                        : CI_ROLE_GADGET;
        }
 
+       /* only update vbus status for peripheral */
+       if (ci->role == CI_ROLE_GADGET)
+               ci_handle_vbus_change(ci);
+
        ret = ci_role_start(ci, ci->role);
        if (ret) {
                dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
index 59e6020..526cd77 100644 (file)
@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
        return ret;
 
 disable_reg:
-       regulator_disable(ci->platdata->reg_vbus);
+       if (ci->platdata->reg_vbus)
+               regulator_disable(ci->platdata->reg_vbus);
 
 put_hcd:
        usb_put_hcd(hcd);
index b34c819..69d20fb 100644 (file)
@@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci)
        pm_runtime_no_callbacks(&ci->gadget.dev);
        pm_runtime_enable(&ci->gadget.dev);
 
-       /* Update ci->vbus_active */
-       ci_handle_vbus_change(ci);
-
        return retval;
 
 destroy_eps:
index 4d38759..0b23a86 100644 (file)
@@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
 {
        /* need autopm_get/put here to ensure the usbcore sees the new value */
        int rv = usb_autopm_get_interface(intf);
-       if (rv < 0)
-               goto err;
 
        intf->needs_remote_wakeup = on;
-       usb_autopm_put_interface(intf);
-err:
-       return rv;
+       if (!rv)
+               usb_autopm_put_interface(intf);
+       return 0;
 }
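After the change, wdm_manage_power() records the new remote-wakeup policy even when the autopm get fails, balances the put only against a successful get, and always reports success since its callers cannot act on an error. A compact sketch of that shape (struct iface, pm_get() and pm_put() are hypothetical stand-ins for the usbcore calls):

struct iface {
	int needs_remote_wakeup;
};

extern int pm_get(struct iface *intf);	/* stand-in for usb_autopm_get_interface() */
extern void pm_put(struct iface *intf);	/* stand-in for usb_autopm_put_interface() */

static int manage_power_sketch(struct iface *intf, int on)
{
	int rv = pm_get(intf);		/* may fail, e.g. during disconnect */

	intf->needs_remote_wakeup = on;	/* always record the new policy */
	if (!rv)
		pm_put(intf);		/* balance only a successful get */
	return 0;			/* callers cannot handle an error here */
}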
 
 static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
index 74f9cf0..a49217a 100644 (file)
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev)
        if (IS_ERR(regs))
                return PTR_ERR(regs);
 
-       usb_phy_set_suspend(dwc->usb2_phy, 0);
-       usb_phy_set_suspend(dwc->usb3_phy, 0);
-
        spin_lock_init(&dwc->lock);
        platform_set_drvdata(pdev, dwc);
 
@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev)
                goto err0;
        }
 
+       usb_phy_set_suspend(dwc->usb2_phy, 0);
+       usb_phy_set_suspend(dwc->usb3_phy, 0);
+
        ret = dwc3_event_buffers_setup(dwc);
        if (ret) {
                dev_err(dwc->dev, "failed to setup event buffers\n");
@@ -569,6 +569,8 @@ err2:
        dwc3_event_buffers_cleanup(dwc);
 
 err1:
+       usb_phy_set_suspend(dwc->usb2_phy, 1);
+       usb_phy_set_suspend(dwc->usb3_phy, 1);
        dwc3_core_exit(dwc);
 
 err0:
index 418444e..8c356af 100644 (file)
@@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
        struct ohci_hcd *ohci;
        int retval;
        struct usb_hcd *hcd = NULL;
-
-       if (pdev->num_resources != 2) {
-               pr_debug("hcd probe: invalid num_resources");
-               return -ENODEV;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int irq;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_dbg(dev, "hcd probe: missing memory resource\n");
+               return -ENXIO;
        }
 
-       if ((pdev->resource[0].flags != IORESOURCE_MEM)
-                       || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
-               pr_debug("hcd probe: invalid resource type\n");
-               return -ENODEV;
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_dbg(dev, "hcd probe: missing irq resource\n");
+               return irq;
        }
 
        hcd = usb_create_hcd(driver, &pdev->dev, "at91");
        if (!hcd)
                return -ENOMEM;
-       hcd->rsrc_start = pdev->resource[0].start;
-       hcd->rsrc_len = resource_size(&pdev->resource[0]);
+       hcd->rsrc_start = res->start;
+       hcd->rsrc_len = resource_size(res);
 
        if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
                pr_debug("request_mem_region failed\n");
@@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
        ohci->num_ports = board->ports;
        at91_start_hc(pdev);
 
-       retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
+       retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
        if (retval == 0)
                return retval;
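The probe now asks the platform core for resources by type instead of assuming fixed slots in pdev->resource[], which also works when DT or ACPI supplies them in a different order. The lookup pattern in isolation (a kernel-style sketch, error unwinding elided):

#include <linux/platform_device.h>
#include <linux/errno.h>

static int probe_resources_sketch(struct platform_device *pdev)
{
	struct resource *res;
	int irq;

	/* First MEM resource by type, not by array slot. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	/* platform_get_irq() resolves DT/ACPI interrupts and returns a
	 * negative errno on failure, which is propagated as-is. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* res->start, resource_size(res) and irq then feed
	 * usb_create_hcd()/usb_add_hcd() as in the hunk above. */
	return 0;
}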
 
index b8dffd5..73f5208 100644 (file)
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 * any other sleep) on Haswell machines with LPT and LPT-LP
                 * with the new Intel BIOS
                 */
-               xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+               /* Limit the quirk to only known vendors, as this triggers
+                * yet another BIOS bug on some other machines
+                * https://bugzilla.kernel.org/show_bug.cgi?id=66171
+                */
+               if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+                       xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
        }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
index 08e2f39..2b41c63 100644 (file)
@@ -19,8 +19,9 @@ config AB8500_USB
          in host mode, low speed.
 
 config FSL_USB2_OTG
-       bool "Freescale USB OTG Transceiver Driver"
+       tristate "Freescale USB OTG Transceiver Driver"
        depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
+       depends on USB
        select USB_OTG
        select USB_PHY
        help
@@ -29,6 +30,7 @@ config FSL_USB2_OTG
 config ISP1301_OMAP
        tristate "Philips ISP1301 with OMAP OTG"
        depends on I2C && ARCH_OMAP_OTG
+       depends on USB
        select USB_PHY
        help
          If you say yes here you get support for the Philips ISP1301
index 82232ac..bbe4f8e 100644 (file)
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
 
        tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
                resource_size(res));
-       if (!tegra_phy->regs) {
+       if (!tegra_phy->pad_regs) {
                dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
                return -ENOMEM;
        }
index 30e8a61..bad57ce 100644 (file)
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
 
 static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
 {
-       u8 data, ret = 0;
+       u8 data;
+       int ret;
 
        ret = twl_i2c_read_u8(module, &data, address);
        if (ret >= 0)
index 496b7e3..cc7a241 100644 (file)
@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_MF628                      0x0015
 #define ZTE_PRODUCT_MF626                      0x0031
 #define ZTE_PRODUCT_MC2718                     0xffe8
+#define ZTE_PRODUCT_AC2726                     0xfff1
 
 #define BENQ_VENDOR_ID                         0x04a5
 #define BENQ_PRODUCT_H10                       0x4068
@@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
 
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
index fca4c75..eae2c87 100644 (file)
@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x19d2, 0xfffd) },
        { USB_DEVICE(0x19d2, 0xfffc) },
        { USB_DEVICE(0x19d2, 0xfffb) },
-       /* AC2726, AC8710_V3 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+       /* AC8710_V3 */
        { USB_DEVICE(0x19d2, 0xfff6) },
        { USB_DEVICE(0x19d2, 0xfff7) },
        { USB_DEVICE(0x19d2, 0xfff8) },
index c444654..5c4a95b 100644 (file)
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
 {
        __le32 actual = cpu_to_le32(vb->num_pages);
 
-       virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages,
+       virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
                      &actual);
 }
 
index 55ea73f..4c02e2b 100644 (file)
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 
                pfn = page_to_pfn(page);
 
-               set_phys_to_machine(pfn, frame_list[i]);
-
 #ifdef CONFIG_XEN_HAVE_PVMMU
-               /* Link back into the page tables if not highmem. */
-               if (xen_pv_domain() && !PageHighMem(page)) {
-                       int ret;
-                       ret = HYPERVISOR_update_va_mapping(
-                               (unsigned long)__va(pfn << PAGE_SHIFT),
-                               mfn_pte(frame_list[i], PAGE_KERNEL),
-                               0);
-                       BUG_ON(ret);
+               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       set_phys_to_machine(pfn, frame_list[i]);
+
+                       /* Link back into the page tables if not highmem. */
+                       if (!PageHighMem(page)) {
+                               int ret;
+                               ret = HYPERVISOR_update_va_mapping(
+                                               (unsigned long)__va(pfn << PAGE_SHIFT),
+                                               mfn_pte(frame_list[i], PAGE_KERNEL),
+                                               0);
+                               BUG_ON(ret);
+                       }
                }
 #endif
 
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
        enum bp_state state = BP_DONE;
        unsigned long  pfn, i;
        struct page   *page;
-       struct page   *scratch_page;
        int ret;
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
                scrub_page(page);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
                /*
                 * Ballooned out frames are effectively replaced with
                 * a scratch frame.  Ensure direct mappings and the
                 * p2m are consistent.
                 */
-               scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
-               if (xen_pv_domain() && !PageHighMem(page)) {
-                       ret = HYPERVISOR_update_va_mapping(
-                               (unsigned long)__va(pfn << PAGE_SHIFT),
-                               pfn_pte(page_to_pfn(scratch_page),
-                                       PAGE_KERNEL_RO), 0);
-                       BUG_ON(ret);
-               }
-#endif
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        unsigned long p;
+                       struct page   *scratch_page = get_balloon_scratch_page();
+
+                       if (!PageHighMem(page)) {
+                               ret = HYPERVISOR_update_va_mapping(
+                                               (unsigned long)__va(pfn << PAGE_SHIFT),
+                                               pfn_pte(page_to_pfn(scratch_page),
+                                                       PAGE_KERNEL_RO), 0);
+                               BUG_ON(ret);
+                       }
                        p = page_to_pfn(scratch_page);
                        __set_phys_to_machine(pfn, pfn_to_mfn(p));
+
+                       put_balloon_scratch_page();
                }
-               put_balloon_scratch_page();
+#endif
 
                balloon_append(pfn_to_page(pfn));
        }
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
        if (!xen_domain())
                return -ENODEV;
 
-       for_each_online_cpu(cpu)
-       {
-               per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-               if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-                       pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
-                       return -ENOMEM;
+       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+               for_each_online_cpu(cpu)
+               {
+                       per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+                       if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+                               pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+                               return -ENOMEM;
+                       }
                }
+               register_cpu_notifier(&balloon_cpu_notifier);
        }
-       register_cpu_notifier(&balloon_cpu_notifier);
 
        pr_info("Initialising balloon driver\n");
 
index 0283871..aa846a4 100644 (file)
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void)
                gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
                                                PAGE_SIZE * max_nr_gframes);
                if (gnttab_shared.addr == NULL) {
-                       pr_warn("Failed to ioremap gnttab share frames!\n");
+                       pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
+                                       xen_hvm_resume_frames);
                        return -ENOMEM;
                }
        }
index 8e74590..569a13b 100644 (file)
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
 {
        struct page **pages = vma->vm_private_data;
        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       int rc;
 
        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;
 
-       xen_unmap_domain_mfn_range(vma, numpgs, pages);
-       free_xenballooned_pages(numpgs, pages);
+       rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+       if (rc == 0)
+               free_xenballooned_pages(numpgs, pages);
+       else
+               pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
+                       numpgs, rc);
        kfree(pages);
 }
 
index 6efb7f6..062a5f6 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
        int i;
 
        for (i = 0; i < ctx->nr_pages; i++) {
+               struct page *page;
                pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
                                page_count(ctx->ring_pages[i]));
-               put_page(ctx->ring_pages[i]);
+               page = ctx->ring_pages[i];
+               if (!page)
+                       continue;
+               ctx->ring_pages[i] = NULL;
+               put_page(page);
        }
 
        put_aio_ring_file(ctx);
@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
        unsigned long flags;
        int rc;
 
+       rc = 0;
+
+       /* Make sure the old page hasn't already been changed */
+       spin_lock(&mapping->private_lock);
+       ctx = mapping->private_data;
+       if (ctx) {
+               pgoff_t idx;
+               spin_lock_irqsave(&ctx->completion_lock, flags);
+               idx = old->index;
+               if (idx < (pgoff_t)ctx->nr_pages) {
+                       if (ctx->ring_pages[idx] != old)
+                               rc = -EAGAIN;
+               } else
+                       rc = -EINVAL;
+               spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       } else
+               rc = -EINVAL;
+       spin_unlock(&mapping->private_lock);
+
+       if (rc != 0)
+               return rc;
+
        /* Writeback must be complete */
        BUG_ON(PageWriteback(old));
-       put_page(old);
+       get_page(new);
 
-       rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
+       rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
        if (rc != MIGRATEPAGE_SUCCESS) {
-               get_page(old);
+               put_page(new);
                return rc;
        }
 
-       get_page(new);
-
        /* We can potentially race against kioctx teardown here.  Use the
         * address_space's private data lock to protect the mapping's
         * private_data.
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                spin_lock_irqsave(&ctx->completion_lock, flags);
                migrate_page_copy(new, old);
                idx = old->index;
-               if (idx < (pgoff_t)ctx->nr_pages)
-                       ctx->ring_pages[idx] = new;
+               if (idx < (pgoff_t)ctx->nr_pages) {
+                       /* And only do the move if things haven't changed */
+                       if (ctx->ring_pages[idx] == old)
+                               ctx->ring_pages[idx] = new;
+                       else
+                               rc = -EAGAIN;
+               } else
+                       rc = -EINVAL;
                spin_unlock_irqrestore(&ctx->completion_lock, flags);
        } else
                rc = -EBUSY;
        spin_unlock(&mapping->private_lock);
 
+       if (rc == MIGRATEPAGE_SUCCESS)
+               put_page(old);
+       else
+               put_page(new);
+
        return rc;
 }
 #endif
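The reordering in aio_migratepage() pins the new page before the mapping is moved and drops exactly one of the two pages once the outcome is known, instead of dropping the old page up front and re-taking it on failure. The reference-counting shape in isolation (page_ref, ref_get(), ref_put() and try_move_mapping() are hypothetical stand-ins):

struct page_ref;

extern void ref_get(struct page_ref *p);
extern void ref_put(struct page_ref *p);
extern int try_move_mapping(struct page_ref *old, struct page_ref *new);

static int migrate_sketch(struct page_ref *old, struct page_ref *new)
{
	int rc;

	ref_get(new);		/* pin the replacement before any state moves */

	rc = try_move_mapping(old, new);
	if (rc != 0) {
		ref_put(new);	/* move lost: unwind only our own pin */
		return rc;
	}

	/* ... copy contents and swap ring_pages[idx] under the locks ... */

	ref_put(old);		/* move won: the old page is now unused */
	return 0;
}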
@@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
        struct aio_ring *ring;
        unsigned nr_events = ctx->max_reqs;
        struct mm_struct *mm = current->mm;
-       unsigned long size, populate;
+       unsigned long size, unused;
        int nr_pages;
        int i;
        struct file *file;
@@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
                return -EAGAIN;
        }
 
+       ctx->aio_ring_file = file;
+       nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
+                       / sizeof(struct io_event);
+
+       ctx->ring_pages = ctx->internal_pages;
+       if (nr_pages > AIO_RING_PAGES) {
+               ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+                                         GFP_KERNEL);
+               if (!ctx->ring_pages) {
+                       put_aio_ring_file(ctx);
+                       return -ENOMEM;
+               }
+       }
+
        for (i = 0; i < nr_pages; i++) {
                struct page *page;
                page = find_or_create_page(file->f_inode->i_mapping,
@@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
                SetPageUptodate(page);
                SetPageDirty(page);
                unlock_page(page);
+
+               ctx->ring_pages[i] = page;
        }
-       ctx->aio_ring_file = file;
-       nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
-                       / sizeof(struct io_event);
+       ctx->nr_pages = i;
 
-       ctx->ring_pages = ctx->internal_pages;
-       if (nr_pages > AIO_RING_PAGES) {
-               ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
-                                         GFP_KERNEL);
-               if (!ctx->ring_pages) {
-                       put_aio_ring_file(ctx);
-                       return -ENOMEM;
-               }
+       if (unlikely(i != nr_pages)) {
+               aio_free_ring(ctx);
+               return -EAGAIN;
        }
 
        ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
        down_write(&mm->mmap_sem);
        ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
                                       PROT_READ | PROT_WRITE,
-                                      MAP_SHARED | MAP_POPULATE, 0, &populate);
+                                      MAP_SHARED, 0, &unused);
+       up_write(&mm->mmap_sem);
        if (IS_ERR((void *)ctx->mmap_base)) {
-               up_write(&mm->mmap_sem);
                ctx->mmap_size = 0;
                aio_free_ring(ctx);
                return -EAGAIN;
@@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
 
        pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
 
-       /* We must do this while still holding mmap_sem for write, as we
-        * need to be protected against userspace attempting to mremap()
-        * or munmap() the ring buffer.
-        */
-       ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
-                                      1, 0, ctx->ring_pages, NULL);
-
-       /* Dropping the reference here is safe as the page cache will hold
-        * onto the pages for us.  It is also required so that page migration
-        * can unmap the pages and get the right reference count.
-        */
-       for (i = 0; i < ctx->nr_pages; i++)
-               put_page(ctx->ring_pages[i]);
-
-       up_write(&mm->mmap_sem);
-
-       if (unlikely(ctx->nr_pages != nr_pages)) {
-               aio_free_ring(ctx);
-               return -EAGAIN;
-       }
-
        ctx->user_id = ctx->mmap_base;
        ctx->nr_events = nr_events; /* trusted copy */
 
@@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        aio_nr += ctx->max_reqs;
        spin_unlock(&aio_nr_lock);
 
-       percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
+       percpu_ref_get(&ctx->users);    /* io_setup() will drop this ref */
+       percpu_ref_get(&ctx->reqs);     /* free_ioctx_users() will drop this */
 
        err = ioctx_add_table(ctx, mm);
        if (err)
index 1e561c0..ec3ba43 100644 (file)
@@ -210,9 +210,13 @@ static int readpage_nounlock(struct file *filp, struct page *page)
        if (err < 0) {
                SetPageError(page);
                goto out;
-       } else if (err < PAGE_CACHE_SIZE) {
+       } else {
+               if (err < PAGE_CACHE_SIZE) {
                /* zero fill remainder of page */
-               zero_user_segment(page, err, PAGE_CACHE_SIZE);
+                       zero_user_segment(page, err, PAGE_CACHE_SIZE);
+               } else {
+                       flush_dcache_page(page);
+               }
        }
        SetPageUptodate(page);
 
index 9a8e396..278fd28 100644 (file)
@@ -978,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
        struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
-       int i = 0;
        int err = 0;
 
        dout("fill_trace %p is_dentry %d is_target %d\n", req,
@@ -1039,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                }
        }
 
+       if (rinfo->head->is_target) {
+               vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
+               vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
+
+               in = ceph_get_inode(sb, vino);
+               if (IS_ERR(in)) {
+                       err = PTR_ERR(in);
+                       goto done;
+               }
+               req->r_target_inode = in;
+
+               err = fill_inode(in, &rinfo->targeti, NULL,
+                               session, req->r_request_started,
+                               (le32_to_cpu(rinfo->head->result) == 0) ?
+                               req->r_fmode : -1,
+                               &req->r_caps_reservation);
+               if (err < 0) {
+                       pr_err("fill_inode badness %p %llx.%llx\n",
+                               in, ceph_vinop(in));
+                       goto done;
+               }
+       }
+
        /*
         * ignore null lease/binding on snapdir ENOENT, or else we
         * will have trouble splicing in the virtual snapdir later
@@ -1108,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                             ceph_dentry(req->r_old_dentry)->offset);
 
                        dn = req->r_old_dentry;  /* use old_dentry */
-                       in = dn->d_inode;
                }
 
                /* null dentry? */
@@ -1130,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                }
 
                /* attach proper inode */
-               ininfo = rinfo->targeti.in;
-               vino.ino = le64_to_cpu(ininfo->ino);
-               vino.snap = le64_to_cpu(ininfo->snapid);
-               in = dn->d_inode;
-               if (!in) {
-                       in = ceph_get_inode(sb, vino);
-                       if (IS_ERR(in)) {
-                               pr_err("fill_trace bad get_inode "
-                                      "%llx.%llx\n", vino.ino, vino.snap);
-                               err = PTR_ERR(in);
-                               d_drop(dn);
-                               goto done;
-                       }
+               if (!dn->d_inode) {
+                       ihold(in);
                        dn = splice_dentry(dn, in, &have_lease, true);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
                        }
                        req->r_dentry = dn;  /* may have spliced */
-                       ihold(in);
-               } else if (ceph_ino(in) == vino.ino &&
-                          ceph_snap(in) == vino.snap) {
-                       ihold(in);
-               } else {
+               } else if (dn->d_inode && dn->d_inode != in) {
                        dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
-                            dn, in, ceph_ino(in), ceph_snap(in),
-                            vino.ino, vino.snap);
+                            dn, dn->d_inode, ceph_vinop(dn->d_inode),
+                            ceph_vinop(in));
                        have_lease = false;
-                       in = NULL;
                }
 
                if (have_lease)
                        update_dentry_lease(dn, rinfo->dlease, session,
                                            req->r_request_started);
                dout(" final dn %p\n", dn);
-               i++;
-       } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
-                  req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
+       } else if (!req->r_aborted &&
+                  (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+                   req->r_op == CEPH_MDS_OP_MKSNAP)) {
                struct dentry *dn = req->r_dentry;
 
                /* fill out a snapdir LOOKUPSNAP dentry */
@@ -1177,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
-               in = ceph_get_inode(sb, vino);
-               if (IS_ERR(in)) {
-                       pr_err("fill_inode get_inode badness %llx.%llx\n",
-                              vino.ino, vino.snap);
-                       err = PTR_ERR(in);
-                       d_delete(dn);
-                       goto done;
-               }
                dout(" linking snapped dir %p to dn %p\n", in, dn);
+               ihold(in);
                dn = splice_dentry(dn, in, NULL, true);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
                }
                req->r_dentry = dn;  /* may have spliced */
-               ihold(in);
-               rinfo->head->is_dentry = 1;  /* fool notrace handlers */
-       }
-
-       if (rinfo->head->is_target) {
-               vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
-               vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
-
-               if (in == NULL || ceph_ino(in) != vino.ino ||
-                   ceph_snap(in) != vino.snap) {
-                       in = ceph_get_inode(sb, vino);
-                       if (IS_ERR(in)) {
-                               err = PTR_ERR(in);
-                               goto done;
-                       }
-               }
-               req->r_target_inode = in;
-
-               err = fill_inode(in,
-                                &rinfo->targeti, NULL,
-                                session, req->r_request_started,
-                                (le32_to_cpu(rinfo->head->result) == 0) ?
-                                req->r_fmode : -1,
-                                &req->r_caps_reservation);
-               if (err < 0) {
-                       pr_err("fill_inode badness %p %llx.%llx\n",
-                              in, ceph_vinop(in));
-                       goto done;
-               }
        }
-
 done:
        dout("fill_trace done err=%d\n", err);
        return err;
@@ -1272,7 +1240,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
        struct qstr dname;
        struct dentry *dn;
        struct inode *in;
-       int err = 0, i;
+       int err = 0, ret, i;
        struct inode *snapdir = NULL;
        struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
        struct ceph_dentry_info *di;
@@ -1305,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                        ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
        }
 
+       /* FIXME: release caps/leases if error occurs */
        for (i = 0; i < rinfo->dir_nr; i++) {
                struct ceph_vino vino;
 
@@ -1329,9 +1298,10 @@ retry_lookup:
                                err = -ENOMEM;
                                goto out;
                        }
-                       err = ceph_init_dentry(dn);
-                       if (err < 0) {
+                       ret = ceph_init_dentry(dn);
+                       if (ret < 0) {
                                dput(dn);
+                               err = ret;
                                goto out;
                        }
                } else if (dn->d_inode &&
@@ -1351,9 +1321,6 @@ retry_lookup:
                        spin_unlock(&parent->d_lock);
                }
 
-               di = dn->d_fsdata;
-               di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
-
                /* inode */
                if (dn->d_inode) {
                        in = dn->d_inode;
@@ -1366,26 +1333,39 @@ retry_lookup:
                                err = PTR_ERR(in);
                                goto out;
                        }
-                       dn = splice_dentry(dn, in, NULL, false);
-                       if (IS_ERR(dn))
-                               dn = NULL;
                }
 
                if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
                               req->r_request_started, -1,
                               &req->r_caps_reservation) < 0) {
                        pr_err("fill_inode badness on %p\n", in);
+                       if (!dn->d_inode)
+                               iput(in);
+                       d_drop(dn);
                        goto next_item;
                }
-               if (dn)
-                       update_dentry_lease(dn, rinfo->dir_dlease[i],
-                                           req->r_session,
-                                           req->r_request_started);
+
+               if (!dn->d_inode) {
+                       dn = splice_dentry(dn, in, NULL, false);
+                       if (IS_ERR(dn)) {
+                               err = PTR_ERR(dn);
+                               dn = NULL;
+                               goto next_item;
+                       }
+               }
+
+               di = dn->d_fsdata;
+               di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
+
+               update_dentry_lease(dn, rinfo->dir_dlease[i],
+                                   req->r_session,
+                                   req->r_request_started);
 next_item:
                if (dn)
                        dput(dn);
        }
-       req->r_did_prepopulate = true;
+       if (err == 0)
+               req->r_did_prepopulate = true;
 
 out:
        if (snapdir) {
index aa33976..2c29db6 100644 (file)
@@ -477,9 +477,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
                        const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
 extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
 extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
-extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
-               const unsigned char *path,
-               struct cifs_sb_info *cifs_sb, unsigned int xid);
+extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
+                             struct cifs_sb_info *cifs_sb,
+                             struct cifs_fattr *fattr,
+                             const unsigned char *path);
 extern int mdfour(unsigned char *, unsigned char *, int);
 extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
                        const struct nls_table *codepage);
index 124aa02..d707edb 100644 (file)
@@ -4010,7 +4010,7 @@ QFileInfoRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc) {
-               cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+               cifs_dbg(FYI, "Send error in QFileInfo = %d\n", rc);
        } else {                /* decode response */
                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
@@ -4179,7 +4179,7 @@ UnixQFileInfoRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc) {
-               cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+               cifs_dbg(FYI, "Send error in UnixQFileInfo = %d\n", rc);
        } else {                /* decode response */
                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
@@ -4263,7 +4263,7 @@ UnixQPathInfoRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc) {
-               cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+               cifs_dbg(FYI, "Send error in UnixQPathInfo = %d\n", rc);
        } else {                /* decode response */
                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
index 11ff5f1..a514e0a 100644 (file)
@@ -193,7 +193,7 @@ check_name(struct dentry *direntry)
 static int
 cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
               struct tcon_link *tlink, unsigned oflags, umode_t mode,
-              __u32 *oplock, struct cifs_fid *fid, int *created)
+              __u32 *oplock, struct cifs_fid *fid)
 {
        int rc = -ENOENT;
        int create_options = CREATE_NOT_DIR;
@@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
                                .device = 0,
                };
 
-               *created |= FILE_CREATED;
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
                        args.uid = current_fsuid();
                        if (inode->i_mode & S_ISGID)
@@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
        cifs_add_pending_open(&fid, tlink, &open);
 
        rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
-                           &oplock, &fid, opened);
+                           &oplock, &fid);
 
        if (rc) {
                cifs_del_pending_open(&open);
                goto out;
        }
 
+       if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+               *opened |= FILE_CREATED;
+
        rc = finish_open(file, direntry, generic_file_open, opened);
        if (rc) {
                if (server->ops->close)
@@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
        struct TCP_Server_Info *server;
        struct cifs_fid fid;
        __u32 oplock;
-       int created = FILE_CREATED;
 
        cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n",
                 inode, direntry->d_name.name, direntry);
@@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
                server->ops->new_lease_key(&fid);
 
        rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
-                           &oplock, &fid, &created);
+                           &oplock, &fid);
        if (!rc && server->ops->close)
                server->ops->close(xid, tcon, &fid);
 
index 36f9ebb..49719b8 100644 (file)
@@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 
        /* check for Minshall+French symlinks */
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
-               int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+               int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
+                                              full_path);
                if (tmprc)
                        cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
        }
@@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
 
        /* check for Minshall+French symlinks */
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
-               tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+               tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
+                                          full_path);
                if (tmprc)
                        cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
        }
index cc02347..92aee08 100644 (file)
@@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
 
 
 int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
-                  const unsigned char *path,
-                  struct cifs_sb_info *cifs_sb, unsigned int xid)
+CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
+                  struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+                  const unsigned char *path)
 {
-       int rc = 0;
+       int rc;
        u8 *buf = NULL;
        unsigned int link_len = 0;
        unsigned int bytes_read = 0;
-       struct cifs_tcon *ptcon;
 
        if (!CIFSCouldBeMFSymlink(fattr))
                /* it's not a symlink */
                return 0;
 
        buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
-       if (!buf) {
-               rc = -ENOMEM;
-               goto out;
-       }
+       if (!buf)
+               return -ENOMEM;
 
-       ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
-       if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
-               rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
-                                                &bytes_read, cifs_sb, xid);
+       if (tcon->ses->server->ops->query_mf_symlink)
+               rc = tcon->ses->server->ops->query_mf_symlink(path, buf,
+                                               &bytes_read, cifs_sb, xid);
        else
-               goto out;
+               rc = -ENOSYS;
 
-       if (rc != 0)
+       if (rc)
                goto out;
 
        if (bytes_read == 0) /* not a symlink */
index 2885349..20d6697 100644 (file)
@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
                                sb->s_blocksize - offset : towrite;
 
                tmp_bh.b_state = 0;
+               tmp_bh.b_size = sb->s_blocksize;
                err = ext2_get_block(inode, blk, &tmp_bh, 1);
                if (err < 0)
                        goto out;
index e618503..ece5556 100644 (file)
@@ -268,6 +268,16 @@ struct ext4_io_submit {
 /* Translate # of blks to # of clusters */
 #define EXT4_NUM_B2C(sbi, blks)        (((blks) + (sbi)->s_cluster_ratio - 1) >> \
                                 (sbi)->s_cluster_bits)
+/* Mask out the low bits to get the starting block of the cluster */
+#define EXT4_PBLK_CMASK(s, pblk) ((pblk) &                             \
+                                 ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_CMASK(s, lblk) ((lblk) &                             \
+                                 ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+/* Get the cluster offset */
+#define EXT4_PBLK_COFF(s, pblk) ((pblk) &                              \
+                                ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_COFF(s, lblk) ((lblk) &                              \
+                                ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
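Since s_cluster_ratio is a power of two, CMASK rounds a block number down to the start of its cluster and COFF extracts the offset within it; for a ratio of 16, block 37 maps to cluster start 32 and offset 5. A tiny self-checking example of the arithmetic (simplified types, not the ext4 macros themselves):

#include <assert.h>
#include <stdint.h>

typedef uint64_t fsblk_t;

#define CMASK(ratio, blk) ((blk) & ~((fsblk_t)(ratio) - 1))	/* cluster start */
#define COFF(ratio, blk)  ((blk) &  ((fsblk_t)(ratio) - 1))	/* offset inside */

int main(void)
{
	/* 16 blocks per cluster: block 37 = cluster start 32 + offset 5 */
	assert(CMASK(16, 37) == 32);
	assert(COFF(16, 37) == 5);
	/* start + offset always reconstructs the original block number */
	assert(CMASK(16, 37) + COFF(16, 37) == 37);
	return 0;
}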
 
 /*
  * Structure of a blocks group descriptor
index 17ac112..3fe29de 100644 (file)
@@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
                if (WARN_ON_ONCE(err)) {
                        ext4_journal_abort_handle(where, line, __func__, bh,
                                                  handle, err);
+                       ext4_error_inode(inode, where, line,
+                                        bh->b_blocknr,
+                                        "journal_dirty_metadata failed: "
+                                        "handle type %u started at line %u, "
+                                        "credits %u/%u, errcode %d",
+                                        handle->h_type,
+                                        handle->h_line_no,
+                                        handle->h_requested_credits,
+                                        handle->h_buffer_credits, err);
                }
        } else {
                if (inode)
index 35f65cf..4410cc3 100644 (file)
@@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
 {
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);
+       ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
+       ext4_lblk_t last = lblock + len - 1;
 
-       if (len == 0)
+       if (len == 0 || lblock > last)
                return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
 }
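The new validity test also rejects extents whose logical range wraps the 32-bit block space: when lblock + len - 1 overflows, last lands below lblock. A small demonstration with plain uint32_t arithmetic:

#include <assert.h>
#include <stdint.h>

static int range_valid(uint32_t lblock, uint32_t len)
{
	uint32_t last = lblock + len - 1;	/* may wrap modulo 2^32 */

	/* A zero length or a wrap past the end of the logical block
	 * space leaves 'last' below 'lblock'. */
	return !(len == 0 || lblock > last);
}

int main(void)
{
	assert(range_valid(100, 10));		  /* ordinary extent */
	assert(!range_valid(100, 0));		  /* empty extent rejected */
	assert(!range_valid(0xFFFFFFF0u, 0x20));  /* wraps: rejected */
	return 0;
}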
@@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
        if (depth == 0) {
                /* leaf entries */
                struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
+               struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+               ext4_fsblk_t pblock = 0;
+               ext4_lblk_t lblock = 0;
+               ext4_lblk_t prev = 0;
+               int len = 0;
                while (entries) {
                        if (!ext4_valid_extent(inode, ext))
                                return 0;
+
+                       /* Check for overlapping extents */
+                       lblock = le32_to_cpu(ext->ee_block);
+                       len = ext4_ext_get_actual_len(ext);
+                       if ((lblock <= prev) && prev) {
+                               pblock = ext4_ext_pblock(ext);
+                               es->s_last_error_block = cpu_to_le64(pblock);
+                               return 0;
+                       }
                        ext++;
                        entries--;
+                       prev = lblock + len - 1;
                }
        } else {
                struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
@@ -1834,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
        depth = ext_depth(inode);
        if (!path[depth].p_ext)
                goto out;
-       b2 = le32_to_cpu(path[depth].p_ext->ee_block);
-       b2 &= ~(sbi->s_cluster_ratio - 1);
+       b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
 
        /*
         * get the next allocated block if the extent in the path
@@ -1845,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
                b2 = ext4_ext_next_allocated_block(path);
                if (b2 == EXT_MAX_BLOCKS)
                        goto out;
-               b2 &= ~(sbi->s_cluster_ratio - 1);
+               b2 = EXT4_LBLK_CMASK(sbi, b2);
        }
 
        /* check for wrap through zero on extent logical start block*/
@@ -2504,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
                 * extent, we have to mark the cluster as used (store negative
                 * cluster number in partial_cluster).
                 */
-               unaligned = pblk & (sbi->s_cluster_ratio - 1);
+               unaligned = EXT4_PBLK_COFF(sbi, pblk);
                if (unaligned && (ee_len == num) &&
                    (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
                        *partial_cluster = EXT4_B2C(sbi, pblk);
@@ -2598,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                         * accidentally freeing it later on
                         */
                        pblk = ext4_ext_pblock(ex);
-                       if (pblk & (sbi->s_cluster_ratio - 1))
+                       if (EXT4_PBLK_COFF(sbi, pblk))
                                *partial_cluster =
                                        -((long long)EXT4_B2C(sbi, pblk));
                        ex--;
@@ -3753,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        ext4_lblk_t lblk_start, lblk_end;
-       lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
+       lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
        lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
 
        return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
@@ -3812,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
        trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
 
        /* Check towards left side */
-       c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
+       c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
        if (c_offset) {
-               lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
+               lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
                lblk_to = lblk_from + c_offset - 1;
 
                if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
@@ -3822,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
        }
 
        /* Now check towards right. */
-       c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
+       c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
        if (allocated_clusters && c_offset) {
                lblk_from = lblk_start + num_blks;
                lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
@@ -4030,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
                                     struct ext4_ext_path *path)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+       ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
        ext4_lblk_t ex_cluster_start, ex_cluster_end;
        ext4_lblk_t rr_cluster_start;
        ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
@@ -4048,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
            (rr_cluster_start == ex_cluster_start)) {
                if (rr_cluster_start == ex_cluster_end)
                        ee_start += ee_len - 1;
-               map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
-                       c_offset;
+               map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
                map->m_len = min(map->m_len,
                                 (unsigned) sbi->s_cluster_ratio - c_offset);
                /*
@@ -4203,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
         */
        map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
        newex.ee_block = cpu_to_le32(map->m_lblk);
-       cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+       cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
 
        /*
         * If we are doing bigalloc, check to see if the extent returned
@@ -4271,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
         * needed so that future calls to get_implied_cluster_alloc()
         * work correctly.
         */
-       offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
+       offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
        ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
        ar.goal -= offset;
        ar.logical -= offset;
index 0757634..61d49ff 100644 (file)
@@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file,
  */
 static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
 {
-       int retries = 0;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int md_needed;
@@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
         * in order to allocate nrblocks
         * worse case is one extent per block
         */
-repeat:
        spin_lock(&ei->i_block_reservation_lock);
        /*
         * ext4_calc_metadata_amount() has side effects, which we have
@@ -1238,10 +1236,6 @@ repeat:
                ei->i_da_metadata_calc_len = save_len;
                ei->i_da_metadata_calc_last_lblock = save_last_lblock;
                spin_unlock(&ei->i_block_reservation_lock);
-               if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
-                       cond_resched();
-                       goto repeat;
-               }
                return -ENOSPC;
        }
        ei->i_reserved_meta_blocks += md_needed;
@@ -1255,7 +1249,6 @@ repeat:
  */
 static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 {
-       int retries = 0;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int md_needed;
@@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
         * in order to allocate nrblocks
         * worse case is one extent per block
         */
-repeat:
        spin_lock(&ei->i_block_reservation_lock);
        /*
         * ext4_calc_metadata_amount() has side effects, which we have
@@ -1297,10 +1289,6 @@ repeat:
                ei->i_da_metadata_calc_len = save_len;
                ei->i_da_metadata_calc_last_lblock = save_last_lblock;
                spin_unlock(&ei->i_block_reservation_lock);
-               if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
-                       cond_resched();
-                       goto repeat;
-               }
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
                return -ENOSPC;
        }
index 4d113ef..04a5c75 100644 (file)
@@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
 {
        struct ext4_prealloc_space *pa;
        pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
+
+       BUG_ON(atomic_read(&pa->pa_count));
+       BUG_ON(pa->pa_deleted == 0);
        kmem_cache_free(ext4_pspace_cachep, pa);
 }
 
@@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
        ext4_group_t grp;
        ext4_fsblk_t grp_blk;
 
-       if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
-               return;
-
        /* in this short window concurrent discard can set pa_deleted */
        spin_lock(&pa->pa_lock);
+       if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
+               spin_unlock(&pa->pa_lock);
+               return;
+       }
+
        if (pa->pa_deleted == 1) {
                spin_unlock(&pa->pa_lock);
                return;
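Moving atomic_dec_and_test() under pa_lock closes the window in which a concurrent group discard could see the count hit zero and free the preallocation while ext4_mb_put_pa() was still about to inspect it. A userspace sketch of dec-and-test inside the lock (pthread mutex in place of the spinlock, immediate free in place of call_rcu()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct prealloc {
	atomic_int refcount;
	pthread_mutex_t lock;
	int deleted;
};

static void put_prealloc(struct prealloc *pa)
{
	pthread_mutex_lock(&pa->lock);
	if (atomic_fetch_sub(&pa->refcount, 1) != 1) {
		pthread_mutex_unlock(&pa->lock);
		return;		/* other holders remain */
	}
	if (pa->deleted) {	/* a concurrent discard got here first */
		pthread_mutex_unlock(&pa->lock);
		return;
	}
	pa->deleted = 1;	/* claim the free while still locked */
	pthread_mutex_unlock(&pa->lock);
	free(pa);		/* ext4 defers this via call_rcu() */
}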
@@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
        ext4_get_group_no_and_offset(sb, goal, &group, &block);
 
        /* set up allocation goals */
-       ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
+       ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
        ac->ac_status = AC_STATUS_CONTINUE;
        ac->ac_sb = sb;
        ac->ac_inode = ar->inode;
@@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
         * blocks at the beginning or the end unless we are explicitly
         * requested to avoid doing so.
         */
-       overflow = block & (sbi->s_cluster_ratio - 1);
+       overflow = EXT4_PBLK_COFF(sbi, block);
        if (overflow) {
                if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
                        overflow = sbi->s_cluster_ratio - overflow;
@@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
                        count += overflow;
                }
        }
-       overflow = count & (sbi->s_cluster_ratio - 1);
+       overflow = EXT4_LBLK_COFF(sbi, count);
        if (overflow) {
                if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
                        if (count > overflow)
index c977f4e..1f7784d 100644 (file)
@@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb)
        }
 
        ext4_es_unregister_shrinker(sbi);
-       del_timer(&sbi->s_err_report);
+       del_timer_sync(&sbi->s_err_report);
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
        ext4_ext_release(sb);
@@ -3316,10 +3316,18 @@ int ext4_calculate_overhead(struct super_block *sb)
 }
 
 
-static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
+static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
 {
        ext4_fsblk_t resv_clusters;
 
+       /*
+        * There's no need to reserve anything when we aren't using extents.
+        * The space estimates are exact, there are no unwritten extents,
+        * hole punching doesn't need new metadata... This is needed especially
+        * to keep ext2/3 backward compatibility.
+        */
+       if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
+               return 0;
        /*
         * By default we reserve 2% or 4096 clusters, whichever is smaller.
         * This should cover the situations where we can not afford to run
@@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
         * allocation would require 1, or 2 blocks, higher numbers are
         * very rare.
         */
-       resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+       resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
+                       EXT4_SB(sb)->s_cluster_bits;
 
        do_div(resv_clusters, 50);
        resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
@@ -4071,10 +4080,10 @@ no_journal:
                         "available");
        }
 
-       err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
+       err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
-                        "reserved pool", ext4_calculate_resv_clusters(sbi));
+                        "reserved pool", ext4_calculate_resv_clusters(sb));
                goto failed_mount4a;
        }
 
@@ -4184,7 +4193,7 @@ failed_mount_wq:
        }
 failed_mount3:
        ext4_es_unregister_shrinker(sbi);
-       del_timer(&sbi->s_err_report);
+       del_timer_sync(&sbi->s_err_report);
        if (sbi->s_flex_groups)
                ext4_kvfree(sbi->s_flex_groups);
        percpu_counter_destroy(&sbi->s_freeclusters_counter);
index 5203264..5fa344a 100644 (file)
@@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
        read_lock(&journal->j_state_lock);
 #ifdef CONFIG_JBD2_DEBUG
        if (!tid_geq(journal->j_commit_request, tid)) {
-               printk(KERN_EMERG
+               printk(KERN_ERR
                       "%s: error: j_commit_request=%d, tid=%d\n",
                       __func__, journal->j_commit_request, tid);
        }
@@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
        }
        read_unlock(&journal->j_state_lock);
 
-       if (unlikely(is_journal_aborted(journal))) {
-               printk(KERN_EMERG "journal commit I/O error\n");
+       if (unlikely(is_journal_aborted(journal)))
                err = -EIO;
-       }
        return err;
 }
 
@@ -1527,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal)
        if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
            JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
                /* Can't have checksum v1 and v2 on at the same time! */
-               printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+               printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
                       "at the same time!\n");
                goto out;
        }
 
        if (!jbd2_verify_csum_type(journal, sb)) {
-               printk(KERN_ERR "JBD: Unknown checksum type\n");
+               printk(KERN_ERR "JBD2: Unknown checksum type\n");
                goto out;
        }
 
@@ -1541,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal)
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
                journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
                if (IS_ERR(journal->j_chksum_driver)) {
-                       printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+                       printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
                        err = PTR_ERR(journal->j_chksum_driver);
                        journal->j_chksum_driver = NULL;
                        goto out;
@@ -1550,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal)
 
        /* Check superblock checksum */
        if (!jbd2_superblock_csum_verify(journal, sb)) {
-               printk(KERN_ERR "JBD: journal checksum error\n");
+               printk(KERN_ERR "JBD2: journal checksum error\n");
                goto out;
        }
 
@@ -1836,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
                        journal->j_chksum_driver = crypto_alloc_shash("crc32c",
                                                                      0, 0);
                        if (IS_ERR(journal->j_chksum_driver)) {
-                               printk(KERN_ERR "JBD: Cannot load crc32c "
+                               printk(KERN_ERR "JBD2: Cannot load crc32c "
                                       "driver.\n");
                                journal->j_chksum_driver = NULL;
                                return 0;
@@ -2645,7 +2643,7 @@ static void __exit journal_exit(void)
 #ifdef CONFIG_JBD2_DEBUG
        int n = atomic_read(&nr_journal_heads);
        if (n)
-               printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n);
+               printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n);
 #endif
        jbd2_remove_jbd_stats_proc_entry();
        jbd2_journal_destroy_caches();
index 3929c50..3b6bb19 100644 (file)
@@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal,
                                                be32_to_cpu(tmp->h_sequence))) {
                                                brelse(obh);
                                                success = -EIO;
-                                               printk(KERN_ERR "JBD: Invalid "
+                                               printk(KERN_ERR "JBD2: Invalid "
                                                       "checksum recovering "
                                                       "block %llu in log\n",
                                                       blocknr);
index 7aa9a32..8360674 100644 (file)
@@ -932,7 +932,7 @@ repeat:
                                        jbd2_alloc(jh2bh(jh)->b_size,
                                                         GFP_NOFS);
                                if (!frozen_buffer) {
-                                       printk(KERN_EMERG
+                                       printk(KERN_ERR
                                               "%s: OOM for frozen_buffer\n",
                                               __func__);
                                        JBUFFER_TRACE(jh, "oom!");
@@ -1166,7 +1166,7 @@ repeat:
        if (!jh->b_committed_data) {
                committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
-                       printk(KERN_EMERG "%s: No memory for committed data\n",
+                       printk(KERN_ERR "%s: No memory for committed data\n",
                                __func__);
                        err = -ENOMEM;
                        goto out;
@@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                 * once a transaction -bzzz
                 */
                jh->b_modified = 1;
-               J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
+               if (handle->h_buffer_credits <= 0) {
+                       ret = -ENOSPC;
+                       goto out_unlock_bh;
+               }
                handle->h_buffer_credits--;
        }
 
@@ -1305,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                JBUFFER_TRACE(jh, "fastpath");
                if (unlikely(jh->b_transaction !=
                             journal->j_running_transaction)) {
-                       printk(KERN_EMERG "JBD: %s: "
+                       printk(KERN_ERR "JBD2: %s: "
                               "jh->b_transaction (%llu, %p, %u) != "
                               "journal->j_running_transaction (%p, %u)",
                               journal->j_devname,
@@ -1332,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                JBUFFER_TRACE(jh, "already on other transaction");
                if (unlikely(jh->b_transaction !=
                             journal->j_committing_transaction)) {
-                       printk(KERN_EMERG "JBD: %s: "
+                       printk(KERN_ERR "JBD2: %s: "
                               "jh->b_transaction (%llu, %p, %u) != "
                               "journal->j_committing_transaction (%p, %u)",
                               journal->j_devname,
@@ -1345,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                        ret = -EINVAL;
                }
                if (unlikely(jh->b_next_transaction != transaction)) {
-                       printk(KERN_EMERG "JBD: %s: "
+                       printk(KERN_ERR "JBD2: %s: "
                               "jh->b_next_transaction (%llu, %p, %u) != "
                               "transaction (%p, %u)",
                               journal->j_devname,
@@ -1373,7 +1376,6 @@ out_unlock_bh:
        jbd2_journal_put_journal_head(jh);
 out:
        JBUFFER_TRACE(jh, "exit");
-       WARN_ON(ret);   /* All errors are bugs, so dump the stack */
        return ret;
 }
 
index b8e93a4..78c3c20 100644 (file)
@@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi)
                pstore_get_records(0);
 
        kmsg_dump_register(&pstore_dumper);
-       pstore_register_console();
-       pstore_register_ftrace();
+
+       if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
+               pstore_register_console();
+               pstore_register_ftrace();
+       }
 
        if (pstore_update_ms >= 0) {
                pstore_timer.expires = jiffies +
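
For reference, a hedged sketch of how a backend that cannot safely be called from console or ftrace context would opt out via the new flag. The backend name and the elided callbacks are hypothetical; pstore_register() and the PSTORE_FLAGS_FRAGILE bit are as defined in the include/linux/pstore.h hunk later in this series:

static struct pstore_info example_psi = {
	.owner	= THIS_MODULE,
	.name	= "example",
	.flags	= PSTORE_FLAGS_FRAGILE,	/* skip console/ftrace front-ends */
	/* .open, .close, .read, .write, .erase callbacks elided */
};

static int __init example_pstore_init(void)
{
	return pstore_register(&example_psi);
}
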
index b94f936..35e7d08 100644 (file)
@@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
        struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
        struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
        struct sysfs_open_file *of;
-       bool has_read, has_write, has_mmap;
+       bool has_read, has_write;
        int error = -EACCES;
 
        /* need attr_sd for attr and ops, its parent for kobj */
@@ -621,7 +621,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 
                has_read = battr->read || battr->mmap;
                has_write = battr->write || battr->mmap;
-               has_mmap = battr->mmap;
        } else {
                const struct sysfs_ops *ops = sysfs_file_ops(attr_sd);
 
@@ -633,7 +632,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 
                has_read = ops->show;
                has_write = ops->store;
-               has_mmap = false;
        }
 
        /* check perms and supported operations */
@@ -661,9 +659,9 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
         * open file has a separate mutex, it's okay as long as those don't
         * happen on the same file.  At this point, we can't easily give
         * each file a separate locking class.  Let's differentiate on
-        * whether the file has mmap or not for now.
+        * whether the file is bin or not for now.
         */
-       if (has_mmap)
+       if (sysfs_is_bin(attr_sd))
                mutex_init(&of->mutex);
        else
                mutex_init(&of->mutex);
index 3ef11b2..3b2c14b 100644 (file)
@@ -1635,7 +1635,7 @@ xfs_bmap_last_extent(
  * blocks at the end of the file which do not start at the previous data block,
  * we will try to align the new blocks at stripe unit boundaries.
  *
- * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be
+ * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
  * at, or past the EOF.
  */
 STATIC int
@@ -1650,9 +1650,14 @@ xfs_bmap_isaeof(
        bma->aeof = 0;
        error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
                                     &is_empty);
-       if (error || is_empty)
+       if (error)
                return error;
 
+       if (is_empty) {
+               bma->aeof = 1;
+               return 0;
+       }
+
        /*
         * Check if we are allocation or past the last extent, or at least into
         * the last delayed allocated extent.
@@ -3643,10 +3648,19 @@ xfs_bmap_btalloc(
        int             isaligned;
        int             tryagain;
        int             error;
+       int             stripe_align;
 
        ASSERT(ap->length);
 
        mp = ap->ip->i_mount;
+
+       /* stripe alignment for allocation is determined by mount parameters */
+       stripe_align = 0;
+       if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+               stripe_align = mp->m_swidth;
+       else if (mp->m_dalign)
+               stripe_align = mp->m_dalign;
+
        align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
        if (unlikely(align)) {
                error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3655,6 +3669,8 @@ xfs_bmap_btalloc(
                ASSERT(!error);
                ASSERT(ap->length);
        }
+
+
        nullfb = *ap->firstblock == NULLFSBLOCK;
        fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
        if (nullfb) {
@@ -3730,7 +3746,7 @@ xfs_bmap_btalloc(
         */
        if (!ap->flist->xbf_low && ap->aeof) {
                if (!ap->offset) {
-                       args.alignment = mp->m_dalign;
+                       args.alignment = stripe_align;
                        atype = args.type;
                        isaligned = 1;
                        /*
@@ -3755,13 +3771,13 @@ xfs_bmap_btalloc(
                         * of minlen+alignment+slop doesn't go up
                         * between the calls.
                         */
-                       if (blen > mp->m_dalign && blen <= args.maxlen)
-                               nextminlen = blen - mp->m_dalign;
+                       if (blen > stripe_align && blen <= args.maxlen)
+                               nextminlen = blen - stripe_align;
                        else
                                nextminlen = args.minlen;
-                       if (nextminlen + mp->m_dalign > args.minlen + 1)
+                       if (nextminlen + stripe_align > args.minlen + 1)
                                args.minalignslop =
-                                       nextminlen + mp->m_dalign -
+                                       nextminlen + stripe_align -
                                        args.minlen - 1;
                        else
                                args.minalignslop = 0;
@@ -3783,7 +3799,7 @@ xfs_bmap_btalloc(
                 */
                args.type = atype;
                args.fsbno = ap->blkno;
-               args.alignment = mp->m_dalign;
+               args.alignment = stripe_align;
                args.minlen = nextminlen;
                args.minalignslop = 0;
                isaligned = 1;
index 5887e41..1394106 100644 (file)
@@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes(
                XFS_BUF_UNWRITE(bp);
                XFS_BUF_READ(bp);
                XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
-               xfsbdstrat(mp, bp);
+
+               if (XFS_FORCED_SHUTDOWN(mp)) {
+                       error = XFS_ERROR(EIO);
+                       break;
+               }
+               xfs_buf_iorequest(bp);
                error = xfs_buf_iowait(bp);
                if (error) {
                        xfs_buf_ioerror_alert(bp,
@@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes(
                XFS_BUF_UNDONE(bp);
                XFS_BUF_UNREAD(bp);
                XFS_BUF_WRITE(bp);
-               xfsbdstrat(mp, bp);
+
+               if (XFS_FORCED_SHUTDOWN(mp)) {
+                       error = XFS_ERROR(EIO);
+                       break;
+               }
+               xfs_buf_iorequest(bp);
                error = xfs_buf_iowait(bp);
                if (error) {
                        xfs_buf_ioerror_alert(bp,
index c7f0b77..afe7645 100644 (file)
@@ -698,7 +698,11 @@ xfs_buf_read_uncached(
        bp->b_flags |= XBF_READ;
        bp->b_ops = ops;
 
-       xfsbdstrat(target->bt_mount, bp);
+       if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
+               xfs_buf_relse(bp);
+               return NULL;
+       }
+       xfs_buf_iorequest(bp);
        xfs_buf_iowait(bp);
        return bp;
 }
@@ -1089,7 +1093,7 @@ xfs_bioerror(
  * This is meant for userdata errors; metadata bufs come with
  * iodone functions attached, so that we can track down errors.
  */
-STATIC int
+int
 xfs_bioerror_relse(
        struct xfs_buf  *bp)
 {
@@ -1152,7 +1156,7 @@ xfs_bwrite(
        ASSERT(xfs_buf_islocked(bp));
 
        bp->b_flags |= XBF_WRITE;
-       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
 
        xfs_bdstrat_cb(bp);
 
@@ -1164,25 +1168,6 @@ xfs_bwrite(
        return error;
 }
 
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem.  Typically user data goes thru this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
-       struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
-{
-       if (XFS_FORCED_SHUTDOWN(mp)) {
-               trace_xfs_bdstrat_shut(bp, _RET_IP_);
-               xfs_bioerror_relse(bp);
-               return;
-       }
-
-       xfs_buf_iorequest(bp);
-}
-
 STATIC void
 _xfs_buf_ioend(
        xfs_buf_t               *bp,
@@ -1516,6 +1501,12 @@ xfs_wait_buftarg(
                        struct xfs_buf *bp;
                        bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
                        list_del_init(&bp->b_lru);
+                       if (bp->b_flags & XBF_WRITE_FAIL) {
+                               xfs_alert(btp->bt_mount,
+"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
+"Please run xfs_repair to determine the extent of the problem.",
+                                       (long long)bp->b_bn);
+                       }
                        xfs_buf_rele(bp);
                }
                if (loop++ != 0)
@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit(
 
        blk_start_plug(&plug);
        list_for_each_entry_safe(bp, n, io_list, b_list) {
-               bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
+               bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
                bp->b_flags |= XBF_WRITE;
 
                if (!wait) {
index e656833..1cf21a4 100644 (file)
@@ -45,6 +45,7 @@ typedef enum {
 #define XBF_ASYNC       (1 << 4) /* initiator will not wait for completion */
 #define XBF_DONE        (1 << 5) /* all pages in the buffer uptodate */
 #define XBF_STALE       (1 << 6) /* buffer has been staled, do not find it */
+#define XBF_WRITE_FAIL  (1 << 24)/* async writes have failed on this buffer */
 
 /* I/O hints for the BIO layer */
 #define XBF_SYNCIO      (1 << 10)/* treat this buffer as synchronous I/O */
@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
        { XBF_ASYNC,            "ASYNC" }, \
        { XBF_DONE,             "DONE" }, \
        { XBF_STALE,            "STALE" }, \
+       { XBF_WRITE_FAIL,       "WRITE_FAIL" }, \
        { XBF_SYNCIO,           "SYNCIO" }, \
        { XBF_FUA,              "FUA" }, \
        { XBF_FLUSH,            "FLUSH" }, \
@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t;
        { _XBF_DELWRI_Q,        "DELWRI_Q" }, \
        { _XBF_COMPOUND,        "COMPOUND" }
 
+
 /*
  * Internal state flags.
  */
@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 
 /* Buffer Read and Write Routines */
 extern int xfs_bwrite(struct xfs_buf *bp);
-
-extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-
 extern void xfs_buf_ioend(xfs_buf_t *, int);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
 extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
 #define xfs_buf_zero(bp, off, len) \
            xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
 
+extern int xfs_bioerror_relse(struct xfs_buf *);
+
 static inline int xfs_buf_geterror(xfs_buf_t *bp)
 {
        return bp ? bp->b_error : ENOMEM;
@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void);
 
 #define XFS_BUF_ZEROFLAGS(bp) \
        ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
-                           XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
+                           XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
+                           XBF_WRITE_FAIL))
 
 void xfs_buf_stale(struct xfs_buf *bp);
 #define XFS_BUF_UNSTALE(bp)    ((bp)->b_flags &= ~XBF_STALE)
index a64f67b..2227b9b 100644 (file)
@@ -496,6 +496,14 @@ xfs_buf_item_unpin(
        }
 }
 
+/*
+ * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
+ * seconds so as to not spam logs too much on repeated detection of the same
+ * buffer being bad.
+ */
+
+DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
+
 STATIC uint
 xfs_buf_item_push(
        struct xfs_log_item     *lip,
@@ -524,6 +532,14 @@ xfs_buf_item_push(
 
        trace_xfs_buf_item_push(bip);
 
+       /* has a previous flush failed due to IO errors? */
+       if ((bp->b_flags & XBF_WRITE_FAIL) &&
+           ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
+               xfs_warn(bp->b_target->bt_mount,
+"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
+                        (long long)bp->b_bn);
+       }
+
        if (!xfs_buf_delwri_queue(bp, buffer_list))
                rval = XFS_ITEM_FLUSHING;
        xfs_buf_unlock(bp);
@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks(
 
                xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
 
-               if (!XFS_BUF_ISSTALE(bp)) {
-                       bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
+               if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
+                       bp->b_flags |= XBF_WRITE | XBF_ASYNC |
+                                      XBF_DONE | XBF_WRITE_FAIL;
                        xfs_buf_iorequest(bp);
                } else {
                        xfs_buf_relse(bp);
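
The DEFINE_RATELIMIT_STATE()/___ratelimit() pair used above is the stock kernel rate-limit helper; a minimal sketch of the pattern in isolation (the function name and message text are illustrative):

static DEFINE_RATELIMIT_STATE(example_rs, 30 * HZ, 10);

static void example_warn_failing_buffer(struct xfs_buf *bp)
{
	/* emit at most 10 warnings per 30s; excess callers return silently */
	if (___ratelimit(&example_rs, "XFS:"))
		xfs_warn(bp->b_target->bt_mount,
			 "retrying async write on buffer block 0x%llx",
			 (long long)bp->b_bn);
}
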
index 56369d4..48c7d18 100644 (file)
@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup(
  */
 int                                            /* error */
 xfs_dir2_node_removename(
-       xfs_da_args_t           *args)          /* operation arguments */
+       struct xfs_da_args      *args)          /* operation arguments */
 {
-       xfs_da_state_blk_t      *blk;           /* leaf block */
+       struct xfs_da_state_blk *blk;           /* leaf block */
        int                     error;          /* error return value */
        int                     rval;           /* operation return value */
-       xfs_da_state_t          *state;         /* btree cursor */
+       struct xfs_da_state     *state;         /* btree cursor */
 
        trace_xfs_dir2_node_removename(args);
 
@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename(
        state->mp = args->dp->i_mount;
        state->blocksize = state->mp->m_dirblksize;
        state->node_ents = state->mp->m_dir_node_ents;
-       /*
-        * Look up the entry we're deleting, set up the cursor.
-        */
+
+       /* Look up the entry we're deleting, set up the cursor. */
        error = xfs_da3_node_lookup_int(state, &rval);
        if (error)
-               rval = error;
-       /*
-        * Didn't find it, upper layer screwed up.
-        */
+               goto out_free;
+
+       /* Didn't find it, upper layer screwed up. */
        if (rval != EEXIST) {
-               xfs_da_state_free(state);
-               return rval;
+               error = rval;
+               goto out_free;
        }
+
        blk = &state->path.blk[state->path.active - 1];
        ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
        ASSERT(state->extravalid);
@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename(
        error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
                &state->extrablk, &rval);
        if (error)
-               return error;
+               goto out_free;
        /*
         * Fix the hash values up the btree.
         */
@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename(
         */
        if (!error)
                error = xfs_dir2_node_to_leaf(state);
+out_free:
        xfs_da_state_free(state);
        return error;
 }
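
The conversion above is the standard single-exit cleanup idiom, where every error path funnels through one label that frees the state; roughly (the lookup/remove helpers here are hypothetical stand-ins):

int example_removename(struct xfs_da_args *args)
{
	struct xfs_da_state	*state = xfs_da_state_alloc();
	int			error;

	error = example_lookup(state);		/* hypothetical helper */
	if (error)
		goto out_free;
	error = example_remove(state);		/* hypothetical helper */
out_free:
	xfs_da_state_free(state);		/* runs on every path */
	return error;
}
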
index 27e0e54..104455b 100644 (file)
@@ -618,7 +618,8 @@ xfs_setattr_nonsize(
                }
                if (!gid_eq(igid, gid)) {
                        if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
-                               ASSERT(!XFS_IS_PQUOTA_ON(mp));
+                               ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
+                                      !XFS_IS_PQUOTA_ON(mp));
                                ASSERT(mask & ATTR_GID);
                                ASSERT(gdqp);
                                olddquot2 = xfs_qm_vop_chown(tp, ip,
index b6b669d..eae1692 100644 (file)
@@ -193,7 +193,10 @@ xlog_bread_noalign(
        bp->b_io_length = nbblks;
        bp->b_error = 0;
 
-       xfsbdstrat(log->l_mp, bp);
+       if (XFS_FORCED_SHUTDOWN(log->l_mp))
+               return XFS_ERROR(EIO);
+
+       xfs_buf_iorequest(bp);
        error = xfs_buf_iowait(bp);
        if (error)
                xfs_buf_ioerror_alert(bp, __func__);
@@ -4397,7 +4400,13 @@ xlog_do_recover(
        XFS_BUF_READ(bp);
        XFS_BUF_UNASYNC(bp);
        bp->b_ops = &xfs_sb_buf_ops;
-       xfsbdstrat(log->l_mp, bp);
+
+       if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
+               xfs_buf_relse(bp);
+               return XFS_ERROR(EIO);
+       }
+
+       xfs_buf_iorequest(bp);
        error = xfs_buf_iowait(bp);
        if (error) {
                xfs_buf_ioerror_alert(bp, __func__);
index 14a4996..dd88f0e 100644 (file)
@@ -134,8 +134,6 @@ xfs_qm_dqpurge(
 {
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
-       struct xfs_dquot        *gdqp = NULL;
-       struct xfs_dquot        *pdqp = NULL;
 
        xfs_dqlock(dqp);
        if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
@@ -143,21 +141,6 @@ xfs_qm_dqpurge(
                return EAGAIN;
        }
 
-       /*
-        * If this quota has a hint attached, prepare for releasing it now.
-        */
-       gdqp = dqp->q_gdquot;
-       if (gdqp) {
-               xfs_dqlock(gdqp);
-               dqp->q_gdquot = NULL;
-       }
-
-       pdqp = dqp->q_pdquot;
-       if (pdqp) {
-               xfs_dqlock(pdqp);
-               dqp->q_pdquot = NULL;
-       }
-
        dqp->dq_flags |= XFS_DQ_FREEING;
 
        xfs_dqflock(dqp);
@@ -206,11 +189,47 @@ xfs_qm_dqpurge(
        XFS_STATS_DEC(xs_qm_dquot_unused);
 
        xfs_qm_dqdestroy(dqp);
+       return 0;
+}
+
+/*
+ * Release the group or project dquot pointers the user dquots may be
+ * carrying around as a hint, and proceed to purge the user dquot cache
+ * if requested.
+ */
+STATIC int
+xfs_qm_dqpurge_hints(
+       struct xfs_dquot        *dqp,
+       void                    *data)
+{
+       struct xfs_dquot        *gdqp = NULL;
+       struct xfs_dquot        *pdqp = NULL;
+       uint                    flags = *((uint *)data);
+
+       xfs_dqlock(dqp);
+       if (dqp->dq_flags & XFS_DQ_FREEING) {
+               xfs_dqunlock(dqp);
+               return EAGAIN;
+       }
+
+       /* If this quota has a hint attached, prepare for releasing it now */
+       gdqp = dqp->q_gdquot;
+       if (gdqp)
+               dqp->q_gdquot = NULL;
+
+       pdqp = dqp->q_pdquot;
+       if (pdqp)
+               dqp->q_pdquot = NULL;
+
+       xfs_dqunlock(dqp);
 
        if (gdqp)
-               xfs_qm_dqput(gdqp);
+               xfs_qm_dqrele(gdqp);
        if (pdqp)
-               xfs_qm_dqput(pdqp);
+               xfs_qm_dqrele(pdqp);
+
+       if (flags & XFS_QMOPT_UQUOTA)
+               return xfs_qm_dqpurge(dqp, NULL);
+
        return 0;
 }
 
@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all(
        struct xfs_mount        *mp,
        uint                    flags)
 {
-       if (flags & XFS_QMOPT_UQUOTA)
-               xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
+       /*
+        * We have to release the group/project dquot hint(s) from the user
+        * dquots first, if they are there; otherwise we would run into an
+        * infinite loop while walking through the radix tree to purge the
+        * other types of dquots, since their refcount is not zero while a
+        * user dquot still refers to them as a hint.
+        *
+        * Calling the special xfs_qm_dqpurge_hints() will end up going
+        * through the general xfs_qm_dqpurge() against the user dquot cache
+        * if requested.
+        */
+       xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
+
        if (flags & XFS_QMOPT_GQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_PQUOTA)
@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach(
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
-       if (udqp) {
+       if (udqp && XFS_IS_UQUOTA_ON(mp)) {
                ASSERT(ip->i_udquot == NULL);
-               ASSERT(XFS_IS_UQUOTA_ON(mp));
                ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
 
                ip->i_udquot = xfs_qm_dqhold(udqp);
                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
-       if (gdqp) {
+       if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
                ASSERT(ip->i_gdquot == NULL);
-               ASSERT(XFS_IS_GQUOTA_ON(mp));
                ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
                ip->i_gdquot = xfs_qm_dqhold(gdqp);
                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
-       if (pdqp) {
+       if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
                ASSERT(ip->i_pdquot == NULL);
-               ASSERT(XFS_IS_PQUOTA_ON(mp));
                ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
 
                ip->i_pdquot = xfs_qm_dqhold(pdqp);
index c035d11..647b6f1 100644 (file)
@@ -314,7 +314,18 @@ xfs_trans_read_buf_map(
                        ASSERT(bp->b_iodone == NULL);
                        XFS_BUF_READ(bp);
                        bp->b_ops = ops;
-                       xfsbdstrat(tp->t_mountp, bp);
+
+                       /*
+                        * XXX(hch): clean up the error handling here to be less
+                        * of a mess..
+                        */
+                       if (XFS_FORCED_SHUTDOWN(mp)) {
+                               trace_xfs_bdstrat_shut(bp, _RET_IP_);
+                               xfs_bioerror_relse(bp);
+                       } else {
+                               xfs_buf_iorequest(bp);
+                       }
+
                        error = xfs_buf_iowait(bp);
                        if (error) {
                                xfs_buf_ioerror_alert(bp, __func__);
index f330d28..db09234 100644 (file)
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #endif
 
 #ifndef pte_accessible
-# define pte_accessible(pte)           ((void)(pte),1)
+# define pte_accessible(mm, pte)       ((void)(pte), 1)
 #endif
 
 #ifndef flush_tlb_fix_spurious_fault
@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        barrier();
 #endif
-       if (pmd_none(pmdval))
+       if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
                return 1;
        if (unlikely(pmd_bad(pmdval))) {
-               if (!pmd_trans_huge(pmdval))
-                       pmd_clear_bad(pmd);
+               pmd_clear_bad(pmd);
                return 1;
        }
        return 0;
index ddf2b42..1cd3f5d 100644 (file)
@@ -3,13 +3,11 @@
 
 #include <linux/thread_info.h>
 
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED        (0)
+
 static __always_inline int preempt_count(void)
 {
-       return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+       return current_thread_info()->preempt_count;
 }
 
 static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
        return &current_thread_info()->preempt_count;
 }
 
-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
 static __always_inline void preempt_count_set(int pc)
 {
        *preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
        task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
 } while (0)
 
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
 static __always_inline void set_preempt_need_resched(void)
 {
-       *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline void clear_preempt_need_resched(void)
 {
-       *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline bool test_preempt_need_resched(void)
 {
-       return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+       return false;
 }
 
 /*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
 
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-       return !--*preempt_count_ptr();
+       /*
+        * Because load-store architectures cannot do per-cpu atomic
+        * operations, we cannot use PREEMPT_NEED_RESCHED; it might get
+        * lost.
+        */
+       return !--*preempt_count_ptr() && tif_need_resched();
 }
 
 /*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
  */
 static __always_inline bool should_resched(void)
 {
-       return unlikely(!*preempt_count_ptr());
+       return unlikely(!preempt_count() && tif_need_resched());
 }
 
 #ifdef CONFIG_PREEMPT
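
For context, __preempt_count_dec_and_test() feeds preempt_enable(); a simplified sketch of that consumer, assuming the generic include/linux/preempt.h shape:

/*
 * With no resched bit folded into the count, the helper itself must also
 * check tif_need_resched() so that the final preempt_enable() on a CPU
 * with a pending reschedule actually enters the scheduler.
 */
#define example_preempt_enable() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)
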
index 87578c1..49376ae 100644 (file)
        {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
-       {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+       {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
index 669fef5..3e0fbe4 100644 (file)
@@ -3,6 +3,6 @@
 
 #include <uapi/linux/auxvec.h>
 
-#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
   /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
 #endif /* _LINUX_AUXVEC_H */
index 0e23c26..9b50337 100644 (file)
@@ -418,6 +418,7 @@ enum {
        ATA_HORKAGE_DUMP_ID     = (1 << 16),    /* dump IDENTIFY data */
        ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17),  /* Set max sects to 65535 */
        ATA_HORKAGE_ATAPI_DMADIR = (1 << 18),   /* device requires dmadir */
+       ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19),    /* don't use queued TRIM */
 
         /* DMA mask for user DMA control: User visible values; DO NOT
            renumber */
index c8929c3..4bfde0e 100644 (file)
@@ -19,7 +19,7 @@
 
 #define USE_CMPXCHG_LOCKREF \
        (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
-        IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
+        IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
 
 struct lockref {
        union {
index 69ed5f5..c45c089 100644 (file)
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
        return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+       return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+       u32 ah, al;
+       u64 ret;
+
+       al = a;
+       ah = a >> 32;
+
+       ret = ((u64)al * mul) >> shift;
+       if (ah)
+               ret += ((u64)ah * mul) << (32 - shift);
+
+       return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
 #endif /* _LINUX_MATH64_H */
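
The 32-bit fallback above is exact whenever the shifted result fits in 64 bits: (a*mul)>>shift splits as ((al*mul)>>shift) + ((ah*mul)<<(32-shift)), because the high half contributes only bits at or above bit 32 and so loses nothing to the truncating shift. A userspace spot check (needs a compiler with __int128 support, e.g. 64-bit gcc); both lines print the same value:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the fallback above, in userspace types. */
static uint64_t mul_shr_32bit(uint64_t a, uint32_t mul, unsigned int shift)
{
	uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
	uint64_t ret = ((uint64_t)al * mul) >> shift;

	if (ah)
		ret += ((uint64_t)ah * mul) << (32 - shift);
	return ret;
}

int main(void)
{
	uint64_t a = 0x0123456789abcdefULL;
	uint32_t mul = 0x9e3779b9;
	unsigned int shift = 32;

	printf("fallback: %llx\n",
	       (unsigned long long)mul_shr_32bit(a, mul, shift));
	printf("__int128: %llx\n",
	       (unsigned long long)(((unsigned __int128)a * mul) >> shift));
	return 0;
}
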
index f5096b5..f015c05 100644 (file)
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
-               struct buffer_head *head, enum migrate_mode mode);
+               struct buffer_head *head, enum migrate_mode mode,
+               int extra_count);
 #else
 
 static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
 extern int migrate_misplaced_page(struct page *page,
                                  struct vm_area_struct *vma, int node);
 extern bool migrate_ratelimited(int node);
 #else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+       return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
 static inline int migrate_misplaced_page(struct page *page,
                                         struct vm_area_struct *vma, int node)
 {
index 1cedd00..3552717 100644 (file)
@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
 #if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
 extern bool ptlock_alloc(struct page *page);
 extern void ptlock_free(struct page *page);
 
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 {
        return page->ptl;
 }
-#else /* BLOATED_SPINLOCKS */
+#else /* ALLOC_SPLIT_PTLOCKS */
 static inline bool ptlock_alloc(struct page *page)
 {
        return true;
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 {
        return &page->ptl;
 }
-#endif /* BLOATED_SPINLOCKS */
+#endif /* ALLOC_SPLIT_PTLOCKS */
 
 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
index bd29941..290901a 100644 (file)
@@ -26,6 +26,7 @@ struct address_space;
 #define USE_SPLIT_PTE_PTLOCKS  (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 #define USE_SPLIT_PMD_PTLOCKS  (USE_SPLIT_PTE_PTLOCKS && \
                IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS    (SPINLOCK_SIZE > BITS_PER_LONG/8)
 
 /*
  * Each physical page in the system has a struct page associated with
@@ -155,7 +156,7 @@ struct page {
                                                 * system if PG_buddy is set.
                                                 */
 #if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
                spinlock_t *ptl;
 #else
                spinlock_t ptl;
@@ -442,6 +443,14 @@ struct mm_struct {
 
        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
+#endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+       /*
+        * An operation with batched TLB flushing is going on. Anything that
+        * can move process memory needs to flush the TLB when moving a
+        * PROT_NONE or PROT_NUMA mapped page.
+        */
+       bool tlb_flush_pending;
 #endif
        struct uprobes_state uprobes_state;
 };
@@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
        return mm->cpu_vm_mask_var;
 }
 
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+       barrier();
+       return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+       mm->tlb_flush_pending = true;
+
+       /*
+        * Guarantee that the tlb_flush_pending store does not leak into the
+        * critical section updating the page tables
+        */
+       smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+       barrier();
+       mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+       return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
 #endif /* _LINUX_MM_TYPES_H */
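
The intended usage pattern for the new helpers, sketched below. The set/clear helpers are the ones defined above; the surrounding function and the flush call stand in for an mprotect-style batched PTE update:

static void example_change_protection(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	set_tlb_flush_pending(mm);	/* visible before any PTE changes */
	/* ... rewrite PTEs in [start, end) under the page table lock ... */
	flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);	/* flush complete, drop the hint */
}
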
index 57e890a..a5fc7d0 100644 (file)
@@ -69,6 +69,7 @@
        __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;                    \
        extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;            \
        __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;                   \
+       extern __PCPU_ATTRS(sec) __typeof__(type) name;                 \
        __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak                 \
        __typeof__(type) name
 #else
index abd437d..ece0c6b 100644 (file)
@@ -51,6 +51,7 @@ struct pstore_info {
        char            *buf;
        size_t          bufsize;
        struct mutex    read_mutex;     /* serialize open/read/close */
+       int             flags;
        int             (*open)(struct pstore_info *psi);
        int             (*close)(struct pstore_info *psi);
        ssize_t         (*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
        void            *data;
 };
 
+#define        PSTORE_FLAGS_FRAGILE    1
+
 #ifdef CONFIG_PSTORE
 extern int pstore_register(struct pstore_info *);
 extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
index 8e00f9f..9e7db9e 100644 (file)
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
  * Architecture-specific implementations of sys_reboot commands.
  */
 
+extern void migrate_to_reboot_cpu(void);
 extern void machine_restart(char *cmd);
 extern void machine_halt(void);
 extern void machine_power_off(void);
index 939428a..8e3e66a 100644 (file)
@@ -24,6 +24,11 @@ extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
 #ifdef CONFIG_PROVE_LOCKING
 extern int lockdep_rtnl_is_held(void);
+#else
+static inline int lockdep_rtnl_is_held(void)
+{
+       return 1;
+}
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 
 /**
index 768b037..53f97eb 100644 (file)
@@ -440,8 +440,6 @@ struct task_cputime {
                .sum_exec_runtime = 0,                          \
        }
 
-#define PREEMPT_ENABLED                (PREEMPT_NEED_RESCHED)
-
 #ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_DISABLED       (1 + PREEMPT_ENABLED)
 #else
@@ -932,7 +930,8 @@ struct pipe_inode_info;
 struct uts_namespace;
 
 struct load_weight {
-       unsigned long weight, inv_weight;
+       unsigned long weight;
+       u32 inv_weight;
 };
 
 struct sched_avg {
index 215b5ea..6f69b3f 100644 (file)
@@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
        skb->mac_header += offset;
 }
 
+static inline void skb_pop_mac_header(struct sk_buff *skb)
+{
+       skb->mac_header = skb->network_header;
+}
+
 static inline void skb_probe_transport_header(struct sk_buff *skb,
                                              const int offset_hint)
 {
@@ -2526,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb)
  * Ethernet MAC Drivers should call this function in their hard_xmit()
  * function immediately before giving the sk_buff to the MAC hardware.
  *
+ * Specifically, one should make absolutely sure that this function is
+ * called before TX completion of this packet can trigger.  Otherwise
+ * the packet could potentially already be freed.
+ *
  * @skb: A socket buffer.
  */
 static inline void skb_tx_timestamp(struct sk_buff *skb)
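
A hedged driver-side sketch of the ordering rule the new paragraph spells out; the driver specifics are illustrative:

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* must run before the hardware can complete (and free) the skb */
	skb_tx_timestamp(skb);

	/* ... post the TX descriptor, ring the doorbell ... */
	return NETDEV_TX_OK;
}
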
index 979874c..61e1935 100644 (file)
@@ -978,7 +978,7 @@ struct ib_uobject {
 };
 
 struct ib_udata {
-       void __user *inbuf;
+       const void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
index 45412a6..321301c 100644 (file)
@@ -517,10 +517,6 @@ struct se_node_acl {
        u32                     acl_index;
 #define MAX_ACL_TAG_SIZE 64
        char                    acl_tag[MAX_ACL_TAG_SIZE];
-       u64                     num_cmds;
-       u64                     read_bytes;
-       u64                     write_bytes;
-       spinlock_t              stats_lock;
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
        atomic_t                acl_pr_ref_count;
        struct se_dev_entry     **device_list;
@@ -624,6 +620,7 @@ struct se_dev_attrib {
        u32             unmap_granularity;
        u32             unmap_granularity_alignment;
        u32             max_write_same_len;
+       u32             max_bytes_per_io;
        struct se_device *da_dev;
        struct config_group da_group;
 };
index 2f3f7ea..fe421e8 100644 (file)
@@ -983,6 +983,8 @@ struct drm_radeon_cs {
 #define RADEON_INFO_SI_CP_DMA_COMPUTE  0x17
 /* CIK macrotile mode array */
 #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY   0x18
+/* query the number of render backends */
+#define RADEON_INFO_SI_BACKEND_ENABLED_MASK    0x19
 
 
 struct drm_radeon_info {
index bcb0912..f854ca4 100644 (file)
@@ -75,6 +75,7 @@
 #define DRM_VMW_PARAM_FIFO_CAPS        4
 #define DRM_VMW_PARAM_MAX_FB_SIZE      5
 #define DRM_VMW_PARAM_FIFO_HW_VERSION  6
+#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
 
 /**
  * struct drm_vmw_getparam_arg
index ecc8859..bd24470 100644 (file)
@@ -464,7 +464,8 @@ struct input_keymap_entry {
 #define KEY_BRIGHTNESS_ZERO    244     /* brightness off, use ambient */
 #define KEY_DISPLAY_OFF                245     /* display device to off state */
 
-#define KEY_WIMAX              246
+#define KEY_WWAN               246     /* Wireless WAN (LTE, UMTS, GSM, etc.) */
+#define KEY_WIMAX              KEY_WWAN
 #define KEY_RFKILL             247     /* Key that controls all radios */
 
 #define KEY_MICMUTE            248     /* Mute / unmute the microphone */
index e1802d6..959d454 100644 (file)
@@ -679,6 +679,7 @@ enum perf_event_type {
         *
         *      { u64                   weight;   } && PERF_SAMPLE_WEIGHT
         *      { u64                   data_src; } && PERF_SAMPLE_DATA_SRC
+        *      { u64                   transaction; } && PERF_SAMPLE_TRANSACTION
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,
index 65e1209..ae665ac 100644 (file)
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
 struct blkif_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
        uint32_t       _pad1;        /* offsetof(blkif_request,u.rw.id) == 8 */
 #endif
        uint64_t       id;           /* private guest value, echoed in resp  */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero.        */
 #define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0          */
        blkif_vdev_t   _pad1;        /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
        uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
 #endif
        uint64_t       id;           /* private guest value, echoed in resp  */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
 struct blkif_request_other {
        uint8_t      _pad1;
        blkif_vdev_t _pad2;        /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
        uint32_t     _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
 #endif
        uint64_t     id;           /* private guest value, echoed in resp  */
@@ -184,7 +184,7 @@ struct blkif_request_other {
 struct blkif_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
        uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
 #endif
        uint64_t       id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
        blkif_vdev_t   handle;
        uint16_t       _pad2;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
        uint32_t      _pad3;         /* make it 64 byte aligned */
 #else
        uint64_t      _pad3;         /* make it 64 byte aligned */
index 79383d3..4e5d96a 100644 (file)
@@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK
 config ARCH_SUPPORTS_NUMA_BALANCING
        bool
 
+#
+# For architectures that know their GCC __int128 support is sound
+#
+config ARCH_SUPPORTS_INT128
+       bool
+
 # For architectures that (ab)use NUMA to represent different memory regions
 # all cpu-local but of different latencies, such as SuperH.
 #
index bbaf7d5..bc010ee 100644 (file)
@@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
 ###############################################################################
 ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
 X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
-X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509
-X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
+X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509
+X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
                                $(or $(realpath $(CERT)),$(CERT))))
+X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw))
 
 ifeq ($(X509_CERTIFICATES),)
 $(warning *** No X.509 certificates found ***)
@@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
 targets += $(obj)/.x509.list
 $(obj)/.x509.list:
        @echo $(X509_CERTIFICATES) >$@
+endif
 
 clean-files := x509_certificate_list .x509.list
-endif
 
 ifeq ($(CONFIG_MODULE_SIG),y)
 ###############################################################################
index 5253204..9fd4246 100644 (file)
@@ -22,6 +22,6 @@ void foo(void)
 #ifdef CONFIG_SMP
        DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
 #endif
-       DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
+       DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
        /* End of constants */
 }
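
SPINLOCK_SIZE lands in include/generated/bounds.h via the usual asm-offsets trick; schematically (simplified, the real macro lives in include/linux/kbuild.h):

/* The compiler emits the constant into its assembly output as a marker
 * line; a sed script in Kbuild then turns each marker into a #define in
 * include/generated/bounds.h, where SPINLOCK_SIZE <= 4 (lockref) and
 * ALLOC_SPLIT_PTLOCKS (mm_types.h) can test it at preprocessing time. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
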
index 8b729c2..bc1dcab 100644 (file)
@@ -890,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
                struct cgroup *cgrp = dentry->d_fsdata;
 
                BUG_ON(!(cgroup_is_dead(cgrp)));
+
+               /*
+                * XXX: cgrp->id is only used to look up css's.  As cgroup
+                * and css's lifetimes will be decoupled, it should be made
+                * per-subsystem and moved to css->id so that lookups are
+                * successful until the target css is released.
+                */
+               idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+               cgrp->id = -1;
+
                call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
        } else {
                struct cfent *cfe = __d_cfe(dentry);
@@ -4268,6 +4278,7 @@ static void css_release(struct percpu_ref *ref)
        struct cgroup_subsys_state *css =
                container_of(ref, struct cgroup_subsys_state, refcnt);
 
+       rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL);
        call_rcu(&css->rcu_head, css_free_rcu_fn);
 }
 
@@ -4426,14 +4437,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
        root->number_of_cgroups++;
 
-       /* each css holds a ref to the cgroup's dentry and the parent css */
-       for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
-               dget(dentry);
-               css_get(css->parent);
-       }
-
        /* hold a ref to the parent's dentry */
        dget(parent->dentry);
 
@@ -4445,6 +4448,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                if (err)
                        goto err_destroy;
 
+               /* each css holds a ref to the cgroup's dentry and parent css */
+               dget(dentry);
+               css_get(css->parent);
+
+               /* mark it consumed for error path */
+               css_ar[ss->subsys_id] = NULL;
+
                if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
                    parent->parent) {
                        pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
@@ -4491,6 +4501,14 @@ err_free_cgrp:
        return err;
 
 err_destroy:
+       for_each_root_subsys(root, ss) {
+               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
+               if (css) {
+                       percpu_ref_cancel_init(&css->refcnt);
+                       ss->css_free(css);
+               }
+       }
        cgroup_destroy_locked(cgrp);
        mutex_unlock(&cgroup_mutex);
        mutex_unlock(&dentry->d_inode->i_mutex);
@@ -4652,8 +4670,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
         * will be invoked to perform the rest of destruction once the
         * percpu refs of all css's are confirmed to be killed.
         */
-       for_each_root_subsys(cgrp->root, ss)
-               kill_css(cgroup_css(cgrp, ss));
+       for_each_root_subsys(cgrp->root, ss) {
+               struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
+               if (css)
+                       kill_css(css);
+       }
 
        /*
         * Mark @cgrp dead.  This prevents further task migration and child
@@ -4722,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
        /* delete this cgroup from parent->children */
        list_del_rcu(&cgrp->sibling);
 
-       /*
-        * We should remove the cgroup object from idr before its grace
-        * period starts, so we won't be looking up a cgroup while the
-        * cgroup is being freed.
-        */
-       idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
-       cgrp->id = -1;
-
        dput(d);
 
        set_bit(CGRP_RELEASABLE, &parent->flags);
index 72348dc..f574401 100644 (file)
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return;
 
+       perf_pmu_disable(event->pmu);
+
        event->state = PERF_EVENT_STATE_INACTIVE;
        if (event->pending_disable) {
                event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
                ctx->nr_freq--;
        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
+
+       perf_pmu_enable(event->pmu);
 }
 
 static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
                 struct perf_event_context *ctx)
 {
        u64 tstamp = perf_event_time(event);
+       int ret = 0;
 
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
         */
        smp_wmb();
 
+       perf_pmu_disable(event->pmu);
+
        if (event->pmu->add(event, PERF_EF_START)) {
                event->state = PERF_EVENT_STATE_INACTIVE;
                event->oncpu = -1;
-               return -EAGAIN;
+               ret = -EAGAIN;
+               goto out;
        }
 
        event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
        if (event->attr.exclusive)
                cpuctx->exclusive = 1;
 
-       return 0;
+out:
+       perf_pmu_enable(event->pmu);
+
+       return ret;
 }
 
 static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                if (!event_filter_match(event))
                        continue;
 
+               perf_pmu_disable(event->pmu);
+
                hwc = &event->hw;
 
                if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                }
 
                if (!event->attr.freq || !event->attr.sample_freq)
-                       continue;
+                       goto next;
 
                /*
                 * stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                        perf_adjust_period(event, period, delta, false);
 
                event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
+       next:
+               perf_pmu_enable(event->pmu);
        }
 
        perf_pmu_enable(ctx->pmu);
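
The common shape of the three hunks above, isolated (the function itself is illustrative):

static void example_update_event(struct perf_event *event)
{
	/* pause only this event's own PMU while its state is in flux */
	perf_pmu_disable(event->pmu);

	/* ... stop, reprogram, or re-account the event ... */

	perf_pmu_enable(event->pmu);
}
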
index 728d5be..5721f0e 100644 (file)
@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
        spin_lock_init(&mm->page_table_lock);
        mm_init_aio(mm);
        mm_init_owner(mm, p);
+       clear_tlb_flush_pending(mm);
 
        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
index b462fa1..aa6a8aa 100644 (file)
@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
 bool pm_freezing;
 bool pm_nosig_freezing;
 
+/*
+ * Temporary export for the deadlock workaround in ata_scsi_hotplug().
+ * Remove once the hack becomes unnecessary.
+ */
+EXPORT_SYMBOL_GPL(pm_freezing);
+
 /* protects freezing and frozen transitions */
 static DEFINE_SPINLOCK(freezer_lock);
 
index d0d8fca..9c97016 100644 (file)
@@ -1680,6 +1680,7 @@ int kernel_kexec(void)
        {
                kexec_in_progress = true;
                kernel_restart_prepare(NULL);
+               migrate_to_reboot_cpu();
                printk(KERN_EMERG "Starting new kernel\n");
                machine_shutdown();
        }
index 463aa67..eacb8bd 100644 (file)
@@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev)
        list_for_each_entry(tmp, &pm_vt_switch_list, head) {
                if (tmp->dev == dev) {
                        list_del(&tmp->head);
+                       kfree(tmp);
                        break;
                }
        }
index f813b34..662c83f 100644 (file)
@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_reboot_notifier);
 
-static void migrate_to_reboot_cpu(void)
+void migrate_to_reboot_cpu(void)
 {
        /* The boot cpu is always logical cpu 0 */
        int cpu = reboot_cpu;
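
Exporting migrate_to_reboot_cpu() lets kernel_kexec() (see the kernel/kexec.c hunk above) move itself to the boot CPU before machine_shutdown(), matching what the normal reboot path already does. As a loose userspace analogue of the pinning idea, not a quote of the elided function body, glibc's sched_setaffinity() can pin a task the same way:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);       /* "boot CPU" stand-in */
        if (sched_setaffinity(0, sizeof(set), &set))
                perror("sched_setaffinity");
        else
                printf("pinned; now running on CPU %d\n", sched_getcpu());
        return 0;
}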
index e85cda2..a88f4a4 100644 (file)
@@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 static void update_top_cache_domain(int cpu)
 {
        struct sched_domain *sd;
+       struct sched_domain *busy_sd = NULL;
        int id = cpu;
        int size = 1;
 
@@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu)
        if (sd) {
                id = cpumask_first(sched_domain_span(sd));
                size = cpumask_weight(sched_domain_span(sd));
-               sd = sd->parent; /* sd_busy */
+               busy_sd = sd->parent; /* sd_busy */
        }
-       rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
+       rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
 
        rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
        per_cpu(sd_llc_size, cpu) = size;
@@ -5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                 * die on a /0 trap.
                 */
                sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+               sg->sgp->power_orig = sg->sgp->power;
 
                /*
                 * Make sure the first group of this domain contains the
index fd773ad..c7395d9 100644 (file)
@@ -178,59 +178,61 @@ void sched_init_granularity(void)
        update_sysctl();
 }
 
-#if BITS_PER_LONG == 32
-# define WMULT_CONST   (~0UL)
-#else
-# define WMULT_CONST   (1UL << 32)
-#endif
-
+#define WMULT_CONST    (~0U)
 #define WMULT_SHIFT    32
 
-/*
- * Shift right and round:
- */
-#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+static void __update_inv_weight(struct load_weight *lw)
+{
+       unsigned long w;
+
+       if (likely(lw->inv_weight))
+               return;
+
+       w = scale_load_down(lw->weight);
+
+       if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
+               lw->inv_weight = 1;
+       else if (unlikely(!w))
+               lw->inv_weight = WMULT_CONST;
+       else
+               lw->inv_weight = WMULT_CONST / w;
+}
 
 /*
- * delta *= weight / lw
+ * delta_exec * weight / lw.weight
+ *   OR
+ * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
+ *
+ * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
+ * we're guaranteed shift stays positive because inv_weight is guaranteed to
+ * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
+ *
+ * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
+ * weight/lw.weight <= 1, and therefore our shift will also be positive.
  */
-static unsigned long
-calc_delta_mine(unsigned long delta_exec, unsigned long weight,
-               struct load_weight *lw)
+static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
 {
-       u64 tmp;
-
-       /*
-        * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
-        * entities since MIN_SHARES = 2. Treat weight as 1 if less than
-        * 2^SCHED_LOAD_RESOLUTION.
-        */
-       if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
-               tmp = (u64)delta_exec * scale_load_down(weight);
-       else
-               tmp = (u64)delta_exec;
+       u64 fact = scale_load_down(weight);
+       int shift = WMULT_SHIFT;
 
-       if (!lw->inv_weight) {
-               unsigned long w = scale_load_down(lw->weight);
+       __update_inv_weight(lw);
 
-               if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
-                       lw->inv_weight = 1;
-               else if (unlikely(!w))
-                       lw->inv_weight = WMULT_CONST;
-               else
-                       lw->inv_weight = WMULT_CONST / w;
+       if (unlikely(fact >> 32)) {
+               while (fact >> 32) {
+                       fact >>= 1;
+                       shift--;
+               }
        }
 
-       /*
-        * Check whether we'd overflow the 64-bit multiplication:
-        */
-       if (unlikely(tmp > WMULT_CONST))
-               tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
-                       WMULT_SHIFT/2);
-       else
-               tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
+       /* hint to use a 32x32->64 mul */
+       fact = (u64)(u32)fact * lw->inv_weight;
+
+       while (fact >> 32) {
+               fact >>= 1;
+               shift--;
+       }
 
-       return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
+       return mul_u64_u32_shr(delta_exec, fact, shift);
 }
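
The rewrite replaces calc_delta_mine()'s overflow special-casing with one scheme: precompute a 32-bit inverse of the divisor, renormalize the weight factor into 32 bits while tracking the lost shift, then finish with a single 64x32->96-bit multiply. A compilable userspace model of the arithmetic (mul_u64_u32_shr() is emulated with the GCC/Clang __int128 extension, the kernel's edge cases are omitted, and the weights below are illustrative):

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's mul_u64_u32_shr() */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

static uint64_t calc_delta_model(uint64_t delta_exec, uint64_t weight,
                                 uint64_t lw_weight)
{
        uint64_t fact = weight;
        uint32_t inv_weight = (uint32_t)(0xffffffffu / lw_weight);
        int shift = 32;

        while (fact >> 32) {            /* renormalize weight into 32 bits */
                fact >>= 1;
                shift--;
        }

        fact = (uint64_t)(uint32_t)fact * inv_weight;   /* 32x32 -> 64 */

        while (fact >> 32) {            /* renormalize the product too */
                fact >>= 1;
                shift--;
        }

        return mul_u64_u32_shr(delta_exec, (uint32_t)fact, shift);
}

int main(void)
{
        /* 3000000 * 1024 / 3072: expect roughly 1000000 */
        printf("%llu\n",
               (unsigned long long)calc_delta_model(3000000, 1024, 3072));
        return 0;
}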
 
 
@@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
 /*
  * delta /= w
  */
-static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
+static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
 {
        if (unlikely(se->load.weight != NICE_0_LOAD))
-               delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
+               delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
 
        return delta;
 }
@@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
-               slice = calc_delta_mine(slice, se->load.weight, load);
+               slice = __calc_delta(slice, se->load.weight, load);
        }
        return slice;
 }
@@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p)
 #endif
 
 /*
- * Update the current task's runtime statistics. Skip current tasks that
- * are not in our scheduling class.
+ * Update the current task's runtime statistics.
  */
-static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
-             unsigned long delta_exec)
-{
-       unsigned long delta_exec_weighted;
-
-       schedstat_set(curr->statistics.exec_max,
-                     max((u64)delta_exec, curr->statistics.exec_max));
-
-       curr->sum_exec_runtime += delta_exec;
-       schedstat_add(cfs_rq, exec_clock, delta_exec);
-       delta_exec_weighted = calc_delta_fair(delta_exec, curr);
-
-       curr->vruntime += delta_exec_weighted;
-       update_min_vruntime(cfs_rq);
-}
-
 static void update_curr(struct cfs_rq *cfs_rq)
 {
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_clock_task(rq_of(cfs_rq));
-       unsigned long delta_exec;
+       u64 delta_exec;
 
        if (unlikely(!curr))
                return;
 
-       /*
-        * Get the amount of time the current task was running
-        * since the last time we changed load (this cannot
-        * overflow on 32 bits):
-        */
-       delta_exec = (unsigned long)(now - curr->exec_start);
-       if (!delta_exec)
+       delta_exec = now - curr->exec_start;
+       if (unlikely((s64)delta_exec <= 0))
                return;
 
-       __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;
 
+       schedstat_set(curr->statistics.exec_max,
+                     max(delta_exec, curr->statistics.exec_max));
+
+       curr->sum_exec_runtime += delta_exec;
+       schedstat_add(cfs_rq, exec_clock, delta_exec);
+
+       curr->vruntime += calc_delta_fair(delta_exec, curr);
+       update_min_vruntime(cfs_rq);
+
        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);
 
@@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work)
                    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
                        continue;
 
+               /*
+                * Skip inaccessible VMAs to avoid any confusion between
+                * PROT_NONE and NUMA hinting ptes
+                */
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+                       continue;
+
                do {
                        start = max(start, vma->vm_start);
                        end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
@@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
        }
 }
 
-static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                    unsigned long delta_exec)
+static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 {
        /* dock delta_exec before expiring quota (as it could span periods) */
        cfs_rq->runtime_remaining -= delta_exec;
@@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 }
 
 static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 {
        if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
                return;
@@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
        return rq_clock_task(rq_of(cfs_rq));
 }
 
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                    unsigned long delta_exec) {}
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
index 7d57275..1c40655 100644 (file)
@@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
        struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+       /*
+        * Change rq's cpupri only if rt_rq is the top queue.
+        */
+       if (&rq->rt != rt_rq)
+               return;
+#endif
        if (rq->online && prio < prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 }
@@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
        struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+       /*
+        * Change rq's cpupri only if rt_rq is the top queue.
+        */
+       if (&rq->rt != rt_rq)
+               return;
+#endif
        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
index 0e9f9ea..72a0f81 100644 (file)
@@ -775,7 +775,7 @@ static int ftrace_profile_init(void)
        int cpu;
        int ret = 0;
 
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
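
Switching ftrace_profile_init() from online to possible CPUs means a CPU hotplugged after init finds its profiling buffer already allocated. A small userspace model of the failure mode the one-liner fixes (CPU counts and sizes are illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>

#define NR_POSSIBLE 8

int main(void)
{
        void *buf[NR_POSSIBLE] = { NULL };
        int online_at_init = 2;

        /* old behaviour: allocate only for CPUs online at init */
        for (int cpu = 0; cpu < online_at_init; cpu++)
                buf[cpu] = calloc(1, 4096);

        /* CPU 3 hotplugs later: its buffer was never allocated */
        printf("cpu3 buffer, online-only init:  %p\n", buf[3]);

        /* fixed behaviour: cover every possible CPU up front */
        for (int cpu = 0; cpu < NR_POSSIBLE; cpu++)
                if (!buf[cpu])
                        buf[cpu] = calloc(1, 4096);

        printf("cpu3 buffer, possible-cpu init: %p\n", buf[3]);
        return 0;
}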
index a3a0dbf..c006131 100644 (file)
@@ -51,9 +51,9 @@ struct user_namespace init_user_ns = {
        .owner = GLOBAL_ROOT_UID,
        .group = GLOBAL_ROOT_GID,
        .proc_inum = PROC_USER_INIT_INO,
-#ifdef CONFIG_KEYS_KERBEROS_CACHE
-       .krb_cache_register_sem =
-       __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem),
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+       .persistent_keyring_register_sem =
+       __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
 #endif
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
index eb69f35..723bbe0 100644 (file)
@@ -543,7 +543,7 @@ config ZSWAP
 
 config MEM_SOFT_DIRTY
        bool "Track memory changes"
-       depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY
+       depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
        select PROC_PAGE_MONITOR
        help
          This option enables memory changes tracking by introducing a
index 805165b..f58bcd0 100644 (file)
@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
                        bool migrate_scanner)
 {
        struct zone *zone = cc->zone;
+
+       if (cc->ignore_skip_hint)
+               return;
+
        if (!page)
                return;
 
index 33a5dc4..7de1bf8 100644 (file)
@@ -882,6 +882,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                ret = 0;
                goto out_unlock;
        }
+
+       /* mmap_sem prevents this happening but warn if that changes */
+       WARN_ON(pmd_trans_migrating(pmd));
+
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(src_ptl);
@@ -1243,6 +1247,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
                return ERR_PTR(-EFAULT);
 
+       /* Full NUMA hinting faults to serialise migration in fault paths */
+       if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+               goto out;
+
        page = pmd_page(*pmd);
        VM_BUG_ON(!PageHead(page));
        if (flags & FOLL_TOUCH) {
@@ -1295,6 +1303,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(!pmd_same(pmd, *pmdp)))
                goto out_unlock;
 
+       /*
+        * If there are potential migrations, wait for completion and retry
+        * without disrupting NUMA hinting information. Do not relock and
+        * check_same as the page may no longer be mapped.
+        */
+       if (unlikely(pmd_trans_migrating(*pmdp))) {
+               spin_unlock(ptl);
+               wait_migrate_huge_page(vma->anon_vma, pmdp);
+               goto out;
+       }
+
        page = pmd_page(pmd);
        BUG_ON(is_huge_zero_page(page));
        page_nid = page_to_nid(page);
@@ -1323,23 +1342,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                /* If the page was locked, there are no parallel migrations */
                if (page_locked)
                        goto clear_pmdnuma;
+       }
 
-               /*
-                * Otherwise wait for potential migrations and retry. We do
-                * relock and check_same as the page may no longer be mapped.
-                * As the fault is being retried, do not account for it.
-                */
+       /* Migration could have started since the pmd_trans_migrating check */
+       if (!page_locked) {
                spin_unlock(ptl);
                wait_on_page_locked(page);
                page_nid = -1;
                goto out;
        }
 
-       /* Page is misplaced, serialise migrations and parallel THP splits */
+       /*
+        * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
+        * to serialise splits
+        */
        get_page(page);
        spin_unlock(ptl);
-       if (!page_locked)
-               lock_page(page);
        anon_vma = page_lock_anon_vma_read(page);
 
        /* Confirm the PMD did not change while page_table_lock was released */
@@ -1351,6 +1369,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_unlock;
        }
 
+       /* Bail if we fail to protect against THP splits for any reason */
+       if (unlikely(!anon_vma)) {
+               put_page(page);
+               page_nid = -1;
+               goto clear_pmdnuma;
+       }
+
        /*
         * Migrate the THP to the requested node, returns with page unlocked
         * and pmd_numa cleared.
@@ -1517,6 +1542,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                ret = 1;
                if (!prot_numa) {
                        entry = pmdp_get_and_clear(mm, addr, pmd);
+                       if (pmd_numa(entry))
+                               entry = pmd_mknonnuma(entry);
                        entry = pmd_modify(entry, newprot);
                        ret = HPAGE_PMD_NR;
                        BUG_ON(pmd_write(entry));
@@ -1531,7 +1558,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                         */
                        if (!is_huge_zero_page(page) &&
                            !pmd_numa(*pmd)) {
-                               entry = pmdp_get_and_clear(mm, addr, pmd);
+                               entry = *pmd;
                                entry = pmd_mknuma(entry);
                                ret = HPAGE_PMD_NR;
                        }
index b7c1716..db08af9 100644 (file)
@@ -1505,10 +1505,16 @@ static int soft_offline_huge_page(struct page *page, int flags)
                if (ret > 0)
                        ret = -EIO;
        } else {
-               set_page_hwpoison_huge_page(hpage);
-               dequeue_hwpoisoned_huge_page(hpage);
-               atomic_long_add(1 << compound_order(hpage),
-                               &num_poisoned_pages);
+               /* overcommit hugetlb page will be freed to buddy */
+               if (PageHuge(page)) {
+                       set_page_hwpoison_huge_page(hpage);
+                       dequeue_hwpoisoned_huge_page(hpage);
+                       atomic_long_add(1 << compound_order(hpage),
+                                       &num_poisoned_pages);
+               } else {
+                       SetPageHWPoison(page);
+                       atomic_long_inc(&num_poisoned_pages);
+               }
        }
        return ret;
 }
index 5d9025f..6768ce9 100644 (file)
@@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
-#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
 bool ptlock_alloc(struct page *page)
 {
        spinlock_t *ptl;
index eca4a31..0cd2c4d 100644 (file)
@@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
                        break;
                vma = vma->vm_next;
        }
+
+       if (PageHuge(page)) {
+               if (vma)
+                       return alloc_huge_page_noerr(vma, address, 1);
+               else
+                       return NULL;
+       }
        /*
-        * queue_pages_range() confirms that @page belongs to some vma,
-        * so vma shouldn't be NULL.
+        * if !vma, alloc_page_vma() will use task or system default policy
         */
-       BUG_ON(!vma);
-
-       if (PageHuge(page))
-               return alloc_huge_page_noerr(vma, address, 1);
        return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 }
 #else
@@ -1318,7 +1320,7 @@ static long do_mbind(unsigned long start, unsigned long len,
                if (nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
        } else
-               putback_lru_pages(&pagelist);
+               putback_movable_pages(&pagelist);
 
        up_write(&mm->mmap_sem);
  mpol_out:
index bb94004..9194375 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
 #include <linux/balloon_compaction.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 
@@ -316,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  */
 int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
-               struct buffer_head *head, enum migrate_mode mode)
+               struct buffer_head *head, enum migrate_mode mode,
+               int extra_count)
 {
-       int expected_count = 0;
+       int expected_count = 1 + extra_count;
        void **pslot;
 
        if (!mapping) {
                /* Anonymous page without mapping */
-               if (page_count(page) != 1)
+               if (page_count(page) != expected_count)
                        return -EAGAIN;
                return MIGRATEPAGE_SUCCESS;
        }
@@ -333,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));
 
-       expected_count = 2 + page_has_private(page);
+       expected_count += 1 + page_has_private(page);
        if (page_count(page) != expected_count ||
                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
                spin_unlock_irq(&mapping->tree_lock);
@@ -583,7 +585,7 @@ int migrate_page(struct address_space *mapping,
 
        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
@@ -610,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping,
 
        head = page_buffers(page);
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+       rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
 
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
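
The new extra_count parameter generalises the old hardcoded reference accounting: a plain migration passes 0 and recovers the previous 2 + page_has_private() expectation, while a caller holding an extra pin (as THP migration does) can declare it. A tiny model of the arithmetic (values illustrative):

#include <stdio.h>

static int expected_count(int extra_count, int has_private)
{
        /* 1 caller ref + declared extra pins, +1 for the radix tree
         * slot, +1 if buffer heads hold a reference */
        return 1 + extra_count + 1 + has_private;
}

int main(void)
{
        /* matches the old hardcoded 2 + page_has_private() for extra_count == 0 */
        printf("%d %d\n", expected_count(0, 0), expected_count(0, 1));
        return 0;
}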
@@ -1654,6 +1656,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
        return 1;
 }
 
+bool pmd_trans_migrating(pmd_t pmd)
+{
+       struct page *page = pmd_page(pmd);
+       return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+       struct page *page = pmd_page(*pmd);
+       wait_on_page_locked(page);
+}
+
 /*
  * Attempt to migrate a misplaced page to the specified destination
  * node. Caller is expected to have an elevated reference count on
@@ -1716,12 +1730,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                                struct page *page, int node)
 {
        spinlock_t *ptl;
-       unsigned long haddr = address & HPAGE_PMD_MASK;
        pg_data_t *pgdat = NODE_DATA(node);
        int isolated = 0;
        struct page *new_page = NULL;
        struct mem_cgroup *memcg = NULL;
        int page_lru = page_is_file_cache(page);
+       unsigned long mmun_start = address & HPAGE_PMD_MASK;
+       unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
+       pmd_t orig_entry;
 
        /*
         * Rate-limit the amount of data that is being migrated to a node.
@@ -1744,6 +1760,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                goto out_fail;
        }
 
+       if (mm_tlb_flush_pending(mm))
+               flush_tlb_range(vma, mmun_start, mmun_end);
+
        /* Prepare a page as a migration target */
        __set_page_locked(new_page);
        SetPageSwapBacked(new_page);
@@ -1755,9 +1774,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        WARN_ON(PageLRU(new_page));
 
        /* Recheck the target PMD */
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        ptl = pmd_lock(mm, pmd);
-       if (unlikely(!pmd_same(*pmd, entry))) {
+       if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
+fail_putback:
                spin_unlock(ptl);
+               mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
                /* Reverse changes made by migrate_page_copy() */
                if (TestClearPageActive(new_page))
@@ -1774,7 +1796,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                putback_lru_page(page);
                mod_zone_page_state(page_zone(page),
                         NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
-               goto out_fail;
+
+               goto out_unlock;
        }
 
        /*
@@ -1786,16 +1809,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
         */
        mem_cgroup_prepare_migration(page, new_page, &memcg);
 
+       orig_entry = *pmd;
        entry = mk_pmd(new_page, vma->vm_page_prot);
-       entry = pmd_mknonnuma(entry);
-       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
        entry = pmd_mkhuge(entry);
+       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
-       pmdp_clear_flush(vma, haddr, pmd);
-       set_pmd_at(mm, haddr, pmd, entry);
-       page_add_new_anon_rmap(new_page, vma, haddr);
+       /*
+        * Clear the old entry under pagetable lock and establish the new PTE.
+        * Any parallel GUP will either observe the old page blocking on the
+        * page lock, block on the page table lock or observe the new page.
+        * The SetPageUptodate on the new page and page_add_new_anon_rmap
+        * guarantee the copy is visible before the pagetable update.
+        */
+       flush_cache_range(vma, mmun_start, mmun_end);
+       page_add_new_anon_rmap(new_page, vma, mmun_start);
+       pmdp_clear_flush(vma, mmun_start, pmd);
+       set_pmd_at(mm, mmun_start, pmd, entry);
+       flush_tlb_range(vma, mmun_start, mmun_end);
        update_mmu_cache_pmd(vma, address, &entry);
+
+       if (page_count(page) != 2) {
+               set_pmd_at(mm, mmun_start, pmd, orig_entry);
+               flush_tlb_range(vma, mmun_start, mmun_end);
+               update_mmu_cache_pmd(vma, address, &entry);
+               page_remove_rmap(new_page);
+               goto fail_putback;
+       }
+
        page_remove_rmap(page);
+
        /*
         * Finish the charge transaction under the page table lock to
         * prevent split_huge_page() from dividing up the charge
@@ -1803,6 +1845,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
         */
        mem_cgroup_end_migration(memcg, page, new_page, true);
        spin_unlock(ptl);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        unlock_page(new_page);
        unlock_page(page);
@@ -1820,10 +1863,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 out_fail:
        count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 out_dropref:
-       entry = pmd_mknonnuma(entry);
-       set_pmd_at(mm, haddr, pmd, entry);
-       update_mmu_cache_pmd(vma, address, &entry);
+       ptl = pmd_lock(mm, pmd);
+       if (pmd_same(*pmd, entry)) {
+               entry = pmd_mknonnuma(entry);
+               set_pmd_at(mm, mmun_start, pmd, entry);
+               update_mmu_cache_pmd(vma, address, &entry);
+       }
+       spin_unlock(ptl);
 
+out_unlock:
        unlock_page(page);
        put_page(page);
        return 0;
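
The rework threads every exit path of migrate_misplaced_transhuge_page() through paired mmu_notifier_invalidate_range_start()/_end() calls and revalidates the PMD and the page count under the page-table lock before committing the new mapping. An outline of that locking order, with empty stand-ins so the skeleton compiles (this is a shape sketch, not the function itself):

#include <stdbool.h>

static void invalidate_range_start_sketch(void) {}
static void invalidate_range_end_sketch(void) {}
static void pmd_lock_sketch(void) {}
static void pmd_unlock_sketch(void) {}
static bool recheck_pmd_and_refcount_sketch(void) { return true; }
static void install_new_pmd_sketch(void) {}

static void replace_pmd_sketch(void)
{
        invalidate_range_start_sketch();        /* 1: tell secondary MMUs */
        pmd_lock_sketch();                      /* 2: take the PT lock */
        if (recheck_pmd_and_refcount_sketch())  /* 3: revalidate under it */
                install_new_pmd_sketch();       /* 4: flush old TLB entries */
        pmd_unlock_sketch();
        invalidate_range_end_sketch();          /* 5: always pairs with start */
}

int main(void)
{
        replace_pmd_sketch();
        return 0;
}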
index 2666797..bb53a65 100644 (file)
@@ -52,17 +52,21 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                        pte_t ptent;
                        bool updated = false;
 
-                       ptent = ptep_modify_prot_start(mm, addr, pte);
                        if (!prot_numa) {
+                               ptent = ptep_modify_prot_start(mm, addr, pte);
+                               if (pte_numa(ptent))
+                                       ptent = pte_mknonnuma(ptent);
                                ptent = pte_modify(ptent, newprot);
                                updated = true;
                        } else {
                                struct page *page;
 
+                               ptent = *pte;
                                page = vm_normal_page(vma, addr, oldpte);
                                if (page) {
                                        if (!pte_numa(oldpte)) {
                                                ptent = pte_mknuma(ptent);
+                                               set_pte_at(mm, addr, pte, ptent);
                                                updated = true;
                                        }
                                }
@@ -79,7 +83,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
                        if (updated)
                                pages++;
-                       ptep_modify_prot_commit(mm, addr, pte, ptent);
+
+                       /* Only !prot_numa always clears the pte */
+                       if (!prot_numa)
+                               ptep_modify_prot_commit(mm, addr, pte, ptent);
                } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
 
@@ -181,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
+       set_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
@@ -192,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
+       clear_tlb_flush_pending(mm);
 
        return pages;
 }
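
Together with the clear_tlb_flush_pending() call added in kernel/fork.c and the mm_tlb_flush_pending() check added in mm/migrate.c above, this gives racing THP migration a way to notice that a batched change_protection() flush is still outstanding and flush the range itself. A compilable userspace model of the flag handshake (the kernel uses a plain field with explicit barriers; C11 atomics stand in here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm_model {
        atomic_bool tlb_flush_pending;
};

static void set_tlb_flush_pending(struct mm_model *mm)
{
        atomic_store(&mm->tlb_flush_pending, true);
}

static void clear_tlb_flush_pending(struct mm_model *mm)
{
        atomic_store(&mm->tlb_flush_pending, false);
}

static bool mm_tlb_flush_pending(struct mm_model *mm)
{
        return atomic_load(&mm->tlb_flush_pending);
}

int main(void)
{
        struct mm_model mm;

        clear_tlb_flush_pending(&mm);   /* mirrors the fork.c init */
        set_tlb_flush_pending(&mm);     /* change_protection_range() begins */
        if (mm_tlb_flush_pending(&mm))
                puts("flush pending: racing migration flushes the range too");
        clear_tlb_flush_pending(&mm);   /* batched flush done */
        return 0;
}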
index 580a5f0..5248fe0 100644 (file)
@@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
 
 static bool zone_local(struct zone *local_zone, struct zone *zone)
 {
-       return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
+       return local_zone->node == zone->node;
 }
 
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -1913,18 +1913,17 @@ zonelist_scan:
                 * page was allocated in should have no effect on the
                 * time the page has in memory before being reclaimed.
                 *
-                * When zone_reclaim_mode is enabled, try to stay in
-                * local zones in the fastpath.  If that fails, the
-                * slowpath is entered, which will do another pass
-                * starting with the local zones, but ultimately fall
-                * back to remote zones that do not partake in the
-                * fairness round-robin cycle of this zonelist.
+                * Try to stay in local zones in the fastpath.  If
+                * that fails, the slowpath is entered, which will do
+                * another pass starting with the local zones, but
+                * ultimately fall back to remote zones that do not
+                * partake in the fairness round-robin cycle of this
+                * zonelist.
                 */
                if (alloc_flags & ALLOC_WMARK_LOW) {
                        if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
                                continue;
-                       if (zone_reclaim_mode &&
-                           !zone_local(preferred_zone, zone))
+                       if (!zone_local(preferred_zone, zone))
                                continue;
                }
                /*
@@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
                 * thrash fairness information for zones that are not
                 * actually part of this zonelist's round-robin cycle.
                 */
-               if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+               if (!zone_local(preferred_zone, zone))
                        continue;
                mod_zone_page_state(zone, NR_ALLOC_BATCH,
                                    high_wmark_pages(zone) -
index cbb3854..a8b9199 100644 (file)
@@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
 {
+       struct mm_struct *mm = (vma)->vm_mm;
        pte_t pte;
-       pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
-       if (pte_accessible(pte))
+       pte = ptep_get_and_clear(mm, address, ptep);
+       if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
 }
@@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
 {
+       pmd_t entry = *pmdp;
+       if (pmd_numa(entry))
+               entry = pmd_mknonnuma(entry);
-       set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
+       set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
index 55c8b8d..068522d 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
        spinlock_t *ptl;
 
        if (unlikely(PageHuge(page))) {
+               /* when pud is not present, pte will be NULL */
                pte = huge_pte_offset(mm, address);
+               if (!pte)
+                       return NULL;
+
                ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
                goto check;
        }
index a2b480a..b9c8a6e 100644 (file)
@@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        hard_iface->bat_iv.ogm_buff = ogm_buff;
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
-       batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
-       batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
-       batadv_ogm_packet->header.ttl = 2;
+       batadv_ogm_packet->packet_type = BATADV_IV_OGM;
+       batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
+       batadv_ogm_packet->ttl = 2;
        batadv_ogm_packet->flags = BATADV_NO_FLAGS;
        batadv_ogm_packet->reserved = 0;
        batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
@@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
        batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
-       batadv_ogm_packet->header.ttl = BATADV_TTL;
+       batadv_ogm_packet->ttl = BATADV_TTL;
 }
 
 /* when do we schedule our own ogm to be sent */
@@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                           fwd_str, (packet_num > 0 ? "aggregated " : ""),
                           batadv_ogm_packet->orig,
                           ntohl(batadv_ogm_packet->seqno),
-                          batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
+                          batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
                           (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
                            "on" : "off"),
                           hard_iface->net_dev->name,
@@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
        /* multihomed peer assumed
         * non-primary OGMs are only broadcasted on their interface
         */
-       if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
+       if ((directlink && (batadv_ogm_packet->ttl == 1)) ||
            (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
                /* FIXME: what about aggregated packets ? */
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
                           (forw_packet->own ? "Sending own" : "Forwarding"),
                           batadv_ogm_packet->orig,
                           ntohl(batadv_ogm_packet->seqno),
-                          batadv_ogm_packet->header.ttl,
+                          batadv_ogm_packet->ttl,
                           forw_packet->if_incoming->net_dev->name,
                           forw_packet->if_incoming->net_dev->dev_addr);
 
@@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
                 */
                if ((!directlink) &&
                    (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
-                   (batadv_ogm_packet->header.ttl != 1) &&
+                   (batadv_ogm_packet->ttl != 1) &&
 
                    /* own packets originating non-primary
                     * interfaces leave only that interface
@@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
                 * interface only - we still can aggregate
                 */
                if ((directlink) &&
-                   (new_bat_ogm_packet->header.ttl == 1) &&
+                   (new_bat_ogm_packet->ttl == 1) &&
                    (forw_packet->if_incoming == if_incoming) &&
 
                    /* packets from direct neighbors or
@@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        uint16_t tvlv_len;
 
-       if (batadv_ogm_packet->header.ttl <= 1) {
+       if (batadv_ogm_packet->ttl <= 1) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
                return;
        }
@@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 
        tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
 
-       batadv_ogm_packet->header.ttl--;
+       batadv_ogm_packet->ttl--;
        memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
 
        /* apply hop penalty */
@@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Forwarding packet: tq: %i, ttl: %i\n",
-                  batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
+                  batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
 
        /* switch of primaries first hop flag when forwarding */
        batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
@@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
 
        if (dup_status == BATADV_NO_DUP) {
-               orig_node->last_ttl = batadv_ogm_packet->header.ttl;
-               neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
+               orig_node->last_ttl = batadv_ogm_packet->ttl;
+               neigh_node->last_ttl = batadv_ogm_packet->ttl;
        }
 
        batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
@@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
         * packet in an aggregation.  Here we expect that the padding
         * is always zero (or not 0x01)
         */
-       if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
+       if (batadv_ogm_packet->packet_type != BATADV_IV_OGM)
                return;
 
        /* could be changed by schedule_own_packet() */
@@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                   if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
                   batadv_ogm_packet->prev_sender,
                   ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
-                  batadv_ogm_packet->header.ttl,
-                  batadv_ogm_packet->header.version, has_directlink_flag);
+                  batadv_ogm_packet->ttl,
+                  batadv_ogm_packet->version, has_directlink_flag);
 
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
         * seqno and similar ttl as the non-duplicate
         */
        sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
-       similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+       similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl;
        if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
                            (sameseq && similar_ttl)))
                batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
index 6c8c393..b316a4c 100644 (file)
@@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
 
        unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
 
-       switch (unicast_4addr_packet->u.header.packet_type) {
+       switch (unicast_4addr_packet->u.packet_type) {
        case BATADV_UNICAST:
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
                           "* encapsulated within a UNICAST packet\n");
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
                        break;
                default:
                        batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
-                                  unicast_4addr_packet->u.header.packet_type);
+                                  unicast_4addr_packet->u.packet_type);
                }
                break;
        case BATADV_BCAST:
@@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
        default:
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
                           "* encapsulated within an unknown packet type (0x%x)\n",
-                          unicast_4addr_packet->u.header.packet_type);
+                          unicast_4addr_packet->u.packet_type);
        }
 }
 
index 271d321..6ddb614 100644 (file)
@@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
                batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
                                   skb->len + ETH_HLEN);
 
-               packet->header.ttl--;
+               packet->ttl--;
                batadv_send_skb_packet(skb, neigh_node->if_incoming,
                                       neigh_node->addr);
                ret = true;
@@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                goto out_err;
 
        /* Create one header to be copied to all fragments */
-       frag_header.header.packet_type = BATADV_UNICAST_FRAG;
-       frag_header.header.version = BATADV_COMPAT_VERSION;
-       frag_header.header.ttl = BATADV_TTL;
+       frag_header.packet_type = BATADV_UNICAST_FRAG;
+       frag_header.version = BATADV_COMPAT_VERSION;
+       frag_header.ttl = BATADV_TTL;
        frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
        frag_header.reserved = 0;
        frag_header.no = 0;
index 29ae4ef..130cc32 100644 (file)
@@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
                goto free_skb;
        }
 
-       if (icmp_header->header.packet_type != BATADV_ICMP) {
+       if (icmp_header->packet_type != BATADV_ICMP) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
                len = -EINVAL;
@@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
 
        icmp_header->uid = socket_client->index;
 
-       if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+       if (icmp_header->version != BATADV_COMPAT_VERSION) {
                icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
-               icmp_header->header.version = BATADV_COMPAT_VERSION;
+               icmp_header->version = BATADV_COMPAT_VERSION;
                batadv_socket_add_packet(socket_client, icmp_header,
                                         packet_len);
                goto free_skb;
index c51a5e5..1511f64 100644 (file)
@@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
 
-       if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+       if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: incompatible batman version (%i)\n",
-                          batadv_ogm_packet->header.version);
+                          batadv_ogm_packet->version);
                goto err_free;
        }
 
        /* all receive handlers return whether they received or reused
         * the supplied skb. if not, we have to free the skb.
         */
-       idx = batadv_ogm_packet->header.packet_type;
+       idx = batadv_ogm_packet->packet_type;
        ret = (*batadv_rx_handler[idx])(skb, hard_iface);
 
        if (ret == NET_RX_DROP)
@@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void)
        BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
        BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
        BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
-       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
-       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);
 
        /* broadcast packet */
        batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
@@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
        skb_reserve(skb, ETH_HLEN);
        tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
        unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
-       unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
-       unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
-       unicast_tvlv_packet->header.ttl = BATADV_TTL;
+       unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
+       unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
+       unicast_tvlv_packet->ttl = BATADV_TTL;
        unicast_tvlv_packet->reserved = 0;
        unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
        unicast_tvlv_packet->align = 0;
index 351e199..511d7e1 100644 (file)
@@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
 {
        if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
                return false;
-       if (orig_node->last_ttl != ogm_packet->header.ttl + 1)
+       if (orig_node->last_ttl != ogm_packet->ttl + 1)
                return false;
        if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
                return false;
@@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
        coded_packet = (struct batadv_coded_packet *)skb_dest->data;
        skb_reset_mac_header(skb_dest);
 
-       coded_packet->header.packet_type = BATADV_CODED;
-       coded_packet->header.version = BATADV_COMPAT_VERSION;
-       coded_packet->header.ttl = packet1->header.ttl;
+       coded_packet->packet_type = BATADV_CODED;
+       coded_packet->version = BATADV_COMPAT_VERSION;
+       coded_packet->ttl = packet1->ttl;
 
        /* Info about first unicast packet */
        memcpy(coded_packet->first_source, first_source, ETH_ALEN);
@@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
        memcpy(coded_packet->second_source, second_source, ETH_ALEN);
        memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
        coded_packet->second_crc = packet_id2;
-       coded_packet->second_ttl = packet2->header.ttl;
+       coded_packet->second_ttl = packet2->ttl;
        coded_packet->second_ttvn = packet2->ttvn;
        coded_packet->coded_len = htons(coding_len);
 
@@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb,
        /* We only handle unicast packets */
        payload = skb_network_header(skb);
        packet = (struct batadv_unicast_packet *)payload;
-       if (packet->header.packet_type != BATADV_UNICAST)
+       if (packet->packet_type != BATADV_UNICAST)
                goto out;
 
        /* Try to find a coding opportunity and send the skb if one is found */
@@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
        /* Check for supported packet type */
        payload = skb_network_header(skb);
        packet = (struct batadv_unicast_packet *)payload;
-       if (packet->header.packet_type != BATADV_UNICAST)
+       if (packet->packet_type != BATADV_UNICAST)
                goto out;
 
        /* Find existing nc_path or create a new */
@@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
                ttvn = coded_packet_tmp.second_ttvn;
        } else {
                orig_dest = coded_packet_tmp.first_orig_dest;
-               ttl = coded_packet_tmp.header.ttl;
+               ttl = coded_packet_tmp.ttl;
                ttvn = coded_packet_tmp.first_ttvn;
        }
 
@@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 
        /* Create decoded unicast packet */
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       unicast_packet->header.packet_type = BATADV_UNICAST;
-       unicast_packet->header.version = BATADV_COMPAT_VERSION;
-       unicast_packet->header.ttl = ttl;
+       unicast_packet->packet_type = BATADV_UNICAST;
+       unicast_packet->version = BATADV_COMPAT_VERSION;
+       unicast_packet->ttl = ttl;
        memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
        unicast_packet->ttvn = ttvn;
 
index 207459b..2dd8f24 100644 (file)
@@ -155,6 +155,7 @@ enum batadv_tvlv_type {
        BATADV_TVLV_ROAM        = 0x05,
 };
 
+#pragma pack(2)
 /* the destination hardware field in the ARP frame is used to
  * transport the claim type and the group id
  */
@@ -163,24 +164,20 @@ struct batadv_bla_claim_dst {
        uint8_t type;           /* bla_claimframe */
        __be16 group;           /* group id */
 };
-
-struct batadv_header {
-       uint8_t  packet_type;
-       uint8_t  version;  /* batman version field */
-       uint8_t  ttl;
-       /* the parent struct has to add a byte after the header to make
-        * everything 4 bytes aligned again
-        */
-};
+#pragma pack()
 
 /**
  * struct batadv_ogm_packet - ogm (routing protocol) packet
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @flags: contains routing relevant flags - see enum batadv_iv_flags
  * @tvlv_len: length of tvlv data following the ogm header
  */
 struct batadv_ogm_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
        uint8_t  flags;
        __be32   seqno;
        uint8_t  orig[ETH_ALEN];
@@ -196,29 +193,51 @@ struct batadv_ogm_packet {
 #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
 
 /**
- * batadv_icmp_header - common ICMP header
- * @header: common batman header
+ * batadv_icmp_header - common members among all the ICMP packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @msg_type: ICMP packet type
  * @dst: address of the destination node
  * @orig: address of the source node
  * @uid: local ICMP socket identifier
+ * @align: not used - useful for alignment purposes only
+ *
+ * This structure is used for ICMP packet parsing only and it is never sent
+ * over the wire. The alignment field at the end is there to ensure that
+ * members are padded the same way as they are in real packets.
  */
 struct batadv_icmp_header {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
        uint8_t  msg_type; /* see ICMP message types above */
        uint8_t  dst[ETH_ALEN];
        uint8_t  orig[ETH_ALEN];
        uint8_t  uid;
+       uint8_t  align[3];
 };
 
 /**
  * batadv_icmp_packet - ICMP packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
  * @reserved: not used - useful for alignment
  * @seqno: ICMP sequence number
  */
 struct batadv_icmp_packet {
-       struct batadv_icmp_header icmph;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
+       uint8_t  msg_type; /* see ICMP message types above */
+       uint8_t  dst[ETH_ALEN];
+       uint8_t  orig[ETH_ALEN];
+       uint8_t  uid;
        uint8_t  reserved;
        __be16   seqno;
 };
@@ -227,13 +246,25 @@ struct batadv_icmp_packet {
 
 /**
  * batadv_icmp_packet_rr - ICMP RouteRecord packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
  * @rr_cur: number of entries the rr array
  * @seqno: ICMP sequence number
  * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-       struct batadv_icmp_header icmph;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
+       uint8_t  msg_type; /* see ICMP message types above */
+       uint8_t  dst[ETH_ALEN];
+       uint8_t  orig[ETH_ALEN];
+       uint8_t  uid;
        uint8_t  rr_cur;
        __be16   seqno;
        uint8_t  rr[BATADV_RR_LEN][ETH_ALEN];
@@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr {
  */
 #pragma pack(2)
 
+/**
+ * struct batadv_unicast_packet - unicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @ttvn: translation table version number
+ * @dest: originator destination of the unicast packet
+ */
 struct batadv_unicast_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
        uint8_t  ttvn; /* destination translation table version number */
        uint8_t  dest[ETH_ALEN];
        /* "4 bytes boundary + 2 bytes" long to make the payload after the
@@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet {
 
 /**
  * struct batadv_frag_packet - fragmented packet
- * @header: common batman packet header with type, compatversion, and ttl
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @dest: final destination used when routing fragments
  * @orig: originator of the fragment used when merging the packet
  * @no: fragment number within this sequence
@@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet {
  * @total_size: size of the merged packet
  */
 struct batadv_frag_packet {
-       struct  batadv_header header;
+       uint8_t packet_type;
+       uint8_t version;  /* batman version field */
+       uint8_t ttl;
 #if defined(__BIG_ENDIAN_BITFIELD)
        uint8_t no:4;
        uint8_t reserved:4;
@@ -305,8 +350,19 @@ struct batadv_frag_packet {
        __be16  total_size;
 };
 
+/**
+ * struct batadv_bcast_packet - broadcast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @orig: originator of the broadcast packet
+ */
 struct batadv_bcast_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;  /* batman version field */
+       uint8_t  ttl;
        uint8_t  reserved;
        __be32   seqno;
        uint8_t  orig[ETH_ALEN];
@@ -315,11 +371,11 @@ struct batadv_bcast_packet {
         */
 };
 
-#pragma pack()
-
 /**
  * struct batadv_coded_packet - network coded packet
- * @header: common batman packet header and ttl of first included packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @reserved: Align following fields to 2-byte boundaries
  * @first_source: original source of first included packet
 * @first_orig_dest: original destination of first included packet
@@ -334,7 +390,9 @@ struct batadv_bcast_packet {
  * @coded_len: length of network coded part of the payload
  */
 struct batadv_coded_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;  /* batman version field */
+       uint8_t  ttl;
        uint8_t  first_ttvn;
        /* uint8_t  first_dest[ETH_ALEN]; - saved in mac header destination */
        uint8_t  first_source[ETH_ALEN];
@@ -349,9 +407,13 @@ struct batadv_coded_packet {
        __be16   coded_len;
 };
 
+#pragma pack()
+
 /**
  * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @reserved: reserved field (for packet alignment)
  * @src: address of the source
  * @dst: address of the destination
@@ -359,7 +421,9 @@ struct batadv_coded_packet {
 * @align: 2 bytes to align the header to a 4-byte boundary
  */
 struct batadv_unicast_tvlv_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;  /* batman version field */
+       uint8_t  ttl;
        uint8_t  reserved;
        uint8_t  dst[ETH_ALEN];
        uint8_t  src[ETH_ALEN];
@@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data {
  * struct batadv_tvlv_tt_change - translation table diff data
  * @flags: status indicators concerning the non-mesh client (see
  *  batadv_tt_client_flags)
- * @reserved: reserved field
+ * @reserved: reserved field - useful for alignment purposes only
  * @addr: mac address of non-mesh client that triggered this tt change
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_tt_change {
        uint8_t flags;
-       uint8_t reserved;
+       uint8_t reserved[3];
        uint8_t addr[ETH_ALEN];
        __be16 vid;
 };
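The refactor in this header replaces the embedded struct batadv_header with three explicit leading bytes (packet_type, version, ttl) in every packet struct. A minimal userspace sketch, not part of the patch, showing why the two layouts stay binary-identical; the kernel type names are reused here for illustration only:

  #include <assert.h>
  #include <stddef.h>
  #include <stdint.h>

  #define ETH_ALEN 6

  /* old layout: the common header embedded as a struct */
  struct batadv_header {
          uint8_t packet_type;
          uint8_t version;
          uint8_t ttl;
  } __attribute__((packed));

  struct old_unicast {
          struct batadv_header header;
          uint8_t ttvn;
          uint8_t dest[ETH_ALEN];
  } __attribute__((packed));

  /* new layout: the same three bytes spelled out per struct */
  struct new_unicast {
          uint8_t packet_type;
          uint8_t version;
          uint8_t ttl;
          uint8_t ttvn;
          uint8_t dest[ETH_ALEN];
  } __attribute__((packed));

  int main(void)
  {
          /* both are 10 bytes and every field keeps its offset */
          assert(sizeof(struct old_unicast) == sizeof(struct new_unicast));
          assert(offsetof(struct old_unicast, ttvn) ==
                 offsetof(struct new_unicast, ttvn));
          return 0;
  }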
index d4114d7..46278bf 100644 (file)
@@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
                memcpy(icmph->dst, icmph->orig, ETH_ALEN);
                memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
                icmph->msg_type = BATADV_ECHO_REPLY;
-               icmph->header.ttl = BATADV_TTL;
+               icmph->ttl = BATADV_TTL;
 
                res = batadv_send_skb_to_orig(skb, orig_node, NULL);
                if (res != NET_XMIT_DROP)
@@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
        icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
        /* send TTL exceeded if packet is an echo request (traceroute) */
-       if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
+       if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
                pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
-                        icmp_packet->icmph.orig, icmp_packet->icmph.dst);
+                        icmp_packet->orig, icmp_packet->dst);
                goto out;
        }
 
@@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
                goto out;
 
        /* get routing information */
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
+       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
        if (!orig_node)
                goto out;
 
@@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
 
        icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
-       memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
-       memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+       memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr,
               ETH_ALEN);
-       icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
-       icmp_packet->icmph.header.ttl = BATADV_TTL;
+       icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
+       icmp_packet->ttl = BATADV_TTL;
 
        if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = NET_RX_SUCCESS;
@@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                return batadv_recv_my_icmp_packet(bat_priv, skb);
 
        /* TTL exceeded */
-       if (icmph->header.ttl < 2)
+       if (icmph->ttl < 2)
                return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
 
        /* get routing information */
@@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
        icmph = (struct batadv_icmp_header *)skb->data;
 
        /* decrement ttl */
-       icmph->header.ttl--;
+       icmph->ttl--;
 
        /* route it */
        if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
        /* TTL exceeded */
-       if (unicast_packet->header.ttl < 2) {
+       if (unicast_packet->ttl < 2) {
                pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
                         ethhdr->h_source, unicast_packet->dest);
                goto out;
@@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 
        /* decrement ttl */
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       unicast_packet->header.ttl--;
+       unicast_packet->ttl--;
 
-       switch (unicast_packet->header.packet_type) {
+       switch (unicast_packet->packet_type) {
        case BATADV_UNICAST_4ADDR:
                hdr_len = sizeof(struct batadv_unicast_4addr_packet);
                break;
@@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
        unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
 
-       is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR;
+       is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
        /* the caller function should have already pulled 2 bytes */
        if (is4addr)
                hdr_size = sizeof(*unicast_4addr_packet);
@@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
        if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
                goto out;
 
-       if (bcast_packet->header.ttl < 2)
+       if (bcast_packet->ttl < 2)
                goto out;
 
        orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
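The TTL checks rewritten throughout this file all follow one forwarding pattern: drop when fewer than two hops remain, otherwise consume a hop and forward. A hedged standalone sketch of that pattern (helper names are illustrative, not batman-adv API):

  #include <stdint.h>
  #include <stdio.h>

  /* forward only if at least one hop is left after this node */
  static int route_packet(uint8_t *ttl)
  {
          if (*ttl < 2)
                  return -1;      /* TTL exceeded: drop or report */
          (*ttl)--;               /* consume one hop */
          return 0;               /* pass to the next hop */
  }

  int main(void)
  {
          uint8_t ttl = 2;

          printf("%d\n", route_packet(&ttl));     /* 0, ttl is now 1 */
          printf("%d\n", route_packet(&ttl));     /* -1, dropped */
          return 0;
  }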
index c83be5e..fba4dcf 100644 (file)
@@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
                return false;
 
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       unicast_packet->header.version = BATADV_COMPAT_VERSION;
+       unicast_packet->version = BATADV_COMPAT_VERSION;
        /* batman packet type: unicast */
-       unicast_packet->header.packet_type = BATADV_UNICAST;
+       unicast_packet->packet_type = BATADV_UNICAST;
        /* set unicast ttl */
-       unicast_packet->header.ttl = BATADV_TTL;
+       unicast_packet->ttl = BATADV_TTL;
        /* copy the destination for faster routing */
        memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
        /* set the destination tt version number */
@@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
                goto out;
 
        uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
-       uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+       uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
        memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
        uc_4addr_packet->subtype = packet_subtype;
        uc_4addr_packet->reserved = 0;
@@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 
        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
-       bcast_packet->header.ttl--;
+       bcast_packet->ttl--;
 
        skb_reset_mac_header(newskb);
 
index 36f0508..a8f99d1 100644 (file)
@@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb,
                        goto dropped;
 
                bcast_packet = (struct batadv_bcast_packet *)skb->data;
-               bcast_packet->header.version = BATADV_COMPAT_VERSION;
-               bcast_packet->header.ttl = BATADV_TTL;
+               bcast_packet->version = BATADV_COMPAT_VERSION;
+               bcast_packet->ttl = BATADV_TTL;
 
                /* batman packet type: broadcast */
-               bcast_packet->header.packet_type = BATADV_BCAST;
+               bcast_packet->packet_type = BATADV_BCAST;
                bcast_packet->reserved = 0;
 
                /* hw address of first interface is the orig mac because only
@@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
                         struct sk_buff *skb, struct batadv_hard_iface *recv_if,
                         int hdr_size, struct batadv_orig_node *orig_node)
 {
-       struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
+       struct batadv_bcast_packet *batadv_bcast_packet;
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        __be16 ethertype = htons(ETH_P_BATMAN);
        struct vlan_ethhdr *vhdr;
@@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
        unsigned short vid;
        bool is_bcast;
 
-       is_bcast = (batadv_header->packet_type == BATADV_BCAST);
+       batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
+       is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
 
        /* check if enough space is available for pulling, and pull */
        if (!pskb_may_pull(skb, hdr_size))
@@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
        skb_pull_rcsum(skb, hdr_size);
        skb_reset_mac_header(skb);
 
-       vid = batadv_get_vid(skb, hdr_size);
+       /* clean the netfilter state now that the batman-adv header has been
+        * removed
+        */
+       nf_reset(skb);
+
+       vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
index 4add57d..ff625fe 100644 (file)
@@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
                return;
 
        tt_change_node->change.flags = flags;
-       tt_change_node->change.reserved = 0;
+       memset(tt_change_node->change.reserved, 0,
+              sizeof(tt_change_node->change.reserved));
        memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
        tt_change_node->change.vid = htons(common->vid);
 
@@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
                               ETH_ALEN);
                        tt_change->flags = tt_common_entry->flags;
                        tt_change->vid = htons(tt_common_entry->vid);
-                       tt_change->reserved = 0;
+                       memset(tt_change->reserved, 0,
+                              sizeof(tt_change->reserved));
 
                        tt_num_entries++;
                        tt_change++;
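Widening reserved from one byte to an array of three grows struct batadv_tvlv_tt_change from 10 to 12 bytes, a 4-byte multiple, which is presumably why the kernel-doc above calls the field useful for alignment only; both writers now clear the whole array. A quick userspace size check (illustrative, with uint16_t standing in for __be16):

  #include <assert.h>
  #include <stdint.h>
  #include <string.h>

  #define ETH_ALEN 6

  struct tt_change {
          uint8_t  flags;
          uint8_t  reserved[3];   /* pads the struct to a 4-byte multiple */
          uint8_t  addr[ETH_ALEN];
          uint16_t vid;           /* __be16 in the kernel */
  } __attribute__((packed));

  int main(void)
  {
          struct tt_change c;

          assert(sizeof(c) == 12);   /* was 10 with a single reserved byte */
          memset(c.reserved, 0, sizeof(c.reserved));
          return 0;
  }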
index 6a6c8bb..7552f9e 100644 (file)
@@ -940,8 +940,22 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);
 
-       if (hci_pi(sk)->channel == HCI_CHANNEL_RAW &&
-           bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+       if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+               /* No permission check is needed for user channel
+                * since that gets enforced when binding the socket.
+                *
+                * However, check that the packet type is valid.
+                */
+               if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+                   bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+                   bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+                       err = -EINVAL;
+                       goto drop;
+               }
+
+               skb_queue_tail(&hdev->raw_q, skb);
+               queue_work(hdev->workqueue, &hdev->tx_work);
+       } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);
@@ -972,14 +986,6 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                        goto drop;
                }
 
-               if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
-                   bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
-                   bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
-                   bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
-                       err = -EINVAL;
-                       goto drop;
-               }
-
                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }
index ba3b7ea..4fc1722 100644 (file)
@@ -4500,7 +4500,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 {
        struct netdev_adjacent *upper;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
 
        upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
 
index ca15f32..36b1443 100644 (file)
@@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                                                 neigh->parms->reachable_time :
                                                 0)));
                neigh->nud_state = new;
+               notify = 1;
        }
 
        if (lladdr != neigh->ha) {
index 4c6bdf9..595ddf0 100644 (file)
@@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = {
        .llseek  = noop_llseek,
 };
 
-static __init int setup_jprobe(void)
-{
-       int ret = register_jprobe(&dccp_send_probe);
-
-       if (ret) {
-               request_module("dccp");
-               ret = register_jprobe(&dccp_send_probe);
-       }
-       return ret;
-}
-
 static __init int dccpprobe_init(void)
 {
        int ret = -ENOMEM;
@@ -174,7 +163,13 @@ static __init int dccpprobe_init(void)
        if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
                goto err0;
 
-       ret = setup_jprobe();
+       ret = register_jprobe(&dccp_send_probe);
+       if (ret) {
+               ret = request_module("dccp");
+               if (!ret)
+                       ret = register_jprobe(&dccp_send_probe);
+       }
+
        if (ret)
                goto err1;
 
index 56a964a..a0f52da 100644 (file)
@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 
        r->id.idiag_sport = inet->inet_sport;
        r->id.idiag_dport = inet->inet_dport;
+
+       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
        r->id.idiag_src[0] = inet->inet_rcv_saddr;
        r->id.idiag_dst[0] = inet->inet_daddr;
 
@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
 
        r->idiag_family       = tw->tw_family;
        r->idiag_retrans      = 0;
+
        r->id.idiag_if        = tw->tw_bound_dev_if;
        sock_diag_save_cookie(tw, r->id.idiag_cookie);
+
        r->id.idiag_sport     = tw->tw_sport;
        r->id.idiag_dport     = tw->tw_dport;
+
+       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
        r->id.idiag_src[0]    = tw->tw_rcv_saddr;
        r->id.idiag_dst[0]    = tw->tw_daddr;
+
        r->idiag_state        = tw->tw_substate;
        r->idiag_timer        = 3;
        r->idiag_expires      = jiffies_to_msecs(tmo);
@@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
 
        r->id.idiag_sport = inet->inet_sport;
        r->id.idiag_dport = ireq->ir_rmt_port;
+
+       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
        r->id.idiag_src[0] = ireq->ir_loc_addr;
        r->id.idiag_dst[0] = ireq->ir_rmt_addr;
+
        r->idiag_expires = jiffies_to_msecs(tmo);
        r->idiag_rqueue = 0;
        r->idiag_wqueue = 0;
index d7aea4c..e560ef3 100644 (file)
@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
                                  iph->saddr, iph->daddr, tpi->key);
 
        if (tunnel) {
+               skb_pop_mac_header(skb);
                ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
                return PACKET_RCVD;
        }
index 9124027..df18461 100644 (file)
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
 
        if (cork->length + length > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
-                              mtu-exthdrlen);
+                              mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }
 
@@ -1151,7 +1151,8 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                         mtu : 0xFFFF;
 
        if (cork->length + size > maxnonfragsize - fragheaderlen) {
-               ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
+               ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
+                              mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }
 
index f13bd91..a313c3f 100644 (file)
@@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par)
 static struct xt_target synproxy_tg4_reg __read_mostly = {
        .name           = "SYNPROXY",
        .family         = NFPROTO_IPV4,
+       .hooks          = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
        .target         = synproxy_tg4,
        .targetsize     = sizeof(struct xt_synproxy_info),
        .checkentry     = synproxy_tg4_check,
index fff5ba1..4a5e94a 100644 (file)
@@ -72,7 +72,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
        const struct nft_reject *priv = nft_expr_priv(expr);
 
-       if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type))
+       if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
                goto nla_put_failure;
 
        switch (priv->type) {
index 62c19fd..f140048 100644 (file)
@@ -1600,20 +1600,15 @@ static void flush_stack(struct sock **stack, unsigned int count,
 }
 
 /* For TCP sockets, sk_rx_dst is protected by socket lock
- * For UDP, we use sk_dst_lock to guard against concurrent changes.
+ * For UDP, we use xchg() to guard against concurrent changes.
  */
 static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
        struct dst_entry *old;
 
-       spin_lock(&sk->sk_dst_lock);
-       old = sk->sk_rx_dst;
-       if (likely(old != dst)) {
-               dst_hold(dst);
-               sk->sk_rx_dst = dst;
-               dst_release(old);
-       }
-       spin_unlock(&sk->sk_dst_lock);
+       dst_hold(dst);
+       old = xchg(&sk->sk_rx_dst, dst);
+       dst_release(old);
 }
 
 /*
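The spinlock-free swap above relies on xchg() returning the previous pointer atomically: take a reference on the new entry first, publish it, then drop the reference the old entry held. A hedged C11 model of the same pattern, with atomic_exchange standing in for the kernel's xchg():

  #include <stdatomic.h>
  #include <stdlib.h>

  struct dst { atomic_int refcnt; };

  static void dst_hold(struct dst *d)
  {
          atomic_fetch_add(&d->refcnt, 1);
  }

  static void dst_release(struct dst *d)
  {
          if (d && atomic_fetch_sub(&d->refcnt, 1) == 1)
                  free(d);
  }

  /* publish a new cached entry without a lock: hold a reference,
   * swap the pointer, then release whatever was there before */
  static void rx_dst_set(_Atomic(struct dst *) *slot, struct dst *new_dst)
  {
          dst_hold(new_dst);
          dst_release(atomic_exchange(slot, new_dst));
  }

  int main(void)
  {
          _Atomic(struct dst *) slot = NULL;
          struct dst *a = calloc(1, sizeof(*a));

          a->refcnt = 1;          /* creator's reference */
          rx_dst_set(&slot, a);   /* slot takes its own reference */
          dst_release(a);         /* drop the creator's reference */
          dst_release(atomic_exchange(&slot, NULL));
          return 0;
  }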
index 4acdb63..e6f9319 100644 (file)
@@ -1193,11 +1193,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 
        fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
                        (opt ? opt->opt_nflen : 0);
-       maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
+       maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+                    sizeof(struct frag_hdr);
 
        if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
-               if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
-                       ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
+               unsigned int maxnonfragsize, headersize;
+
+               headersize = sizeof(struct ipv6hdr) +
+                            (opt ? opt->tot_len : 0) +
+                            (dst_allfrag(&rt->dst) ?
+                             sizeof(struct frag_hdr) : 0) +
+                            rt->rt6i_nfheader_len;
+
+               maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
+                                mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+
+               /* dontfrag active */
+               if ((cork->length + length > mtu - headersize) && dontfrag &&
+                   (sk->sk_protocol == IPPROTO_UDP ||
+                    sk->sk_protocol == IPPROTO_RAW)) {
+                       ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
+                                                  sizeof(struct ipv6hdr));
+                       goto emsgsize;
+               }
+
+               if (cork->length + length > maxnonfragsize - headersize) {
+emsgsize:
+                       ipv6_local_error(sk, EMSGSIZE, fl6,
+                                        mtu - headersize +
+                                        sizeof(struct ipv6hdr));
                        return -EMSGSIZE;
                }
        }
@@ -1222,12 +1246,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
         * --yoshfuji
         */
 
-       if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
-                                          sk->sk_protocol == IPPROTO_RAW)) {
-               ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
-               return -EMSGSIZE;
-       }
-
        skb = skb_peek_tail(&sk->sk_write_queue);
        cork->length += length;
        if (((length > mtu) ||
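The reworked check above compares the queued length against maxnonfragsize minus a headersize that now counts the IPv6 header, destination options, an eventual fragment header, and netfilter overhead. A hedged worked example with made-up sizes:

  #include <stdio.h>

  int main(void)
  {
          /* illustrative numbers only */
          unsigned int mtu = 1280, length = 1300, cork_length = 0;
          unsigned int headersize = 40 + 0 + 0 + 0; /* ipv6hdr + opts + frag + nf */
          unsigned int maxnonfragsize = mtu;        /* PMTU discovery is on */

          if (cork_length + length > maxnonfragsize - headersize)
                  /* 1300 > 1240: report EMSGSIZE, advertising
                   * mtu - headersize + sizeof(ipv6hdr) = 1280 */
                  printf("EMSGSIZE, advertised mtu %u\n",
                         mtu - headersize + 40);
          return 0;
  }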
index f78f41a..a0d1727 100644 (file)
@@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par)
 static struct xt_target synproxy_tg6_reg __read_mostly = {
        .name           = "SYNPROXY",
        .family         = NFPROTO_IPV6,
+       .hooks          = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
        .target         = synproxy_tg6,
        .targetsize     = sizeof(struct xt_synproxy_info),
        .checkentry     = synproxy_tg6_check,
index a0a48ac..4b4944c 100644 (file)
@@ -1905,9 +1905,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
                else
                        rt->rt6i_gateway = *dest;
                rt->rt6i_flags = ort->rt6i_flags;
-               if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
-                   (RTF_DEFAULT | RTF_ADDRCONF))
-                       rt6_set_from(rt, ort);
+               rt6_set_from(rt, ort);
                rt->rt6i_metric = 0;
 
 #ifdef CONFIG_IPV6_SUBTREES
index 366fbba..c874822 100644 (file)
@@ -924,7 +924,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                if (tunnel->parms.iph.daddr && skb_dst(skb))
                        skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 
-               if (skb->len > mtu) {
+               if (skb->len > mtu && !skb_is_gso(skb)) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                        ip_rt_put(rt);
                        goto tx_error;
@@ -966,8 +966,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
        tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
        skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
-       if (IS_ERR(skb))
+       if (IS_ERR(skb)) {
+               ip_rt_put(rt);
                goto out;
+       }
 
        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
                            ttl, df, !net_eq(tunnel->net, dev_net(dev)));
index c8beafd..5a355a4 100644 (file)
@@ -63,6 +63,7 @@
 #include <net/ip_vs.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 
@@ -97,6 +98,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return;
 
+       /* Applications may adjust TCP seqs */
+       if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
+           !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
+               return;
+
        /*
         * The connection is not yet in the hashtable, so we update it.
         * CIP->VIP will remain the same, so leave the tuple in
index 17c1bcb..b2d38da 100644 (file)
@@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
        if (off == 0)
                return 0;
 
+       if (unlikely(!seqadj)) {
+               WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n");
+               return 0;
+       }
+
        set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
 
        spin_lock_bh(&ct->lock);
index 902fb0a..7a394df 100644 (file)
@@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net)
 void nf_conntrack_tstamp_pernet_fini(struct net *net)
 {
        nf_conntrack_tstamp_fini_sysctl(net);
-       nf_ct_extend_unregister(&tstamp_extend);
 }
 
 int nf_conntrack_tstamp_init(void)
index f93b7d0..71a9f49 100644 (file)
@@ -312,6 +312,9 @@ static int nf_tables_table_enable(struct nft_table *table)
        int err, i = 0;
 
        list_for_each_entry(chain, &table->chains, list) {
+               if (!(chain->flags & NFT_BASE_CHAIN))
+                       continue;
+
                err = nf_register_hook(&nft_base_chain(chain)->ops);
                if (err < 0)
                        goto err;
@@ -321,6 +324,9 @@ static int nf_tables_table_enable(struct nft_table *table)
        return 0;
 err:
        list_for_each_entry(chain, &table->chains, list) {
+               if (!(chain->flags & NFT_BASE_CHAIN))
+                       continue;
+
                if (i-- <= 0)
                        break;
 
@@ -333,8 +339,10 @@ static int nf_tables_table_disable(struct nft_table *table)
 {
        struct nft_chain *chain;
 
-       list_for_each_entry(chain, &table->chains, list)
-               nf_unregister_hook(&nft_base_chain(chain)->ops);
+       list_for_each_entry(chain, &table->chains, list) {
+               if (chain->flags & NFT_BASE_CHAIN)
+                       nf_unregister_hook(&nft_base_chain(chain)->ops);
+       }
 
        return 0;
 }
@@ -2098,17 +2106,21 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                                   struct netlink_callback *cb)
 {
        const struct nft_set *set;
-       unsigned int idx = 0, s_idx = cb->args[0];
+       unsigned int idx, s_idx = cb->args[0];
        struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
 
        if (cb->args[1])
                return skb->len;
 
        list_for_each_entry(table, &ctx->afi->tables, list) {
-               if (cur_table && cur_table != table)
-                       continue;
+               if (cur_table) {
+                       if (cur_table != table)
+                               continue;
 
+                       cur_table = NULL;
+               }
                ctx->table = table;
+               idx = 0;
                list_for_each_entry(set, &ctx->table->sets, list) {
                        if (idx < s_idx)
                                goto cont;
@@ -2370,7 +2382,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
        enum nft_registers dreg;
 
        dreg = nft_type_to_reg(set->dtype);
-       return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+       return nft_validate_data_load(ctx, dreg, &elem->data,
+                                     set->dtype == NFT_DATA_VERDICT ?
+                                     NFT_DATA_VERDICT : NFT_DATA_VALUE);
 }
 
 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
index 3c4b69e..a155d19 100644 (file)
@@ -1053,6 +1053,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net)
 #ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
 #endif
+       nf_log_unset(net, &nfulnl_logger);
 }
 
 static struct pernet_operations nfnl_log_net_ops = {
index 8e0bb75..55c939f 100644 (file)
@@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr,
 {
        struct nft_exthdr *priv = nft_expr_priv(expr);
        struct nft_data *dest = &data[priv->dreg];
-       unsigned int offset;
+       unsigned int offset = 0;
        int err;
 
        err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
index b4c8b00..ba2dffe 100644 (file)
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
        ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
        /* due to this, we will claim to support iWARP devices unless we
           check node_type. */
-       if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
+       if (ret || !cm_id->device ||
+           cm_id->device->node_type != RDMA_NODE_IB_CA)
                ret = -EADDRNOTAVAIL;
 
        rdsdebug("addr %pI4 ret %d node type %d\n",
index 33af772..62ced65 100644 (file)
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        if (msg->msg_name) {
                struct sockaddr_rose *srose;
+               struct full_sockaddr_rose *full_srose = msg->msg_name;
 
                memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
                srose = msg->msg_name;
@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
                srose->srose_addr   = rose->dest_addr;
                srose->srose_call   = rose->dest_call;
                srose->srose_ndigis = rose->dest_ndigis;
-               if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
-                       struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
-                       for (n = 0 ; n < rose->dest_ndigis ; n++)
-                               full_srose->srose_digis[n] = rose->dest_digis[n];
-                       msg->msg_namelen = sizeof(struct full_sockaddr_rose);
-               } else {
-                       if (rose->dest_ndigis >= 1) {
-                               srose->srose_ndigis = 1;
-                               srose->srose_digi = rose->dest_digis[0];
-                       }
-                       msg->msg_namelen = sizeof(struct sockaddr_rose);
-               }
+               for (n = 0 ; n < rose->dest_ndigis ; n++)
+                       full_srose->srose_digis[n] = rose->dest_digis[n];
+               msg->msg_namelen = sizeof(struct full_sockaddr_rose);
        }
 
        skb_free_datagram(sk, skb);
index 5c5edf5..11fe1a4 100644 (file)
@@ -77,16 +77,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
                                     &csum_idx_gen, &csum_hash_info);
                if (IS_ERR(pc))
                        return PTR_ERR(pc);
-               p = to_tcf_csum(pc);
                ret = ACT_P_CREATED;
        } else {
-               p = to_tcf_csum(pc);
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &csum_hash_info);
+               if (bind) /* don't override defaults */
+                       return 0;
+               tcf_hash_release(pc, bind, &csum_hash_info);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
 
+       p = to_tcf_csum(pc);
        spin_lock_bh(&p->tcf_lock);
        p->tcf_action = parm->action;
        p->update_flags = parm->update_flags;
index 5645a4d..eb9ba60 100644 (file)
@@ -102,10 +102,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
                        return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &gact_hash_info);
+               if (bind) /* don't override defaults */
+                       return 0;
+               tcf_hash_release(pc, bind, &gact_hash_info);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
 
        gact = to_gact(pc);
index 882a897..dcbfe8c 100644 (file)
@@ -141,10 +141,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                        return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
-               if (!ovr) {
-                       tcf_ipt_release(to_ipt(pc), bind);
+               if (bind) /* don't override defaults */
+                       return 0;
+               tcf_ipt_release(to_ipt(pc), bind);
+
+               if (!ovr)
                        return -EEXIST;
-               }
        }
        ipt = to_ipt(pc);
 
index 6a15ace..7686953 100644 (file)
@@ -70,15 +70,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                                     &nat_idx_gen, &nat_hash_info);
                if (IS_ERR(pc))
                        return PTR_ERR(pc);
-               p = to_tcf_nat(pc);
                ret = ACT_P_CREATED;
        } else {
-               p = to_tcf_nat(pc);
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &nat_hash_info);
+               if (bind)
+                       return 0;
+               tcf_hash_release(pc, bind, &nat_hash_info);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
+       p = to_tcf_nat(pc);
 
        spin_lock_bh(&p->tcf_lock);
        p->old_addr = parm->old_addr;
index 03b6767..7aa2dcd 100644 (file)
@@ -84,10 +84,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                ret = ACT_P_CREATED;
        } else {
                p = to_pedit(pc);
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &pedit_hash_info);
+               tcf_hash_release(pc, bind, &pedit_hash_info);
+               if (bind)
+                       return 0;
+               if (!ovr)
                        return -EEXIST;
-               }
+
                if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
                        keys = kmalloc(ksize, GFP_KERNEL);
                        if (keys == NULL)
index 16a62c3..ef246d8 100644 (file)
@@ -177,10 +177,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                        if (bind) {
                                police->tcf_bindcnt += 1;
                                police->tcf_refcnt += 1;
+                               return 0;
                        }
                        if (ovr)
                                goto override;
-                       return ret;
+                       /* not replacing */
+                       return -EEXIST;
                }
        }
 
index 31157d3..f7b45ab 100644 (file)
@@ -142,10 +142,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
                ret = ACT_P_CREATED;
        } else {
                d = to_defact(pc);
-               if (!ovr) {
-                       tcf_simp_release(d, bind);
+
+               if (bind)
+                       return 0;
+               tcf_simp_release(d, bind);
+               if (!ovr)
                        return -EEXIST;
-               }
+
                reset_policy(d, defdata, parm);
        }
 
index 35ea643..8fe9d25 100644 (file)
@@ -120,10 +120,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                ret = ACT_P_CREATED;
        } else {
                d = to_skbedit(pc);
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &skbedit_hash_info);
+               if (bind)
+                       return 0;
+               tcf_hash_release(pc, bind, &skbedit_hash_info);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
 
        spin_lock_bh(&d->tcf_lock);
index 53c452e..5e68b94 100644 (file)
@@ -38,6 +38,7 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
+MODULE_SOFTDEP("pre: sctp");
 MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
 MODULE_DESCRIPTION("SCTP snooper");
 MODULE_LICENSE("GPL");
@@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = {
        .entry  = jsctp_sf_eat_sack,
 };
 
+static __init int sctp_setup_jprobe(void)
+{
+       int ret = register_jprobe(&sctp_recv_probe);
+
+       if (ret) {
+               if (request_module("sctp"))
+                       goto out;
+               ret = register_jprobe(&sctp_recv_probe);
+       }
+
+out:
+       return ret;
+}
+
 static __init int sctpprobe_init(void)
 {
        int ret = -ENOMEM;
@@ -202,7 +217,7 @@ static __init int sctpprobe_init(void)
                         &sctpprobe_fops))
                goto free_kfifo;
 
-       ret = register_jprobe(&sctp_recv_probe);
+       ret = sctp_setup_jprobe();
        if (ret)
                goto remove_proc;
 
index c081a76..d43f318 100644 (file)
@@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk,
        return p_ptr;
 }
 
-int tipc_deleteport(u32 ref)
+int tipc_deleteport(struct tipc_port *p_ptr)
 {
-       struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
-       tipc_withdraw(ref, 0, NULL);
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
+       tipc_withdraw(p_ptr, 0, NULL);
 
-       tipc_ref_discard(ref);
-       tipc_port_unlock(p_ptr);
+       spin_lock_bh(p_ptr->lock);
+       tipc_ref_discard(p_ptr->ref);
+       spin_unlock_bh(p_ptr->lock);
 
        k_cancel_timer(&p_ptr->timer);
        if (p_ptr->connected) {
@@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
 }
 
 
-int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
+                struct tipc_name_seq const *seq)
 {
-       struct tipc_port *p_ptr;
        struct publication *publ;
        u32 key;
-       int res = -EINVAL;
 
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
+       if (p_ptr->connected)
                return -EINVAL;
+       key = p_ptr->ref + p_ptr->pub_count + 1;
+       if (key == p_ptr->ref)
+               return -EADDRINUSE;
 
-       if (p_ptr->connected)
-               goto exit;
-       key = ref + p_ptr->pub_count + 1;
-       if (key == ref) {
-               res = -EADDRINUSE;
-               goto exit;
-       }
        publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
                                    scope, p_ptr->ref, key);
        if (publ) {
                list_add(&publ->pport_list, &p_ptr->publications);
                p_ptr->pub_count++;
                p_ptr->published = 1;
-               res = 0;
+               return 0;
        }
-exit:
-       tipc_port_unlock(p_ptr);
-       return res;
+       return -EINVAL;
 }
 
-int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
+                 struct tipc_name_seq const *seq)
 {
-       struct tipc_port *p_ptr;
        struct publication *publ;
        struct publication *tpubl;
        int res = -EINVAL;
 
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
        if (!seq) {
                list_for_each_entry_safe(publ, tpubl,
                                         &p_ptr->publications, pport_list) {
@@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
        }
        if (list_empty(&p_ptr->publications))
                p_ptr->published = 0;
-       tipc_port_unlock(p_ptr);
        return res;
 }
 
index 9122535..34f12bd 100644 (file)
@@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err);
 
 void tipc_acknowledge(u32 port_ref, u32 ack);
 
-int tipc_deleteport(u32 portref);
+int tipc_deleteport(struct tipc_port *p_ptr);
 
 int tipc_portimportance(u32 portref, unsigned int *importance);
 int tipc_set_portimportance(u32 portref, unsigned int importance);
@@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
 int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
 int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
 
-int tipc_publish(u32 portref, unsigned int scope,
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
                 struct tipc_name_seq const *name_seq);
-int tipc_withdraw(u32 portref, unsigned int scope,
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
                  struct tipc_name_seq const *name_seq);
 
 int tipc_connect(u32 portref, struct tipc_portid const *port);
index 3b61851..e741416 100644 (file)
@@ -354,7 +354,7 @@ static int release(struct socket *sock)
         * Delete TIPC port; this ensures no more messages are queued
         * (also disconnects an active connection & sends a 'FIN-' to peer)
         */
-       res = tipc_deleteport(tport->ref);
+       res = tipc_deleteport(tport);
 
        /* Discard any remaining (connection-based) messages in receive queue */
        __skb_queue_purge(&sk->sk_receive_queue);
@@ -386,30 +386,46 @@ static int release(struct socket *sock)
  */
 static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
 {
+       struct sock *sk = sock->sk;
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
-       u32 portref = tipc_sk_port(sock->sk)->ref;
+       struct tipc_port *tport = tipc_sk_port(sock->sk);
+       int res = -EINVAL;
 
-       if (unlikely(!uaddr_len))
-               return tipc_withdraw(portref, 0, NULL);
+       lock_sock(sk);
+       if (unlikely(!uaddr_len)) {
+               res = tipc_withdraw(tport, 0, NULL);
+               goto exit;
+       }
 
-       if (uaddr_len < sizeof(struct sockaddr_tipc))
-               return -EINVAL;
-       if (addr->family != AF_TIPC)
-               return -EAFNOSUPPORT;
+       if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+               res = -EINVAL;
+               goto exit;
+       }
+       if (addr->family != AF_TIPC) {
+               res = -EAFNOSUPPORT;
+               goto exit;
+       }
 
        if (addr->addrtype == TIPC_ADDR_NAME)
                addr->addr.nameseq.upper = addr->addr.nameseq.lower;
-       else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
-               return -EAFNOSUPPORT;
+       else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+               res = -EAFNOSUPPORT;
+               goto exit;
+       }
 
        if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
            (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
-           (addr->addr.nameseq.type != TIPC_CFG_SRV))
-               return -EACCES;
+           (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
+               res = -EACCES;
+               goto exit;
+       }
 
-       return (addr->scope > 0) ?
-               tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
-               tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
+       res = (addr->scope > 0) ?
+               tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
+               tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
+exit:
+       release_sock(sk);
+       return res;
 }
 
 /**
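The bind() rewrite above moves from early returns to the kernel's single-exit locking idiom: take the socket lock once, route every failure through one label, and release the lock there. A hedged userspace sketch of the same shape, with a pthread mutex standing in for lock_sock():

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  static int do_bind(int uaddr_len)
  {
          int res = -22;          /* -EINVAL */

          pthread_mutex_lock(&lock);
          if (uaddr_len == 0) {
                  res = 0;        /* withdraw-all case */
                  goto exit;
          }
          if (uaddr_len < 16)     /* too short for the address */
                  goto exit;      /* res is already -EINVAL */
          res = 0;                /* publish succeeded */
  exit:
          pthread_mutex_unlock(&lock);
          return res;
  }

  int main(void)
  {
          printf("%d\n", do_bind(0));     /* 0 */
          printf("%d\n", do_bind(8));     /* -22 */
          return 0;
  }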
index a0ca162..a427623 100644 (file)
@@ -718,7 +718,9 @@ static int unix_autobind(struct socket *sock)
        int err;
        unsigned int retries = 0;
 
-       mutex_lock(&u->readlock);
+       err = mutex_lock_interruptible(&u->readlock);
+       if (err)
+               return err;
 
        err = 0;
        if (u->addr)
@@ -877,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        addr_len = err;
 
-       mutex_lock(&u->readlock);
+       err = mutex_lock_interruptible(&u->readlock);
+       if (err)
+               goto out;
 
        err = -EINVAL;
        if (u->addr)
index a271c27..722da61 100644 (file)
@@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init(
        /* find payload start allowing for extended bitmap(s) */
 
        if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
+               if ((unsigned long)iterator->_arg -
+                   (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
+                   (unsigned long)iterator->_max_length)
+                       return -EINVAL;
                while (get_unaligned_le32(iterator->_arg) &
                                        (1 << IEEE80211_RADIOTAP_EXT)) {
                        iterator->_arg += sizeof(uint32_t);
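The added guard above distills to one rule: before dereferencing a cursor for a 32-bit word, prove the word still lies inside the buffer. A small hedged sketch of that check:

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  /* nonzero if reading 4 bytes at cursor would run past buf + len */
  static int would_overrun(const uint8_t *buf, size_t len,
                           const uint8_t *cursor)
  {
          return (size_t)(cursor - buf) + sizeof(uint32_t) > len;
  }

  int main(void)
  {
          uint8_t buf[8];

          printf("%d\n", would_overrun(buf, sizeof(buf), buf + 4)); /* 0 */
          printf("%d\n", would_overrun(buf, sizeof(buf), buf + 5)); /* 1 */
          return 0;
  }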
index 65f8008..d3c5bd7 100644 (file)
@@ -632,6 +632,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
        }
 #endif
 
+       if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+               WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
+               bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+                                      wdev->ssid, wdev->ssid_len,
+                                      WLAN_CAPABILITY_ESS,
+                                      WLAN_CAPABILITY_ESS);
+               if (bss)
+                       cfg80211_hold_bss(bss_from_pub(bss));
+       }
+
        if (wdev->current_bss) {
                cfg80211_unhold_bss(wdev->current_bss);
                cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
@@ -649,16 +659,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                return;
        }
 
-       if (!bss) {
-               WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
-               bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
-                                      wdev->ssid, wdev->ssid_len,
-                                      WLAN_CAPABILITY_ESS,
-                                      WLAN_CAPABILITY_ESS);
-               if (WARN_ON(!bss))
-                       return;
-               cfg80211_hold_bss(bss_from_pub(bss));
-       }
+       if (WARN_ON(!bss))
+               return;
 
        wdev->current_bss = bss_from_pub(bss);
 
index 32b10f5..2dcb377 100644 (file)
@@ -82,7 +82,9 @@ kallsyms()
                kallsymopt="${kallsymopt} --all-symbols"
        fi
 
-       kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+       if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
+               kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+       fi
 
        local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \
                      ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
index 419491d..6625699 100644 (file)
@@ -4334,8 +4334,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
                err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
                                   PEER__RECV, &ad);
-               if (err)
+               if (err) {
                        selinux_netlbl_err(skb, err, 0);
+                       return err;
+               }
        }
 
        if (secmark_active) {
@@ -5586,11 +5588,11 @@ static int selinux_setprocattr(struct task_struct *p,
                /* Check for ptracing, and update the task SID if ok.
                   Otherwise, leave SID unchanged and fail. */
                ptsid = 0;
-               task_lock(p);
+               rcu_read_lock();
                tracer = ptrace_parent(p);
                if (tracer)
                        ptsid = task_sid(tracer);
-               task_unlock(p);
+               rcu_read_unlock();
 
                if (tracer) {
                        error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
index 6e03b46..a210467 100644 (file)
@@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
                case SNDRV_PCM_STATE_DISCONNECTED:
                        err = -EBADFD;
                        goto _endloop;
+               case SNDRV_PCM_STATE_PAUSED:
+                       continue;
                }
                if (!tout) {
                        snd_printd("%s write error (DMA or IRQ trouble?)\n",
index 27aa140..956871d 100644 (file)
@@ -3433,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev)
  * white/black-list for enable_msi
  */
 static struct snd_pci_quirk msi_black_list[] = {
+       SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
+       SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
+       SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
+       SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
        SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
        SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
        SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
index 34de5dc..c564694 100644 (file)
@@ -4247,12 +4247,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
        SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
+       SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
index 8697ced..1ead3c9 100644 (file)
@@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
 
        dma_params = ssc_p->dma_params[dir];
 
-       ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+       ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
        ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error);
 
        pr_debug("%s enabled SSC_SR=0x%08x\n",
@@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
        return 0;
 }
 
+static int atmel_ssc_trigger(struct snd_pcm_substream *substream,
+                            int cmd, struct snd_soc_dai *dai)
+{
+       struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
+       struct atmel_pcm_dma_params *dma_params;
+       int dir;
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               dir = 0;
+       else
+               dir = 1;
+
+       dma_params = ssc_p->dma_params[dir];
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+               break;
+       default:
+               ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
+               break;
+       }
+
+       return 0;
+}
 
 #ifdef CONFIG_PM
 static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai)
@@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = {
        .startup        = atmel_ssc_startup,
        .shutdown       = atmel_ssc_shutdown,
        .prepare        = atmel_ssc_prepare,
+       .trigger        = atmel_ssc_trigger,
        .hw_params      = atmel_ssc_hw_params,
        .set_fmt        = atmel_ssc_set_dai_fmt,
        .set_clkdiv     = atmel_ssc_set_dai_clkdiv,
index 1b37228..7d6a905 100644 (file)
@@ -109,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
        dai->stream_name = "WM8731 PCM";
        dai->codec_dai_name = "wm8731-hifi";
        dai->init = sam9x5_wm8731_init;
-       dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+       dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF
                | SND_SOC_DAIFMT_CBM_CFM;
 
        ret = snd_soc_of_parse_card_name(card, "atmel,model");
index 99b359e..0ab2dc2 100644 (file)
@@ -1012,7 +1012,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
        { "AEC Loopback", "HPOUT3L", "OUT3L" },
        { "AEC Loopback", "HPOUT3R", "OUT3R" },
        { "HPOUT3L", NULL, "OUT3L" },
-       { "HPOUT3R", NULL, "OUT3L" },
+       { "HPOUT3R", NULL, "OUT3R" },
 
        { "AEC Loopback", "SPKOUTL", "OUT4L" },
        { "SPKOUTLN", NULL, "OUT4L" },
index 3938fb1..53bbfac 100644 (file)
@@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_DSP_B:
-               aif1 |= WM8904_AIF_LRCLK_INV;
+               aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
        case SND_SOC_DAIFMT_DSP_A:
                aif1 |= 0x3;
                break;
index 543c5c2..0f17ed3 100644 (file)
@@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
        snd_soc_update_bits(codec, WM8962_CLOCKING_4,
                            WM8962_SYSCLK_RATE_MASK, clocking4);
 
+       /* DSPCLK_DIV can only be generated correctly after SYSCLK is enabled.
+        * So we provisionally enable it here, then disable it again afterward
+        * if the current bias_level hasn't reached SND_SOC_BIAS_ON.
+        */
+       if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+               snd_soc_update_bits(codec, WM8962_CLOCKING2,
+                               WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
+
        dspclk = snd_soc_read(codec, WM8962_CLOCKING1);
+
+       if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+               snd_soc_update_bits(codec, WM8962_CLOCKING2,
+                               WM8962_SYSCLK_ENA_MASK, 0);
+
        if (dspclk < 0) {
                dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk);
                return;
index 46ec0e9..4fbcab6 100644 (file)
@@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
                return ret;
 
        /* Wait for the RAM to start, should be near instantaneous */
-       count = 0;
-       do {
+       for (count = 0; count < 10; ++count) {
                ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
                                  &val);
                if (ret != 0)
                        return ret;
-       } while (!(val & ADSP2_RAM_RDY) && ++count < 10);
+
+               if (val & ADSP2_RAM_RDY)
+                       break;
+
+               msleep(1);
+       }
 
        if (!(val & ADSP2_RAM_RDY)) {
                adsp_err(dsp, "Failed to start DSP RAM\n");
index 61e4885..3fd76bc 100644 (file)
@@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
                break;
        }
 
-       dapm->bias_level = level;
-
        return 0;
 }
 
index 0b18f65..3920a5e 100644 (file)
@@ -473,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
        .playback = {
                .channels_min = 1,
                .channels_max = 2,
-               .rates = SNDRV_PCM_RATE_8000_192000 |
-                        SNDRV_PCM_RATE_CONTINUOUS |
-                        SNDRV_PCM_RATE_KNOT,
+               .rates = SNDRV_PCM_RATE_CONTINUOUS,
+               .rate_min = 5512,
+               .rate_max = 192000,
                .formats = KIRKWOOD_I2S_FORMATS,
        },
        .capture = {
                .channels_min = 1,
                .channels_max = 2,
-               .rates = SNDRV_PCM_RATE_8000_192000 |
-                        SNDRV_PCM_RATE_CONTINUOUS |
-                        SNDRV_PCM_RATE_KNOT,
+               .rates = SNDRV_PCM_RATE_CONTINUOUS,
+               .rate_min = 5512,
+               .rate_max = 192000,
                .formats = KIRKWOOD_I2S_FORMATS,
        },
        .ops = &kirkwood_i2s_dai_ops,
@@ -494,17 +494,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
        .playback = {
                .channels_min = 1,
                .channels_max = 2,
-               .rates = SNDRV_PCM_RATE_8000_192000 |
-                        SNDRV_PCM_RATE_CONTINUOUS |
-                        SNDRV_PCM_RATE_KNOT,
+               .rates = SNDRV_PCM_RATE_CONTINUOUS,
+               .rate_min = 5512,
+               .rate_max = 192000,
                .formats = KIRKWOOD_SPDIF_FORMATS,
        },
        .capture = {
                .channels_min = 1,
                .channels_max = 2,
-               .rates = SNDRV_PCM_RATE_8000_192000 |
-                        SNDRV_PCM_RATE_CONTINUOUS |
-                        SNDRV_PCM_RATE_KNOT,
+               .rates = SNDRV_PCM_RATE_CONTINUOUS,
+               .rate_min = 5512,
+               .rate_max = 192000,
                .formats = KIRKWOOD_SPDIF_FORMATS,
        },
        .ops = &kirkwood_i2s_dai_ops,
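
With an external clock the controller is not limited to the discrete
standard rates, so the rate bitmask (and SNDRV_PCM_RATE_KNOT) gives way to
a continuous range: SNDRV_PCM_RATE_CONTINUOUS tells ALSA to accept any rate
between rate_min and rate_max.  The shape of the resulting declaration:

	/* Hedged sketch: advertising a continuous rate range on a DAI */
	.playback = {
		.channels_min = 1,
		.channels_max = 2,
		.rates    = SNDRV_PCM_RATE_CONTINUOUS,	/* any rate in range */
		.rate_min = 5512,			/* lowest standard PCM rate */
		.rate_max = 192000,
	},
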
index cbc9c96..41949af 100644
@@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
        }
 }
 
+static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
+{
+       unsigned int i;
+
+       for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
+            i++) {
+               if (!pcm->chan[i])
+                       continue;
+               dma_release_channel(pcm->chan[i]);
+               if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
+                       break;
+       }
+}
+
 /**
  * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
  * @dev: The parent device for the PCM device
@@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev,
        const struct snd_dmaengine_pcm_config *config, unsigned int flags)
 {
        struct dmaengine_pcm *pcm;
+       int ret;
 
        pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
        if (!pcm)
@@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev,
        dmaengine_pcm_request_chan_of(pcm, dev);
 
        if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
-               return snd_soc_add_platform(dev, &pcm->platform,
+               ret = snd_soc_add_platform(dev, &pcm->platform,
                                &dmaengine_no_residue_pcm_platform);
        else
-               return snd_soc_add_platform(dev, &pcm->platform,
+               ret = snd_soc_add_platform(dev, &pcm->platform,
                                &dmaengine_pcm_platform);
+       if (ret)
+               goto err_free_dma;
+
+       return 0;
+
+err_free_dma:
+       dmaengine_pcm_release_chan(pcm);
+       kfree(pcm);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
 
@@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
 {
        struct snd_soc_platform *platform;
        struct dmaengine_pcm *pcm;
-       unsigned int i;
 
        platform = snd_soc_lookup_platform(dev);
        if (!platform)
@@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
 
        pcm = soc_platform_to_pcm(platform);
 
-       for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
-               if (pcm->chan[i]) {
-                       dma_release_channel(pcm->chan[i]);
-                       if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
-                               break;
-               }
-       }
-
        snd_soc_remove_platform(platform);
+       dmaengine_pcm_release_chan(pcm);
        kfree(pcm);
 }
 EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
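
Two fixes in one: snd_dmaengine_pcm_register() used to leak both the
requested DMA channels and the pcm allocation when snd_soc_add_platform()
failed, and the channel teardown lived only in the unregister path.  The
teardown is now factored into dmaengine_pcm_release_chan() and shared by
the new error path and snd_dmaengine_pcm_unregister().  The classic
goto-unwind shape, with hypothetical example_* helpers:

	/* Hedged sketch: unwind acquired resources in reverse order */
	int example_register(struct device *dev)
	{
		struct example_pcm *pcm;
		int ret;

		pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
		if (!pcm)
			return -ENOMEM;

		example_request_channels(pcm);

		ret = example_add_platform(dev, pcm);
		if (ret)
			goto err_free_dma;

		return 0;

	err_free_dma:
		example_release_channels(pcm);	/* undo the request */
		kfree(pcm);
		return ret;
	}
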
index 11a90cd..891b9a9 100644
@@ -600,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
        struct snd_soc_platform *platform = rtd->platform;
        struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
        struct snd_soc_dai *codec_dai = rtd->codec_dai;
-       struct snd_soc_codec *codec = rtd->codec;
+       bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
 
        mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
        /* apply codec digital mute */
-       if (!codec->active)
+       if ((playback && codec_dai->playback_active == 1) ||
+           (!playback && codec_dai->capture_active == 1))
                snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
 
        /* free any machine hw params */
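
codec->active counts active streams in both directions across the whole
codec, so a running capture stream could stop playback from ever being
muted here (and vice versa).  The check now keys off the closing
substream's own direction and fires when it is the last active stream of
that direction:

	/* Old: mute only once the whole codec went idle */
	if (!codec->active)
		snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);

	/* New: mute when the last stream of this direction closes */
	if ((playback && codec_dai->playback_active == 1) ||
	    (!playback && codec_dai->capture_active == 1))
		snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
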
index 364bf6a..8c819f8 100644
@@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
                                unsigned int fmt)
 {
        struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-       unsigned int mask, val;
+       unsigned int mask = 0, val = 0;
 
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
        case SND_SOC_DAIFMT_NB_NF:
@@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
                return -EINVAL;
        }
 
-       mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+       mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBS_CFS:
-               val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+               val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
                break;
        case SND_SOC_DAIFMT_CBM_CFM:
                break;
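
Initialising mask and val to 0 and accumulating with |= guarantees both
are well-defined on every path through the switches; the CBM_CFM case, for
instance, intentionally sets no val bits, and previously left val
uninitialised.  The same pattern is applied to tegra20-spdif and
tegra30-i2s in the next two hunks.  Sketched in isolation with
hypothetical EXAMPLE_* bits:

	/* Hedged sketch: build a register update from zeroed accumulators */
	unsigned int mask = 0, val = 0;

	mask |= EXAMPLE_MASTER_ENABLE;		/* field always written */
	if (master)
		val |= EXAMPLE_MASTER_ENABLE;	/* slave mode leaves it clear */

	regmap_update_bits(map, EXAMPLE_CTRL, mask, val);
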
index 08bc693..8c7c102 100644
@@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
 {
        struct device *dev = dai->dev;
        struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
-       unsigned int mask, val;
+       unsigned int mask = 0, val = 0;
        int ret, spdifclock;
 
-       mask = TEGRA20_SPDIF_CTRL_PACK |
-              TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
+       mask |= TEGRA20_SPDIF_CTRL_PACK |
+               TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
-               val = TEGRA20_SPDIF_CTRL_PACK |
-                     TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
+               val |= TEGRA20_SPDIF_CTRL_PACK |
+                      TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
                break;
        default:
                return -EINVAL;
index 231a785..02247fe 100644
@@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
                                unsigned int fmt)
 {
        struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-       unsigned int mask, val;
+       unsigned int mask = 0, val = 0;
 
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
        case SND_SOC_DAIFMT_NB_NF:
@@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
                return -EINVAL;
        }
 
-       mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+       mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBS_CFS:
-               val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+               val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
                break;
        case SND_SOC_DAIFMT_CBM_CFM:
                break;
index dc4de37..bcf1d2f 100644
@@ -18,9 +18,9 @@
 #include "helpers/bitmask.h"
 
 static struct option set_opts[] = {
-       { .name = "perf-bias",  .has_arg = optional_argument,   .flag = NULL,   .val = 'b'},
-       { .name = "sched-mc",   .has_arg = optional_argument,   .flag = NULL,   .val = 'm'},
-       { .name = "sched-smt",  .has_arg = optional_argument,   .flag = NULL,   .val = 's'},
+       { .name = "perf-bias",  .has_arg = required_argument,   .flag = NULL,   .val = 'b'},
+       { .name = "sched-mc",   .has_arg = required_argument,   .flag = NULL,   .val = 'm'},
+       { .name = "sched-smt",  .has_arg = required_argument,   .flag = NULL,   .val = 's'},
        { },
 };
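
All three long options take a value, so has_arg becomes required_argument.
With optional_argument, GNU getopt_long() only attaches the value in the
"--opt=value" spelling; a space-separated "cpupower set --perf-bias 5"
would leave optarg NULL and treat the 5 as a stray argument.  A minimal
standalone illustration:

	#include <getopt.h>
	#include <stdio.h>

	static struct option opts[] = {
		{ .name = "perf-bias", .has_arg = required_argument,
		  .flag = NULL, .val = 'b' },
		{ },
	};

	int main(int argc, char **argv)
	{
		int c;

		/* "b:" mirrors required_argument for the short option */
		while ((c = getopt_long(argc, argv, "b:", opts, NULL)) != -1)
			if (c == 'b')
				printf("perf-bias = %s\n", optarg);
		return 0;
	}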