Merge tag 'usb-3.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 29 Aug 2014 19:10:03 +0000 (12:10 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 29 Aug 2014 19:10:03 +0000 (12:10 -0700)
Pull USB fixes from Greg KH:
 "Here are a bunch of fixes for the USB drivers for 3.17-rc3.

  Also in here is the movement of the usbip driver out of staging, into
  the "real" part of the kernel, it had to wait until after -rc1 to
  handle the merge issues involved between the USB and staging trees.
  The code is identical, just file movements there.

  The USB fixes are all over the place: new device ids, xhci fixes for
  reported issues, and the usual gadget driver fixes as well.  All have
  been in linux-next for a while now"

* tag 'usb-3.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (46 commits)
  USB: fix build error with CONFIG_PM_RUNTIME disabled
  Revert "usb: ehci/ohci-exynos: Fix PHY getting sequence"
  xhci: Disable streams on Via XHCI with device-id 0x3432
  USB: serial: fix potential heap buffer overflow
  USB: serial: fix potential stack buffer overflow
  usb: ehci/ohci-exynos: Fix PHY getting sequence
  usb: hub: Prevent hub autosuspend if usbcore.autosuspend is -1
  USB: sisusb: add device id for Magic Control USB video
  usb: dwc2: gadget: Set the default EP max packet value as 8 bytes
  usb: ehci: using wIndex + 1 for hub port
  USB: storage: add quirk for Newer Technology uSCSI SCSI-USB converter
  MAINTAINERS: Add an entry for USB/IP driver
  usbip: remove struct usb_device_id table
  usbip: move usbip kernel code out of staging
  usbip: move usbip userspace code out of staging
  USB: whiteheat: Added bounds checking for bulk command response
  usb: gadget: remove $(PWD) in ccflags-y
  usb: pch_udc: usb gadget device support for Intel Quark X1000
  usb: gadget: uvc: fix possible lockup in uvc gadget
  usb: wusbcore: fix below build warning
  ...

337 files changed:
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
Documentation/devicetree/bindings/mfd/tc3589x.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/designware-pcie.txt
Documentation/devicetree/bindings/pci/ti-pci.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
Documentation/dma-buf-sharing.txt
Documentation/this_cpu_ops.txt
Documentation/x86/tlb.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/io.h
arch/alpha/include/asm/unistd.h
arch/alpha/include/uapi/asm/unistd.h
arch/alpha/kernel/systbls.S
arch/arm/boot/dts/exynos4412-odroid-common.dtsi
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
arch/arm/boot/dts/imx6sx-pinfunc.h
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/rk3066a-bqcurie2.dts
arch/arm/boot/dts/rk3188-radxarock.dts
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/tegra30-apalis.dtsi
arch/arm/boot/dts/tegra30-colibri.dtsi
arch/arm/boot/dts/vf610-twr.dts
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/Makefile
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/suspend-imx6.S
arch/arm/mach-shmobile/Kconfig
arch/arm64/Makefile
arch/arm64/configs/defconfig
arch/arm64/include/asm/sparsemem.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/head.S
arch/arm64/kernel/ptrace.c
arch/arm64/mm/init.c
arch/frv/include/asm/processor.h
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/entry.S
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/syscall_table.S
arch/mips/alchemy/devboards/db1200.c
arch/mips/bcm47xx/setup.c
arch/mips/cavium-octeon/setup.c
arch/mips/include/asm/eva.h [new file with mode: 0644]
arch/mips/include/asm/gic.h
arch/mips/include/asm/irq.h
arch/mips/include/asm/mach-malta/kernel-entry-init.h
arch/mips/include/asm/mach-netlogic/topology.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/syscall.h
arch/mips/kernel/cps-vec.S
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/scall64-o32.S
arch/mips/loongson/loongson-3/cop2-ex.c
arch/mips/loongson/loongson-3/numa.c
arch/mips/mm/cache.c
arch/mips/mti-malta/malta-memory.c
arch/mips/pmcs-msp71xx/msp_irq.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/compat_wrapper.c
arch/s390/kernel/ipl.c
arch/s390/kernel/setup.c
arch/s390/kernel/syscalls.S
arch/sh/Kconfig
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/entry_32.S
arch/x86/kvm/emulate.c
arch/x86/mm/tlb.c
block/bio-integrity.c
block/blk-core.c
block/blk-mq.c
block/cfq-iosched.c
block/scsi_ioctl.c
drivers/ata/ahci_tegra.c
drivers/ata/ahci_xgene.c
drivers/ata/libata-core.c
drivers/ata/pata_samsung_cf.c
drivers/ata/pata_scc.c
drivers/block/brd.c
drivers/block/xsysace.c
drivers/bus/arm-ccn.c
drivers/dma-buf/fence.c
drivers/firmware/efi/vars.c
drivers/gpio/devres.c
drivers/gpio/gpio-lynxpoint.c
drivers/gpio/gpio-zynq.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/ast/ast_tables.h
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_dma.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_ib.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_semaphore.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770_dma.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/gpu/drm/sti/Kconfig
drivers/gpu/drm/sti/sti_drm_drv.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_tvout.c
drivers/hid/hid-cherry.c
drivers/hid/hid-huion.c
drivers/hid/hid-kye.c
drivers/hid/hid-lg.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-dj.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-monterey.c
drivers/hid/hid-petalynx.c
drivers/hid/hid-picolcd_core.c
drivers/hid/hid-rmi.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-sunplus.c
drivers/i2c/Kconfig
drivers/i2c/Makefile
drivers/i2c/busses/i2c-i801.c
drivers/i2c/i2c-acpi.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/isdn/hardware/eicon/xdi_msg.h
drivers/md/dm-crypt.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/mfd/ab8500-core.c
drivers/mfd/htc-i2cpld.c
drivers/mfd/omap-usb-host.c
drivers/mfd/twl4030-power.c
drivers/misc/mei/client.c
drivers/misc/mei/nfc.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/flexcan.c
drivers/net/can/sja1000/sja1000.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/macvlan.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/smsc.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/of/selftest.c
drivers/pci/host/Kconfig
drivers/pci/host/Makefile
drivers/pci/host/pci-dra7xx.c [new file with mode: 0644]
drivers/pci/host/pci-tegra.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-designware.h
drivers/pinctrl/nomadik/pinctrl-abx500.c
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-tegra-xusb.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-samsung.h
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
drivers/platform/x86/toshiba_acpi.c
drivers/pwm/core.c
drivers/s390/char/con3215.c
drivers/s390/char/sclp_tty.c
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/sh/Makefile
drivers/sh/intc/Kconfig
drivers/staging/android/logger.c
drivers/staging/et131x/et131x.c
drivers/staging/lustre/lustre/libcfs/workitem.c
drivers/staging/lustre/lustre/obdclass/class_obd.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/thunderbolt/path.c
drivers/video/backlight/pwm_bl.c
drivers/video/fbdev/amba-clcd.c
drivers/video/fbdev/atmel_lcdfb.c
drivers/video/fbdev/chipsfb.c
drivers/video/fbdev/da8xx-fb.c
drivers/video/of_display_timing.c
fs/aio.c
fs/btrfs/async-thread.c
fs/btrfs/async-thread.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/reada.c
fs/btrfs/scrub.c
fs/btrfs/sysfs.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c
fs/cifs/smb2maperror.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smbfsctl.h
fs/ext3/super.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/super.c
fs/isofs/inode.c
fs/isofs/isofs.h
fs/isofs/rock.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/recovery.c
fs/jbd2/revoke.c
fs/nfs/pagelist.c
fs/nfs/write.c
fs/udf/namei.c
include/drm/drm_pciids.h
include/linux/blk-mq.h
include/linux/brcmphy.h
include/linux/ftrace.h
include/linux/gpio/consumer.h
include/linux/i2c.h
include/linux/jbd2.h
include/linux/nfs_page.h
include/linux/seqno-fence.h
include/uapi/asm-generic/unistd.h
include/uapi/drm/radeon_drm.h
kernel/events/core.c
kernel/kprobes.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
lib/Kconfig.debug
net/atm/lec.c
net/batman-adv/fragmentation.c
net/ipv6/ip6_fib.c
net/mac80211/chan.c
net/openvswitch/actions.c
net/packet/af_packet.c
net/packet/internal.h
net/sched/sch_cbq.c
net/sctp/associola.c
net/tipc/port.h
net/tipc/socket.c
scripts/kernel-doc
security/tomoyo/realpath.c
sound/core/info.c
sound/core/pcm_misc.c
sound/pci/ctxfi/ct20k1reg.h
sound/pci/hda/ca0132_regs.h
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/arizona.c
sound/soc/codecs/pcm512x.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/fsl/Kconfig
sound/soc/fsl/fsl_esai.c
sound/soc/intel/sst-acpi.c
sound/soc/intel/sst-baytrail-ipc.c
sound/soc/intel/sst-baytrail-ipc.h
sound/soc/intel/sst-baytrail-pcm.c
sound/soc/pxa/pxa-ssp.c
sound/soc/soc-dapm.c
virt/kvm/assigned-dev.c
virt/kvm/iommu.c

index 1486497..ce6a1a0 100644 (file)
@@ -4,11 +4,13 @@ Specifying interrupt information for devices
 1) Interrupt client nodes
 -------------------------
 
-Nodes that describe devices which generate interrupts must contain an either an
-"interrupts" property or an "interrupts-extended" property. These properties
-contain a list of interrupt specifiers, one per output interrupt. The format of
-the interrupt specifier is determined by the interrupt controller to which the
-interrupts are routed; see section 2 below for details.
+Nodes that describe devices which generate interrupts must contain an
+"interrupts" property, an "interrupts-extended" property, or both. If both are
+present, the latter should take precedence; the former may be provided simply
+for compatibility with software that does not recognize the latter. These
+properties contain a list of interrupt specifiers, one per output interrupt. The
+format of the interrupt specifier is determined by the interrupt controller to
+which the interrupts are routed; see section 2 below for details.
 
   Example:
        interrupt-parent = <&intc1>;
diff --git a/Documentation/devicetree/bindings/mfd/tc3589x.txt b/Documentation/devicetree/bindings/mfd/tc3589x.txt
new file mode 100644 (file)
index 0000000..6fcedba
--- /dev/null
@@ -0,0 +1,107 @@
+* Toshiba TC3589x multi-purpose expander
+
+The Toshiba TC3589x series are I2C-based MFD devices which may expose the
+following built-in devices: gpio, keypad, rotator (vibrator), and PWM
+(e.g. for LEDs or vibrators). The included models are:
+
+- TC35890
+- TC35892
+- TC35893
+- TC35894
+- TC35895
+- TC35896
+
+Required properties:
+ - compatible : must be "toshiba,tc35890", "toshiba,tc35892", "toshiba,tc35893",
+   "toshiba,tc35894", "toshiba,tc35895" or "toshiba,tc35896"
+ - reg : I2C address of the device
+ - interrupt-parent : specifies which IRQ controller we're connected to
+ - interrupts : the interrupt on the parent the controller is connected to
+ - interrupt-controller : marks the device node as an interrupt controller
+ - #interrupt-cells : should be <1>, the first cell is the IRQ offset on this
+   TC3589x interrupt controller.
+
+Optional nodes:
+
+- GPIO
+  This GPIO module inside the TC3589x has 24 (TC35890, TC35892) or 20
+  (other models) GPIO lines.
+ - compatible : must be "toshiba,tc3589x-gpio"
+ - interrupts : interrupt on the parent, which must be the tc3589x MFD device
+ - interrupt-controller : marks the device node as an interrupt controller
+ - #interrupt-cells : should be <2>, the first cell is the IRQ offset on this
+   TC3589x GPIO interrupt controller, the second cell is the interrupt flags
+   in accordance with <dt-bindings/interrupt-controller/irq.h>. The following
+   flags are valid:
+   - IRQ_TYPE_LEVEL_LOW
+   - IRQ_TYPE_LEVEL_HIGH
+   - IRQ_TYPE_EDGE_RISING
+   - IRQ_TYPE_EDGE_FALLING
+   - IRQ_TYPE_EDGE_BOTH
+ - gpio-controller : marks the device node as a GPIO controller
+ - #gpio-cells : should be <2>, the first cell is the GPIO offset on this
+   GPIO controller, the second cell is the flags.
+
+- Keypad
+  This keypad is the same on all variants, supporting up to 96 different
+  keys. The linux-specific properties are modeled on those already existing
+  in other input drivers.
+ - compatible : must be "toshiba,tc3589x-keypad"
+ - debounce-delay-ms : debounce interval in milliseconds
+ - keypad,num-rows : number of rows in the matrix, see
+   bindings/input/matrix-keymap.txt
+ - keypad,num-columns : number of columns in the matrix, see
+   bindings/input/matrix-keymap.txt
+ - linux,keymap: the definition can be found in
+   bindings/input/matrix-keymap.txt
+ - linux,no-autorepeat: do not enable the autorepeat feature.
+ - linux,wakeup: use any event on the keypad as a wakeup event.
+
+Example:
+
+tc35893@44 {
+       compatible = "toshiba,tc35893";
+       reg = <0x44>;
+       interrupt-parent = <&gpio6>;
+       interrupts = <26 IRQ_TYPE_EDGE_RISING>;
+
+       interrupt-controller;
+       #interrupt-cells = <1>;
+
+       tc3589x_gpio {
+               compatible = "toshiba,tc3589x-gpio";
+               interrupts = <0>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+       tc3589x_keypad {
+               compatible = "toshiba,tc3589x-keypad";
+               interrupts = <6>;
+               debounce-delay-ms = <4>;
+               keypad,num-columns = <8>;
+               keypad,num-rows = <8>;
+               linux,no-autorepeat;
+               linux,wakeup;
+               linux,keymap = <0x0301006b
+                               0x04010066
+                               0x06040072
+                               0x040200d7
+                               0x0303006a
+                               0x0205000e
+                               0x0607008b
+                               0x0500001c
+                               0x0403000b
+                               0x03040034
+                               0x05020067
+                               0x0305006c
+                               0x040500e7
+                               0x0005009e
+                               0x06020073
+                               0x01030039
+                               0x07060069
+                               0x050500d9>;
+       };
+};
index d0d15ee..ed0d9b9 100644 (file)
@@ -2,6 +2,10 @@
 
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the core.
+- reg: Should contain the configuration address space.
+- reg-names: Must be "config" for the PCIe configuration space.
+    (The old way of getting the configuration address space from "ranges"
+    is deprecated and should be avoided.)
 - #address-cells: set to <3>
 - #size-cells: set to <2>
 - device_type: set to "pci"
diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt
new file mode 100644 (file)
index 0000000..3d21791
--- /dev/null
@@ -0,0 +1,59 @@
+TI PCI Controllers
+
+PCIe Designware Controller
+ - compatible: Should be "ti,dra7-pcie"
+ - reg : Three register ranges as listed in the reg-names property
+ - reg-names : The first entry must be "ti_conf" for the TI-specific registers
+              The second entry must be "rc_dbics" for the DesignWare PCIe
+              registers
+              The third entry must be "config" for the PCIe configuration space
+ - phys : list of PHY specifiers (used by generic PHY framework)
+ - phy-names : must be "pcie-phy0", "pcie-phy1", ..., "pcie-phyN", based on
+              the number of PHYs specified in the *phys* property.
+ - ti,hwmods : Name of the hwmod associated with the PCIe controller,
+              "pcie<X>", where <X> is the instance number of the PCIe
+              controller from the HW spec.
+ - interrupts : Two interrupt entries must be specified. The first one is for
+               the main interrupt line and the second for the MSI interrupt
+               line.
+ - #address-cells,
+   #size-cells,
+   #interrupt-cells,
+   device_type,
+   ranges,
+   num-lanes,
+   interrupt-map-mask,
+   interrupt-map : as specified in ../designware-pcie.txt
+
+Example:
+axi {
+       compatible = "simple-bus";
+       #size-cells = <1>;
+       #address-cells = <1>;
+       ranges = <0x51000000 0x51000000 0x3000
+                 0x0        0x20000000 0x10000000>;
+       pcie@51000000 {
+               compatible = "ti,dra7-pcie";
+               reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>;
+               reg-names = "rc_dbics", "ti_conf", "config";
+               interrupts = <0 232 0x4>, <0 233 0x4>;
+               #address-cells = <3>;
+               #size-cells = <2>;
+               device_type = "pci";
+               ranges = <0x81000000 0 0          0x03000 0 0x00010000
+                         0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+               #interrupt-cells = <1>;
+               num-lanes = <1>;
+               ti,hwmods = "pcie1";
+               phys = <&pcie1_phy>;
+               phy-names = "pcie-phy0";
+               interrupt-map-mask = <0 0 0 7>;
+               interrupt-map = <0 0 0 1 &pcie_intc 1>,
+                               <0 0 0 2 &pcie_intc 2>,
+                               <0 0 0 3 &pcie_intc 3>,
+                               <0 0 0 4 &pcie_intc 4>;
+               pcie_intc: interrupt-controller {
+                       interrupt-controller;
+                       #address-cells = <0>;
+                       #interrupt-cells = <1>;
+               };
+       };
+};
index 0211c6d..92fae82 100644 (file)
@@ -62,7 +62,7 @@ Example:
                #gpio-cells = <2>;
                interrupt-controller;
                #interrupt-cells = <2>;
-               interrupts = <0 32 0x4>;
+               interrupts = <0 16 0x4>;
 
                pinctrl-names = "default";
                pinctrl-0 = <&gsbi5_uart_default>;
index 67a4087..bb9753b 100644 (file)
@@ -56,10 +56,10 @@ The dma_buf buffer sharing API usage contains the following steps:
                                     size_t size, int flags,
                                     const char *exp_name)
 
-   If this succeeds, dma_buf_export allocates a dma_buf structure, and returns a
-   pointer to the same. It also associates an anonymous file with this buffer,
-   so it can be exported. On failure to allocate the dma_buf object, it returns
-   NULL.
+   If this succeeds, dma_buf_export_named allocates a dma_buf structure, and
+   returns a pointer to the same. It also associates an anonymous file with this
+   buffer, so it can be exported. On failure to allocate the dma_buf object,
+   it returns NULL.
 
    'exp_name' is the name of exporter - to facilitate information while
    debugging.
@@ -76,7 +76,7 @@ The dma_buf buffer sharing API usage contains the following steps:
    drivers and/or processes.
 
    Interface:
-      int dma_buf_fd(struct dma_buf *dmabuf)
+      int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 
    This API installs an fd for the anonymous file associated with this buffer;
    returns either 'fd', or error.
@@ -157,7 +157,9 @@ to request use of buffer for allocation.
    "dma_buf->ops->" indirection from the users of this interface.
 
    In struct dma_buf_ops, unmap_dma_buf is defined as
-      void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *);
+      void (*unmap_dma_buf)(struct dma_buf_attachment *,
+                            struct sg_table *,
+                            enum dma_data_direction);
 
    unmap_dma_buf signifies the end-of-DMA for the attachment provided. Like
    map_dma_buf, this API also must be implemented by the exporter.
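
The hunk above documents the new three-argument unmap_dma_buf signature. As a
rough illustration only (not code from this merge; my_unmap_dma_buf, the use of
the attachment's device, and freeing the table with kfree are assumptions about
a typical exporter), such a callback might look like:

	#include <linux/dma-buf.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Hypothetical exporter callback matching the documented signature. */
	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *sgt,
				     enum dma_data_direction dir)
	{
		/* Undo the DMA mapping created by the matching map_dma_buf. */
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
		sg_free_table(sgt);
		kfree(sgt);
	}
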
index 1a4ce7e..0ec9957 100644 (file)
@@ -2,26 +2,26 @@ this_cpu operations
 -------------------
 
 this_cpu operations are a way of optimizing access to per cpu
-variables associated with the *currently* executing processor through
-the use of segment registers (or a dedicated register where the cpu
-permanently stored the beginning of the per cpu area for a specific
-processor).
+variables associated with the *currently* executing processor. This is
+done through the use of segment registers (or a dedicated register where
+the cpu permanently stores the beginning of the per cpu area for a
+specific processor).
 
-The this_cpu operations add a per cpu variable offset to the processor
-specific percpu base and encode that operation in the instruction
+this_cpu operations add a per cpu variable offset to the processor
+specific per cpu base and encode that operation in the instruction
 operating on the per cpu variable.
 
-This means there are no atomicity issues between the calculation of
+This means that there are no atomicity issues between the calculation of
 the offset and the operation on the data. Therefore it is not
-necessary to disable preempt or interrupts to ensure that the
+necessary to disable preemption or interrupts to ensure that the
 processor is not changed between the calculation of the address and
 the operation on the data.
 
 Read-modify-write operations are of particular interest. Frequently
 processors have special lower latency instructions that can operate
-without the typical synchronization overhead but still provide some
-sort of relaxed atomicity guarantee. The x86 for example can execute
-RMV (Read Modify Write) instructions like inc/dec/cmpxchg without the
+without the typical synchronization overhead, but still provide some
+sort of relaxed atomicity guarantees. The x86, for example, can execute
+RMW (Read Modify Write) instructions like inc/dec/cmpxchg without the
 lock prefix and the associated latency penalty.
 
 Access to the variable without the lock prefix is not synchronized but
@@ -30,6 +30,38 @@ data specific to the currently executing processor. Only the current
 processor should be accessing that variable and therefore there are no
 concurrency issues with other processors in the system.
 
+Please note that accesses by remote processors to a per cpu area are
+exceptional situations and may impact performance and/or correctness
+(remote write operations) of local RMW operations via this_cpu_*.
+
+The main use of the this_cpu operations has been to optimize counter
+operations.
+
+The following this_cpu() operations with implied preemption protection
+are defined. These operations can be used without worrying about
+preemption and interrupts.
+
+       this_cpu_add()
+       this_cpu_read(pcp)
+       this_cpu_write(pcp, val)
+       this_cpu_add(pcp, val)
+       this_cpu_and(pcp, val)
+       this_cpu_or(pcp, val)
+       this_cpu_add_return(pcp, val)
+       this_cpu_xchg(pcp, nval)
+       this_cpu_cmpxchg(pcp, oval, nval)
+       this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+       this_cpu_sub(pcp, val)
+       this_cpu_inc(pcp)
+       this_cpu_dec(pcp)
+       this_cpu_sub_return(pcp, val)
+       this_cpu_inc_return(pcp)
+       this_cpu_dec_return(pcp)
+
+
+Inner working of this_cpu operations
+------------------------------------
+
 On x86 the fs: or the gs: segment registers contain the base of the
 per cpu area. It is then possible to simply use the segment override
 to relocate a per cpu relative address to the proper per cpu area for
@@ -48,22 +80,21 @@ results in a single instruction
        mov ax, gs:[x]
 
 instead of a sequence of calculation of the address and then a fetch
-from that address which occurs with the percpu operations. Before
+from that address which occurs with the per cpu operations. Before
 this_cpu_ops such sequence also required preempt disable/enable to
 prevent the kernel from moving the thread to a different processor
 while the calculation is performed.
 
-The main use of the this_cpu operations has been to optimize counter
-operations.
+Consider the following this_cpu operation:
 
        this_cpu_inc(x)
 
-results in the following single instruction (no lock prefix!)
+The above results in the following single instruction (no lock prefix!)
 
        inc gs:[x]
 
 instead of the following operations required if there is no segment
-register.
+register:
 
        int *y;
        int cpu;
@@ -73,10 +104,10 @@ register.
        (*y)++;
        put_cpu();
 
-Note that these operations can only be used on percpu data that is
+Note that these operations can only be used on per cpu data that is
 reserved for a specific processor. Without disabling preemption in the
 surrounding code this_cpu_inc() will only guarantee that one of the
-percpu counters is correctly incremented. However, there is no
+per cpu counters is correctly incremented. However, there is no
 guarantee that the OS will not move the process directly before or
 after the this_cpu instruction is executed. In general this means that
 the value of the individual counters for each processor are
@@ -86,9 +117,9 @@ that is of interest.
 Per cpu variables are used for performance reasons. Bouncing cache
 lines can be avoided if multiple processors concurrently go through
 the same code paths.  Since each processor has its own per cpu
-variables no concurrent cacheline updates take place. The price that
+variables no concurrent cache line updates take place. The price that
 has to be paid for this optimization is the need to add up the per cpu
-counters when the value of the counter is needed.
+counters when the value of a counter is needed.
 
 
 Special operations:
@@ -100,33 +131,39 @@ Takes the offset of a per cpu variable (&x !) and returns the address
 of the per cpu variable that belongs to the currently executing
 processor.  this_cpu_ptr avoids multiple steps that the common
 get_cpu/put_cpu sequence requires. No processor number is
-available. Instead the offset of the local per cpu area is simply
-added to the percpu offset.
+available. Instead, the offset of the local per cpu area is simply
+added to the per cpu offset.
 
+Note that this operation is usually used in a code segment when
+preemption has been disabled. The pointer is then used to
+access local per cpu data in a critical section. When preemption
+is re-enabled this pointer is usually no longer useful since it may
+no longer point to per cpu data of the current processor.
 
 
 Per cpu variables and offsets
 -----------------------------
 
-Per cpu variables have *offsets* to the beginning of the percpu
+Per cpu variables have *offsets* to the beginning of the per cpu
 area. They do not have addresses although they look like that in the
 code. Offsets cannot be directly dereferenced. The offset must be
-added to a base pointer of a percpu area of a processor in order to
+added to a base pointer of a per cpu area of a processor in order to
 form a valid address.
 
 Therefore the use of x or &x outside of the context of per cpu
 operations is invalid and will generally be treated like a NULL
 pointer dereference.
 
-In the context of per cpu operations
+       DEFINE_PER_CPU(int, x);
 
-       x is a per cpu variable. Most this_cpu operations take a cpu
-       variable.
+In the context of per cpu operations the above implies that x is a per
+cpu variable. Most this_cpu operations take a cpu variable.
 
-       &x is the *offset* a per cpu variable. this_cpu_ptr() takes
-       the offset of a per cpu variable which makes this look a bit
-       strange.
+       int __percpu *p = &x;
 
+&x and hence p is the *offset* of a per cpu variable. this_cpu_ptr()
+takes the offset of a per cpu variable which makes this look a bit
+strange.
 
 
 Operations on a field of a per cpu structure
@@ -152,7 +189,7 @@ If we have an offset to struct s:
 
        struct s __percpu *ps = &p;
 
-       z = this_cpu_dec(ps->m);
+       this_cpu_dec(ps->m);
 
        z = this_cpu_inc_return(ps->n);
 
@@ -172,29 +209,52 @@ if we do not make use of this_cpu ops later to manipulate fields:
 Variants of this_cpu ops
 -------------------------
 
-this_cpu ops are interrupt safe. Some architecture do not support
+this_cpu ops are interrupt safe. Some architectures do not support
 these per cpu local operations. In that case the operation must be
 replaced by code that disables interrupts, then does the operations
-that are guaranteed to be atomic and then reenable interrupts. Doing
+that are guaranteed to be atomic and then re-enable interrupts. Doing
 so is expensive. If there are other reasons why the scheduler cannot
 change the processor we are executing on then there is no reason to
-disable interrupts. For that purpose the __this_cpu operations are
-provided. For example.
-
-       __this_cpu_inc(x);
-
-Will increment x and will not fallback to code that disables
+disable interrupts. For that purpose the following __this_cpu operations
+are provided.
+
+These operations have no guarantee against concurrent interrupts or
+preemption. If a per cpu variable is not used in an interrupt context
+and the scheduler cannot preempt, then they are safe. If any interrupts
+still occur while an operation is in progress and the interrupt also
+modifies the variable, then RMW actions cannot be guaranteed to be
+safe.
+
+       __this_cpu_add()
+       __this_cpu_read(pcp)
+       __this_cpu_write(pcp, val)
+       __this_cpu_add(pcp, val)
+       __this_cpu_and(pcp, val)
+       __this_cpu_or(pcp, val)
+       __this_cpu_add_return(pcp, val)
+       __this_cpu_xchg(pcp, nval)
+       __this_cpu_cmpxchg(pcp, oval, nval)
+       __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+       __this_cpu_sub(pcp, val)
+       __this_cpu_inc(pcp)
+       __this_cpu_dec(pcp)
+       __this_cpu_sub_return(pcp, val)
+       __this_cpu_inc_return(pcp)
+       __this_cpu_dec_return(pcp)
+
+
+These operations will update the given per cpu variable and will not
+fall back to code that disables
 interrupts on platforms that cannot accomplish atomicity through
 address relocation and a Read-Modify-Write operation in the same
 instruction.
 
 
-
 &this_cpu_ptr(pp)->n vs this_cpu_ptr(&pp->n)
 --------------------------------------------
 
 The first operation takes the offset and forms an address and then
-adds the offset of the n field.
+adds the offset of the n field. This may result in two add
+instructions emitted by the compiler.
 
 The second one first adds the two offsets and then does the
 relocation.  IMHO the second form looks cleaner and has an easier time
@@ -202,4 +262,73 @@ with (). The second form also is consistent with the way
 this_cpu_read() and friends are used.
 
 
-Christoph Lameter, April 3rd, 2013
+Remote access to per cpu data
+------------------------------
+
+Per cpu data structures are designed to be used by one cpu exclusively.
+If you use the variables as intended, this_cpu_ops() are guaranteed to
+be "atomic" as no other CPU has access to these data structures.
+
+There are special cases where you might need to access per cpu data
+structures remotely. It is usually safe to do a remote read access
+and that is frequently done to summarize counters. Remote write access is
+something which could be problematic because this_cpu ops do not
+have lock semantics. A remote write may interfere with a this_cpu
+RMW operation.
+
+Remote write accesses to percpu data structures are highly discouraged
+unless absolutely necessary. Please consider using an IPI to wake up
+the remote CPU and perform the update to its per cpu area.
+
+To access per cpu data structures remotely, typically the per_cpu_ptr()
+function is used:
+
+
+       DEFINE_PER_CPU(struct data, datap);
+
+       struct data *p = per_cpu_ptr(&datap, cpu);
+
+This makes it explicit that we are getting ready to access a percpu
+area remotely.
+
+You can also do the following to convert the datap offset to an address
+
+       struct data *p = this_cpu_ptr(&datap);
+
+but passing pointers calculated via this_cpu_ptr() to other cpus is
+unusual and should be avoided.
+
+Remote accesses are typically only for reading the status of another cpu's
+per cpu data. Write accesses can cause unique problems due to the
+relaxed synchronization requirements for this_cpu operations.
+
+One example that illustrates some concerns with write operations is
+the following scenario that occurs because two per cpu variables
+share a cache-line but the relaxed synchronization is applied to
+only one process updating the cache-line.
+
+Consider the following example
+
+
+       struct test {
+               atomic_t a;
+               int b;
+       };
+
+       DEFINE_PER_CPU(struct test, onecacheline);
+
+There is some concern about what would happen if the field 'a' is updated
+remotely from one processor and the local processor would use this_cpu ops
+to update field b. Care should be taken that such simultaneous accesses to
+data within the same cache line are avoided. Also costly synchronization
+may be necessary. IPIs are generally recommended in such scenarios instead
+of a remote write to the per cpu area of another processor.
+
+Even in cases where the remote writes are rare, please bear in
+mind that a remote write will evict the cache line from the processor
+that most likely will access it. If the processor wakes up and finds a
+missing local cache line of a per cpu area, its performance and hence
+the wake up times will be affected.
+
+Christoph Lameter, August 4th, 2014
+Pranith Kumar, Aug 2nd, 2014
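
The counter usage and remote-read summarization described in the updated
this_cpu_ops.txt text can be illustrated with a short sketch (a non-normative
example; my_counter, my_counter_inc and my_counter_total are made-up names, not
kernel symbols):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	static DEFINE_PER_CPU(unsigned long, my_counter);

	/* Hot path: bump the local CPU's copy; no lock and no explicit
	 * preemption handling is needed for the counting use case. */
	static inline void my_counter_inc(void)
	{
		this_cpu_inc(my_counter);
	}

	/* Slow path: remote reads of every CPU's copy to produce a sum,
	 * the "add up the per cpu counters" step mentioned above. */
	static unsigned long my_counter_total(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(my_counter, cpu);

		return sum;
	}
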
index 2b3a82e..39d1723 100644 (file)
@@ -35,7 +35,7 @@ invlpg instruction (or instructions _near_ it) show up high in
 profiles.  If you believe that individual invalidations being
 called too often, you can lower the tunable:
 
-       /sys/debug/kernel/x86/tlb_single_page_flush_ceiling
+       /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 
 This will cause us to do the global flush for more cases.
 Lowering it to 0 will disable the use of the individual flushes.
index 76ac03d..217dc54 100644 (file)
@@ -1277,6 +1277,7 @@ F:        drivers/scsi/arm/
 ARM/Rockchip SoC support
 M:     Heiko Stuebner <heiko@sntech.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-rockchip@lists.infradead.org
 S:     Maintained
 F:     arch/arm/mach-rockchip/
 F:     drivers/*/*rockchip*
@@ -1843,6 +1844,12 @@ S:       Orphan
 F:     Documentation/filesystems/befs.txt
 F:     fs/befs/
 
+BECKHOFF CX5020 ETHERCAT MASTER DRIVER
+M:     Dariusz Marcinkiewicz <reksio@newterm.pl>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/ec_bhf.c
+
 BFS FILE SYSTEM
 M:     "Tigran A. Aivazian" <tigran@aivazian.fsnet.co.uk>
 S:     Maintained
@@ -2059,7 +2066,7 @@ S:        Supported
 F:     drivers/scsi/bnx2i/
 
 BROADCOM KONA GPIO DRIVER
-M:     Markus Mayer <markus.mayer@linaro.org>
+M:     Ray Jui <rjui@broadcom.com>
 L:     bcm-kernel-feedback-list@broadcom.com
 S:     Supported
 F:     drivers/gpio/gpio-bcm-kona.c
@@ -3115,6 +3122,17 @@ F:       include/linux/host1x.h
 F:     include/uapi/drm/tegra_drm.h
 F:     Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 
+DRM DRIVERS FOR RENESAS
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:     dri-devel@lists.freedesktop.org
+L:     linux-sh@vger.kernel.org
+T:     git git://people.freedesktop.org/~airlied/linux
+S:     Supported
+F:     drivers/gpu/drm/rcar-du/
+F:     drivers/gpu/drm/shmobile/
+F:     include/linux/platform_data/rcar-du.h
+F:     include/linux/platform_data/shmob_drm.h
+
 DSBR100 USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -3843,10 +3861,13 @@ F:      drivers/tty/serial/ucc_uart.c
 
 FREESCALE SOC SOUND DRIVERS
 M:     Timur Tabi <timur@tabi.org>
+M:     Nicolin Chen <nicoleotsuka@gmail.com>
+M:     Xiubo Li <Li.Xiubo@freescale.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     sound/soc/fsl/fsl*
+F:     sound/soc/fsl/imx*
 F:     sound/soc/fsl/mpc8610_hpcd.c
 
 FREEVXFS FILESYSTEM
@@ -4446,6 +4467,13 @@ F:       include/linux/i2c-*.h
 F:     include/uapi/linux/i2c.h
 F:     include/uapi/linux/i2c-*.h
 
+I2C ACPI SUPPORT
+M:     Mika Westerberg <mika.westerberg@linux.intel.com>
+L:     linux-i2c@vger.kernel.org
+L:     linux-acpi@vger.kernel.org
+S:     Maintained
+F:     drivers/i2c/i2c-acpi.c
+
 I2C-TAOS-EVM DRIVER
 M:     Jean Delvare <jdelvare@suse.de>
 L:     linux-i2c@vger.kernel.org
@@ -5972,6 +6000,12 @@ T:       git git://linuxtv.org/media_tree.git
 S:     Maintained
 F:     drivers/media/radio/radio-mr800.c
 
+MRF24J40 IEEE 802.15.4 RADIO DRIVER
+M:     Alan Ott <alan@signal11.us>
+L:     linux-wpan@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ieee802154/mrf24j40.c
+
 MSI LAPTOP SUPPORT
 M:     "Lee, Chun-Yi" <jlee@suse.com>
 L:     platform-driver-x86@vger.kernel.org
@@ -6858,6 +6892,14 @@ S:       Supported
 F:     Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
 F:     drivers/pci/host/pci-tegra.c
 
+PCI DRIVER FOR TI DRA7XX
+M:     Kishon Vijay Abraham I <kishon@ti.com>
+L:     linux-omap@vger.kernel.org
+L:     linux-pci@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/pci/ti-pci.txt
+F:     drivers/pci/host/pci-dra7xx.c
+
 PCI DRIVER FOR RENESAS R-CAR
 M:     Simon Horman <horms@verge.net.au>
 L:     linux-pci@vger.kernel.org
index e432442..f64fc78 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index 5ebab58..f05bdb4 100644 (file)
@@ -500,10 +500,14 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
 #define outb_p         outb
 #define outw_p         outw
 #define outl_p         outl
-#define readb_relaxed(addr) __raw_readb(addr)
-#define readw_relaxed(addr) __raw_readw(addr)
-#define readl_relaxed(addr) __raw_readl(addr)
-#define readq_relaxed(addr) __raw_readq(addr)
+#define readb_relaxed(addr)    __raw_readb(addr)
+#define readw_relaxed(addr)    __raw_readw(addr)
+#define readl_relaxed(addr)    __raw_readl(addr)
+#define readq_relaxed(addr)    __raw_readq(addr)
+#define writeb_relaxed(b, addr)        __raw_writeb(b, addr)
+#define writew_relaxed(b, addr)        __raw_writew(b, addr)
+#define writel_relaxed(b, addr)        __raw_writel(b, addr)
+#define writeq_relaxed(b, addr)        __raw_writeq(b, addr)
 
 #define mmiowb()
 
index f2c9440..c509d30 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define NR_SYSCALLS                    508
+#define NR_SYSCALLS                    511
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
index 53ae7bb..d214a03 100644 (file)
 #define __NR_process_vm_writev         505
 #define __NR_kcmp                      506
 #define __NR_finit_module              507
+#define __NR_sched_setattr             508
+#define __NR_sched_getattr             509
+#define __NR_renameat2                 510
 
 #endif /* _UAPI_ALPHA_UNISTD_H */
index dca9b3f..2478971 100644 (file)
@@ -526,6 +526,9 @@ sys_call_table:
        .quad sys_process_vm_writev             /* 505 */
        .quad sys_kcmp
        .quad sys_finit_module
+       .quad sys_sched_setattr
+       .quad sys_sched_getattr
+       .quad sys_renameat2                     /* 510 */
 
        .size sys_call_table, . - sys_call_table
        .type sys_call_table, @object
index 6d6d23c..adadaf9 100644 (file)
        i2c@13860000 {
                pinctrl-0 = <&i2c0_bus>;
                pinctrl-names = "default";
+               samsung,i2c-sda-delay = <100>;
+               samsung,i2c-max-bus-freq = <400000>;
                status = "okay";
 
                usb3503: usb3503@08 {
 
                max77686: pmic@09 {
                        compatible = "maxim,max77686";
+                       interrupt-parent = <&gpx3>;
+                       interrupts = <2 0>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&max77686_irq>;
                        reg = <0x09>;
                        #clock-cells = <1>;
 
                samsung,pins = "gpx1-3";
                samsung,pin-pud = <0>;
        };
+
+       max77686_irq: max77686-irq {
+               samsung,pins = "gpx3-2";
+               samsung,pin-function = <0>;
+               samsung,pin-pud = <0>;
+               samsung,pin-drv = <0>;
+       };
 };
index 64fa27b..c6c58c1 100644 (file)
                                compatible = "fsl,imx53-vpu";
                                reg = <0x63ff4000 0x1000>;
                                interrupts = <9>;
-                               clocks = <&clks IMX5_CLK_VPU_GATE>,
+                               clocks = <&clks IMX5_CLK_VPU_REFERENCE_GATE>,
                                         <&clks IMX5_CLK_VPU_GATE>;
                                clock-names = "per", "ahb";
                                resets = <&src 1>;
index 8c1cb53..4fa2543 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet>;
        phy-mode = "rgmii";
-       phy-reset-gpios = <&gpio3 23 0>;
+       phy-reset-gpios = <&gpio1 25 0>;
        phy-supply = <&vgen2_1v2_eth>;
        status = "okay";
 };
                                MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK    0x1b0b0
                                MX6QDL_PAD_ENET_MDIO__ENET_MDIO         0x1b0b0
                                MX6QDL_PAD_ENET_MDC__ENET_MDC           0x1b0b0
+                               MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25      0x1b0b0
                                MX6QDL_PAD_GPIO_16__ENET_REF_CLK        0x4001b0a8
                        >;
                };
index 3e0b816..bb9c6b7 100644 (file)
@@ -78,7 +78,7 @@
 #define MX6SX_PAD_GPIO1_IO07__USDHC2_WP                           0x0030 0x0378 0x0870 0x1 0x1
 #define MX6SX_PAD_GPIO1_IO07__ENET2_MDIO                          0x0030 0x0378 0x0770 0x2 0x0
 #define MX6SX_PAD_GPIO1_IO07__AUDMUX_MCLK                         0x0030 0x0378 0x0000 0x3 0x0
-#define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B                         0x0030 0x0378 0x082C 0x4 0x1
+#define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B                         0x0030 0x0378 0x0000 0x4 0x0
 #define MX6SX_PAD_GPIO1_IO07__GPIO1_IO_7                          0x0030 0x0378 0x0000 0x5 0x0
 #define MX6SX_PAD_GPIO1_IO07__SRC_EARLY_RESET                     0x0030 0x0378 0x0000 0x6 0x0
 #define MX6SX_PAD_GPIO1_IO07__DCIC2_OUT                           0x0030 0x0378 0x0000 0x7 0x0
@@ -96,7 +96,7 @@
 #define MX6SX_PAD_GPIO1_IO09__WDOG2_WDOG_B                        0x0038 0x0380 0x0000 0x1 0x0
 #define MX6SX_PAD_GPIO1_IO09__SDMA_EXT_EVENT_1                    0x0038 0x0380 0x0820 0x2 0x0
 #define MX6SX_PAD_GPIO1_IO09__CCM_OUT0                            0x0038 0x0380 0x0000 0x3 0x0
-#define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B                         0x0038 0x0380 0x0834 0x4 0x1
+#define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B                         0x0038 0x0380 0x0000 0x4 0x0
 #define MX6SX_PAD_GPIO1_IO09__GPIO1_IO_9                          0x0038 0x0380 0x0000 0x5 0x0
 #define MX6SX_PAD_GPIO1_IO09__SRC_INT_BOOT                        0x0038 0x0380 0x0000 0x6 0x0
 #define MX6SX_PAD_GPIO1_IO09__OBSERVE_MUX_OUT_4                   0x0038 0x0380 0x0000 0x7 0x0
 #define MX6SX_PAD_CSI_DATA07__ESAI_TX3_RX2                        0x0068 0x03B0 0x079C 0x1 0x1
 #define MX6SX_PAD_CSI_DATA07__I2C4_SDA                            0x0068 0x03B0 0x07C4 0x2 0x2
 #define MX6SX_PAD_CSI_DATA07__KPP_ROW_7                           0x0068 0x03B0 0x07DC 0x3 0x0
-#define MX6SX_PAD_CSI_DATA07__UART6_CTS_B                         0x0068 0x03B0 0x0854 0x4 0x1
+#define MX6SX_PAD_CSI_DATA07__UART6_CTS_B                         0x0068 0x03B0 0x0000 0x4 0x0
 #define MX6SX_PAD_CSI_DATA07__GPIO1_IO_21                         0x0068 0x03B0 0x0000 0x5 0x0
 #define MX6SX_PAD_CSI_DATA07__WEIM_DATA_16                        0x0068 0x03B0 0x0000 0x6 0x0
 #define MX6SX_PAD_CSI_DATA07__DCIC1_OUT                           0x0068 0x03B0 0x0000 0x7 0x0
 #define MX6SX_PAD_CSI_VSYNC__CSI1_VSYNC                           0x0078 0x03C0 0x0708 0x0 0x0
 #define MX6SX_PAD_CSI_VSYNC__ESAI_TX5_RX0                         0x0078 0x03C0 0x07A4 0x1 0x1
 #define MX6SX_PAD_CSI_VSYNC__AUDMUX_AUD6_RXD                      0x0078 0x03C0 0x0674 0x2 0x1
-#define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B                          0x0078 0x03C0 0x0844 0x3 0x3
+#define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B                          0x0078 0x03C0 0x0000 0x3 0x0
 #define MX6SX_PAD_CSI_VSYNC__MQS_RIGHT                            0x0078 0x03C0 0x0000 0x4 0x0
 #define MX6SX_PAD_CSI_VSYNC__GPIO1_IO_25                          0x0078 0x03C0 0x0000 0x5 0x0
 #define MX6SX_PAD_CSI_VSYNC__WEIM_DATA_24                         0x0078 0x03C0 0x0000 0x6 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__ENET2_TX_CLK                      0x00A0 0x03E8 0x0000 0x0 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__ENET2_REF_CLK2                    0x00A0 0x03E8 0x076C 0x1 0x1
 #define MX6SX_PAD_ENET2_TX_CLK__I2C3_SDA                          0x00A0 0x03E8 0x07BC 0x2 0x1
-#define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B                       0x00A0 0x03E8 0x082C 0x3 0x3
+#define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B                       0x00A0 0x03E8 0x0000 0x3 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__MLB_CLK                           0x00A0 0x03E8 0x07E8 0x4 0x1
 #define MX6SX_PAD_ENET2_TX_CLK__GPIO2_IO_9                        0x00A0 0x03E8 0x0000 0x5 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__USB_OTG2_PWR                      0x00A0 0x03E8 0x0000 0x6 0x0
 #define MX6SX_PAD_KEY_COL4__SAI2_RX_BCLK                          0x00B4 0x03FC 0x0808 0x7 0x0
 #define MX6SX_PAD_KEY_ROW0__KPP_ROW_0                             0x00B8 0x0400 0x0000 0x0 0x0
 #define MX6SX_PAD_KEY_ROW0__USDHC3_WP                             0x00B8 0x0400 0x0000 0x1 0x0
-#define MX6SX_PAD_KEY_ROW0__UART6_CTS_B                           0x00B8 0x0400 0x0854 0x2 0x3
+#define MX6SX_PAD_KEY_ROW0__UART6_CTS_B                           0x00B8 0x0400 0x0000 0x2 0x0
 #define MX6SX_PAD_KEY_ROW0__ECSPI1_MOSI                           0x00B8 0x0400 0x0718 0x3 0x0
 #define MX6SX_PAD_KEY_ROW0__AUDMUX_AUD5_TXD                       0x00B8 0x0400 0x0660 0x4 0x0
 #define MX6SX_PAD_KEY_ROW0__GPIO2_IO_15                           0x00B8 0x0400 0x0000 0x5 0x0
 #define MX6SX_PAD_KEY_ROW1__M4_NMI                                0x00BC 0x0404 0x0000 0x8 0x0
 #define MX6SX_PAD_KEY_ROW2__KPP_ROW_2                             0x00C0 0x0408 0x0000 0x0 0x0
 #define MX6SX_PAD_KEY_ROW2__USDHC4_WP                             0x00C0 0x0408 0x0878 0x1 0x1
-#define MX6SX_PAD_KEY_ROW2__UART5_CTS_B                           0x00C0 0x0408 0x084C 0x2 0x3
+#define MX6SX_PAD_KEY_ROW2__UART5_CTS_B                           0x00C0 0x0408 0x0000 0x2 0x0
 #define MX6SX_PAD_KEY_ROW2__CAN1_RX                               0x00C0 0x0408 0x068C 0x3 0x1
 #define MX6SX_PAD_KEY_ROW2__CANFD_RX1                             0x00C0 0x0408 0x0694 0x4 0x1
 #define MX6SX_PAD_KEY_ROW2__GPIO2_IO_17                           0x00C0 0x0408 0x0000 0x5 0x0
 #define MX6SX_PAD_NAND_DATA05__RAWNAND_DATA05                     0x0164 0x04AC 0x0000 0x0 0x0
 #define MX6SX_PAD_NAND_DATA05__USDHC2_DATA5                       0x0164 0x04AC 0x0000 0x1 0x0
 #define MX6SX_PAD_NAND_DATA05__QSPI2_B_DQS                        0x0164 0x04AC 0x0000 0x2 0x0
-#define MX6SX_PAD_NAND_DATA05__UART3_CTS_B                        0x0164 0x04AC 0x083C 0x3 0x1
+#define MX6SX_PAD_NAND_DATA05__UART3_CTS_B                        0x0164 0x04AC 0x0000 0x3 0x0
 #define MX6SX_PAD_NAND_DATA05__AUDMUX_AUD4_RXC                    0x0164 0x04AC 0x064C 0x4 0x0
 #define MX6SX_PAD_NAND_DATA05__GPIO4_IO_9                         0x0164 0x04AC 0x0000 0x5 0x0
 #define MX6SX_PAD_NAND_DATA05__WEIM_AD_5                          0x0164 0x04AC 0x0000 0x6 0x0
 #define MX6SX_PAD_QSPI1A_SS1_B__SIM_M_HADDR_12                    0x019C 0x04E4 0x0000 0x7 0x0
 #define MX6SX_PAD_QSPI1A_SS1_B__SDMA_DEBUG_PC_3                   0x019C 0x04E4 0x0000 0x9 0x0
 #define MX6SX_PAD_QSPI1B_DATA0__QSPI1_B_DATA_0                    0x01A0 0x04E8 0x0000 0x0 0x0
-#define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B                       0x01A0 0x04E8 0x083C 0x1 0x4
+#define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B                       0x01A0 0x04E8 0x0000 0x1 0x0
 #define MX6SX_PAD_QSPI1B_DATA0__ECSPI3_MOSI                       0x01A0 0x04E8 0x0738 0x2 0x1
 #define MX6SX_PAD_QSPI1B_DATA0__ESAI_RX_FS                        0x01A0 0x04E8 0x0778 0x3 0x2
 #define MX6SX_PAD_QSPI1B_DATA0__CSI1_DATA_22                      0x01A0 0x04E8 0x06F4 0x4 0x1
 #define MX6SX_PAD_SD1_DATA2__AUDMUX_AUD5_TXFS                     0x0230 0x0578 0x0670 0x1 0x1
 #define MX6SX_PAD_SD1_DATA2__PWM3_OUT                             0x0230 0x0578 0x0000 0x2 0x0
 #define MX6SX_PAD_SD1_DATA2__GPT_COMPARE2                         0x0230 0x0578 0x0000 0x3 0x0
-#define MX6SX_PAD_SD1_DATA2__UART2_CTS_B                          0x0230 0x0578 0x0834 0x4 0x2
+#define MX6SX_PAD_SD1_DATA2__UART2_CTS_B                          0x0230 0x0578 0x0000 0x4 0x0
 #define MX6SX_PAD_SD1_DATA2__GPIO6_IO_4                           0x0230 0x0578 0x0000 0x5 0x0
 #define MX6SX_PAD_SD1_DATA2__ECSPI4_RDY                           0x0230 0x0578 0x0000 0x6 0x0
 #define MX6SX_PAD_SD1_DATA2__CCM_OUT0                             0x0230 0x0578 0x0000 0x7 0x0
 #define MX6SX_PAD_SD2_DATA3__VADC_CLAMP_CURRENT_3                 0x024C 0x0594 0x0000 0x8 0x0
 #define MX6SX_PAD_SD2_DATA3__MMDC_DEBUG_31                        0x024C 0x0594 0x0000 0x9 0x0
 #define MX6SX_PAD_SD3_CLK__USDHC3_CLK                             0x0250 0x0598 0x0000 0x0 0x0
-#define MX6SX_PAD_SD3_CLK__UART4_CTS_B                            0x0250 0x0598 0x0844 0x1 0x0
+#define MX6SX_PAD_SD3_CLK__UART4_CTS_B                            0x0250 0x0598 0x0000 0x1 0x0
 #define MX6SX_PAD_SD3_CLK__ECSPI4_SCLK                            0x0250 0x0598 0x0740 0x2 0x0
 #define MX6SX_PAD_SD3_CLK__AUDMUX_AUD6_RXFS                       0x0250 0x0598 0x0680 0x3 0x0
 #define MX6SX_PAD_SD3_CLK__LCDIF2_VSYNC                           0x0250 0x0598 0x0000 0x4 0x0
 #define MX6SX_PAD_SD3_DATA7__USDHC3_DATA7                         0x0274 0x05BC 0x0000 0x0 0x0
 #define MX6SX_PAD_SD3_DATA7__CAN1_RX                              0x0274 0x05BC 0x068C 0x1 0x0
 #define MX6SX_PAD_SD3_DATA7__CANFD_RX1                            0x0274 0x05BC 0x0694 0x2 0x0
-#define MX6SX_PAD_SD3_DATA7__UART3_CTS_B                          0x0274 0x05BC 0x083C 0x3 0x3
+#define MX6SX_PAD_SD3_DATA7__UART3_CTS_B                          0x0274 0x05BC 0x0000 0x3 0x0
 #define MX6SX_PAD_SD3_DATA7__LCDIF2_DATA_5                        0x0274 0x05BC 0x0000 0x4 0x0
 #define MX6SX_PAD_SD3_DATA7__GPIO7_IO_9                           0x0274 0x05BC 0x0000 0x5 0x0
 #define MX6SX_PAD_SD3_DATA7__ENET1_1588_EVENT0_IN                 0x0274 0x05BC 0x0000 0x6 0x0
 #define MX6SX_PAD_SD4_DATA6__SDMA_DEBUG_EVENT_CHANNEL_1           0x0298 0x05E0 0x0000 0x9 0x0
 #define MX6SX_PAD_SD4_DATA7__USDHC4_DATA7                         0x029C 0x05E4 0x0000 0x0 0x0
 #define MX6SX_PAD_SD4_DATA7__RAWNAND_DATA08                       0x029C 0x05E4 0x0000 0x1 0x0
-#define MX6SX_PAD_SD4_DATA7__UART5_CTS_B                          0x029C 0x05E4 0x084C 0x2 0x1
+#define MX6SX_PAD_SD4_DATA7__UART5_CTS_B                          0x029C 0x05E4 0x0000 0x2 0x0
 #define MX6SX_PAD_SD4_DATA7__ECSPI3_SS0                           0x029C 0x05E4 0x073C 0x3 0x0
 #define MX6SX_PAD_SD4_DATA7__LCDIF2_DATA_15                       0x029C 0x05E4 0x0000 0x4 0x0
 #define MX6SX_PAD_SD4_DATA7__GPIO6_IO_21                          0x029C 0x05E4 0x0000 0x5 0x0
index b15f1a7..1fe45d1 100644 (file)
        };
 
        twl_power: power {
-               compatible = "ti,twl4030-power-n900";
+               compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
                ti,use_poweroff;
        };
 };
index 23486c0..be59014 100644 (file)
                renesas,function = "msiof0";
        };
 
-       i2c6_pins: i2c6 {
-               renesas,groups = "i2c6";
-               renesas,function = "i2c6";
-       };
-
        usb0_pins: usb0 {
                renesas,groups = "usb0";
                renesas,function = "usb0";
 };
 
 &i2c6 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&i2c6_pins>;
        status = "okay";
        clock-frequency = <100000>;
 
index 042f821..c9d912d 100644 (file)
 &mmc0 { /* sdmmc */
        num-slots = <1>;
        status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
        vmmc-supply = <&vcc_sd0>;
 
        slot@0 {
index 171b610..5e4e3c2 100644 (file)
 &mmc0 {
        num-slots = <1>;
        status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
        vmmc-supply = <&vcc_sd0>;
 
        slot@0 {
index 44b07e5..e06fbfc 100644 (file)
                        clock-frequency = <100000>;
                        resets = <&apb2_rst 0>;
                        status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
                };
 
                i2c1: i2c@01c2b000 {
                        clock-frequency = <100000>;
                        resets = <&apb2_rst 1>;
                        status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
                };
 
                i2c2: i2c@01c2b400 {
                        clock-frequency = <100000>;
                        resets = <&apb2_rst 2>;
                        status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
                };
 
                i2c3: i2c@01c2b800 {
                        clock-frequency = <100000>;
                        resets = <&apb2_rst 3>;
                        status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
                };
 
                gmac: ethernet@01c30000 {
index 8adaa78..a5446cb 100644 (file)
                        vcc4-supply = <&sys_3v3_reg>;
                        vcc5-supply = <&sys_3v3_reg>;
                        vcc6-supply = <&vio_reg>;
-                       vcc7-supply = <&sys_5v0_reg>;
+                       vcc7-supply = <&charge_pump_5v0_reg>;
                        vccio-supply = <&sys_3v3_reg>;
 
                        regulators {
                        regulator-max-microvolt = <3300000>;
                        regulator-always-on;
                };
+
+               charge_pump_5v0_reg: regulator@101 {
+                       compatible = "regulator-fixed";
+                       reg = <101>;
+                       regulator-name = "5v0";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       regulator-always-on;
+               };
        };
 };
index bf16f8e..c4ed1be 100644 (file)
                        vcc4-supply = <&sys_3v3_reg>;
                        vcc5-supply = <&sys_3v3_reg>;
                        vcc6-supply = <&vio_reg>;
-                       vcc7-supply = <&sys_5v0_reg>;
+                       vcc7-supply = <&charge_pump_5v0_reg>;
                        vccio-supply = <&sys_3v3_reg>;
 
                        regulators {
                        regulator-max-microvolt = <3300000>;
                        regulator-always-on;
                };
+
+               charge_pump_5v0_reg: regulator@101 {
+                       compatible = "regulator-fixed";
+                       reg = <101>;
+                       regulator-name = "5v0";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       regulator-always-on;
+               };
        };
 };
index 11d7334..b8a5e8c 100644 (file)
                };
 
                pinctrl_esdhc1: esdhc1grp {
-                       fsl,fsl,pins = <
+                       fsl,pins = <
                                VF610_PAD_PTA24__ESDHC1_CLK     0x31ef
                                VF610_PAD_PTA25__ESDHC1_CMD     0x31ef
                                VF610_PAD_PTA26__ESDHC1_DAT0    0x31ef
index 9de84a2..be9a51a 100644 (file)
@@ -85,7 +85,6 @@ config SOC_IMX25
 
 config SOC_IMX27
        bool
-       select ARCH_HAS_OPP
        select CPU_ARM926T
        select IMX_HAVE_IOMUX_V1
        select MXC_AVIC
@@ -659,7 +658,6 @@ comment "Device tree only"
 
 config SOC_IMX5
        bool
-       select ARCH_HAS_OPP
        select HAVE_IMX_SRC
        select MXC_TZIC
 
index ac88599..23c0293 100644 (file)
@@ -93,9 +93,11 @@ obj-$(CONFIG_HAVE_IMX_ANATOP) += anatop.o
 obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
 obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
 obj-$(CONFIG_HAVE_IMX_SRC) += src.o
+ifdef CONFIG_SOC_IMX6
 AFLAGS_headsmp.o :=-Wa,-march=armv7-a
 obj-$(CONFIG_SMP) += headsmp.o platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+endif
 obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
 obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
 obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o mach-imx6sx.o
index 6cceb77..29d4129 100644 (file)
@@ -194,6 +194,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
        clk[IMX6QDL_CLK_PLL3_80M]  = imx_clk_fixed_factor("pll3_80m",  "pll3_usb_otg",   1, 6);
        clk[IMX6QDL_CLK_PLL3_60M]  = imx_clk_fixed_factor("pll3_60m",  "pll3_usb_otg",   1, 8);
        clk[IMX6QDL_CLK_TWD]       = imx_clk_fixed_factor("twd",       "arm",            1, 2);
+       if (cpu_is_imx6dl()) {
+               clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1);
+               clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1);
+       }
 
        clk[IMX6QDL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
        clk[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
@@ -217,8 +221,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
        clk[IMX6QDL_CLK_ESAI_SEL]         = imx_clk_mux("esai_sel",         base + 0x20, 19, 2, audio_sels,        ARRAY_SIZE(audio_sels));
        clk[IMX6QDL_CLK_ASRC_SEL]         = imx_clk_mux("asrc_sel",         base + 0x30, 7,  2, audio_sels,        ARRAY_SIZE(audio_sels));
        clk[IMX6QDL_CLK_SPDIF_SEL]        = imx_clk_mux("spdif_sel",        base + 0x30, 20, 2, audio_sels,        ARRAY_SIZE(audio_sels));
-       clk[IMX6QDL_CLK_GPU2D_AXI]        = imx_clk_mux("gpu2d_axi",        base + 0x18, 0,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
-       clk[IMX6QDL_CLK_GPU3D_AXI]        = imx_clk_mux("gpu3d_axi",        base + 0x18, 1,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+       if (cpu_is_imx6q()) {
+               clk[IMX6QDL_CLK_GPU2D_AXI]        = imx_clk_mux("gpu2d_axi",        base + 0x18, 0,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+               clk[IMX6QDL_CLK_GPU3D_AXI]        = imx_clk_mux("gpu3d_axi",        base + 0x18, 1,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+       }
        clk[IMX6QDL_CLK_GPU2D_CORE_SEL]   = imx_clk_mux("gpu2d_core_sel",   base + 0x18, 16, 2, gpu2d_core_sels,   ARRAY_SIZE(gpu2d_core_sels));
        clk[IMX6QDL_CLK_GPU3D_CORE_SEL]   = imx_clk_mux("gpu3d_core_sel",   base + 0x18, 4,  2, gpu3d_core_sels,   ARRAY_SIZE(gpu3d_core_sels));
        clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8,  2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
index 74b50f1..ca4ea2d 100644 (file)
@@ -173,6 +173,8 @@ ENTRY(imx6_suspend)
        ldr     r6, [r11, #0x0]
        ldr     r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
        ldr     r6, [r11, #0x0]
+       ldr     r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
+       ldr     r6, [r11, #0x0]
 
        /* use r11 to store the IO address */
        ldr     r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET]
index e15dff7..1e6c51c 100644 (file)
@@ -75,6 +75,7 @@ config ARCH_SH7372
        select ARM_CPU_SUSPEND if PM || CPU_IDLE
        select CPU_V7
        select SH_CLK_CPG
+       select SH_INTC
        select SYS_SUPPORTS_SH_CMT
        select SYS_SUPPORTS_SH_TMU
 
@@ -85,6 +86,7 @@ config ARCH_SH73A0
        select CPU_V7
        select I2C
        select SH_CLK_CPG
+       select SH_INTC
        select RENESAS_INTC_IRQPIN
        select SYS_SUPPORTS_SH_CMT
        select SYS_SUPPORTS_SH_TMU
index 5783354..2df5e5d 100644 (file)
@@ -39,7 +39,7 @@ head-y                := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%04x0\n", int(65535 * rand())}')
+TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
 else
 TEXT_OFFSET := 0x00080000
 endif
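
For reference, the new awk expression above can only emit a 4 KiB-aligned offset below 2 MiB, which matches the relaxed head.S assertions further down in this pull. A throwaway userspace C sketch of the same value range (illustrative only, not part of the patch):

/*
 * Illustrative only: the value range produced by the new awk expression,
 * i.e. a multiple of 0x1000 in [0x000000, 0x1ff000].
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
        unsigned long text_offset;

        srand((unsigned int)time(NULL));
        text_offset = (unsigned long)(rand() % 512) * 0x1000;

        printf("TEXT_OFFSET := 0x%06lx\n", text_offset);
        printf("4KB aligned: %s, below 2MB: %s\n",
               (text_offset & 0xfff) == 0 ? "yes" : "no",
               text_offset <= 0x1fffff ? "yes" : "no");
        return 0;
}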
index 1e52b74..d92ef3c 100644 (file)
@@ -64,6 +64,8 @@ CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_ATA=y
+CONFIG_AHCI_XGENE=y
+CONFIG_PHY_XGENE=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
@@ -71,6 +73,7 @@ CONFIG_TUN=y
 CONFIG_VIRTIO_NET=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
+CONFIG_NET_XGENE=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_SERPORT is not set
index 1be62bc..74a9d30 100644 (file)
@@ -17,7 +17,7 @@
 #define __ASM_SPARSEMEM_H
 
 #ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS       40
+#define MAX_PHYSMEM_BITS       48
 #define SECTION_SIZE_BITS      30
 #endif
 
index 4bc95d2..6d2bf41 100644 (file)
@@ -41,7 +41,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           383
+#define __NR_compat_syscalls           386
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index e242600..da1f06b 100644 (file)
@@ -787,3 +787,8 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr)
 __SYSCALL(__NR_sched_getattr, sys_sched_getattr)
 #define __NR_renameat2 382
 __SYSCALL(__NR_renameat2, sys_renameat2)
+                       /* 383 for seccomp */
+#define __NR_getrandom 384
+__SYSCALL(__NR_getrandom, sys_getrandom)
+#define __NR_memfd_create 385
+__SYSCALL(__NR_memfd_create, sys_memfd_create)
index f798f66..1771696 100644 (file)
@@ -49,7 +49,7 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 
        if (l1ip != ICACHE_POLICY_PIPT)
                set_bit(ICACHEF_ALIASING, &__icache_flags);
-       if (l1ip == ICACHE_POLICY_AIVIVT);
+       if (l1ip == ICACHE_POLICY_AIVIVT)
                set_bit(ICACHEF_AIVIVT, &__icache_flags);
 
        pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
index e72f310..03aaa99 100644 (file)
@@ -188,6 +188,8 @@ static __init void reserve_regions(void)
                if (uefi_debug)
                        pr_cont("\n");
        }
+
+       set_bit(EFI_MEMMAP, &efi.flags);
 }
 
 
@@ -463,6 +465,8 @@ static int __init arm64_enter_virtual_mode(void)
        efi_native_runtime_setup();
        set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
+       efi.runtime_version = efi.systab->hdr.revision;
+
        return 0;
 
 err_unmap:
index 144f105..bed0283 100644 (file)
 
 #define KERNEL_RAM_VADDR       (PAGE_OFFSET + TEXT_OFFSET)
 
-#if (TEXT_OFFSET & 0xf) != 0
-#error TEXT_OFFSET must be at least 16B aligned
-#elif (PAGE_OFFSET & 0xfffff) != 0
+#if (TEXT_OFFSET & 0xfff) != 0
+#error TEXT_OFFSET must be at least 4KB aligned
+#elif (PAGE_OFFSET & 0x1fffff) != 0
 #error PAGE_OFFSET must be at least 2MB aligned
-#elif TEXT_OFFSET > 0xfffff
+#elif TEXT_OFFSET > 0x1fffff
 #error TEXT_OFFSET must be less than 2MB
 #endif
 
index 0310811..70526cf 100644 (file)
@@ -1115,19 +1115,15 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, regs->syscallno);
 
-#ifdef CONFIG_AUDITSYSCALL
        audit_syscall_entry(syscall_get_arch(), regs->syscallno,
                regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
-#endif
 
        return regs->syscallno;
 }
 
 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
 {
-#ifdef CONFIG_AUDITSYSCALL
        audit_syscall_exit(regs);
-#endif
 
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_exit(regs, regs_return_value(regs));
index 5b4526e..5472c24 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/of_fdt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
+#include <linux/efi.h>
 
 #include <asm/fixmap.h>
 #include <asm/sections.h>
@@ -148,7 +149,8 @@ void __init arm64_memblock_init(void)
                memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
 #endif
 
-       early_init_fdt_scan_reserved_mem();
+       if (!efi_enabled(EFI_MEMMAP))
+               early_init_fdt_scan_reserved_mem();
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
index a34f309..6554e78 100644 (file)
@@ -129,7 +129,8 @@ unsigned long get_wchan(struct task_struct *p);
 #define        KSTK_EIP(tsk)   ((tsk)->thread.frame0->pc)
 #define        KSTK_ESP(tsk)   ((tsk)->thread.frame0->sp)
 
-#define cpu_relax()    barrier()
+#define cpu_relax() barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* data cache prefetch */
 #define ARCH_HAS_PREFETCH
index 4254f5d..10a14ea 100644 (file)
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    316 /* length of syscall table */
+#define NR_syscalls                    317 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 99801c3..6a65bb7 100644 (file)
 #define __NR_sched_getattr             1337
 #define __NR_renameat2                 1338
 #define __NR_getrandom                 1339
+#define __NR_memfd_create              1339
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index 4c13837..01edf24 100644 (file)
@@ -1777,6 +1777,7 @@ sys_call_table:
        data8 sys_sched_getattr
        data8 sys_renameat2
        data8 sys_getrandom
+       data8 sys_memfd_create                  // 1340
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
index 4e1ddc9..1c2380b 100644 (file)
 #define __NR_sched_setattr     381
 #define __NR_sched_getattr     382
 #define __NR_renameat2         383
+#define __NR_seccomp           384
+#define __NR_getrandom         385
+#define __NR_memfd_create      386
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 1a23d5d..de59ee1 100644 (file)
@@ -384,3 +384,6 @@ ENTRY(sys_call_table)
        .long sys_sched_setattr
        .long sys_sched_getattr
        .long sys_renameat2
+       .long sys_seccomp
+       .long sys_getrandom             /* 385 */
+       .long sys_memfd_create
index 7761889..8c13675 100644 (file)
@@ -847,6 +847,7 @@ int __init db1200_dev_setup(void)
                        pr_warn("DB1200: cant get I2C close to 50MHz\n");
                else
                        clk_set_rate(c, pfc);
+               clk_prepare_enable(c);
                clk_put(c);
        }
 
@@ -922,11 +923,6 @@ int __init db1200_dev_setup(void)
        }
 
        /* Audio PSC clock is supplied externally. (FIXME: platdata!!) */
-       c = clk_get(NULL, "psc1_intclk");
-       if (!IS_ERR(c)) {
-               clk_prepare_enable(c);
-               clk_put(c);
-       }
        __raw_writel(PSC_SEL_CLK_SERCLK,
            (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
        wmb();
index 2b63e7e..ad439c2 100644 (file)
@@ -59,12 +59,21 @@ static void bcm47xx_machine_restart(char *command)
        switch (bcm47xx_bus_type) {
 #ifdef CONFIG_BCM47XX_SSB
        case BCM47XX_BUS_TYPE_SSB:
-               ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 3);
+               if (bcm47xx_bus.ssb.chip_id == 0x4785)
+                       write_c0_diag4(1 << 22);
+               ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 1);
+               if (bcm47xx_bus.ssb.chip_id == 0x4785) {
+                       __asm__ __volatile__(
+                               ".set\tmips3\n\t"
+                               "sync\n\t"
+                               "wait\n\t"
+                               ".set\tmips0");
+               }
                break;
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
        case BCM47XX_BUS_TYPE_BCMA:
-               bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 3);
+               bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 1);
                break;
 #endif
        }
index 008e9c8..38f4c32 100644 (file)
@@ -263,7 +263,6 @@ static uint64_t crashk_size, crashk_base;
 static int octeon_uart;
 
 extern asmlinkage void handle_int(void);
-extern asmlinkage void plat_irq_dispatch(void);
 
 /**
  * Return non zero if we are currently running in the Octeon simulator
@@ -458,6 +457,18 @@ static void octeon_halt(void)
        octeon_kill_core(NULL);
 }
 
+static char __read_mostly octeon_system_type[80];
+
+static int __init init_octeon_system_type(void)
+{
+       snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
+               cvmx_board_type_to_string(octeon_bootinfo->board_type),
+               octeon_model_get_string(read_c0_prid()));
+
+       return 0;
+}
+early_initcall(init_octeon_system_type);
+
 /**
  * Return a string representing the system type
  *
@@ -465,11 +476,7 @@ static void octeon_halt(void)
  */
 const char *octeon_board_type_string(void)
 {
-       static char name[80];
-       sprintf(name, "%s (%s)",
-               cvmx_board_type_to_string(octeon_bootinfo->board_type),
-               octeon_model_get_string(read_c0_prid()));
-       return name;
+       return octeon_system_type;
 }
 
 const char *get_system_type(void)
diff --git a/arch/mips/include/asm/eva.h b/arch/mips/include/asm/eva.h
new file mode 100644 (file)
index 0000000..a3d1807
--- /dev/null
+++ b/arch/mips/include/asm/eva.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ *
+ * EVA functions for generic code
+ */
+
+#ifndef _ASM_EVA_H
+#define _ASM_EVA_H
+
+#include <kernel-entry-init.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EVA
+
+/*
+ * EVA early init code
+ *
+ * Platforms must define their own 'platform_eva_init' macro in
+ * their kernel-entry-init.h header. This macro usually does the
+ * platform specific configuration of the segmentation registers,
+ * and it is normally called from assembly code.
+ *
+ */
+
+.macro eva_init
+platform_eva_init
+.endm
+
+#else
+
+.macro eva_init
+.endm
+
+#endif /* CONFIG_EVA */
+
+#endif /* __ASSEMBLY__ */
+
+#endif
index 3f20b21..d7699cf 100644 (file)
@@ -49,7 +49,7 @@
 #endif
 #define GICBIS(reg, mask, bits)                        \
        do { u32 data;                          \
-               GICREAD((reg), data);           \
+               GICREAD(reg, data);             \
                data &= ~(mask);                \
                data |= ((bits) & (mask));      \
                GICWRITE((reg), data);          \
index ae1f7b2..39f07ae 100644 (file)
@@ -26,6 +26,8 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)    /* Sane hardware, sane code ... */
 #endif
 
+asmlinkage void plat_irq_dispatch(void);
+
 extern void do_IRQ(unsigned int irq);
 
 extern void arch_init_irq(void);
index 77eeda7..0cf8622 100644 (file)
 #ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+
        /*
         * Prepare segments for EVA boot:
         *
         * This is in case the processor boots in legacy configuration
         * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
         *
-        * On entry, t1 is loaded with CP0_CONFIG
-        *
         * ========================= Mappings =============================
         * Virtual memory           Physical memory           Mapping
         * 0x00000000 - 0x7fffffff  0x80000000 - 0xfffffffff   MUSUK (kuseg)
         *
         *
         * Lowmem is expanded to 2GB
+        *
+        * The following code uses the t0, t1, t2 and ra registers without
+        * previously preserving them.
+        *
         */
-       .macro  eva_entry
+       .macro  platform_eva_init
+
+       .set    push
+       .set    reorder
        /*
         * Get Config.K0 value and use it to program
         * the segmentation registers
         */
+       mfc0    t1, CP0_CONFIG
        andi    t1, 0x7 /* CCA */
        move    t2, t1
        ins     t2, t1, 16, 3
@@ -77,6 +86,8 @@
        mtc0    t0, $16, 5
        sync
        jal     mips_ihb
+
+       .set    pop
        .endm
 
        .macro  kernel_entry_setup
        sll     t0, t0, 6   /* SC bit */
        bgez    t0, 9f
 
-       eva_entry
+       platform_eva_init
        b       0f
 9:
        /* Assume we came from YAMON... */
@@ -127,8 +138,7 @@ nonsc_processor:
 #ifdef CONFIG_EVA
        sync
        ehb
-       mfc0    t1, CP0_CONFIG
-       eva_entry
+       platform_eva_init
 #endif
        .endm
 
index ceeb1f5..0eb43c8 100644 (file)
 
 #include <asm/mach-netlogic/multi-node.h>
 
-#ifdef CONFIG_SMP
-#define topology_physical_package_id(cpu)      cpu_to_node(cpu)
-#define topology_core_id(cpu)  (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE)
-#define topology_thread_cpumask(cpu)           (&cpu_sibling_map[cpu])
-#define topology_core_cpumask(cpu)     cpumask_of_node(cpu_to_node(cpu))
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_MACH_NETLOGIC_TOPOLOGY_H */
index 027c74d..df49a30 100644 (file)
@@ -122,6 +122,9 @@ do {                                                                        \
        }                                                               \
 } while(0)
 
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+       pte_t pteval);
+
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 
 #define pte_none(pte)          (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
@@ -145,7 +148,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
                }
        }
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -183,7 +185,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
        }
 #endif
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -390,15 +391,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
        pte_t pte);
-extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
-       pte_t pte);
 
 static inline void update_mmu_cache(struct vm_area_struct *vma,
        unsigned long address, pte_t *ptep)
 {
        pte_t pte = *ptep;
        __update_tlb(vma, address, pte);
-       __update_cache(vma, address, pte);
 }
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
index 17960fe..cdf68b3 100644 (file)
@@ -131,10 +131,12 @@ static inline int syscall_get_arch(void)
 {
        int arch = EM_MIPS;
 #ifdef CONFIG_64BIT
-       if (!test_thread_flag(TIF_32BIT_REGS))
+       if (!test_thread_flag(TIF_32BIT_REGS)) {
                arch |= __AUDIT_ARCH_64BIT;
-       if (test_thread_flag(TIF_32BIT_ADDR))
-               arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
+               /* N32 sets only TIF_32BIT_ADDR */
+               if (test_thread_flag(TIF_32BIT_ADDR))
+                       arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
+       }
 #endif
 #if defined(__LITTLE_ENDIAN)
        arch |=  __AUDIT_ARCH_LE;
index 6f4f739..e6e97d2 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/asmmacro.h>
 #include <asm/cacheops.h>
+#include <asm/eva.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/pm.h>
@@ -166,6 +167,9 @@ dcache_done:
 1:     jal     mips_cps_core_init
         nop
 
+       /* Do any EVA initialization if necessary */
+       eva_init
+
        /*
         * Boot any other VPEs within this core that should be online, and
         * deactivate this VPE if it should be offline.
index 14bf74b..b63f248 100644 (file)
@@ -558,7 +558,7 @@ static int mipspmu_get_irq(void)
        if (mipspmu.irq >= 0) {
                /* Request my own irq handler. */
                err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
-                       IRQF_PERCPU | IRQF_NOBALANCING,
+                       IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
                        "mips_perf_pmu", NULL);
                if (err) {
                        pr_warning("Unable to request IRQ%d for MIPS "
index 13b964f..25bb840 100644 (file)
@@ -113,15 +113,19 @@ trace_a_syscall:
        move    s0, t2                  # Save syscall pointer
        move    a0, sp
        /*
-        * syscall number is in v0 unless we called syscall(__NR_###)
+        * absolute syscall number is in v0 unless we called syscall(__NR_###)
         * where the real syscall number is in a0
         * note: NR_syscall is the first O32 syscall but the macro is
         * only defined when compiling with -mabi=32 (CONFIG_32BIT)
         * therefore __NR_O32_Linux is used (4000)
         */
-       addiu   a1, v0,  __NR_O32_Linux
-       bnez    v0, 1f /* __NR_syscall at offset 0 */
-       lw      a1, PT_R4(sp)
+       .set    push
+       .set    reorder
+       subu    t1, v0,  __NR_O32_Linux
+       move    a1, v0
+       bnez    t1, 1f /* __NR_syscall at offset 0 */
+       lw      a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+       .set    pop
 
 1:     jal     syscall_trace_enter
 
index 9182e8d..b03e37d 100644 (file)
 static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
        void *data)
 {
-       int fpu_enabled;
+       int fpu_owned;
        int fr = !test_thread_flag(TIF_32BIT_FPREGS);
 
        switch (action) {
        case CU2_EXCEPTION:
                preempt_disable();
-               fpu_enabled = read_c0_status() & ST0_CU1;
+               fpu_owned = __is_fpu_owner();
                if (!fr)
                        set_c0_status(ST0_CU1 | ST0_CU2);
                else
@@ -39,8 +39,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
                        KSTK_STATUS(current) |= ST0_FR;
                else
                        KSTK_STATUS(current) &= ~ST0_FR;
-               /* If FPU is enabled, we needn't init or restore fp */
-               if(!fpu_enabled) {
+               /* If FPU is owned, we needn't init or restore fp */
+               if (!fpu_owned) {
                        set_thread_flag(TIF_USEDFPU);
                        if (!used_math()) {
                                _init_fpu();
index ca025a6..37ed184 100644 (file)
@@ -24,8 +24,6 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
-#include <linux/bootmem.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <asm/bootinfo.h>
 #include <asm/mc146818-time.h>
index f7b91d3..7e3ea77 100644 (file)
@@ -119,25 +119,36 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 EXPORT_SYMBOL(__flush_anon_page);
 
-void __update_cache(struct vm_area_struct *vma, unsigned long address,
-       pte_t pte)
+static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
 {
        struct page *page;
-       unsigned long pfn, addr;
-       int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+       unsigned long pfn = pte_pfn(pteval);
 
-       pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
+
        page = pfn_to_page(pfn);
        if (page_mapping(page) && Page_dcache_dirty(page)) {
-               addr = (unsigned long) page_address(page);
-               if (exec || pages_do_alias(addr, address & PAGE_MASK))
-                       flush_data_cache_page(addr);
+               unsigned long page_addr = (unsigned long) page_address(page);
+
+               if (!cpu_has_ic_fills_f_dc ||
+                   pages_do_alias(page_addr, address & PAGE_MASK))
+                       flush_data_cache_page(page_addr);
                ClearPageDcacheDirty(page);
        }
 }
 
+void set_pte_at(struct mm_struct *mm, unsigned long addr,
+        pte_t *ptep, pte_t pteval)
+{
+        if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
+                if (pte_present(pteval))
+                        mips_flush_dcache_from_pte(pteval, addr);
+        }
+
+        set_pte(ptep, pteval);
+}
+
 unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
index 0c35dee..8fddd2c 100644 (file)
@@ -35,13 +35,19 @@ fw_memblock_t * __init fw_getmdesc(int eva)
        /* otherwise look in the environment */
 
        memsize_str = fw_getenv("memsize");
-       if (memsize_str)
-               tmp = kstrtol(memsize_str, 0, &memsize);
+       if (memsize_str) {
+               tmp = kstrtoul(memsize_str, 0, &memsize);
+               if (tmp)
+                       pr_warn("Failed to read the 'memsize' env variable.\n");
+       }
        if (eva) {
        /* Look for ememsize for EVA */
                ememsize_str = fw_getenv("ememsize");
-               if (ememsize_str)
-                       tmp = kstrtol(ememsize_str, 0, &ememsize);
+               if (ememsize_str) {
+                       tmp = kstrtoul(ememsize_str, 0, &ememsize);
+                       if (tmp)
+                               pr_warn("Failed to read the 'ememsize' env variable.\n");
+               }
        }
        if (!memsize && !ememsize) {
                pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
index 941744a..f914c75 100644 (file)
@@ -51,7 +51,7 @@ static inline void sec_int_dispatch(void)  { do_IRQ(MSP_INT_SEC);  }
  * the range 40-71.
  */
 
-asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
+asmlinkage void plat_irq_dispatch(void)
 {
        u32 pending;
 
index 329d7fd..b9615ba 100644 (file)
@@ -101,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma()
        ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
        if (!ri)
                return NULL;
-       page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
+       page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
        if (!page)
                goto err_out;
        atomic_set(&ri->use_count, 1);
@@ -135,12 +135,12 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
 {
        unsigned long align_pages = HPT_ALIGN_PAGES;
 
-       VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+       VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
        /* Old CPUs require HPT aligned on a multiple of its size */
        if (!cpu_has_feature(CPU_FTR_ARCH_206))
                align_pages = nr_pages;
-       return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
+       return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
 
index 3802d2d..940ac49 100644 (file)
 #define __NR_sched_setattr     345
 #define __NR_sched_getattr     346
 #define __NR_renameat2         347
-#define NR_syscalls 348
+#define __NR_seccomp           348
+#define __NR_getrandom         349
+#define __NR_memfd_create      350
+#define NR_syscalls 351
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index 45cdb37..faf6caa 100644 (file)
@@ -214,3 +214,6 @@ COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, fla
 COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
 COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags);
 COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags);
+COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs)
+COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
+COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
index 633ca75..22aac58 100644 (file)
@@ -2060,6 +2060,13 @@ void s390_reset_system(void (*func)(void *), void *data)
        S390_lowcore.program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
 
+       /*
+        * Clear subchannel ID and number to signal new kernel that no CCW or
+        * SCSI IPL has been done (for kexec and kdump)
+        */
+       S390_lowcore.subchannel_id = 0;
+       S390_lowcore.subchannel_nr = 0;
+
        /* Store status at absolute zero */
        store_status();
 
index ae1d5be..82bc113 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/stddef.h>
 #include <linux/unistd.h>
 #include <linux/ptrace.h>
+#include <linux/random.h>
 #include <linux/user.h>
 #include <linux/tty.h>
 #include <linux/ioport.h>
@@ -61,6 +62,7 @@
 #include <asm/diag.h>
 #include <asm/os_info.h>
 #include <asm/sclp.h>
+#include <asm/sysinfo.h>
 #include "entry.h"
 
 /*
@@ -766,6 +768,7 @@ static void __init setup_hwcaps(void)
 #endif
 
        get_cpu_id(&cpu_id);
+       add_device_randomness(&cpu_id, sizeof(cpu_id));
        switch (cpu_id.machine) {
        case 0x9672:
 #if !defined(CONFIG_64BIT)
@@ -803,6 +806,19 @@ static void __init setup_hwcaps(void)
        }
 }
 
+/*
+ * Add system information as device randomness
+ */
+static void __init setup_randomness(void)
+{
+       struct sysinfo_3_2_2 *vmms;
+
+       vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
+       if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+               add_device_randomness(&vmms, vmms->count);
+       free_page((unsigned long) vmms);
+}
+
 /*
  * Setup function called from init/main.c just after the banner
  * was printed.
@@ -901,6 +917,9 @@ void __init setup_arch(char **cmdline_p)
 
        /* Setup zfcpdump support */
        setup_zfcpdump();
+
+       /* Add system specific data to the random pool */
+       setup_randomness();
 }
 
 #ifdef CONFIG_32BIT
index fe5cdf2..6fe886a 100644 (file)
@@ -356,3 +356,6 @@ SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
 SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
 SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
 SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2)
+SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
+SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
+SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
index 453fa5c..b319846 100644 (file)
@@ -172,6 +172,7 @@ menu "System type"
 #
 config CPU_SH2
        bool
+       select SH_INTC
 
 config CPU_SH2A
        bool
@@ -182,6 +183,7 @@ config CPU_SH3
        bool
        select CPU_HAS_INTEVT
        select CPU_HAS_SR_RB
+       select SH_INTC
        select SYS_SUPPORTS_SH_TMU
 
 config CPU_SH4
@@ -189,6 +191,7 @@ config CPU_SH4
        select CPU_HAS_INTEVT
        select CPU_HAS_SR_RB
        select CPU_HAS_FPU if !CPU_SH4AL_DSP
+       select SH_INTC
        select SYS_SUPPORTS_SH_TMU
        select SYS_SUPPORTS_HUGETLBFS if MMU
 
index 5724601..7c492ed 100644 (file)
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 10
+#define KVM_NR_VAR_MTRR 8
 
 #define ASYNC_PF_PER_VCPU 64
 
index 47c410d..4b0e1df 100644 (file)
@@ -683,7 +683,7 @@ END(syscall_badsys)
 sysenter_badsys:
        movl $-ENOSYS,%eax
        jmp sysenter_after_call
-END(syscall_badsys)
+END(sysenter_badsys)
        CFI_ENDPROC
 
 .macro FIXUP_ESPFIX_STACK
index 56657b0..03954f7 100644 (file)
@@ -1491,9 +1491,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                        goto exception;
                break;
        case VCPU_SREG_CS:
-               if (in_task_switch && rpl != dpl)
-                       goto exception;
-
                if (!(seg_desc.type & 8))
                        goto exception;
 
@@ -4394,8 +4391,11 @@ done_prefixes:
 
        ctxt->execute = opcode.u.execute;
 
+       if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+               return EMULATION_FAILED;
+
        if (unlikely(ctxt->d &
-                    (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+                    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
                /*
                 * These are copied unconditionally here, and checked unconditionally
                 * in x86_emulate_insn.
@@ -4406,9 +4406,6 @@ done_prefixes:
                if (ctxt->d & NotImpl)
                        return EMULATION_FAILED;
 
-               if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
-                       return EMULATION_FAILED;
-
                if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
                        ctxt->op_bytes = 8;
 
index 1fe3398..ee61c36 100644 (file)
@@ -49,7 +49,13 @@ void leave_mm(int cpu)
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
-               trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+               /*
+                * This gets called in the idle path where RCU
+                * functions differently.  Tracing normally
+                * uses RCU, so we have to call the tracepoint
+                * specially here.
+                */
+               trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -174,7 +180,7 @@ void flush_tlb_current_task(void)
  *
  * This is in units of pages.
  */
-unsigned long tlb_single_page_flush_ceiling = 33;
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
index bc423f7..f14b4ab 100644 (file)
@@ -520,7 +520,7 @@ void bio_integrity_endio(struct bio *bio, int error)
         */
        if (error) {
                bio->bi_end_io = bip->bip_end_io;
-               bio_endio(bio, error);
+               bio_endio_nodec(bio, error);
 
                return;
        }
index c359d72..bf930f4 100644 (file)
@@ -1252,7 +1252,6 @@ void blk_rq_set_block_pc(struct request *rq)
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        memset(rq->__cmd, 0, sizeof(rq->__cmd));
-       rq->cmd = rq->__cmd;
 }
 EXPORT_SYMBOL(blk_rq_set_block_pc);
 
index 5189cb1..4aac826 100644 (file)
@@ -112,18 +112,22 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
  */
 void blk_mq_freeze_queue(struct request_queue *q)
 {
+       bool freeze;
+
        spin_lock_irq(q->queue_lock);
-       q->mq_freeze_depth++;
+       freeze = !q->mq_freeze_depth++;
        spin_unlock_irq(q->queue_lock);
 
-       percpu_ref_kill(&q->mq_usage_counter);
-       blk_mq_run_queues(q, false);
+       if (freeze) {
+               percpu_ref_kill(&q->mq_usage_counter);
+               blk_mq_run_queues(q, false);
+       }
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-       bool wake = false;
+       bool wake;
 
        spin_lock_irq(q->queue_lock);
        wake = !--q->mq_freeze_depth;
@@ -172,6 +176,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
        /* tag was already set */
        rq->errors = 0;
 
+       rq->cmd = rq->__cmd;
+
        rq->extra_len = 0;
        rq->sense_len = 0;
        rq->resid_len = 0;
@@ -1068,13 +1074,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
                blk_account_io_start(rq, 1);
 }
 
+static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
+{
+       return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+               !blk_queue_nomerges(hctx->queue);
+}
+
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
 {
-       struct request_queue *q = hctx->queue;
-
-       if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+       if (!hctx_allow_merges(hctx)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
 insert_rq:
@@ -1082,6 +1092,8 @@ insert_rq:
                spin_unlock(&ctx->lock);
                return false;
        } else {
+               struct request_queue *q = hctx->queue;
+
                spin_lock(&ctx->lock);
                if (!blk_mq_attempt_merge(q, ctx, bio)) {
                        blk_mq_bio_to_request(rq, bio);
@@ -1574,7 +1586,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
                hctx->tags = set->tags[i];
 
                /*
-                * Allocate space for all possible cpus to avoid allocation in
+                * Allocate space for all possible cpus to avoid allocation at
                 * runtime
                 */
                hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
@@ -1662,8 +1674,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
        queue_for_each_hw_ctx(q, hctx, i) {
                /*
-                * If not software queues are mapped to this hardware queue,
-                * disable it and free the request entries
+                * If no software queues are mapped to this hardware queue,
+                * disable it and free the request entries.
                 */
                if (!hctx->nr_ctx) {
                        struct blk_mq_tag_set *set = q->tag_set;
@@ -1713,14 +1725,10 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 {
        struct blk_mq_tag_set *set = q->tag_set;
 
-       blk_mq_freeze_queue(q);
-
        mutex_lock(&set->tag_list_lock);
        list_del_init(&q->tag_set_list);
        blk_mq_update_tag_set_depth(set);
        mutex_unlock(&set->tag_list_lock);
-
-       blk_mq_unfreeze_queue(q);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
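
The freeze/unfreeze hunks above turn mq_freeze_depth into a true nesting count, so only the outermost freeze kills the usage counter and only the matching final unfreeze revives it. A minimal userspace C sketch of that counting pattern, using hypothetical names rather than the real block-layer API:

/*
 * Illustrative sketch only -- not kernel code.  Mirrors the nesting
 * behaviour of the hunks above: the 0 -> 1 transition does the expensive
 * "kill" work once, and the 1 -> 0 transition does the single wakeup.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_queue {
        int freeze_depth;       /* plays the role of q->mq_freeze_depth */
        bool usage_killed;      /* stands in for the killed percpu ref */
};

static void fake_freeze_queue(struct fake_queue *q)
{
        bool first = (q->freeze_depth++ == 0);

        if (first) {
                q->usage_killed = true;         /* kill the counter once */
                printf("freeze: usage counter killed\n");
        }
        /* every caller would still wait for outstanding users here */
}

static void fake_unfreeze_queue(struct fake_queue *q)
{
        bool last = (--q->freeze_depth == 0);

        if (last) {
                q->usage_killed = false;        /* revive and wake waiters */
                printf("unfreeze: usage counter revived\n");
        }
}

int main(void)
{
        struct fake_queue q = { 0, false };

        fake_freeze_queue(&q);          /* outermost: does the work */
        fake_freeze_queue(&q);          /* nested: just bumps the depth */
        fake_unfreeze_queue(&q);        /* not last: no wakeup */
        fake_unfreeze_queue(&q);        /* last: revives the counter */
        return 0;
}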
index cadc378..3f31cf9 100644 (file)
@@ -1272,15 +1272,22 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
        rb_insert_color(&cfqg->rb_node, &st->rb);
 }
 
+/*
+ * This has to be called only on activation of cfqg
+ */
 static void
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
-       BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
        if (cfqg->new_weight) {
                cfqg->weight = cfqg->new_weight;
                cfqg->new_weight = 0;
        }
+}
+
+static void
+cfq_update_group_leaf_weight(struct cfq_group *cfqg)
+{
+       BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
        if (cfqg->new_leaf_weight) {
                cfqg->leaf_weight = cfqg->new_leaf_weight;
@@ -1299,7 +1306,12 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
        /* add to the service tree */
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
-       cfq_update_group_weight(cfqg);
+       /*
+        * Update leaf_weight.  We cannot update weight at this point
+        * because cfqg might already have been activated and is
+        * contributing its current weight to the parent's child_weight.
+        */
+       cfq_update_group_leaf_weight(cfqg);
        __cfq_group_service_tree_add(st, cfqg);
 
        /*
@@ -1323,6 +1335,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
         */
        while ((parent = cfqg_parent(pos))) {
                if (propagate) {
+                       cfq_update_group_weight(pos);
                        propagate = !parent->nr_active++;
                        parent->children_weight += pos->weight;
                }
index 51bf515..9b8eaec 100644 (file)
@@ -279,7 +279,6 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
        r = blk_rq_unmap_user(bio);
        if (!ret)
                ret = r;
-       blk_put_request(rq);
 
        return ret;
 }
@@ -297,8 +296,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
        if (hdr->interface_id != 'S')
                return -EINVAL;
-       if (hdr->cmd_len > BLK_MAX_CDB)
-               return -EINVAL;
 
        if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
                return -EIO;
@@ -317,16 +314,23 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
        if (hdr->flags & SG_FLAG_Q_AT_HEAD)
                at_head = 1;
 
+       ret = -ENOMEM;
        rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
        if (!rq)
-               return -ENOMEM;
+               goto out;
        blk_rq_set_block_pc(rq);
 
-       if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
-               blk_put_request(rq);
-               return -EFAULT;
+       if (hdr->cmd_len > BLK_MAX_CDB) {
+               rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+               if (!rq->cmd)
+                       goto out_put_request;
        }
 
+       ret = -EFAULT;
+       if (blk_fill_sghdr_rq(q, rq, hdr, mode))
+               goto out_free_cdb;
+
+       ret = 0;
        if (hdr->iovec_count) {
                size_t iov_data_len;
                struct iovec *iov = NULL;
@@ -335,7 +339,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
                                            0, NULL, &iov);
                if (ret < 0) {
                        kfree(iov);
-                       goto out;
+                       goto out_free_cdb;
                }
 
                iov_data_len = ret;
@@ -358,7 +362,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
                                      GFP_KERNEL);
 
        if (ret)
-               goto out;
+               goto out_free_cdb;
 
        bio = rq->bio;
        memset(sense, 0, sizeof(sense));
@@ -376,9 +380,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
        hdr->duration = jiffies_to_msecs(jiffies - start_time);
 
-       return blk_complete_sghdr_rq(rq, hdr, bio);
-out:
+       ret = blk_complete_sghdr_rq(rq, hdr, bio);
+
+out_free_cdb:
+       if (rq->cmd != rq->__cmd)
+               kfree(rq->cmd);
+out_put_request:
        blk_put_request(rq);
+out:
        return ret;
 }
 
@@ -448,6 +457,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
        }
 
        rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+       if (!rq) {
+               err = -ENOMEM;
+               goto error;
+       }
+       blk_rq_set_block_pc(rq);
 
        cmdlen = COMMAND_SIZE(opcode);
 
@@ -501,7 +515,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
        memset(sense, 0, sizeof(sense));
        rq->sense = sense;
        rq->sense_len = 0;
-       blk_rq_set_block_pc(rq);
 
        blk_execute_rq(q, disk, rq, 0);
 
@@ -521,7 +534,8 @@ out:
        
 error:
        kfree(buffer);
-       blk_put_request(rq);
+       if (rq)
+               blk_put_request(rq);
        return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
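
The sg_io() changes above adopt an inline-or-heap command buffer: CDBs that fit in the request's built-in __cmd array use it directly, while larger ones get a zeroed heap allocation that is freed only when it differs from the inline buffer. A self-contained C sketch of that pattern (names are hypothetical stand-ins, not the kernel structures):

/*
 * Sketch of the inline-or-heap CDB buffer pattern used by the sg_io()
 * hunks above; names are hypothetical, not the real block-layer types.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INLINE_CDB_LEN 16                       /* stand-in for BLK_MAX_CDB */

struct fake_request {
        unsigned char __cmd[INLINE_CDB_LEN];    /* built-in buffer */
        unsigned char *cmd;                     /* points at __cmd or heap */
        unsigned short cmd_len;
};

static int request_set_cdb(struct fake_request *rq,
                           const unsigned char *cdb, unsigned short len)
{
        rq->cmd = rq->__cmd;                    /* default: inline buffer */
        if (len > INLINE_CDB_LEN) {
                rq->cmd = calloc(len, 1);       /* large CDB: heap buffer */
                if (!rq->cmd)
                        return -1;
        }
        memcpy(rq->cmd, cdb, len);
        rq->cmd_len = len;
        return 0;
}

static void request_put_cdb(struct fake_request *rq)
{
        if (rq->cmd != rq->__cmd)               /* free only heap buffers */
                free(rq->cmd);
        rq->cmd = rq->__cmd;
}

int main(void)
{
        struct fake_request rq = { .cmd_len = 0 };
        unsigned char big_cdb[32] = { 0x7f, };  /* larger than the inline buffer */

        if (request_set_cdb(&rq, big_cdb, sizeof(big_cdb)) == 0) {
                printf("cmd_len=%u, heap=%s\n", (unsigned int)rq.cmd_len,
                       rq.cmd != rq.__cmd ? "yes" : "no");
                request_put_cdb(&rq);
        }
        return 0;
}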
index fc3df47..f1fef74 100644 (file)
@@ -24,8 +24,8 @@
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include <linux/tegra-powergate.h>
 #include <linux/regulator/consumer.h>
+#include <soc/tegra/pmc.h>
 #include "ahci.h"
 
 #define SATA_CONFIGURATION_0                           0x180
index bc28111..c696230 100644 (file)
@@ -344,7 +344,7 @@ static struct ata_port_operations xgene_ahci_ops = {
 };
 
 static const struct ata_port_info xgene_ahci_port_info = {
-       .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+       .flags = AHCI_FLAG_COMMON,
        .pio_mask = ATA_PIO4,
        .udma_mask = ATA_UDMA6,
        .port_ops = &xgene_ahci_ops,
@@ -480,7 +480,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
        /* Configure the host controller */
        xgene_ahci_hw_init(hpriv);
 
-       hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
+       hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
 
        rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
        if (rc)
index dbdc5d3..f3e7b9f 100644 (file)
@@ -4228,7 +4228,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
        { "Crucial_CT???M500SSD*",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
        { "Micron_M550*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
-       { "Crucial_CT???M550SSD*",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Crucial_CT*M550SSD*",        NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
 
        /*
         * Some WD SATA-I drives spin up and down erratically when the link
index 2578fc1..1a24a5d 100644 (file)
@@ -360,7 +360,7 @@ static int pata_s3c_wait_after_reset(struct ata_link *link,
 /*
  * pata_s3c_bus_softreset - PATA device software reset
  */
-static unsigned int pata_s3c_bus_softreset(struct ata_port *ap,
+static int pata_s3c_bus_softreset(struct ata_port *ap,
                unsigned long deadline)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
index 4e006d7..7f4cb76 100644 (file)
@@ -585,7 +585,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
  *     Note: Original code is ata_bus_softreset().
  */
 
-static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
                                       unsigned long deadline)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -599,9 +599,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
        udelay(20);
        out_be32(ioaddr->ctl_addr, ap->ctl);
 
-       scc_wait_after_reset(&ap->link, devmask, deadline);
-
-       return 0;
+       return scc_wait_after_reset(&ap->link, devmask, deadline);
 }
 
 /**
@@ -618,7 +616,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
 {
        struct ata_port *ap = link->ap;
        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-       unsigned int devmask = 0, err_mask;
+       unsigned int devmask = 0;
+       int rc;
        u8 err;
 
        DPRINTK("ENTER\n");
@@ -634,9 +633,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
 
        /* issue bus reset */
        DPRINTK("about to softreset, devmask=%x\n", devmask);
-       err_mask = scc_bus_softreset(ap, devmask, deadline);
-       if (err_mask) {
-               ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
+       rc = scc_bus_softreset(ap, devmask, deadline);
+       if (rc) {
+               ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
                return -EIO;
        }
 
index c7d138e..3598110 100644 (file)
@@ -442,12 +442,15 @@ static int rd_nr;
 int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
 static int max_part;
 static int part_shift;
+static int part_show = 0;
 module_param(rd_nr, int, S_IRUGO);
 MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
 module_param(rd_size, int, S_IRUGO);
 MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
 module_param(max_part, int, S_IRUGO);
 MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
+module_param(part_show, int, S_IRUGO);
+MODULE_PARM_DESC(part_show, "Control RAM disk visibility in /proc/partitions");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
 MODULE_ALIAS("rd");
@@ -501,7 +504,8 @@ static struct brd_device *brd_alloc(int i)
        disk->fops              = &brd_fops;
        disk->private_data      = brd;
        disk->queue             = brd->brd_queue;
-       disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
+       if (!part_show)
+               disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
        sprintf(disk->disk_name, "ram%d", i);
        set_capacity(disk, rd_size * 2);
 
index ab3ea62..c4328d9 100644 (file)
@@ -1203,7 +1203,6 @@ static struct platform_driver ace_platform_driver = {
        .probe = ace_probe,
        .remove = ace_remove,
        .driver = {
-               .owner = THIS_MODULE,
                .name = "xsysace",
                .of_match_table = ace_of_match,
        },
index 3266f8f..6f550d9 100644 (file)
@@ -662,7 +662,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
                }
                if (e->num_vcs && vc >= e->num_vcs) {
                        dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
-                                       port, node_xp);
+                                       vc, node_xp);
                        return -EINVAL;
                }
                valid = 1;
index 4222cb2..7bb9d65 100644 (file)
@@ -29,7 +29,7 @@
 EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
 EXPORT_TRACEPOINT_SYMBOL(fence_emit);
 
-/**
+/*
  * fence context counter: each execution context should have its own
  * fence context, this allows checking if fences belong to the same
  * context or not. One device can have multiple separate contexts,
index f0a4364..5abe943 100644 (file)
@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
  */
 static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
 {
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        list_del(&entry->list);
        spin_unlock_irq(&__efivars->lock);
@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
        const struct efivar_operations *ops = __efivars->ops;
        efi_status_t status;
 
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        status = ops->set_variable(entry->var.VariableName,
                                   &entry->var.VendorGuid,
@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
        int strsize1, strsize2;
        bool found = false;
 
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        list_for_each_entry_safe(entry, n, head, list) {
                strsize1 = ucs2_strsize(name, 1024);
@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
        const struct efivar_operations *ops = __efivars->ops;
        efi_status_t status;
 
-       WARN_ON(!spin_is_locked(&__efivars->lock));
+       lockdep_assert_held(&__efivars->lock);
 
        status = ops->get_variable(entry->var.VariableName,
                                   &entry->var.VendorGuid,
index 41b2f40..954b9f6 100644 (file)
@@ -90,7 +90,7 @@ struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
        struct gpio_desc **dr;
        struct gpio_desc *desc;
 
-       dr = devres_alloc(devm_gpiod_release, sizeof(struct gpiod_desc *),
+       dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
                          GFP_KERNEL);
        if (!dr)
                return ERR_PTR(-ENOMEM);
index ff9eb91..fa945ec 100644 (file)
@@ -407,9 +407,27 @@ static int lp_gpio_runtime_resume(struct device *dev)
        return 0;
 }
 
+static int lp_gpio_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct lp_gpio *lg = platform_get_drvdata(pdev);
+       unsigned long reg;
+       int i;
+
+       /* on some hardware suspend clears input sensing, re-enable it here */
+       for (i = 0; i < lg->chip.ngpio; i++) {
+               if (gpiochip_is_requested(&lg->chip, i) != NULL) {
+                       reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
+                       outl(inl(reg) & ~GPINDIS_BIT, reg);
+               }
+       }
+       return 0;
+}
+
 static const struct dev_pm_ops lp_gpio_pm_ops = {
        .runtime_suspend = lp_gpio_runtime_suspend,
        .runtime_resume = lp_gpio_runtime_resume,
+       .resume = lp_gpio_resume,
 };
 
 static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
index c3145f9..31ad5df 100644 (file)
@@ -95,6 +95,9 @@ struct zynq_gpio {
        struct clk *clk;
 };
 
+static struct irq_chip zynq_gpio_level_irqchip;
+static struct irq_chip zynq_gpio_edge_irqchip;
+
 /**
  * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
  * for a given pin in the GPIO device
@@ -410,6 +413,15 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
                       gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
        writel_relaxed(int_any,
                       gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+
+       if (type & IRQ_TYPE_LEVEL_MASK) {
+               __irq_set_chip_handler_name_locked(irq_data->irq,
+                       &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
+       } else {
+               __irq_set_chip_handler_name_locked(irq_data->irq,
+                       &zynq_gpio_edge_irqchip, handle_level_irq, NULL);
+       }
+
        return 0;
 }
 
@@ -424,9 +436,21 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
 }
 
 /* irq chip descriptor */
-static struct irq_chip zynq_gpio_irqchip = {
+static struct irq_chip zynq_gpio_level_irqchip = {
        .name           = DRIVER_NAME,
        .irq_enable     = zynq_gpio_irq_enable,
+       .irq_eoi        = zynq_gpio_irq_ack,
+       .irq_mask       = zynq_gpio_irq_mask,
+       .irq_unmask     = zynq_gpio_irq_unmask,
+       .irq_set_type   = zynq_gpio_set_irq_type,
+       .irq_set_wake   = zynq_gpio_set_wake,
+       .flags          = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+};
+
+static struct irq_chip zynq_gpio_edge_irqchip = {
+       .name           = DRIVER_NAME,
+       .irq_enable     = zynq_gpio_irq_enable,
+       .irq_ack        = zynq_gpio_irq_ack,
        .irq_mask       = zynq_gpio_irq_mask,
        .irq_unmask     = zynq_gpio_irq_unmask,
        .irq_set_type   = zynq_gpio_set_irq_type,
@@ -469,10 +493,6 @@ static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc)
                                                        offset);
                                generic_handle_irq(gpio_irq);
                        }
-
-                       /* clear IRQ in HW */
-                       writel_relaxed(int_sts, gpio->base_addr +
-                                       ZYNQ_GPIO_INTSTS_OFFSET(bank_num));
                }
        }
 
@@ -610,14 +630,14 @@ static int zynq_gpio_probe(struct platform_device *pdev)
                writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
                               ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
 
-       ret = gpiochip_irqchip_add(chip, &zynq_gpio_irqchip, 0,
-                                  handle_simple_irq, IRQ_TYPE_NONE);
+       ret = gpiochip_irqchip_add(chip, &zynq_gpio_edge_irqchip, 0,
+                                  handle_level_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(&pdev->dev, "Failed to add irq chip\n");
                goto err_rm_gpiochip;
        }
 
-       gpiochip_set_chained_irqchip(chip, &zynq_gpio_irqchip, irq,
+       gpiochip_set_chained_irqchip(chip, &zynq_gpio_edge_irqchip, irq,
                                     zynq_gpio_irqhandler);
 
        pm_runtime_set_active(&pdev->dev);
index 7cfdc22..604dbe6 100644 (file)
@@ -307,7 +307,5 @@ void of_gpiochip_add(struct gpio_chip *chip)
 void of_gpiochip_remove(struct gpio_chip *chip)
 {
        gpiochip_remove_pin_ranges(chip);
-
-       if (chip->of_node)
-               of_node_put(chip->of_node);
+       of_node_put(chip->of_node);
 }
index 4c761dc..05c01ea 100644 (file)
@@ -99,6 +99,7 @@ static struct ast_vbios_dclk_info dclk_table[] = {
        {0x25, 0x65, 0x80},                                     /* 16: VCLK88.75    */
        {0x77, 0x58, 0x80},                                     /* 17: VCLK119      */
        {0x32, 0x67, 0x80},                                 /* 18: VCLK85_5     */
+       {0x6a, 0x6d, 0x80},                                     /* 19: VCLK97_75        */
 };
 
 static struct ast_vbios_stdtable vbios_stdtable[] = {
index fa2be24..90e7730 100644 (file)
@@ -4696,8 +4696,9 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
                return -EINVAL;
 
        /* overflow checks for 32bit size calculations */
+       /* NOTE: DIV_ROUND_UP() can wrap around for huge bpp and yield cpp == 0 */
        cpp = DIV_ROUND_UP(args->bpp, 8);
-       if (cpp > 0xffffffffU / args->width)
+       if (!cpp || cpp > 0xffffffffU / args->width)
                return -EINVAL;
        stride = cpp * args->width;
        if (args->height > 0xffffffffU / stride)
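
The !cpp check added above guards both against DIV_ROUND_UP() wrapping to zero
for pathological bpp values and against the stride overflowing 32 bits. A
minimal standalone sketch of the same arithmetic (plain C, not the DRM ioctl
itself; the helper name is made up, and width/height are assumed to have been
checked non-zero beforehand, as the ioctl does earlier):

#include <stdbool.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* True if bpp/width/height describe a buffer whose stride and total size
 * both fit in 32 bits.  Assumes width and height are non-zero. */
static bool dumb_size_fits_u32(uint32_t bpp, uint32_t width, uint32_t height)
{
        uint32_t cpp = DIV_ROUND_UP(bpp, 8);    /* wraps to 0 for bpp > 0xfffffff8 */
        uint32_t stride;

        if (!cpp || cpp > 0xffffffffU / width)
                return false;   /* cpp wrapped to 0, or stride would overflow */
        stride = cpp * width;
        if (height > 0xffffffffU / stride)
                return false;   /* stride * height would overflow */
        return true;
}

For example, dumb_size_fits_u32(0xffffffff, 16, 16) is rejected because the
wrapped cpp comes out as 0, a case the old check (cpp > 0xffffffffU / width
alone) would have let through.
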
index ec96f9a..e27cdbe 100644 (file)
@@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
        return true;
 }
 
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+       spin_lock_irq(&dev_priv->irq_lock);
+
+       dev_priv->long_hpd_port_mask = 0;
+       dev_priv->short_hpd_port_mask = 0;
+       dev_priv->hpd_event_bits = 0;
+
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       cancel_work_sync(&dev_priv->dig_port_work);
+       cancel_work_sync(&dev_priv->hotplug_work);
+       cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_encoder *encoder;
+
+       drm_modeset_lock_all(dev);
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+               if (intel_encoder->suspend)
+                       intel_encoder->suspend(intel_encoder);
+       }
+       drm_modeset_unlock_all(dev);
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev)
                flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
                intel_runtime_pm_disable_interrupts(dev);
+               intel_hpd_cancel_work(dev_priv);
+
+               intel_suspend_encoders(dev_priv);
 
                intel_suspend_gt_powersave(dev);
 
index 4412f6a..7a830ea 100644 (file)
@@ -1458,7 +1458,7 @@ struct drm_i915_private {
                } hpd_mark;
        } hpd_stats[HPD_NUM_PINS];
        u32 hpd_event_bits;
-       struct timer_list hotplug_reenable_timer;
+       struct delayed_work hotplug_reenable_work;
 
        struct i915_fbc fbc;
        struct i915_drrs drrs;
@@ -2178,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 
 extern void intel_console_resume(struct work_struct *work);
 
index 390ccc2..0050ee9 100644 (file)
@@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
          * some connectors */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
-               mod_timer(&dev_priv->hotplug_reenable_timer,
-                         jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+               mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
+                                msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }
 
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
                drm_kms_helper_hotplug_event(dev);
 }
 
-static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
-{
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
-}
-
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
        if (!dev_priv)
                return;
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        gen8_irq_reset(dev);
 }
 
@@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
 
        I915_WRITE(VLV_MASTER_IER, 0);
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
 
@@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
        if (!dev_priv)
                return;
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        ironlake_irq_reset(dev);
 }
 
@@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
        if (!dev_priv)
                return;
 
-       intel_hpd_irq_uninstall(dev_priv);
-
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
@@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
        I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable(unsigned long data)
+static void intel_hpd_irq_reenable(struct work_struct *work)
 {
-       struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv),
+                            hotplug_reenable_work.work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        unsigned long irqflags;
        int i;
 
+       intel_runtime_pm_get(dev_priv);
+
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
                struct drm_connector *connector;
@@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data)
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
 void intel_irq_init(struct drm_device *dev)
@@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev)
        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);
-       setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
-                   (unsigned long) dev_priv);
+       INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
+                         intel_hpd_irq_reenable);
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
index 2efaf8e..e8abfce 100644 (file)
@@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force)
                goto out;
        }
 
+       drm_modeset_acquire_init(&ctx, 0);
+
        /* for pre-945g platforms use load detect */
        if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
                if (intel_crt_detect_ddc(connector))
                        status = connector_status_connected;
                else
                        status = intel_crt_load_detect(crt);
-               intel_release_load_detect_pipe(connector, &tmp, &ctx);
+               intel_release_load_detect_pipe(connector, &tmp);
        } else
                status = connector_status_unknown;
 
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
 out:
        intel_display_power_put(dev_priv, power_domain);
        return status;
index 018fb72..d074d70 100644 (file)
@@ -8462,8 +8462,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                      connector->base.id, connector->name,
                      encoder->base.id, encoder->name);
 
-       drm_modeset_acquire_init(ctx, 0);
-
 retry:
        ret = drm_modeset_lock(&config->connection_mutex, ctx);
        if (ret)
@@ -8502,10 +8500,14 @@ retry:
                i++;
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;
-               if (!possible_crtc->enabled) {
-                       crtc = possible_crtc;
-                       break;
-               }
+               if (possible_crtc->enabled)
+                       continue;
+               /* This can occur when applying the pipe A quirk on resume. */
+               if (to_intel_crtc(possible_crtc)->new_enabled)
+                       continue;
+
+               crtc = possible_crtc;
+               break;
        }
 
        /*
@@ -8574,15 +8576,11 @@ fail_unlock:
                goto retry;
        }
 
-       drm_modeset_drop_locks(ctx);
-       drm_modeset_acquire_fini(ctx);
-
        return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-                                   struct intel_load_detect_pipe *old,
-                                   struct drm_modeset_acquire_ctx *ctx)
+                                   struct intel_load_detect_pipe *old)
 {
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(connector);
@@ -8606,17 +8604,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                        drm_framebuffer_unreference(old->release_fb);
                }
 
-               goto unlock;
                return;
        }
 
        /* Switch crtc and encoder back off if necessary */
        if (old->dpms_mode != DRM_MODE_DPMS_ON)
                connector->funcs->dpms(connector, old->dpms_mode);
-
-unlock:
-       drm_modeset_drop_locks(ctx);
-       drm_modeset_acquire_fini(ctx);
 }
 
 static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11700,8 +11693,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        };
        const struct drm_rect clip = {
                /* integer pixels */
-               .x2 = intel_crtc->config.pipe_src_w,
-               .y2 = intel_crtc->config.pipe_src_h,
+               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
        };
        bool visible;
        int ret;
@@ -12659,7 +12652,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
        struct intel_connector *connector;
        struct drm_connector *crt = NULL;
        struct intel_load_detect_pipe load_detect_temp;
-       struct drm_modeset_acquire_ctx ctx;
+       struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
 
        /* We can't just switch on the pipe A, we need to set things up with a
         * proper mode and output configuration. As a gross hack, enable pipe A
@@ -12676,10 +12669,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
        if (!crt)
                return;
 
-       if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
-               intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
-
-
+       if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+               intel_release_load_detect_pipe(crt, &load_detect_temp);
 }
 
 static bool
@@ -13112,7 +13103,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
         * experience fancy races otherwise.
         */
        drm_irq_uninstall(dev);
-       cancel_work_sync(&dev_priv->hotplug_work);
+       intel_hpd_cancel_work(dev_priv);
        dev_priv->pm._irqs_disabled = true;
 
        /*
index ee3942f..67cfed6 100644 (file)
@@ -3553,6 +3553,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
        if (WARN_ON(!intel_encoder->base.crtc))
                return;
 
+       if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+               return;
+
        /* Try to read receiver status if the link appears to be up */
        if (!intel_dp_get_link_status(intel_dp, link_status)) {
                return;
@@ -4003,6 +4006,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_dig_port);
 }
 
+static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+       if (!is_edp(intel_dp))
+               return;
+
+       edp_panel_vdd_off_sync(intel_dp);
+}
+
 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
        intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
@@ -4037,15 +4050,21 @@ bool
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
        struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
+       enum intel_display_power_domain power_domain;
+       bool ret = true;
+
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
                intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
        DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
                      long_hpd ? "long" : "short");
 
+       power_domain = intel_display_port_power_domain(intel_encoder);
+       intel_display_power_get(dev_priv, power_domain);
+
        if (long_hpd) {
                if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
                        goto mst_fail;
@@ -4061,8 +4080,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
        } else {
                if (intel_dp->is_mst) {
-                       ret = intel_dp_check_mst_status(intel_dp);
-                       if (ret == -EINVAL)
+                       if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
                                goto mst_fail;
                }
 
@@ -4076,7 +4094,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
                }
        }
-       return false;
+       ret = false;
+       goto put_power;
 mst_fail:
        /* if we were in MST mode, and device is not there get out of MST mode */
        if (intel_dp->is_mst) {
@@ -4084,7 +4103,10 @@ mst_fail:
                intel_dp->is_mst = false;
                drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
        }
-       return true;
+put_power:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -4722,6 +4744,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
        intel_encoder->disable = intel_disable_dp;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
+       intel_encoder->suspend = intel_dp_encoder_suspend;
        if (IS_CHERRYVIEW(dev)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
index 4b2664b..b8c8bbd 100644 (file)
@@ -153,6 +153,12 @@ struct intel_encoder {
         * be set correctly before calling this function. */
        void (*get_config)(struct intel_encoder *,
                           struct intel_crtc_config *pipe_config);
+       /*
+        * Called during system suspend after all pending requests for the
+        * encoder are flushed (for example for DP AUX transactions) and
+        * device interrupts are disabled.
+        */
+       void (*suspend)(struct intel_encoder *);
        int crtc_mask;
        enum hpd_pin hpd_pin;
 };
@@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                                struct intel_load_detect_pipe *old,
                                struct drm_modeset_acquire_ctx *ctx);
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-                                   struct intel_load_detect_pipe *old,
-                                   struct drm_modeset_acquire_ctx *ctx);
+                                   struct intel_load_detect_pipe *old);
 int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                               struct drm_i915_gem_object *obj,
                               struct intel_engine_cs *pipelined);
index e211eef..32186a6 100644 (file)
@@ -1323,11 +1323,16 @@ intel_tv_detect(struct drm_connector *connector, bool force)
                struct intel_load_detect_pipe tmp;
                struct drm_modeset_acquire_ctx ctx;
 
+               drm_modeset_acquire_init(&ctx, 0);
+
                if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
                        type = intel_tv_detect_type(intel_tv, connector);
-                       intel_release_load_detect_pipe(connector, &tmp, &ctx);
+                       intel_release_load_detect_pipe(connector, &tmp);
                } else
                        return connector_status_unknown;
+
+               drm_modeset_drop_locks(&ctx);
+               drm_modeset_acquire_fini(&ctx);
        } else
                return connector->status;
 
index 74cebb5..c6c80ea 100644 (file)
@@ -397,6 +397,7 @@ static void mdp4_crtc_prepare(struct drm_crtc *crtc)
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        DBG("%s", mdp4_crtc->name);
        /* make sure we hold a ref to mdp clks while setting up mode: */
+       drm_crtc_vblank_get(crtc);
        mdp4_enable(get_kms(crtc));
        mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 }
@@ -407,6 +408,7 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
        crtc_flush(crtc);
        /* drop the ref to mdp clk's that we got in prepare: */
        mdp4_disable(get_kms(crtc));
+       drm_crtc_vblank_put(crtc);
 }
 
 static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
index b447c01..26ee80d 100644 (file)
@@ -974,12 +974,11 @@ static int msm_pdev_probe(struct platform_device *pdev)
 
        for (i = 0; i < ARRAY_SIZE(devnames); i++) {
                struct device *dev;
-               int ret;
 
                dev = bus_find_device_by_name(&platform_bus_type,
                                NULL, devnames[i]);
                if (!dev) {
-                       dev_info(master, "still waiting for %s\n", devnames[i]);
+                       dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
                        return -EPROBE_DEFER;
                }
 
index 9c5221c..ab5bfd2 100644 (file)
@@ -143,7 +143,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
        if (ret) {
                dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
-               goto fail;
+               goto fail_unlock;
        }
 
        fbi = framebuffer_alloc(0, dev->dev);
index 099af48..7acdaa5 100644 (file)
@@ -27,8 +27,8 @@ struct msm_iommu {
 static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
                unsigned long iova, int flags, void *arg)
 {
-       DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
-       return -ENOSYS;
+       pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+       return 0;
 }
 
 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
index 0013ad0..f77b713 100644 (file)
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
        evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
        evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
        atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
-       si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
+       si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \
        r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
        rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
        trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
index 022561e..d416bb2 100644 (file)
@@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
        WREG32_SMC(CG_THERMAL_CTRL, tmp);
 #endif
 
+       rdev->pm.dpm.thermal.min_temp = low_temp;
+       rdev->pm.dpm.thermal.max_temp = high_temp;
+
        return 0;
 }
 
index b625646..fa95659 100644 (file)
@@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
        u32 mc_shared_chmap, mc_arb_ramcfg;
        u32 hdp_host_path_cntl;
        u32 tmp;
-       int i, j, k;
+       int i, j;
 
        switch (rdev->family) {
        case CHIP_BONAIRE:
@@ -3544,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
                           (rdev->pdev->device == 0x130B) ||
                           (rdev->pdev->device == 0x130E) ||
                           (rdev->pdev->device == 0x1315) ||
+                          (rdev->pdev->device == 0x1318) ||
                           (rdev->pdev->device == 0x131B)) {
                        rdev->config.cik.max_cu_per_sh = 4;
                        rdev->config.cik.max_backends_per_se = 1;
@@ -3672,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev)
                     rdev->config.cik.max_sh_per_se,
                     rdev->config.cik.max_backends_per_se);
 
+       rdev->config.cik.active_cus = 0;
        for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
                for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
-                       for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
-                               rdev->config.cik.active_cus +=
-                                       hweight32(cik_get_cu_active_bitmap(rdev, i, j));
-                       }
+                       rdev->config.cik.active_cus +=
+                               hweight32(cik_get_cu_active_bitmap(rdev, i, j));
                }
        }
 
@@ -3801,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
@@ -3920,6 +3920,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, 0);
 }
 
+/**
+ * cik_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
                             struct radeon_ring *ring,
                             struct radeon_semaphore *semaphore,
@@ -3932,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, lower_32_bits(addr));
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
+       if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+               /* Prevent the PFP from running ahead of the semaphore wait */
+               radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               radeon_ring_write(ring, 0x0);
+       }
+
        return true;
 }
 
@@ -4004,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
@@ -4103,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
        ib.ptr[2] = 0xDEADBEEF;
        ib.length_dw = 3;
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_scratch_free(rdev, scratch);
                radeon_ib_free(rdev, &ib);
@@ -4324,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
        radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        return 0;
 }
@@ -5732,20 +5749,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(0x15D8, 0);
        WREG32(0x15DC, 0);
 
-       /* empty context1-15 */
-       /* FIXME start with 4G, once using 2 level pt switch to full
-        * vm size space
-        */
+       /* restore context1-15 */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
        WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-                              rdev->gart.table_addr >> 12);
+                              rdev->vm_manager.saved_table_addr[i]);
                else
                        WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
-                              rdev->gart.table_addr >> 12);
+                              rdev->vm_manager.saved_table_addr[i]);
        }
 
        /* enable context1-15 */
@@ -5810,6 +5824,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
  */
 static void cik_pcie_gart_disable(struct radeon_device *rdev)
 {
+       unsigned i;
+
+       for (i = 1; i < 16; ++i) {
+               uint32_t reg;
+               if (i < 8)
+                       reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+               else
+                       reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+               rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+       }
+
        /* Disable all tables */
        WREG32(VM_CONTEXT0_CNTL, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
@@ -5958,14 +5983,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
        /* update SH_MEM_* regs */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, VMID(vm->id));
 
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, SH_MEM_BASES >> 2);
        radeon_ring_write(ring, 0);
@@ -5976,7 +6001,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
 
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, 0);
@@ -5987,7 +6012,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
        /* bits 0-15 are the VM contexts0-15 */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 0);
@@ -9538,6 +9563,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
        int ret, i;
        u16 tmp16;
 
+       if (pci_is_root_bus(rdev->pdev->bus))
+               return;
+
        if (radeon_pcie_gen2 == 0)
                return;
 
@@ -9764,7 +9792,8 @@ static void cik_program_aspm(struct radeon_device *rdev)
                        if (orig != data)
                                WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
 
-                       if (!disable_clkreq) {
+                       if (!disable_clkreq &&
+                           !pci_is_root_bus(rdev->pdev->bus)) {
                                struct pci_dev *root = rdev->pdev->bus->self;
                                u32 lnkcap;
 
index bcf4805..192278b 100644 (file)
@@ -596,7 +596,7 @@ int cik_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
@@ -638,7 +638,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
        radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
        radeon_ring_write(ring, 1); /* number of DWs to follow */
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
@@ -695,7 +695,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
index 4fedd14..dbca60c 100644 (file)
@@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 0);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        cp_me = 0xff;
        WREG32(CP_ME_CNTL, cp_me);
@@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(ring, 0x00000010); /*  */
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        return 0;
 }
index 478caef..afaba38 100644 (file)
@@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
index 9ef8c38..8b58e11 100644 (file)
@@ -1438,14 +1438,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
        return kv_enable_uvd_dpm(rdev, !gate);
 }
 
-static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
+static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
 {
        u8 i;
        struct radeon_vce_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
 
        for (i = 0; i < table->count; i++) {
-               if (table->entries[i].evclk >= 0) /* XXX */
+               if (table->entries[i].evclk >= evclk)
                        break;
        }
 
@@ -1468,7 +1468,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
                if (pi->caps_stable_p_state)
                        pi->vce_boot_level = table->count - 1;
                else
-                       pi->vce_boot_level = kv_get_vce_boot_level(rdev);
+                       pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
 
                ret = kv_copy_bytes_to_smc(rdev,
                                           pi->dpm_table_start +
@@ -2726,7 +2726,10 @@ int kv_dpm_init(struct radeon_device *rdev)
        pi->caps_sclk_ds = true;
        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
-       pi->bapm_enable = true;
+       if (radeon_bapm == 0)
+               pi->bapm_enable = false;
+       else
+               pi->bapm_enable = true;
        pi->voltage_drop_t = 0;
        pi->caps_sclk_throttle_low_notification = false;
        pi->caps_fps = false; /* true? */
index 327b85f..3faee58 100644 (file)
@@ -1271,7 +1271,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
                WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
                WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
                WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-                       rdev->gart.table_addr >> 12);
+                      rdev->vm_manager.saved_table_addr[i]);
        }
 
        /* enable context1-7 */
@@ -1303,6 +1303,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 
 static void cayman_pcie_gart_disable(struct radeon_device *rdev)
 {
+       unsigned i;
+
+       for (i = 1; i < 8; ++i) {
+               rdev->vm_manager.saved_table_addr[i] = RREG32(
+                       VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
+       }
+
        /* Disable all tables */
        WREG32(VM_CONTEXT0_CNTL, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
@@ -1505,7 +1512,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 0);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        cayman_cp_enable(rdev, true);
 
@@ -1547,7 +1554,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(ring, 0x00000010); /*  */
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        /* XXX init other rings */
 
index 04b5940..4c5ec44 100644 (file)
@@ -925,7 +925,7 @@ int r100_copy_blit(struct radeon_device *rdev,
        if (fence) {
                r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
        }
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        return r;
 }
 
@@ -958,7 +958,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 
@@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
        radeon_ring_write(ring, PACKET0(scratch, 0));
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF) {
@@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[6] = PACKET2(0);
        ib.ptr[7] = PACKET2(0);
        ib.length_dw = 8;
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                goto free_ib;
index 58f0473..6778037 100644 (file)
@@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev,
        if (fence) {
                r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
        }
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        return r;
 }
 
index 75b3033..1bc4704 100644 (file)
@@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 static void r300_errata(struct radeon_device *rdev)
index 802b192..2828605 100644 (file)
@@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
        radeon_ring_write(ring, rdev->config.r300.resync_scratch);
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 static void r420_cp_errata_fini(struct radeon_device *rdev)
@@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)
        radeon_ring_lock(rdev, ring, 8);
        radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_RB3D_DC_FINISH);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
 }
 
index c70a504..e616eb5 100644 (file)
@@ -1812,7 +1812,6 @@ static void r600_gpu_init(struct radeon_device *rdev)
 {
        u32 tiling_config;
        u32 ramcfg;
-       u32 cc_rb_backend_disable;
        u32 cc_gc_shader_pipe_config;
        u32 tmp;
        int i, j;
@@ -1939,29 +1938,20 @@ static void r600_gpu_init(struct radeon_device *rdev)
        }
        tiling_config |= BANK_SWAPS(1);
 
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-       tmp = R6XX_MAX_BACKENDS -
-               r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
-       if (tmp < rdev->config.r600.max_backends) {
-               rdev->config.r600.max_backends = tmp;
-       }
-
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
-       tmp = R6XX_MAX_PIPES -
-               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
-       if (tmp < rdev->config.r600.max_pipes) {
-               rdev->config.r600.max_pipes = tmp;
-       }
-       tmp = R6XX_MAX_SIMDS -
-               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
-       if (tmp < rdev->config.r600.max_simds) {
-               rdev->config.r600.max_simds = tmp;
-       }
        tmp = rdev->config.r600.max_simds -
                r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
        rdev->config.r600.active_simds = tmp;
 
        disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+       tmp = 0;
+       for (i = 0; i < rdev->config.r600.max_backends; i++)
+               tmp |= (1 << i);
+       /* if all the backends are disabled, fix it up here */
+       if ((disabled_rb_mask & tmp) == tmp) {
+               for (i = 0; i < rdev->config.r600.max_backends; i++)
+                       disabled_rb_mask &= ~(1 << i);
+       }
        tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
        tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
                                        R6XX_MAX_BACKENDS, disabled_rb_mask);
@@ -2547,7 +2537,7 @@ int r600_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 0);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        cp_me = 0xff;
        WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2683,7 +2673,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
@@ -2753,6 +2743,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
        }
 }
 
+/**
+ * r600_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
 bool r600_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_ring *ring,
                              struct radeon_semaphore *semaphore,
@@ -2768,6 +2769,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, lower_32_bits(addr));
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
+       /* PFP_SYNC_ME packet only exists on 7xx+ */
+       if (emit_wait && (rdev->family >= CHIP_RV770)) {
+               /* Prevent the PFP from running ahead of the semaphore wait */
+               radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               radeon_ring_write(ring, 0x0);
+       }
+
        return true;
 }
 
@@ -2845,7 +2853,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
@@ -3165,7 +3173,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
        ib.ptr[2] = 0xDEADBEEF;
        ib.length_dw = 3;
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                goto free_ib;
index 4969cef..51fd985 100644 (file)
@@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
        radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
@@ -368,7 +368,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[3] = 0xDEADBEEF;
        ib.length_dw = 4;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
@@ -493,7 +493,7 @@ int r600_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
index f94e7a9..0c4a7d8 100644 (file)
                 */
 #              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
 #              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#define        PACKET3_PFP_SYNC_ME                             0x42 /* r7xx+ only */
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_FULL_CACHE_ENA       (1 << 20) /* r7xx+ only */
index 9e1732e..5f05b4c 100644 (file)
@@ -105,6 +105,7 @@ extern int radeon_vm_size;
 extern int radeon_vm_block_size;
 extern int radeon_deep_color;
 extern int radeon_use_pflipirq;
+extern int radeon_bapm;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -914,6 +915,8 @@ struct radeon_vm_manager {
        u64                             vram_base_offset;
        /* is vm enabled? */
        bool                            enabled;
+       /* for hw to save the PD addr on suspend/resume */
+       uint32_t                        saved_table_addr[RADEON_NUM_VM];
 };
 
 /*
@@ -967,7 +970,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
                  unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-                      struct radeon_ib *const_ib);
+                      struct radeon_ib *const_ib, bool hdp_flush);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -977,8 +980,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+                       bool hdp_flush);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+                              bool hdp_flush);
 void radeon_ring_undo(struct radeon_ring *ring);
 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
index ee712c1..83f382e 100644 (file)
@@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                 * the buffers used for read only, which doubles the range
                 * to 0 to 31. 32 is reserved for the kernel driver.
                 */
-               priority = (r->flags & 0xf) * 2 + !!r->write_domain;
+               priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+                          + !!r->write_domain;
 
                /* the first reloc of an UVD job is the msg and that must be in
                   VRAM, also put everything into VRAM on AGP cards to avoid
@@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
                radeon_vce_note_usage(rdev);
 
        radeon_cs_sync_rings(parser);
-       r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+       r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        if (r) {
                DRM_ERROR("Failed to schedule IB !\n");
        }
@@ -541,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib_idx != -1)) {
-               r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+               r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
-               r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+               r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        }
 
 out:
index c8ea050..6a219bc 100644 (file)
@@ -1680,8 +1680,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)
        radeon_save_bios_scratch_regs(rdev);
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
-       radeon_pm_suspend(rdev);
        radeon_suspend(rdev);
+       radeon_hpd_fini(rdev);
 
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
@@ -1726,9 +1726,39 @@ retry:
                }
        }
 
-       radeon_pm_resume(rdev);
+       if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+               /* do dpm late init */
+               r = radeon_pm_late_init(rdev);
+               if (r) {
+                       rdev->pm.dpm_enabled = false;
+                       DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+               }
+       } else {
+               /* resume old pm late */
+               radeon_pm_resume(rdev);
+       }
+
+       /* init dig PHYs, disp eng pll */
+       if (rdev->is_atom_bios) {
+               radeon_atom_encoder_init(rdev);
+               radeon_atom_disp_eng_pll_init(rdev);
+               /* turn on the BL */
+               if (rdev->mode_info.bl_encoder) {
+                       u8 bl_level = radeon_get_backlight_level(rdev,
+                                                                rdev->mode_info.bl_encoder);
+                       radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+                                                  bl_level);
+               }
+       }
+       /* reset hpd state */
+       radeon_hpd_init(rdev);
+
        drm_helper_resume_force_mode(rdev->ddev);
 
+       /* set the power state here in case we are a PX system or headless */
+       if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+               radeon_pm_compute_clocks(rdev);
+
        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
        if (r) {
                /* bad news, how to tell it to userspace ? */
index 092d067..8df8889 100644 (file)
@@ -180,6 +180,7 @@ int radeon_vm_size = 8;
 int radeon_vm_block_size = -1;
 int radeon_deep_color = 0;
 int radeon_use_pflipirq = 2;
+int radeon_bapm = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -259,6 +260,9 @@ module_param_named(deep_color, radeon_deep_color, int, 0444);
 MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
 module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
 
+MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(bapm, radeon_bapm, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
 };
index 65b0c21..5bf2c0a 100644 (file)
@@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * @rdev: radeon_device pointer
  * @ib: IB object to schedule
  * @const_ib: Const IB to schedule (SI only)
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Schedule an IB on the associated ring (all asics).
  * Returns 0 on success, error on failure.
@@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
  * to SI there was just a DE IB.
  */
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-                      struct radeon_ib *const_ib)
+                      struct radeon_ib *const_ib, bool hdp_flush)
 {
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        int r = 0;
@@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
        if (ib->vm)
                radeon_vm_fence(rdev, ib->vm, ib->fence);
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, hdp_flush);
        return 0;
 }
 
index 23314be..164898b 100644 (file)
@@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
-       if  ((rdev->flags & RADEON_IS_PX) &&
-            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-               return snprintf(buf, PAGE_SIZE, "off\n");
-
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
-       /* Can't set dpm state when the card is off */
-       if  ((rdev->flags & RADEON_IS_PX) &&
-            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-               return -EINVAL;
-
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("battery", buf, strlen("battery")) == 0)
                rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
                goto fail;
        }
        mutex_unlock(&rdev->pm.mutex);
-       radeon_pm_compute_clocks(rdev);
+
+       /* Can't set dpm state when the card is off */
+       if (!(rdev->flags & RADEON_IS_PX) ||
+           (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
+               radeon_pm_compute_clocks(rdev);
+
 fail:
        return count;
 }
index 5b4e0cf..d656079 100644 (file)
@@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
  *
  * @rdev: radeon_device pointer
  * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Update the wptr (write pointer) to tell the GPU to
  * execute new commands on the ring buffer (all asics).
  */
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+                       bool hdp_flush)
 {
        /* If we are emitting the HDP flush via the ring buffer, we need to
         * do it before padding.
         */
-       if (rdev->asic->ring[ring->idx]->hdp_flush)
+       if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
                rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
        /* We pad to match fetch size */
        while (ring->wptr & ring->align_mask) {
@@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
        /* If we are emitting the HDP flush via MMIO, we need to do it after
         * all CPU writes to VRAM finished.
         */
-       if (rdev->asic->mmio_hdp_flush)
+       if (hdp_flush && rdev->asic->mmio_hdp_flush)
                rdev->asic->mmio_hdp_flush(rdev);
        radeon_ring_set_wptr(rdev, ring);
 }
@@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
  *
  * @rdev: radeon_device pointer
  * @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
  *
  * Call radeon_ring_commit() then unlock the ring (all asics).
  */
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+                              bool hdp_flush)
 {
-       radeon_ring_commit(rdev, ring);
+       radeon_ring_commit(rdev, ring, hdp_flush);
        mutex_unlock(&rdev->ring_lock);
 }
 
@@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
                radeon_ring_write(ring, data[i]);
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        kfree(data);
        return 0;
 }
@@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
-                                    RADEON_GEM_DOMAIN_GTT,
-                                    (rdev->flags & RADEON_IS_PCIE) ?
-                                    RADEON_GEM_GTT_WC : 0,
+                                    RADEON_GEM_DOMAIN_GTT, 0,
                                     NULL, &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
index dbd6bcd..56d9fd6 100644 (file)
@@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
                        continue;
                }
 
-               radeon_ring_commit(rdev, &rdev->ring[i]);
+               radeon_ring_commit(rdev, &rdev->ring[i], false);
                radeon_fence_note_sync(fence, ring);
 
                semaphore->gpu_addr += 8;
index 5adf420..17bc3dc 100644 (file)
@@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
                        return r;
                }
                radeon_fence_emit(rdev, fence, ring->idx);
-               radeon_ring_unlock_commit(rdev, ring);
+               radeon_ring_unlock_commit(rdev, ring, false);
        }
        return 0;
 }
@@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringA);
+       radeon_ring_unlock_commit(rdev, ringA, false);
 
        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
        if (r)
@@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringA);
+       radeon_ring_unlock_commit(rdev, ringA, false);
 
        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
        if (r)
@@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringB);
+       radeon_ring_unlock_commit(rdev, ringB, false);
 
        r = radeon_fence_wait(fence1, false);
        if (r) {
@@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringB);
+       radeon_ring_unlock_commit(rdev, ringB, false);
 
        r = radeon_fence_wait(fence2, false);
        if (r) {
@@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringA);
+       radeon_ring_unlock_commit(rdev, ringA, false);
 
        r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
        if (r)
@@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringB);
+       radeon_ring_unlock_commit(rdev, ringB, false);
        r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
        if (r)
                goto out_cleanup;
@@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringC);
+       radeon_ring_unlock_commit(rdev, ringC, false);
 
        for (i = 0; i < 30; ++i) {
                mdelay(100);
@@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
-       radeon_ring_unlock_commit(rdev, ringC);
+       radeon_ring_unlock_commit(rdev, ringC, false);
 
        mdelay(1000);
 
index 6bf55ec..341848a 100644 (file)
@@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
                ib.ptr[i] = PACKET2(0);
        ib.length_dw = 16;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r)
                goto err;
        ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
index f9b70a4..c7190aa 100644 (file)
@@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
        for (i = ib.length_dw; i < ib_size_dw; ++i)
                ib.ptr[i] = 0x0;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
        }
@@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
        for (i = ib.length_dw; i < ib_size_dw; ++i)
                ib.ptr[i] = 0x0;
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
        }
@@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
                return r;
        }
        radeon_ring_write(ring, VCE_CMD_END);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                if (vce_v1_0_get_rptr(rdev, ring) != rptr)
index ccae4d9..088ffdc 100644 (file)
@@ -420,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
        radeon_asic_vm_pad_ib(rdev, &ib);
        WARN_ON(ib.length_dw > 64);
 
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r)
                 goto error;
 
@@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                        /* add a clone of the bo_va to clear the old address */
                        struct radeon_bo_va *tmp;
                        tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+                       if (!tmp) {
+                               mutex_unlock(&vm->mutex);
+                               return -ENOMEM;
+                       }
                        tmp->it.start = bo_va->it.start;
                        tmp->it.last = bo_va->it.last;
                        tmp->vm = vm;
@@ -693,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
                radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
                radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
                WARN_ON(ib.length_dw > ndw);
-               r = radeon_ib_schedule(rdev, &ib, NULL);
+               r = radeon_ib_schedule(rdev, &ib, NULL, false);
                if (r) {
                        radeon_ib_free(rdev, &ib);
                        return r;
@@ -957,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        WARN_ON(ib.length_dw > ndw);
 
        radeon_semaphore_sync_to(ib.semaphore, vm->fence);
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                return r;
index 3e21e86..8a477bf 100644 (file)
@@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
        radeon_ring_write(ring, PACKET0(0x20C8, 0));
        radeon_ring_write(ring, 0);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
index 2983f17..d9f5ce7 100644 (file)
@@ -1177,7 +1177,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        u32 hdp_host_path_cntl;
        u32 sq_dyn_gpr_size_simd_ab_0;
        u32 gb_tiling_config = 0;
-       u32 cc_rb_backend_disable = 0;
        u32 cc_gc_shader_pipe_config = 0;
        u32 mc_arb_ramcfg;
        u32 db_debug4, tmp;
@@ -1311,21 +1310,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
                WREG32(SPI_CONFIG_CNTL, 0);
        }
 
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-       tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
-       if (tmp < rdev->config.rv770.max_backends) {
-               rdev->config.rv770.max_backends = tmp;
-       }
-
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
-       tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
-       if (tmp < rdev->config.rv770.max_pipes) {
-               rdev->config.rv770.max_pipes = tmp;
-       }
-       tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
-       if (tmp < rdev->config.rv770.max_simds) {
-               rdev->config.rv770.max_simds = tmp;
-       }
        tmp = rdev->config.rv770.max_simds -
                r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
        rdev->config.rv770.active_simds = tmp;
@@ -1348,6 +1333,14 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
        disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+       tmp = 0;
+       for (i = 0; i < rdev->config.rv770.max_backends; i++)
+               tmp |= (1 << i);
+       /* if all the backends are disabled, fix it up here */
+       if ((disabled_rb_mask & tmp) == tmp) {
+               for (i = 0; i < rdev->config.rv770.max_backends; i++)
+                       disabled_rb_mask &= ~(1 << i);
+       }
        tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
        tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
                                        R7XX_MAX_BACKENDS, disabled_rb_mask);
index bbf2e07..74426ac 100644 (file)
@@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
index 011779b..6bce408 100644 (file)
@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)
        u32 sx_debug_1;
        u32 hdp_host_path_cntl;
        u32 tmp;
-       int i, j, k;
+       int i, j;
 
        switch (rdev->family) {
        case CHIP_TAHITI:
@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)
                     rdev->config.si.max_sh_per_se,
                     rdev->config.si.max_cu_per_sh);
 
+       rdev->config.si.active_cus = 0;
        for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
                for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
-                       for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
-                               rdev->config.si.active_cus +=
-                                       hweight32(si_get_cu_active_bitmap(rdev, i, j));
-                       }
+                       rdev->config.si.active_cus +=
+                               hweight32(si_get_cu_active_bitmap(rdev, i, j));
                }
        }
 
@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
        radeon_ring_write(ring, 0xc000);
        radeon_ring_write(ring, 0xe000);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        si_cp_enable(rdev, true);
 
@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
        for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
                ring = &rdev->ring[i];
@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)
                radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
                radeon_ring_write(ring, 0);
 
-               radeon_ring_unlock_commit(rdev, ring);
+               radeon_ring_unlock_commit(rdev, ring, false);
        }
 
        return 0;
@@ -4291,10 +4290,10 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
-                              rdev->gart.table_addr >> 12);
+                              rdev->vm_manager.saved_table_addr[i]);
                else
                        WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
-                              rdev->gart.table_addr >> 12);
+                              rdev->vm_manager.saved_table_addr[i]);
        }
 
        /* enable context1-15 */
@@ -4326,6 +4325,17 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
 
 static void si_pcie_gart_disable(struct radeon_device *rdev)
 {
+       unsigned i;
+
+       for (i = 1; i < 16; ++i) {
+               uint32_t reg;
+               if (i < 8)
+                       reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+               else
+                       reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+               rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+       }
+
        /* Disable all tables */
        WREG32(VM_CONTEXT0_CNTL, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
@@ -5028,7 +5038,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
        /* flush hdp cache */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
        radeon_ring_write(ring, 0);
@@ -5036,7 +5046,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 
        /* bits 0-15 are the VM contexts0-15 */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
                                 WRITE_DATA_DST_SEL(0)));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 0);
@@ -7178,6 +7188,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
        int ret, i;
        u16 tmp16;
 
+       if (pci_is_root_bus(rdev->pdev->bus))
+               return;
+
        if (radeon_pcie_gen2 == 0)
                return;
 
@@ -7455,7 +7468,8 @@ static void si_program_aspm(struct radeon_device *rdev)
                        if (orig != data)
                                WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
 
-                       if (!disable_clkreq) {
+                       if (!disable_clkreq &&
+                           !pci_is_root_bus(rdev->pdev->bus)) {
                                struct pci_dev *root = rdev->pdev->bus->self;
                                u32 lnkcap;
 
index 7165051..7c22baa 100644 (file)
@@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev,
                return r;
        }
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);
 
        return r;
index 32e50be..57f7800 100644 (file)
@@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev)
        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;
 
-       /* There are stability issues reported on with
-        * bapm enabled when switching between AC and battery
-        * power.  At the same time, some MSI boards hang
-        * if it's not enabled and dpm is enabled.  Just enable
-        * it for MSI boards right now.
-        */
-       if (rdev->pdev->subsystem_vendor == 0x1462)
-               pi->enable_bapm = true;
-       else
+       if (radeon_bapm == -1) {
+               /* There are stability issues reported on with
+                * bapm enabled when switching between AC and battery
+                * power.  At the same time, some MSI boards hang
+                * if it's not enabled and dpm is enabled.  Just enable
+                * it for MSI boards right now.
+                */
+               if (rdev->pdev->subsystem_vendor == 0x1462)
+                       pi->enable_bapm = true;
+               else
+                       pi->enable_bapm = false;
+       } else if (radeon_bapm == 0) {
                pi->enable_bapm = false;
+       } else {
+               pi->enable_bapm = true;
+       }
        pi->enable_nbps_policy = true;
        pi->enable_sclk_ds = true;
        pi->enable_gfx_power_gating = true;
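
Editor's note: the trinity hunk above turns the MSI-only BAPM heuristic into a tri-state driven by the radeon_bapm module parameter (-1 auto, 0 off, 1 on). The following is a minimal userspace sketch of that same decision, assuming nothing beyond what the hunk shows; the 0x1462 (MSI) subsystem-vendor check is the "auto" heuristic, the stub types are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the tri-state in the hunk: -1 = auto, 0 = off, 1 = on. */
static bool want_bapm(int radeon_bapm, unsigned short subsystem_vendor)
{
	if (radeon_bapm == -1)
		return subsystem_vendor == 0x1462;	/* auto: enable only on MSI boards */
	return radeon_bapm != 0;			/* explicit off/on */
}

int main(void)
{
	printf("auto, MSI:   %d\n", want_bapm(-1, 0x1462));
	printf("auto, other: %d\n", want_bapm(-1, 0x1043));
	printf("forced on:   %d\n", want_bapm(1, 0x1043));
	return 0;
}
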
index be42c81..cda3913 100644 (file)
@@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
        radeon_ring_write(ring, 3);
 
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
 
 done:
        /* lower clocks again */
@@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
        radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(UVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
index 2d9d425..ae8850f 100644 (file)
@@ -1,6 +1,7 @@
 config DRM_STI
        tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
        depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+       select RESET_CONTROLLER
        select DRM_KMS_HELPER
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
index a7cc249..223d93c 100644 (file)
@@ -201,8 +201,8 @@ static int sti_drm_platform_probe(struct platform_device *pdev)
        master = platform_device_register_resndata(dev,
                        DRIVER_NAME "__master", -1,
                        NULL, 0, NULL, 0);
-       if (!master)
-               return -EINVAL;
+       if (IS_ERR(master))
+               return PTR_ERR(master);
 
        platform_set_drvdata(pdev, master);
        return 0;
index 72d957f..2ae9a9b 100644 (file)
@@ -730,16 +730,16 @@ static int sti_hda_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-       if (IS_ERR(hda->regs))
-               return PTR_ERR(hda->regs);
+       if (!hda->regs)
+               return -ENOMEM;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                        "video-dacs-ctrl");
        if (res) {
                hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
                                resource_size(res));
-               if (IS_ERR(hda->video_dacs_ctrl))
-                       return PTR_ERR(hda->video_dacs_ctrl);
+               if (!hda->video_dacs_ctrl)
+                       return -ENOMEM;
        } else {
                /* If no existing video-dacs-ctrl resource continue the probe */
                DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n");
@@ -770,7 +770,7 @@ static int sti_hda_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id hda_of_match[] = {
+static const struct of_device_id hda_of_match[] = {
        { .compatible = "st,stih416-hda", },
        { .compatible = "st,stih407-hda", },
        { /* end node */ }
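
Editor's note: the sti_hda, sti_hdmi and sti_tvout hunks in this series all swap IS_ERR()/PTR_ERR() checks for plain NULL checks, since devm_ioremap_nocache() reports failure by returning NULL rather than an ERR_PTR value, so IS_ERR(NULL) never fires and a mapping failure slips through. The sketch below re-implements the two kernel macros locally, purely for illustration, to show why the original check could not catch the error.

#include <stdio.h>

/* Local re-implementation of the kernel's ERR_PTR convention, for
 * illustration only: errors are encoded in the last 4095 addresses. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
	void *null_style_failure = NULL;		/* what a NULL-returning API gives back on error */
	void *errptr_style_failure = ERR_PTR(-12);	/* what an ERR_PTR-style API would return */

	printf("IS_ERR(NULL)         = %d  (failure missed)\n", IS_ERR(null_style_failure));
	printf("IS_ERR(ERR_PTR(-12)) = %d\n", IS_ERR(errptr_style_failure));
	printf("correct NULL check   = %d\n", null_style_failure == NULL);
	return 0;
}
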
index 284e541..ef93156 100644 (file)
@@ -677,7 +677,7 @@ static const struct component_ops sti_hdmi_ops = {
        .unbind = sti_hdmi_unbind,
 };
 
-static struct of_device_id hdmi_of_match[] = {
+static const struct of_device_id hdmi_of_match[] = {
        {
                .compatible = "st,stih416-hdmi",
                .data = &tx3g0c55phy_ops,
@@ -713,8 +713,8 @@ static int sti_hdmi_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-       if (IS_ERR(hdmi->regs))
-               return PTR_ERR(hdmi->regs);
+       if (!hdmi->regs)
+               return -ENOMEM;
 
        if (of_device_is_compatible(np, "st,stih416-hdmi")) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -725,8 +725,8 @@ static int sti_hdmi_probe(struct platform_device *pdev)
                }
                hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
                                                    resource_size(res));
-               if (IS_ERR(hdmi->syscfg))
-                       return PTR_ERR(hdmi->syscfg);
+               if (!hdmi->syscfg)
+                       return -ENOMEM;
 
        }
 
index b69e26f..b8afe49 100644 (file)
@@ -591,8 +591,8 @@ static int sti_tvout_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-       if (IS_ERR(tvout->regs))
-               return PTR_ERR(tvout->regs);
+       if (!tvout->regs)
+               return -ENOMEM;
 
        /* get reset resources */
        tvout->reset = devm_reset_control_get(dev, "tvout");
@@ -624,7 +624,7 @@ static int sti_tvout_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id tvout_of_match[] = {
+static const struct of_device_id tvout_of_match[] = {
        { .compatible = "st,stih416-tvout", },
        { .compatible = "st,stih407-tvout", },
        { /* end node */ }
index 1bdcccc..f745d2c 100644 (file)
@@ -28,7 +28,7 @@
 static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
+       if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
                hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
                rdesc[11] = rdesc[16] = 0xff;
                rdesc[12] = rdesc[17] = 0x03;
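
Editor's note: several of the HID report-descriptor fixups in this pull (hid-cherry here, plus the kye, lg, monterey, petalynx and sunplus hunks further down) correct the same off-by-one: the fixup writes rdesc[17] (or a similarly high index) but only required *rsize >= 17, i.e. 17 bytes covering indices 0..16. A minimal sketch of the rule the fixes apply, with made-up descriptor bytes:

#include <stdio.h>

/* To touch rdesc[i] safely the descriptor must hold at least i + 1 bytes. */
static void fixup(unsigned char *rdesc, unsigned int rsize)
{
	if (rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
		/* highest index written is 17, hence rsize >= 18 */
		rdesc[11] = rdesc[16] = 0xff;
		rdesc[12] = rdesc[17] = 0x03;
	}
}

int main(void)
{
	unsigned char desc[18] = { [11] = 0x3c, [12] = 0x02 };

	fixup(desc, 17);		/* claims only 17 bytes: left untouched */
	fixup(desc, sizeof(desc));	/* full descriptor: patched */
	printf("desc[17] = 0x%02x\n", desc[17]);
	return 0;
}
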
index 60f44cd..61b68ca 100644 (file)
@@ -84,6 +84,15 @@ static const __u8 huion_tablet_rdesc_template[] = {
        0xC0                    /*  End Collection                          */
 };
 
+/* Parameter indices */
+enum huion_prm {
+       HUION_PRM_X_LM          = 1,
+       HUION_PRM_Y_LM          = 2,
+       HUION_PRM_PRESSURE_LM   = 4,
+       HUION_PRM_RESOLUTION    = 5,
+       HUION_PRM_NUM
+};
+
 /* Driver data */
 struct huion_drvdata {
        __u8 *rdesc;
@@ -115,7 +124,12 @@ static int huion_tablet_enable(struct hid_device *hdev)
        int rc;
        struct usb_device *usb_dev = hid_to_usb_dev(hdev);
        struct huion_drvdata *drvdata = hid_get_drvdata(hdev);
-       __le16 buf[6];
+       __le16 *buf = NULL;
+       size_t len;
+       s32 params[HUION_PH_ID_NUM];
+       s32 resolution;
+       __u8 *p;
+       s32 v;
 
        /*
         * Read string descriptor containing tablet parameters. The specific
@@ -123,65 +137,79 @@ static int huion_tablet_enable(struct hid_device *hdev)
         * driver traffic.
         * NOTE: This enables fully-functional tablet mode.
         */
+       len = HUION_PRM_NUM * sizeof(*buf);
+       buf = kmalloc(len, GFP_KERNEL);
+       if (buf == NULL) {
+               hid_err(hdev, "failed to allocate parameter buffer\n");
+               rc = -ENOMEM;
+               goto cleanup;
+       }
        rc = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
                                USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
                                (USB_DT_STRING << 8) + 0x64,
-                               0x0409, buf, sizeof(buf),
+                               0x0409, buf, len,
                                USB_CTRL_GET_TIMEOUT);
-       if (rc == -EPIPE)
-               hid_warn(hdev, "device parameters not found\n");
-       else if (rc < 0)
-               hid_warn(hdev, "failed to get device parameters: %d\n", rc);
-       else if (rc != sizeof(buf))
-               hid_warn(hdev, "invalid device parameters\n");
-       else {
-               s32 params[HUION_PH_ID_NUM];
-               s32 resolution;
-               __u8 *p;
-               s32 v;
+       if (rc == -EPIPE) {
+               hid_err(hdev, "device parameters not found\n");
+               rc = -ENODEV;
+               goto cleanup;
+       } else if (rc < 0) {
+               hid_err(hdev, "failed to get device parameters: %d\n", rc);
+               rc = -ENODEV;
+               goto cleanup;
+       } else if (rc != len) {
+               hid_err(hdev, "invalid device parameters\n");
+               rc = -ENODEV;
+               goto cleanup;
+       }
 
-               /* Extract device parameters */
-               params[HUION_PH_ID_X_LM] = le16_to_cpu(buf[1]);
-               params[HUION_PH_ID_Y_LM] = le16_to_cpu(buf[2]);
-               params[HUION_PH_ID_PRESSURE_LM] = le16_to_cpu(buf[4]);
-               resolution = le16_to_cpu(buf[5]);
-               if (resolution == 0) {
-                       params[HUION_PH_ID_X_PM] = 0;
-                       params[HUION_PH_ID_Y_PM] = 0;
-               } else {
-                       params[HUION_PH_ID_X_PM] = params[HUION_PH_ID_X_LM] *
-                                                       1000 / resolution;
-                       params[HUION_PH_ID_Y_PM] = params[HUION_PH_ID_Y_LM] *
-                                                       1000 / resolution;
-               }
+       /* Extract device parameters */
+       params[HUION_PH_ID_X_LM] = le16_to_cpu(buf[HUION_PRM_X_LM]);
+       params[HUION_PH_ID_Y_LM] = le16_to_cpu(buf[HUION_PRM_Y_LM]);
+       params[HUION_PH_ID_PRESSURE_LM] =
+               le16_to_cpu(buf[HUION_PRM_PRESSURE_LM]);
+       resolution = le16_to_cpu(buf[HUION_PRM_RESOLUTION]);
+       if (resolution == 0) {
+               params[HUION_PH_ID_X_PM] = 0;
+               params[HUION_PH_ID_Y_PM] = 0;
+       } else {
+               params[HUION_PH_ID_X_PM] = params[HUION_PH_ID_X_LM] *
+                                               1000 / resolution;
+               params[HUION_PH_ID_Y_PM] = params[HUION_PH_ID_Y_LM] *
+                                               1000 / resolution;
+       }
 
-               /* Allocate fixed report descriptor */
-               drvdata->rdesc = devm_kmalloc(&hdev->dev,
-                                       sizeof(huion_tablet_rdesc_template),
-                                       GFP_KERNEL);
-               if (drvdata->rdesc == NULL) {
-                       hid_err(hdev, "failed to allocate fixed rdesc\n");
-                       return -ENOMEM;
-               }
-               drvdata->rsize = sizeof(huion_tablet_rdesc_template);
+       /* Allocate fixed report descriptor */
+       drvdata->rdesc = devm_kmalloc(&hdev->dev,
+                               sizeof(huion_tablet_rdesc_template),
+                               GFP_KERNEL);
+       if (drvdata->rdesc == NULL) {
+               hid_err(hdev, "failed to allocate fixed rdesc\n");
+               rc = -ENOMEM;
+               goto cleanup;
+       }
+       drvdata->rsize = sizeof(huion_tablet_rdesc_template);
 
-               /* Format fixed report descriptor */
-               memcpy(drvdata->rdesc, huion_tablet_rdesc_template,
-                       drvdata->rsize);
-               for (p = drvdata->rdesc;
-                    p <= drvdata->rdesc + drvdata->rsize - 4;) {
-                       if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
-                           p[3] < sizeof(params)) {
-                               v = params[p[3]];
-                               put_unaligned(cpu_to_le32(v), (s32 *)p);
-                               p += 4;
-                       } else {
-                               p++;
-                       }
+       /* Format fixed report descriptor */
+       memcpy(drvdata->rdesc, huion_tablet_rdesc_template,
+               drvdata->rsize);
+       for (p = drvdata->rdesc;
+            p <= drvdata->rdesc + drvdata->rsize - 4;) {
+               if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
+                   p[3] < sizeof(params)) {
+                       v = params[p[3]];
+                       put_unaligned(cpu_to_le32(v), (s32 *)p);
+                       p += 4;
+               } else {
+                       p++;
                }
        }
 
-       return 0;
+       rc = 0;
+
+cleanup:
+       kfree(buf);
+       return rc;
 }
 
 static int huion_probe(struct hid_device *hdev, const struct hid_device_id *id)
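
Editor's note: the hid-huion rework above replaces an on-stack __le16 buf[6] with a kmalloc'd buffer and funnels every failure through a single cleanup label. A plausible reason, not stated in the hunk itself, is that usb_control_msg() buffers must be DMA-capable, which stack memory is not guaranteed to be. The userspace sketch below shows only the allocate/validate/goto-cleanup shape; read_params() is a hypothetical stand-in for the control transfer.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PRM_NUM 6

/* Hypothetical stand-in for usb_control_msg(): returns bytes read or < 0. */
static int read_params(uint16_t *buf, size_t len)
{
	memset(buf, 0, len);
	return (int)len;
}

static int tablet_enable(void)
{
	size_t len = PRM_NUM * sizeof(uint16_t);
	uint16_t *buf;
	int rc;

	buf = malloc(len);		/* heap buffer instead of the stack array */
	if (!buf)
		return -1;

	rc = read_params(buf, len);
	if (rc < 0 || (size_t)rc != len) {
		rc = -1;
		goto cleanup;		/* every error path frees the buffer exactly once */
	}

	/* ... parse the parameters here ... */
	rc = 0;
cleanup:
	free(buf);
	return rc;
}

int main(void)
{
	printf("tablet_enable() = %d\n", tablet_enable());
	return 0;
}
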
index e776963..b92bf01 100644 (file)
@@ -300,7 +300,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                 *   - change the button usage range to 4-7 for the extra
                 *     buttons
                 */
-               if (*rsize >= 74 &&
+               if (*rsize >= 75 &&
                        rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
                        rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
                        rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
index a976f48..f91ff14 100644 (file)
@@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        struct usb_device_descriptor *udesc;
        __u16 bcdDevice, rev_maj, rev_min;
 
-       if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
+       if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
                        rdesc[84] == 0x8c && rdesc[85] == 0x02) {
                hid_info(hdev,
                         "fixing up Logitech keyboard report descriptor\n");
                rdesc[84] = rdesc[89] = 0x4d;
                rdesc[85] = rdesc[90] = 0x10;
        }
-       if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
+       if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 &&
                        rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
                        rdesc[49] == 0x81 && rdesc[50] == 0x06) {
                hid_info(hdev,
index cc2bd20..7835717 100644 (file)
@@ -451,13 +451,13 @@ static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *at
        drv_data = hid_get_drvdata(hid);
        if (!drv_data) {
                hid_err(hid, "Private driver data not found!\n");
-               return 0;
+               return -EINVAL;
        }
 
        entry = drv_data->device_props;
        if (!entry) {
                hid_err(hid, "Device properties not found!\n");
-               return 0;
+               return -EINVAL;
        }
 
        if (range == 0)
index 486dbde..9bf8637 100644 (file)
@@ -238,13 +238,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
                return;
        }
 
-       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
-           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
-               dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
-                       __func__, dj_report->device_index);
-               return;
-       }
-
        if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
                /* The device is already known. No need to reallocate it. */
                dbg_hid("%s: device is already known\n", __func__);
@@ -557,7 +550,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
        if (!out_buf)
                return -ENOMEM;
 
-       if (count < DJREPORT_SHORT_LENGTH - 2)
+       if (count > DJREPORT_SHORT_LENGTH - 2)
                count = DJREPORT_SHORT_LENGTH - 2;
 
        out_buf[0] = REPORT_ID_DJ_SHORT;
@@ -663,7 +656,6 @@ static int logi_dj_raw_event(struct hid_device *hdev,
        struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
        struct dj_report *dj_report = (struct dj_report *) data;
        unsigned long flags;
-       bool report_processed = false;
 
        dbg_hid("%s, size:%d\n", __func__, size);
 
@@ -691,27 +683,41 @@ static int logi_dj_raw_event(struct hid_device *hdev,
         * anything else with it.
         */
 
+       /* case 1) */
+       if (data[0] != REPORT_ID_DJ_SHORT)
+               return false;
+
+       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+               /*
+                * Device index is wrong, bail out.
+                * This driver can ignore safely the receiver notifications,
+                * so ignore those reports too.
+                */
+               if (dj_report->device_index != DJ_RECEIVER_INDEX)
+                       dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+                               __func__, dj_report->device_index);
+               return false;
+       }
+
        spin_lock_irqsave(&djrcv_dev->lock, flags);
-       if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
-               switch (dj_report->report_type) {
-               case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
-               case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
-                       logi_dj_recv_queue_notification(djrcv_dev, dj_report);
-                       break;
-               case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
-                       if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
-                           STATUS_LINKLOSS) {
-                               logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
-                       }
-                       break;
-               default:
-                       logi_dj_recv_forward_report(djrcv_dev, dj_report);
+       switch (dj_report->report_type) {
+       case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+       case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+               logi_dj_recv_queue_notification(djrcv_dev, dj_report);
+               break;
+       case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
+               if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
+                   STATUS_LINKLOSS) {
+                       logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
                }
-               report_processed = true;
+               break;
+       default:
+               logi_dj_recv_forward_report(djrcv_dev, dj_report);
        }
        spin_unlock_irqrestore(&djrcv_dev->lock, flags);
 
-       return report_processed;
+       return true;
 }
 
 static int logi_dj_probe(struct hid_device *hdev,
index 4a40003..daeb0aa 100644 (file)
@@ -27,6 +27,7 @@
 
 #define DJ_MAX_PAIRED_DEVICES                  6
 #define DJ_MAX_NUMBER_NOTIFICATIONS            8
+#define DJ_RECEIVER_INDEX                      0
 #define DJ_DEVICE_INDEX_MIN                    1
 #define DJ_DEVICE_INDEX_MAX                    6
 
index ecc2cbf..29a74c1 100644 (file)
@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
                if (size < 4 || ((size - 4) % 9) != 0)
                        return 0;
                npoints = (size - 4) / 9;
+               if (npoints > 15) {
+                       hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
+                                       size);
+                       return 0;
+               }
                msc->ntouches = 0;
                for (ii = 0; ii < npoints; ii++)
                        magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
                if (size < 6 || ((size - 6) % 8) != 0)
                        return 0;
                npoints = (size - 6) / 8;
+               if (npoints > 15) {
+                       hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
+                                       size);
+                       return 0;
+               }
                msc->ntouches = 0;
                for (ii = 0; ii < npoints; ii++)
                        magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
index 9e14c00..25daf28 100644 (file)
@@ -24,7 +24,7 @@
 static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
+       if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
                hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
                rdesc[30] = 0x0c;
        }
index 736b250..6aca4f2 100644 (file)
@@ -25,7 +25,7 @@
 static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
+       if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
                        rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
                        rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
                hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
index acbb021..020df3c 100644 (file)
@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
        if (!data)
                return 1;
 
+       if (size > 64) {
+               hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
+                               size);
+               return 0;
+       }
+
        if (report->id == REPORT_KEY_STATE) {
                if (data->input_keys)
                        ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
index 0dc2514..8389e81 100644 (file)
@@ -909,10 +909,15 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
        }
 
-       if (!test_bit(RMI_STARTED, &data->flags)) {
-               hid_hw_stop(hdev);
-               return -EIO;
-       }
+       if (!test_bit(RMI_STARTED, &data->flags))
+               /*
+                * The device maybe in the bootloader if rmi_input_configured
+                * failed to find F11 in the PDT. Print an error, but don't
+                * return an error from rmi_probe so that hidraw will be
+                * accessible from userspace. That way a userspace tool
+                * can be used to reload working firmware on the touchpad.
+                */
+               hid_err(hdev, "Device failed to be properly configured\n");
 
        return 0;
 }
index e244e44..2ac2576 100644 (file)
@@ -604,9 +604,9 @@ static int sensor_hub_probe(struct hid_device *hdev,
                ret = -EINVAL;
                goto err_stop_hw;
        }
-       sd->hid_sensor_hub_client_devs = kzalloc(dev_cnt *
-                                               sizeof(struct mfd_cell),
-                                               GFP_KERNEL);
+       sd->hid_sensor_hub_client_devs = devm_kzalloc(&hdev->dev, dev_cnt *
+                                                     sizeof(struct mfd_cell),
+                                                     GFP_KERNEL);
        if (sd->hid_sensor_hub_client_devs == NULL) {
                hid_err(hdev, "Failed to allocate memory for mfd cells\n");
                        ret = -ENOMEM;
@@ -618,11 +618,12 @@ static int sensor_hub_probe(struct hid_device *hdev,
 
                if (collection->type == HID_COLLECTION_PHYSICAL) {
 
-                       hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
+                       hsdev = devm_kzalloc(&hdev->dev, sizeof(*hsdev),
+                                            GFP_KERNEL);
                        if (!hsdev) {
                                hid_err(hdev, "cannot allocate hid_sensor_hub_device\n");
                                ret = -ENOMEM;
-                               goto err_no_mem;
+                               goto err_stop_hw;
                        }
                        hsdev->hdev = hdev;
                        hsdev->vendor_id = hdev->vendor;
@@ -631,13 +632,13 @@ static int sensor_hub_probe(struct hid_device *hdev,
                        if (last_hsdev)
                                last_hsdev->end_collection_index = i;
                        last_hsdev = hsdev;
-                       name = kasprintf(GFP_KERNEL, "HID-SENSOR-%x",
-                                       collection->usage);
+                       name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+                                             "HID-SENSOR-%x",
+                                             collection->usage);
                        if (name == NULL) {
                                hid_err(hdev, "Failed MFD device name\n");
                                        ret = -ENOMEM;
-                                       kfree(hsdev);
-                                       goto err_no_mem;
+                                       goto err_stop_hw;
                        }
                        sd->hid_sensor_hub_client_devs[
                                sd->hid_sensor_client_cnt].id =
@@ -661,16 +662,10 @@ static int sensor_hub_probe(struct hid_device *hdev,
        ret = mfd_add_devices(&hdev->dev, 0, sd->hid_sensor_hub_client_devs,
                sd->hid_sensor_client_cnt, NULL, 0, NULL);
        if (ret < 0)
-               goto err_no_mem;
+               goto err_stop_hw;
 
        return ret;
 
-err_no_mem:
-       for (i = 0; i < sd->hid_sensor_client_cnt; ++i) {
-               kfree(sd->hid_sensor_hub_client_devs[i].name);
-               kfree(sd->hid_sensor_hub_client_devs[i].platform_data);
-       }
-       kfree(sd->hid_sensor_hub_client_devs);
 err_stop_hw:
        hid_hw_stop(hdev);
 
@@ -681,7 +676,6 @@ static void sensor_hub_remove(struct hid_device *hdev)
 {
        struct sensor_hub_data *data = hid_get_drvdata(hdev);
        unsigned long flags;
-       int i;
 
        hid_dbg(hdev, " hardware removed\n");
        hid_hw_close(hdev);
@@ -691,11 +685,6 @@ static void sensor_hub_remove(struct hid_device *hdev)
                complete(&data->pending.ready);
        spin_unlock_irqrestore(&data->lock, flags);
        mfd_remove_devices(&hdev->dev);
-       for (i = 0; i < data->hid_sensor_client_cnt; ++i) {
-               kfree(data->hid_sensor_hub_client_devs[i].name);
-               kfree(data->hid_sensor_hub_client_devs[i].platform_data);
-       }
-       kfree(data->hid_sensor_hub_client_devs);
        hid_set_drvdata(hdev, NULL);
        mutex_destroy(&data->mutex);
 }
index 87fc91e..91072fa 100644 (file)
@@ -24,7 +24,7 @@
 static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
+       if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
                        rdesc[106] == 0x03) {
                hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
                rdesc[105] = rdesc[110] = 0x03;
index 3e3b680..b51a402 100644 (file)
@@ -23,17 +23,14 @@ config I2C
          This I2C support can also be built as a module.  If so, the module
          will be called i2c-core.
 
-config I2C_ACPI
-       bool "I2C ACPI support"
-       select I2C
-       depends on ACPI
+config ACPI_I2C_OPREGION
+       bool "ACPI I2C Operation region support"
+       depends on I2C=y && ACPI
        default y
        help
-         Say Y here if you want to enable ACPI I2C support. This includes support
-         for automatic enumeration of I2C slave devices and support for ACPI I2C
-         Operation Regions. Operation Regions allow firmware (BIOS) code to
-         access I2C slave devices, such as smart batteries through an I2C host
-         controller driver.
+         Say Y here if you want to enable ACPI I2C operation region support.
+         Operation Regions allow firmware (BIOS) code to access I2C slave devices,
+         such as smart batteries through an I2C host controller driver.
 
 if I2C
 
index a1f590c..e0228b2 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 i2ccore-y := i2c-core.o
-i2ccore-$(CONFIG_I2C_ACPI)     += i2c-acpi.o
+i2ccore-$(CONFIG_ACPI)         += i2c-acpi.o
 
 obj-$(CONFIG_I2C_BOARDINFO)    += i2c-boardinfo.o
 obj-$(CONFIG_I2C)              += i2ccore.o
index 2994690..10467a3 100644 (file)
 
 /* Older devices have their ID defined in <linux/pci_ids.h> */
 #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS             0x0f12
+#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS             0x2292
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS          0x1c22
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS             0x1d22
 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */
@@ -828,6 +829,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
        { 0, }
 };
 
index e8b6196..0dbc18c 100644 (file)
@@ -126,6 +126,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adap)
                dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
 }
 
+#ifdef CONFIG_ACPI_I2C_OPREGION
 static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
                u8 cmd, u8 *data, u8 data_len)
 {
@@ -360,3 +361,4 @@ void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
 
        acpi_bus_detach_private_data(handle);
 }
+#endif
index 1840531..ecb0109 100644 (file)
@@ -3149,14 +3149,16 @@ free_domains:
 
 static void cleanup_domain(struct protection_domain *domain)
 {
-       struct iommu_dev_data *dev_data, *next;
+       struct iommu_dev_data *entry;
        unsigned long flags;
 
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 
-       list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
-               __detach_device(dev_data);
-               atomic_set(&dev_data->bind, 0);
+       while (!list_empty(&domain->dev_list)) {
+               entry = list_first_entry(&domain->dev_list,
+                                        struct iommu_dev_data, list);
+               __detach_device(entry);
+               atomic_set(&entry->bind, 0);
        }
 
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
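
Editor's note: the cleanup_domain() change drops list_for_each_entry_safe() in favour of repeatedly detaching the first entry. The "safe" variant only guards against removal of the current node; if the detach callback can also unlink other entries (for example alias devices, which is an assumption about the driver and not visible in this hunk), the cached next pointer may already point at freed memory. A small userspace list, written only to demonstrate the safe drain pattern:

#include <stdio.h>
#include <stdlib.h>

struct dev { int id; int alias; struct dev *next; };
static struct dev *dev_list;

static void unlink_dev(struct dev *d)
{
	struct dev **pp;

	for (pp = &dev_list; *pp; pp = &(*pp)->next)
		if (*pp == d) {
			*pp = d->next;
			free(d);
			return;
		}
}

/* Detaching one device may also remove its alias from the list,
 * which is exactly what invalidates a cached-next iteration. */
static void detach(struct dev *d)
{
	int alias = d->alias;
	struct dev *it;

	unlink_dev(d);
	if (alias >= 0)
		for (it = dev_list; it; it = it->next)
			if (it->id == alias) {
				unlink_dev(it);
				break;
			}
}

static void add(int id, int alias)
{
	struct dev *d = malloc(sizeof(*d));
	d->id = id; d->alias = alias; d->next = dev_list; dev_list = d;
}

int main(void)
{
	add(3, -1); add(2, -1); add(1, 2);	/* device 1 has device 2 as alias */

	/* Safe drain: always take the current head, never a cached next. */
	while (dev_list)
		detach(dev_list);

	printf("list drained: %s\n", dev_list ? "no" : "yes");
	return 0;
}
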
index d1f5caa..5619f26 100644 (file)
@@ -3869,6 +3869,14 @@ static int device_notifier(struct notifier_block *nb,
            action != BUS_NOTIFY_DEL_DEVICE)
                return 0;
 
+       /*
+        * If the device is still attached to a device driver we can't
+        * tear down the domain yet as DMA mappings may still be in use.
+        * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
+        */
+       if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
+               return 0;
+
        domain = find_domain(dev);
        if (!domain)
                return 0;
index 1698360..ac4adb3 100644 (file)
@@ -995,7 +995,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
        size_t orig_size = size;
        int ret = 0;
 
-       if (unlikely(domain->ops->unmap == NULL ||
+       if (unlikely(domain->ops->map == NULL ||
                     domain->ops->pgsize_bitmap == 0UL))
                return -ENODEV;
 
index 58368f7..2498c34 100644 (file)
@@ -1,6 +1,6 @@
 /* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */
 
-#ifndef __DIVA_XDI_UM_CFG_MESSSGE_H__
+#ifndef __DIVA_XDI_UM_CFG_MESSAGE_H__
 #define __DIVA_XDI_UM_CFG_MESSAGE_H__
 
 /*
index 2785007..cd15e08 100644 (file)
@@ -1688,6 +1688,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        unsigned int key_size, opt_params;
        unsigned long long tmpll;
        int ret;
+       size_t iv_size_padding;
        struct dm_arg_set as;
        const char *opt_string;
        char dummy;
@@ -1724,20 +1725,32 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        cc->dmreq_start = sizeof(struct ablkcipher_request);
        cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
-       cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-       cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
-                          ~(crypto_tfm_ctx_alignment() - 1);
+       cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
+
+       if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+               /* Allocate the padding exactly */
+               iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
+                               & crypto_ablkcipher_alignmask(any_tfm(cc));
+       } else {
+               /*
+                * If the cipher requires greater alignment than kmalloc
+                * alignment, we don't know the exact position of the
+                * initialization vector. We must assume worst case.
+                */
+               iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+       }
 
        cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
-                       sizeof(struct dm_crypt_request) + cc->iv_size);
+                       sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
        if (!cc->req_pool) {
                ti->error = "Cannot allocate crypt request mempool";
                goto bad;
        }
 
        cc->per_bio_data_size = ti->per_bio_data_size =
-                               sizeof(struct dm_crypt_io) + cc->dmreq_start +
-                               sizeof(struct dm_crypt_request) + cc->iv_size;
+               ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
+                     sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
+                     ARCH_KMALLOC_MINALIGN);
 
        cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
        if (!cc->page_pool) {
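
Editor's note: the dm-crypt hunk computes the IV padding as -(offset) & alignmask, which yields the number of bytes needed to round offset up to the next multiple of alignmask + 1 (alignment masks are always one less than a power of two). A quick arithmetic check of that identity, with made-up offsets and masks:

#include <stdio.h>

/* Bytes needed to round 'offset' up to a multiple of (mask + 1),
 * where mask + 1 is a power of two — same identity as in the hunk:
 *   pad = -offset & mask
 */
static size_t pad_to_align(size_t offset, size_t mask)
{
	return (size_t)(-offset) & mask;
}

int main(void)
{
	size_t masks[] = { 3, 7, 15 };		/* 4-, 8-, 16-byte alignment */
	size_t offsets[] = { 0, 1, 5, 12, 17 };

	for (unsigned m = 0; m < 3; m++)
		for (unsigned o = 0; o < 5; o++) {
			size_t pad = pad_to_align(offsets[o], masks[m]);
			printf("offset %2zu, align %2zu -> pad %zu (total %zu)\n",
			       offsets[o], masks[m] + 1, pad, offsets[o] + pad);
		}
	return 0;
}
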
index b08c188..6703751 100644 (file)
@@ -2953,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                 */
                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
                        end_reshape(conf);
+                       close_sync(conf);
                        return 0;
                }
 
@@ -3081,6 +3082,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        }
 
                        r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                       r10_bio->state = 0;
                        raise_barrier(conf, rb2 != NULL);
                        atomic_set(&r10_bio->remaining, 0);
 
@@ -3269,6 +3271,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                if (sync_blocks < max_sync)
                        max_sync = sync_blocks;
                r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+               r10_bio->state = 0;
 
                r10_bio->mddev = mddev;
                atomic_set(&r10_bio->remaining, 0);
@@ -4384,6 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 read_more:
        /* Now schedule reads for blocks from sector_nr to last */
        r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+       r10_bio->state = 0;
        raise_barrier(conf, sectors_done != 0);
        atomic_set(&r10_bio->remaining, 0);
        r10_bio->mddev = mddev;
@@ -4398,6 +4402,7 @@ read_more:
                 * on all the target devices.
                 */
                // FIXME
+               mempool_free(r10_bio, conf->r10buf_pool);
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                return sectors_done;
        }
@@ -4410,7 +4415,7 @@ read_more:
        read_bio->bi_private = r10_bio;
        read_bio->bi_end_io = end_sync_read;
        read_bio->bi_rw = READ;
-       read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+       read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
        read_bio->bi_flags |= 1 << BIO_UPTODATE;
        read_bio->bi_vcnt = 0;
        read_bio->bi_iter.bi_size = 0;
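
Editor's note: three of the raid10 hunks add r10_bio->state = 0; immediately after mempool_alloc(). Mempools hand back recycled objects, so any field the caller does not reinitialize keeps whatever the previous user left behind. The sketch below imitates that with a one-slot freelist; the struct and its state field are illustrative stand-ins for the flag word that needed clearing.

#include <stdio.h>
#include <stdlib.h>

struct r10bio_like { unsigned long state; int remaining; };

/* One-slot "pool": objects come back with their old contents intact,
 * just as a mempool/slab allocation can. */
static struct r10bio_like *slot;

static struct r10bio_like *pool_alloc(void)
{
	struct r10bio_like *b = slot ? slot : malloc(sizeof(*b));
	slot = NULL;
	return b;
}

static void pool_free(struct r10bio_like *b)
{
	slot = b;		/* kept for reuse, state is NOT cleared */
}

int main(void)
{
	struct r10bio_like *b = pool_alloc();

	b->state = 0;
	b->state |= 1UL << 3;	/* first user sets a flag */
	pool_free(b);

	b = pool_alloc();	/* recycled object */
	printf("before reset: state = 0x%lx (stale)\n", b->state);
	b->state = 0;		/* the fix: reset right after allocation */
	printf("after reset:  state = 0x%lx\n", b->state);

	free(b);
	return 0;
}
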
index 6234b2e..183588b 100644 (file)
@@ -2922,7 +2922,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
              (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
              !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
             (sh->raid_conf->level == 6 && s->failed && s->to_write &&
-             s->to_write < sh->raid_conf->raid_disks - 2 &&
+             s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 &&
              (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
                /* we would like to get this block, possibly by computing it,
                 * otherwise read it if the backing disk is insync
@@ -3817,6 +3817,8 @@ static void handle_stripe(struct stripe_head *sh)
                                set_bit(R5_Wantwrite, &dev->flags);
                                if (prexor)
                                        continue;
+                               if (s.failed > 1)
+                                       continue;
                                if (!test_bit(R5_Insync, &dev->flags) ||
                                    ((i == sh->pd_idx || i == sh->qd_idx)  &&
                                     s.failed == 0))
index ce48aa7..bde2fc0 100644 (file)
@@ -1754,7 +1754,7 @@ static int ab8500_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
        /* Pass to debugfs */
        ab8500_debug_resources[0].start = ab8500->irq;
        ab8500_debug_resources[0].end = ab8500->irq;
index b44f020..6bdb78c 100644 (file)
@@ -404,7 +404,7 @@ static int htcpld_register_chip_i2c(
        }
 
        i2c_set_clientdata(client, chip);
-       snprintf(client->name, I2C_NAME_SIZE, "Chip_0x%d", client->addr);
+       snprintf(client->name, I2C_NAME_SIZE, "Chip_0x%x", client->addr);
        chip->client = client;
 
        /* Reset the chip */
index 33a9234..83dab2f 100644 (file)
@@ -647,7 +647,7 @@ static int usbhs_omap_probe(struct platform_device *pdev)
                default:
                        omap->nports = OMAP3_HS_USB_PORTS;
                        dev_dbg(dev,
-                        "USB HOST Rev:0x%d not recognized, assuming %d ports\n",
+                        "USB HOST Rev:0x%x not recognized, assuming %d ports\n",
                         omap->usbhs_rev, omap->nports);
                        break;
                }
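
Both of the preceding hunks (htcpld and usbhs_omap) fix the same format-string slip: a value printed with a literal 0x prefix but a %d conversion, so the output looked hexadecimal while its digits were decimal. A tiny standalone illustration with a made-up revision value:

    #include <stdio.h>

    int main(void)
    {
            int rev = 0x1a;                         /* made-up register value */

            printf("misleading: Rev:0x%d\n", rev);  /* prints Rev:0x26 */
            printf("correct:    Rev:0x%x\n", rev);  /* prints Rev:0x1a */
            return 0;
    }
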
index 3bc969a..4d3ff37 100644 (file)
@@ -724,24 +724,24 @@ static struct twl4030_script *omap3_idle_scripts[] = {
  * above.
  */
 static struct twl4030_resconfig omap3_idle_rconfig[] = {
-       TWL_REMAP_SLEEP(RES_VAUX1, DEV_GRP_NULL, 0, 0),
-       TWL_REMAP_SLEEP(RES_VAUX2, DEV_GRP_NULL, 0, 0),
-       TWL_REMAP_SLEEP(RES_VAUX3, DEV_GRP_NULL, 0, 0),
-       TWL_REMAP_SLEEP(RES_VAUX4, DEV_GRP_NULL, 0, 0),
-       TWL_REMAP_SLEEP(RES_VMMC1, DEV_GRP_NULL, 0, 0),
-       TWL_REMAP_SLEEP(RES_VMMC2, DEV_GRP_NULL, 0, 0),
+       TWL_REMAP_SLEEP(RES_VAUX1, TWL4030_RESCONFIG_UNDEF, 0, 0),
+       TWL_REMAP_SLEEP(RES_VAUX2, TWL4030_RESCONFIG_UNDEF, 0, 0),
+       TWL_REMAP_SLEEP(RES_VAUX3, TWL4030_RESCONFIG_UNDEF, 0, 0),
+       TWL_REMAP_SLEEP(RES_VAUX4, TWL4030_RESCONFIG_UNDEF, 0, 0),
+       TWL_REMAP_SLEEP(RES_VMMC1, TWL4030_RESCONFIG_UNDEF, 0, 0),
+       TWL_REMAP_SLEEP(RES_VMMC2, TWL4030_RESCONFIG_UNDEF, 0, 0),
        TWL_REMAP_OFF(RES_VPLL1, DEV_GRP_P1, 3, 1),
        TWL_REMAP_SLEEP(RES_VPLL2, DEV_GRP_P1, 0, 0),
-       TWL_REMAP_SLEEP(RES_VSIM, DEV_GRP_NULL, 0, 0),
-       TWL_REMAP_SLEEP(RES_VDAC, DEV_GRP_NULL, 0, 0),
+       TWL_REMAP_SLEEP(RES_VSIM, TWL4030_RESCONFIG_UNDEF, 0, 0),
+       TWL_REMAP_SLEEP(RES_VDAC, TWL4030_RESCONFIG_UNDEF, 0, 0),
        TWL_REMAP_SLEEP(RES_VINTANA1, TWL_DEV_GRP_P123, 1, 2),
        TWL_REMAP_SLEEP(RES_VINTANA2, TWL_DEV_GRP_P123, 0, 2),
        TWL_REMAP_SLEEP(RES_VINTDIG, TWL_DEV_GRP_P123, 1, 2),
        TWL_REMAP_SLEEP(RES_VIO, TWL_DEV_GRP_P123, 2, 2),
        TWL_REMAP_OFF(RES_VDD1, DEV_GRP_P1, 4, 1),
        TWL_REMAP_OFF(RES_VDD2, DEV_GRP_P1, 3, 1),
-       TWL_REMAP_SLEEP(RES_VUSB_1V5, DEV_GRP_NULL, 0, 0),
-       TWL_REMAP_SLEEP(RES_VUSB_1V8, DEV_GRP_NULL, 0, 0),
+       TWL_REMAP_SLEEP(RES_VUSB_1V5, TWL4030_RESCONFIG_UNDEF, 0, 0),
+       TWL_REMAP_SLEEP(RES_VUSB_1V8, TWL4030_RESCONFIG_UNDEF, 0, 0),
        TWL_REMAP_SLEEP(RES_VUSB_3V1, TWL_DEV_GRP_P123, 0, 0),
        /* Resource #20 USB charge pump skipped */
        TWL_REMAP_SLEEP(RES_REGEN, TWL_DEV_GRP_P123, 2, 1),
index 324e1de..2da05c0 100644 (file)
@@ -601,6 +601,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
                cl->timer_count = MEI_CONNECT_TIMEOUT;
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
+               cl->state = MEI_FILE_INITIALIZING;
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }
 
index 3095fc5..5ccc23b 100644 (file)
@@ -342,9 +342,10 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
        ndev = (struct mei_nfc_dev *) cldev->priv_data;
        dev = ndev->cl->dev;
 
+       err = -ENOMEM;
        mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
        if (!mei_buf)
-               return -ENOMEM;
+               goto out;
 
        hdr = (struct mei_nfc_hci_hdr *) mei_buf;
        hdr->cmd = MEI_NFC_CMD_HCI_SEND;
@@ -354,12 +355,9 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
        hdr->data_size = length;
 
        memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
-
        err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
        if (err < 0)
-               return err;
-
-       kfree(mei_buf);
+               goto out;
 
        if (!wait_event_interruptible_timeout(ndev->send_wq,
                                ndev->recv_req_id == ndev->req_id, HZ)) {
@@ -368,7 +366,8 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
        } else {
                ndev->req_id++;
        }
-
+out:
+       kfree(mei_buf);
        return err;
 }
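
The mei_nfc_send() change above replaces two early returns, one of which leaked mei_buf, with a single exit label that always frees the buffer. A minimal userspace sketch of the same goto-based cleanup pattern; the function names here are invented for illustration, not taken from the driver:

    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    /* Stand-in for the real send path; always "succeeds" here. */
    static int do_send(const char *buf, size_t len)
    {
            (void)buf;
            (void)len;
            return 0;
    }

    static int send_with_header(const char *payload, size_t len)
    {
            int err = -ENOMEM;
            char *buf = malloc(len + 16);

            if (!buf)
                    goto out;       /* nothing allocated yet, err stays -ENOMEM */

            memset(buf, 0, 16);             /* fake header */
            memcpy(buf + 16, payload, len);

            err = do_send(buf, len + 16);
            /* fall through: success and failure both release the buffer */
    out:
            free(buf);              /* free(NULL) is a no-op, so this is safe */
            return err;
    }

    int main(void)
    {
            return send_with_header("hello", 5) ? 1 : 0;
    }
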
 
index 5dede6e..109cb44 100644 (file)
@@ -280,7 +280,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
 
                priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
                                                     resource_size(res));
-               if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
+               if (!priv->raminit_ctrlreg || priv->instance < 0)
                        dev_info(&pdev->dev, "control memory is not used for raminit\n");
                else
                        priv->raminit = c_can_hw_raminit_ti;
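
The c_can fix above matters because devm_ioremap() reports failure by returning NULL, not an ERR_PTR()-encoded pointer, so the old IS_ERR() test could never trigger. A standalone sketch using the same pointer-error encoding the kernel uses, reimplemented here so the example compiles in userspace:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Userspace copies of the kernel's ERR_PTR helpers, for illustration only. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *null_style   = NULL;          /* what devm_ioremap() returns on failure */
            void *errptr_style = ERR_PTR(-12);  /* what ERR_PTR-returning APIs give back */

            printf("IS_ERR(NULL)         = %d\n", IS_ERR(null_style));   /* 0: old check missed this */
            printf("NULL test            = %d\n", null_style == NULL);   /* 1: new check catches it */
            printf("IS_ERR(ERR_PTR(-12)) = %d\n", IS_ERR(errptr_style)); /* 1 */
            return 0;
    }
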
index f425ec2..944aa5d 100644 (file)
@@ -549,6 +549,13 @@ static void do_state(struct net_device *dev,
 
        /* process state changes depending on the new state */
        switch (new_state) {
+       case CAN_STATE_ERROR_WARNING:
+               netdev_dbg(dev, "Error Warning\n");
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[1] = (bec.txerr > bec.rxerr) ?
+                       CAN_ERR_CRTL_TX_WARNING :
+                       CAN_ERR_CRTL_RX_WARNING;
+               break;
        case CAN_STATE_ERROR_ACTIVE:
                netdev_dbg(dev, "Error Active\n");
                cf->can_id |= CAN_ERR_PROT;
@@ -852,6 +859,8 @@ static int flexcan_chip_start(struct net_device *dev)
        if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
            priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
                reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
+       else
+               reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK;
 
        /* save for later use */
        priv->reg_ctrl_default = reg_ctrl;
index d169215..b27ac60 100644 (file)
@@ -172,6 +172,35 @@ static void set_normal_mode(struct net_device *dev)
        netdev_err(dev, "setting SJA1000 into normal mode failed!\n");
 }
 
+/*
+ * initialize SJA1000 chip:
+ *   - reset chip
+ *   - set output mode
+ *   - set baudrate
+ *   - enable interrupts
+ *   - start operating mode
+ */
+static void chipset_init(struct net_device *dev)
+{
+       struct sja1000_priv *priv = netdev_priv(dev);
+
+       /* set clock divider and output control register */
+       priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
+
+       /* set acceptance filter (accept all) */
+       priv->write_reg(priv, SJA1000_ACCC0, 0x00);
+       priv->write_reg(priv, SJA1000_ACCC1, 0x00);
+       priv->write_reg(priv, SJA1000_ACCC2, 0x00);
+       priv->write_reg(priv, SJA1000_ACCC3, 0x00);
+
+       priv->write_reg(priv, SJA1000_ACCM0, 0xFF);
+       priv->write_reg(priv, SJA1000_ACCM1, 0xFF);
+       priv->write_reg(priv, SJA1000_ACCM2, 0xFF);
+       priv->write_reg(priv, SJA1000_ACCM3, 0xFF);
+
+       priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL);
+}
+
 static void sja1000_start(struct net_device *dev)
 {
        struct sja1000_priv *priv = netdev_priv(dev);
@@ -180,6 +209,10 @@ static void sja1000_start(struct net_device *dev)
        if (priv->can.state != CAN_STATE_STOPPED)
                set_reset_mode(dev);
 
+       /* Initialize chip if uninitialized at this stage */
+       if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
+               chipset_init(dev);
+
        /* Clear error counters and error code capture */
        priv->write_reg(priv, SJA1000_TXERR, 0x0);
        priv->write_reg(priv, SJA1000_RXERR, 0x0);
@@ -236,35 +269,6 @@ static int sja1000_get_berr_counter(const struct net_device *dev,
        return 0;
 }
 
-/*
- * initialize SJA1000 chip:
- *   - reset chip
- *   - set output mode
- *   - set baudrate
- *   - enable interrupts
- *   - start operating mode
- */
-static void chipset_init(struct net_device *dev)
-{
-       struct sja1000_priv *priv = netdev_priv(dev);
-
-       /* set clock divider and output control register */
-       priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
-
-       /* set acceptance filter (accept all) */
-       priv->write_reg(priv, SJA1000_ACCC0, 0x00);
-       priv->write_reg(priv, SJA1000_ACCC1, 0x00);
-       priv->write_reg(priv, SJA1000_ACCC2, 0x00);
-       priv->write_reg(priv, SJA1000_ACCC3, 0x00);
-
-       priv->write_reg(priv, SJA1000_ACCM0, 0xFF);
-       priv->write_reg(priv, SJA1000_ACCM1, 0xFF);
-       priv->write_reg(priv, SJA1000_ACCM2, 0xFF);
-       priv->write_reg(priv, SJA1000_ACCM3, 0xFF);
-
-       priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL);
-}
-
 /*
  * transmit a CAN message
  * message layout in the sk_buff should be like this:
index e1a8f4e..e4222af 100644 (file)
@@ -563,15 +563,21 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
        struct xgene_enet_desc_ring *ring;
 
        ring = pdata->tx_ring;
-       if (ring && ring->cp_ring && ring->cp_ring->cp_skb)
-               devm_kfree(dev, ring->cp_ring->cp_skb);
-       xgene_enet_free_desc_ring(ring);
+       if (ring) {
+               if (ring->cp_ring && ring->cp_ring->cp_skb)
+                       devm_kfree(dev, ring->cp_ring->cp_skb);
+               xgene_enet_free_desc_ring(ring);
+       }
 
        ring = pdata->rx_ring;
-       if (ring && ring->buf_pool && ring->buf_pool->rx_skb)
-               devm_kfree(dev, ring->buf_pool->rx_skb);
-       xgene_enet_free_desc_ring(ring->buf_pool);
-       xgene_enet_free_desc_ring(ring);
+       if (ring) {
+               if (ring->buf_pool) {
+                       if (ring->buf_pool->rx_skb)
+                               devm_kfree(dev, ring->buf_pool->rx_skb);
+                       xgene_enet_free_desc_ring(ring->buf_pool);
+               }
+               xgene_enet_free_desc_ring(ring);
+       }
 }
 
 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
index 4e6c82e..4ccc806 100644 (file)
@@ -483,11 +483,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 #ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
-#ifdef _ASM_GENERIC_INT_L64_H
-       DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
-#endif
           fp->tpa_queue_used);
 #endif
 }
index c13364b..900cab4 100644 (file)
@@ -10052,6 +10052,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 }
 
 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
+                                       0x1848 + ((f) << 4))
 #define BNX2X_PREV_UNDI_RCQ(val)       ((val) & 0xffff)
 #define BNX2X_PREV_UNDI_BD(val)                ((val) >> 16 & 0xffff)
 #define BNX2X_PREV_UNDI_PROD(rcq, bd)  ((bd) << 16 | (rcq))
@@ -10059,8 +10061,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR      (0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR      (0x08)
 #define BCM_5710_UNDI_FW_MF_VERS       (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 
 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
 {
@@ -10079,72 +10079,25 @@ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
        return false;
 }
 
-static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
-{
-       u8 major, minor, version;
-       u32 fw;
-
-       /* Must check that FW is loaded */
-       if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
-            MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
-               BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
-               return false;
-       }
-
-       /* Read Currently loaded FW version */
-       fw = REG_RD(bp, XSEM_REG_PRAM);
-       major = fw & 0xff;
-       minor = (fw >> 0x8) & 0xff;
-       version = (fw >> 0x10) & 0xff;
-       BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
-                      fw, major, minor, version);
-
-       if (major > BCM_5710_UNDI_FW_MF_MAJOR)
-               return true;
-
-       if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
-           (minor > BCM_5710_UNDI_FW_MF_MINOR))
-               return true;
-
-       if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
-           (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
-           (version >= BCM_5710_UNDI_FW_MF_VERS))
-               return true;
-
-       return false;
-}
-
-static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
-{
-       int i;
-
-       /* Due to legacy (FW) code, the first function on each engine has a
-        * different offset macro from the rest of the functions.
-        * Setting this for all 8 functions is harmless regardless of whether
-        * this is actually a multi-function device.
-        */
-       for (i = 0; i < 2; i++)
-               REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
-
-       for (i = 2; i < 8; i++)
-               REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
-
-       BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
-}
-
-static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
+static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
 {
        u16 rcq, bd;
-       u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
+       u32 addr, tmp_reg;
 
+       if (BP_FUNC(bp) < 2)
+               addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
+       else
+               addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
+
+       tmp_reg = REG_RD(bp, addr);
        rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
        bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
 
        tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
-       REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
+       REG_WR(bp, addr, tmp_reg);
 
-       BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
-                      port, bd, rcq);
+       BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+                      BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
 }
 
 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
@@ -10383,7 +10336,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* Reset should be performed after BRB is emptied */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
                u32 timer_count = 1000;
-               bool need_write = true;
 
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10420,20 +10372,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                        else
                                timer_count--;
 
-                       /* New UNDI FW supports MF and contains better
-                        * cleaning methods - might be redundant but harmless.
-                        */
-                       if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-                               if (need_write) {
-                                       bnx2x_prev_unload_undi_mf(bp);
-                                       need_write = false;
-                               }
-                       } else if (prev_undi) {
-                               /* If UNDI resides in memory,
-                                * manually increment it
-                                */
-                               bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-                       }
+                       /* If UNDI resides in memory, manually increment it */
+                       if (prev_undi)
+                               bnx2x_prev_unload_undi_inc(bp, 1);
+
                        udelay(10);
                }
 
index d572821..c067b78 100644 (file)
@@ -652,6 +652,7 @@ struct adapter {
        struct tid_info tids;
        void **tid_release_head;
        spinlock_t tid_release_lock;
+       struct workqueue_struct *workq;
        struct work_struct tid_release_task;
        struct work_struct db_full_task;
        struct work_struct db_drop_task;
index 1afee70..18fb9c6 100644 (file)
@@ -643,8 +643,6 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
        return ret;
 }
 
-static struct workqueue_struct *workq;
-
 /**
  *     link_start - enable a port
  *     @dev: the port to enable
@@ -3340,7 +3338,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
        adap->tid_release_head = (void **)((uintptr_t)p | chan);
        if (!adap->tid_release_task_busy) {
                adap->tid_release_task_busy = true;
-               queue_work(workq, &adap->tid_release_task);
+               queue_work(adap->workq, &adap->tid_release_task);
        }
        spin_unlock_bh(&adap->tid_release_lock);
 }
@@ -4140,7 +4138,7 @@ void t4_db_full(struct adapter *adap)
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
                t4_set_reg_field(adap, SGE_INT_ENABLE3,
                                 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
-               queue_work(workq, &adap->db_full_task);
+               queue_work(adap->workq, &adap->db_full_task);
        }
 }
 
@@ -4150,7 +4148,7 @@ void t4_db_dropped(struct adapter *adap)
                disable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
        }
-       queue_work(workq, &adap->db_drop_task);
+       queue_work(adap->workq, &adap->db_drop_task);
 }
 
 static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -6517,6 +6515,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_disable_device;
        }
 
+       adapter->workq = create_singlethread_workqueue("cxgb4");
+       if (!adapter->workq) {
+               err = -ENOMEM;
+               goto out_free_adapter;
+       }
+
        /* PCI device has been enabled */
        adapter->flags |= DEV_ENABLED;
 
@@ -6715,6 +6719,9 @@ sriov:
  out_unmap_bar0:
        iounmap(adapter->regs);
  out_free_adapter:
+       if (adapter->workq)
+               destroy_workqueue(adapter->workq);
+
        kfree(adapter);
  out_disable_device:
        pci_disable_pcie_error_reporting(pdev);
@@ -6736,6 +6743,11 @@ static void remove_one(struct pci_dev *pdev)
        if (adapter) {
                int i;
 
+               /* Tear down per-adapter Work Queue first since it can contain
+                * references to our adapter data structure.
+                */
+               destroy_workqueue(adapter->workq);
+
                if (is_offload(adapter))
                        detach_ulds(adapter);
 
@@ -6788,20 +6800,14 @@ static int __init cxgb4_init_module(void)
 {
        int ret;
 
-       workq = create_singlethread_workqueue("cxgb4");
-       if (!workq)
-               return -ENOMEM;
-
        /* Debugfs support is optional, just warn if this fails */
        cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
        if (!cxgb4_debugfs_root)
                pr_warn("could not create debugfs entry, continuing\n");
 
        ret = pci_register_driver(&cxgb4_driver);
-       if (ret < 0) {
+       if (ret < 0)
                debugfs_remove(cxgb4_debugfs_root);
-               destroy_workqueue(workq);
-       }
 
        register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
 
@@ -6813,8 +6819,6 @@ static void __exit cxgb4_cleanup_module(void)
        unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
        pci_unregister_driver(&cxgb4_driver);
        debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
-       flush_workqueue(workq);
-       destroy_workqueue(workq);
 }
 
 module_init(cxgb4_init_module);
index b0bba32..d22d728 100644 (file)
@@ -2303,7 +2303,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                            FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
        c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
                                 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
-       c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
+       c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE |
+                          FW_EQ_ETH_CMD_VIID(pi->viid));
        c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
                                   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
                                   FW_EQ_ETH_CMD_FETCHRO(1) |
index 0549170..5f2729e 100644 (file)
@@ -1227,6 +1227,7 @@ struct fw_eq_eth_cmd {
 #define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
 #define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
 
+#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30)
 #define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
 
 struct fw_eq_ctrl_cmd {
index bdfa80c..a5fb949 100644 (file)
@@ -2250,7 +2250,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
        cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
                                         FW_EQ_ETH_CMD_EQSTART |
                                         FW_LEN16(cmd));
-       cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
+       cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE |
+                                  FW_EQ_ETH_CMD_VIID(pi->viid));
        cmd.fetchszm_to_iqid =
                cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
                            FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
index 9f7fa64..ee41d98 100644 (file)
@@ -275,6 +275,9 @@ struct fec_enet_private {
        struct clk *clk_enet_out;
        struct clk *clk_ptp;
 
+       bool ptp_clk_on;
+       struct mutex ptp_clk_mutex;
+
        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
        unsigned char *tx_bounce[TX_RING_SIZE];
        struct  sk_buff *tx_skbuff[TX_RING_SIZE];
@@ -335,7 +338,7 @@ struct fec_enet_private {
        u32 cycle_speed;
        int hwts_rx_en;
        int hwts_tx_en;
-       struct timer_list time_keep;
+       struct delayed_work time_keep;
        struct regulator *reg_phy;
 };
 
index 4f87dff..89355a7 100644 (file)
@@ -1611,17 +1611,27 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
                                goto failed_clk_enet_out;
                }
                if (fep->clk_ptp) {
+                       mutex_lock(&fep->ptp_clk_mutex);
                        ret = clk_prepare_enable(fep->clk_ptp);
-                       if (ret)
+                       if (ret) {
+                               mutex_unlock(&fep->ptp_clk_mutex);
                                goto failed_clk_ptp;
+                       } else {
+                               fep->ptp_clk_on = true;
+                       }
+                       mutex_unlock(&fep->ptp_clk_mutex);
                }
        } else {
                clk_disable_unprepare(fep->clk_ahb);
                clk_disable_unprepare(fep->clk_ipg);
                if (fep->clk_enet_out)
                        clk_disable_unprepare(fep->clk_enet_out);
-               if (fep->clk_ptp)
+               if (fep->clk_ptp) {
+                       mutex_lock(&fep->ptp_clk_mutex);
                        clk_disable_unprepare(fep->clk_ptp);
+                       fep->ptp_clk_on = false;
+                       mutex_unlock(&fep->ptp_clk_mutex);
+               }
        }
 
        return 0;
@@ -2625,6 +2635,8 @@ fec_probe(struct platform_device *pdev)
        if (IS_ERR(fep->clk_enet_out))
                fep->clk_enet_out = NULL;
 
+       fep->ptp_clk_on = false;
+       mutex_init(&fep->ptp_clk_mutex);
        fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
        fep->bufdesc_ex =
                pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
@@ -2715,10 +2727,10 @@ fec_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       cancel_delayed_work_sync(&fep->time_keep);
        cancel_work_sync(&fep->tx_timeout_work);
        unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
-       del_timer_sync(&fep->time_keep);
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
        if (fep->ptp_clock)
index 82386b2..cca3617 100644 (file)
@@ -245,12 +245,20 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
        u64 ns;
        unsigned long flags;
 
+       mutex_lock(&fep->ptp_clk_mutex);
+       /* Check the ptp clock */
+       if (!fep->ptp_clk_on) {
+               mutex_unlock(&fep->ptp_clk_mutex);
+               return -EINVAL;
+       }
+
        ns = ts->tv_sec * 1000000000ULL;
        ns += ts->tv_nsec;
 
        spin_lock_irqsave(&fep->tmreg_lock, flags);
        timecounter_init(&fep->tc, &fep->cc, ns);
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       mutex_unlock(&fep->ptp_clk_mutex);
        return 0;
 }
 
@@ -338,17 +346,22 @@ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
  * fec_time_keep - call timecounter_read every second to avoid timer overrun
  *                 because ENET just support 32bit counter, will timeout in 4s
  */
-static void fec_time_keep(unsigned long _data)
+static void fec_time_keep(struct work_struct *work)
 {
-       struct fec_enet_private *fep = (struct fec_enet_private *)_data;
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
        u64 ns;
        unsigned long flags;
 
-       spin_lock_irqsave(&fep->tmreg_lock, flags);
-       ns = timecounter_read(&fep->tc);
-       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       mutex_lock(&fep->ptp_clk_mutex);
+       if (fep->ptp_clk_on) {
+               spin_lock_irqsave(&fep->tmreg_lock, flags);
+               ns = timecounter_read(&fep->tc);
+               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       }
+       mutex_unlock(&fep->ptp_clk_mutex);
 
-       mod_timer(&fep->time_keep, jiffies + HZ);
+       schedule_delayed_work(&fep->time_keep, HZ);
 }
 
 /**
@@ -386,15 +399,13 @@ void fec_ptp_init(struct platform_device *pdev)
 
        fec_ptp_start_cyclecounter(ndev);
 
-       init_timer(&fep->time_keep);
-       fep->time_keep.data = (unsigned long)fep;
-       fep->time_keep.function = fec_time_keep;
-       fep->time_keep.expires = jiffies + HZ;
-       add_timer(&fep->time_keep);
+       INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
 
        fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
        if (IS_ERR(fep->ptp_clock)) {
                fep->ptp_clock = NULL;
                pr_err("ptp_clock_register failed\n");
        }
+
+       schedule_delayed_work(&fep->time_keep, HZ);
 }
index c912756..21978cc 100644 (file)
@@ -292,6 +292,18 @@ failure:
        atomic_add(buffers_added, &(pool->available));
 }
 
+/*
+ * The final 8 bytes of the buffer list is a counter of frames dropped
+ * because there was not a buffer in the buffer list capable of holding
+ * the frame.
+ */
+static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
+{
+       __be64 *p = adapter->buffer_list_addr + 4096 - 8;
+
+       adapter->rx_no_buffer = be64_to_cpup(p);
+}
+
 /* replenish routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
@@ -307,8 +319,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
                        ibmveth_replenish_buffer_pool(adapter, pool);
        }
 
-       adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
-                                               4096 - 8);
+       ibmveth_update_rx_no_buffer(adapter);
 }
 
 /* empty and free ana buffer pool - also used to do cleanup in error paths */
@@ -698,8 +709,7 @@ static int ibmveth_close(struct net_device *netdev)
 
        free_irq(netdev->irq, netdev);
 
-       adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
-                                               4096 - 8);
+       ibmveth_update_rx_no_buffer(adapter);
 
        ibmveth_cleanup(adapter);
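
The new ibmveth_update_rx_no_buffer() helper above reads the drop counter that firmware keeps big-endian in the last 8 bytes of the 4 KiB buffer list and converts it to host order, rather than dereferencing it as a raw u64. A userspace sketch of the same read, with a fabricated buffer standing in for buffer_list_addr and glibc's endian.h helpers in place of the kernel's:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <endian.h>

    #define LIST_SIZE 4096

    int main(void)
    {
            unsigned char buffer_list[LIST_SIZE] = { 0 };

            /* Pretend the hypervisor stored the value 7, big-endian, at the end. */
            uint64_t on_wire = htobe64(7);
            memcpy(buffer_list + LIST_SIZE - 8, &on_wire, sizeof(on_wire));

            /* Host-order read of the big-endian counter, as the helper does. */
            uint64_t raw;
            memcpy(&raw, buffer_list + LIST_SIZE - 8, sizeof(raw));
            uint64_t rx_no_buffer = be64toh(raw);

            printf("rx_no_buffer = %llu\n", (unsigned long long)rx_no_buffer);
            return 0;
    }
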
 
index bb7fe98..537b621 100644 (file)
@@ -247,7 +247,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
        u32 prttsyn_stat;
        int n;
 
-       if (pf->flags & I40E_FLAG_PTP)
+       if (!(pf->flags & I40E_FLAG_PTP))
                return;
 
        prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
index 8967255..3ac6a0d 100644 (file)
@@ -1003,11 +1003,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
                                  u32 v_retval, u8 *msg, u16 msglen)
 {
-       struct i40e_pf *pf = vf->pf;
-       struct i40e_hw *hw = &pf->hw;
-       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+       struct i40e_pf *pf;
+       struct i40e_hw *hw;
+       int abs_vf_id;
        i40e_status aq_ret;
 
+       /* validate the request */
+       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+               return -EINVAL;
+
+       pf = vf->pf;
+       hw = &pf->hw;
+       abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
        /* single place to detect unsuccessful return values */
        if (v_retval) {
                vf->num_invalid_msgs++;
@@ -1928,17 +1936,20 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 {
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf = pf->vf;
-       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        int i;
 
-       for (i = 0; i < pf->num_alloc_vfs; i++) {
+       for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+               /* Not all vfs are enabled so skip the ones that are not */
+               if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+                   !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+                       continue;
+
                /* Ignore return value on purpose - a given VF may fail, but
                 * we need to keep going and send to all of them
                 */
                i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
                                       msg, msglen, NULL);
-               vf++;
-               abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        }
 }
 
@@ -1954,12 +1965,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf = pf->vf;
        struct i40e_link_status *ls = &pf->hw.phy.link_info;
-       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        int i;
 
        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
-       for (i = 0; i < pf->num_alloc_vfs; i++) {
+       for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
                if (vf->link_forced) {
                        pfe.event_data.link_event.link_status = vf->link_up;
                        pfe.event_data.link_event.link_speed =
@@ -1972,8 +1983,6 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
                i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
                                       0, (u8 *)&pfe, sizeof(pfe),
                                       NULL);
-               vf++;
-               abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        }
 }
 
@@ -2002,7 +2011,18 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 {
        struct i40e_virtchnl_pf_event pfe;
-       int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+       int abs_vf_id;
+
+       /* validate the request */
+       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+               return;
+
+       /* verify if the VF is in either init or active before proceeding */
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+           !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+               return;
+
+       abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
 
        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
index 16039d1..b84f5ea 100644 (file)
@@ -268,7 +268,7 @@ struct qlcnic_fdt {
        u16     cksum;
        u16     unused;
        u8      model[16];
-       u16     mfg_id;
+       u     mfg_id;
        u16     id;
        u8      flag;
        u8      erase_cmd;
@@ -2362,6 +2362,19 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
                return QLC_DEFAULT_VNIC_COUNT;
 }
 
+static inline void qlcnic_swap32_buffer(u32 *buffer, int count)
+{
+#if defined(__BIG_ENDIAN)
+       u32 *tmp = buffer;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               *tmp = swab32(*tmp);
+               tmp++;
+       }
+#endif
+}
+
 #ifdef CONFIG_QLCNIC_HWMON
 void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
 void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
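
qlcnic_swap32_buffer() above byte-swaps a buffer of 32-bit words, and only on big-endian builds, so data exchanged with the little-endian adapter and flash keeps the intended byte order. A userspace sketch of the same conditional swap using compiler builtins; the byte-order macros below are the GCC/Clang convention, not the kernel's config symbols:

    #include <stdio.h>
    #include <stdint.h>

    static void swap32_buffer(uint32_t *buf, int count)
    {
    #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
            /* Only big-endian hosts need to swap little-endian device data. */
            for (int i = 0; i < count; i++)
                    buf[i] = __builtin_bswap32(buf[i]);
    #else
            (void)buf;
            (void)count;
    #endif
    }

    int main(void)
    {
            uint32_t words[2] = { 0x11223344, 0xaabbccdd };

            swap32_buffer(words, 2);
            printf("%#x %#x\n", words[0], words[1]);
            return 0;
    }
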
index a4a4ec0..476e499 100644 (file)
@@ -2603,7 +2603,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
        }
 
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
-                                    (addr));
+                                    (addr & 0xFFFF0000));
 
        range = flash_offset + (count * sizeof(u32));
        /* Check if data is spread across multiple sectors */
@@ -2753,7 +2753,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
        ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
                                                (u8 *)&adapter->ahw->fdt,
                                                count);
-
+       qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count);
        qlcnic_83xx_unlock_flash(adapter);
        return ret;
 }
@@ -2788,7 +2788,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
 
        addr1 = (sector_start_addr & 0xFF) << 16;
        addr2 = (sector_start_addr & 0xFF0000) >> 16;
-       reversed_addr = addr1 | addr2;
+       reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00);
 
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
                                     reversed_addr);
index f33559b..86783e1 100644 (file)
@@ -1378,31 +1378,45 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
 {
        struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
        const struct firmware *fw = fw_info->fw;
-       u32 dest, *p_cache;
+       u32 dest, *p_cache, *temp;
        int i, ret = -EIO;
+       __le32 *temp_le;
        u8 data[16];
        size_t size;
        u64 addr;
 
+       temp = kzalloc(fw->size, GFP_KERNEL);
+       if (!temp) {
+               release_firmware(fw);
+               fw_info->fw = NULL;
+               return -ENOMEM;
+       }
+
+       temp_le = (__le32 *)fw->data;
+
+       /* FW image in file is in little endian, swap the data to nullify
+        * the effect of writel() operation on big endian platform.
+        */
+       for (i = 0; i < fw->size / sizeof(u32); i++)
+               temp[i] = __le32_to_cpu(temp_le[i]);
+
        dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
        size = (fw->size & ~0xF);
-       p_cache = (u32 *)fw->data;
+       p_cache = temp;
        addr = (u64)dest;
 
        ret = qlcnic_ms_mem_write128(adapter, addr,
                                     p_cache, size / 16);
        if (ret) {
                dev_err(&adapter->pdev->dev, "MS memory write failed\n");
-               release_firmware(fw);
-               fw_info->fw = NULL;
-               return -EIO;
+               goto exit;
        }
 
        /* alignment check */
        if (fw->size & 0xF) {
                addr = dest + size;
                for (i = 0; i < (fw->size & 0xF); i++)
-                       data[i] = fw->data[size + i];
+                       data[i] = temp[size + i];
                for (; i < 16; i++)
                        data[i] = 0;
                ret = qlcnic_ms_mem_write128(adapter, addr,
@@ -1410,15 +1424,16 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
                if (ret) {
                        dev_err(&adapter->pdev->dev,
                                "MS memory write failed\n");
-                       release_firmware(fw);
-                       fw_info->fw = NULL;
-                       return -EIO;
+                       goto exit;
                }
        }
+
+exit:
        release_firmware(fw);
        fw_info->fw = NULL;
+       kfree(temp);
 
-       return 0;
+       return ret;
 }
 
 static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
index e46fc39..c9f57fb 100644 (file)
@@ -47,15 +47,26 @@ struct qlcnic_common_entry_hdr {
        u32     type;
        u32     offset;
        u32     cap_size;
+#if defined(__LITTLE_ENDIAN)
        u8      mask;
        u8      rsvd[2];
        u8      flags;
+#else
+       u8      flags;
+       u8      rsvd[2];
+       u8      mask;
+#endif
 } __packed;
 
 struct __crb {
        u32     addr;
+#if defined(__LITTLE_ENDIAN)
        u8      stride;
        u8      rsvd1[3];
+#else
+       u8      rsvd1[3];
+       u8      stride;
+#endif
        u32     data_size;
        u32     no_ops;
        u32     rsvd2[4];
@@ -63,15 +74,28 @@ struct __crb {
 
 struct __ctrl {
        u32     addr;
+#if defined(__LITTLE_ENDIAN)
        u8      stride;
        u8      index_a;
        u16     timeout;
+#else
+       u16     timeout;
+       u8      index_a;
+       u8      stride;
+#endif
        u32     data_size;
        u32     no_ops;
+#if defined(__LITTLE_ENDIAN)
        u8      opcode;
        u8      index_v;
        u8      shl_val;
        u8      shr_val;
+#else
+       u8      shr_val;
+       u8      shl_val;
+       u8      index_v;
+       u8      opcode;
+#endif
        u32     val1;
        u32     val2;
        u32     val3;
@@ -79,16 +103,27 @@ struct __ctrl {
 
 struct __cache {
        u32     addr;
+#if defined(__LITTLE_ENDIAN)
        u16     stride;
        u16     init_tag_val;
+#else
+       u16     init_tag_val;
+       u16     stride;
+#endif
        u32     size;
        u32     no_ops;
        u32     ctrl_addr;
        u32     ctrl_val;
        u32     read_addr;
+#if defined(__LITTLE_ENDIAN)
        u8      read_addr_stride;
        u8      read_addr_num;
        u8      rsvd1[2];
+#else
+       u8      rsvd1[2];
+       u8      read_addr_num;
+       u8      read_addr_stride;
+#endif
 } __packed;
 
 struct __ocm {
@@ -122,23 +157,39 @@ struct __mux {
 
 struct __queue {
        u32     sel_addr;
+#if defined(__LITTLE_ENDIAN)
        u16     stride;
        u8      rsvd[2];
+#else
+       u8      rsvd[2];
+       u16     stride;
+#endif
        u32     size;
        u32     no_ops;
        u8      rsvd2[8];
        u32     read_addr;
+#if defined(__LITTLE_ENDIAN)
        u8      read_addr_stride;
        u8      read_addr_cnt;
        u8      rsvd3[2];
+#else
+       u8      rsvd3[2];
+       u8      read_addr_cnt;
+       u8      read_addr_stride;
+#endif
 } __packed;
 
 struct __pollrd {
        u32     sel_addr;
        u32     read_addr;
        u32     sel_val;
+#if defined(__LITTLE_ENDIAN)
        u16     sel_val_stride;
        u16     no_ops;
+#else
+       u16     no_ops;
+       u16     sel_val_stride;
+#endif
        u32     poll_wait;
        u32     poll_mask;
        u32     data_size;
@@ -153,9 +204,15 @@ struct __mux2 {
        u32     no_ops;
        u32     sel_val_mask;
        u32     read_addr;
+#if defined(__LITTLE_ENDIAN)
        u8      sel_val_stride;
        u8      data_size;
        u8      rsvd[2];
+#else
+       u8      rsvd[2];
+       u8      data_size;
+       u8      sel_val_stride;
+#endif
 } __packed;
 
 struct __pollrdmwr {
index f5786d5..59a721f 100644 (file)
@@ -280,6 +280,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
        if (ret != 0)
                return ret;
        qlcnic_read_crb(adapter, buf, offset, size);
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 
        return size;
 }
@@ -296,6 +297,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
        if (ret != 0)
                return ret;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        qlcnic_write_crb(adapter, buf, offset, size);
        return size;
 }
@@ -329,6 +331,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
                return -EIO;
 
        memcpy(buf, &data, size);
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 
        return size;
 }
@@ -346,6 +349,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
        if (ret != 0)
                return ret;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        memcpy(&data, buf, size);
 
        if (qlcnic_pci_mem_write_2M(adapter, offset, data))
@@ -412,6 +416,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
        if (rem)
                return QL_STATUS_INVALID_PARAM;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
        ret = validate_pm_config(adapter, pm_cfg, count);
 
@@ -474,6 +479,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
                pm_cfg[pci_func].dest_npar = 0;
                pm_cfg[pci_func].pci_func = i;
        }
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        return size;
 }
 
@@ -555,6 +561,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
        if (rem)
                return QL_STATUS_INVALID_PARAM;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
        ret = validate_esw_config(adapter, esw_cfg, count);
        if (ret)
@@ -649,6 +656,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
                if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
                        return QL_STATUS_INVALID_PARAM;
        }
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        return size;
 }
 
@@ -688,6 +696,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
        if (rem)
                return QL_STATUS_INVALID_PARAM;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        np_cfg = (struct qlcnic_npar_func_cfg *)buf;
        ret = validate_npar_config(adapter, np_cfg, count);
        if (ret)
@@ -759,6 +768,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
                np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
                np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
        }
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        return size;
 }
 
@@ -916,6 +926,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
 
        pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
        count = size / sizeof(struct qlcnic_pci_func_cfg);
+       qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32));
        for (i = 0; i < count; i++) {
                pci_cfg[i].pci_func = pci_info[i].id;
                pci_cfg[i].func_type = pci_info[i].type;
@@ -969,6 +980,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
        }
 
        qlcnic_83xx_unlock_flash(adapter);
+       qlcnic_swap32_buffer((u32 *)p_read_buf, count);
        memcpy(buf, p_read_buf, size);
        kfree(p_read_buf);
 
@@ -986,9 +998,10 @@ static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
        if (!p_cache)
                return -ENOMEM;
 
+       count = size / sizeof(u32);
+       qlcnic_swap32_buffer((u32 *)buf, count);
        memcpy(p_cache, buf, size);
        p_src = p_cache;
-       count = size / sizeof(u32);
 
        if (qlcnic_83xx_lock_flash(adapter) != 0) {
                kfree(p_cache);
@@ -1053,6 +1066,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
        if (!p_cache)
                return -ENOMEM;
 
+       qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        memcpy(p_cache, buf, size);
        p_src = p_cache;
        count = size / sizeof(u32);
index 60e4ca0..a969555 100644 (file)
@@ -739,7 +739,10 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;
 
-       if (!vlan->port->passthru)
+       /* Support unicast filter only on passthru devices.
+        * Multicast filter should be allowed on all devices.
+        */
+       if (!vlan->port->passthru && is_unicast_ether_addr(addr))
                return -EOPNOTSUPP;
 
        if (flags & NLM_F_REPLACE)
@@ -760,7 +763,10 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;
 
-       if (!vlan->port->passthru)
+       /* Support unicast filter only on passthru devices.
+        * Multicast filter should be allowed on all devices.
+        */
+       if (!vlan->port->passthru && is_unicast_ether_addr(addr))
                return -EOPNOTSUPP;
 
        if (is_unicast_ether_addr(addr))
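
The macvlan hunks above key the -EOPNOTSUPP rejection on whether the address being added or deleted is unicast; a MAC address is a group (multicast/broadcast) address when the least-significant bit of its first octet is set, which is the property is_unicast_ether_addr() checks. A tiny standalone version of that test, mirroring the kernel helper in plain C:

    #include <stdio.h>

    /* Bit 0 of the first octet is the I/G (group) bit. */
    static int is_unicast_ether_addr(const unsigned char *addr)
    {
            return (addr[0] & 0x01) == 0;
    }

    int main(void)
    {
            const unsigned char host[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
            const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

            printf("host  unicast? %d\n", is_unicast_ether_addr(host));   /* 1 */
            printf("mcast unicast? %d\n", is_unicast_ether_addr(mcast));  /* 0 */
            return 0;
    }
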
index 526b94c..fdce1ea 100644 (file)
@@ -157,6 +157,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
        return bcm7xxx_28nm_afe_config_init(phydev);
 }
 
+static int bcm7xxx_28nm_resume(struct phy_device *phydev)
+{
+       int ret;
+
+       /* Re-apply workarounds coming out suspend/resume */
+       ret = bcm7xxx_28nm_config_init(phydev);
+       if (ret)
+               return ret;
+
+       /* 28nm Gigabit PHYs come out of reset without any half-duplex
+        * or "hub" compliant advertised mode, fix that. This does not
+        * cause any problems with the PHY library since genphy_config_aneg()
+        * gracefully handles auto-negotiated and forced modes.
+        */
+       return genphy_config_aneg(phydev);
+}
+
 static int phy_set_clr_bits(struct phy_device *dev, int location,
                                        int set_mask, int clr_mask)
 {
@@ -212,7 +229,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
 }
 
 /* Workaround for putting the PHY in IDDQ mode, required
- * for all BCM7XXX PHYs
+ * for all BCM7XXX 40nm and 65nm PHYs
  */
 static int bcm7xxx_suspend(struct phy_device *phydev)
 {
@@ -257,8 +274,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        .config_init    = bcm7xxx_28nm_afe_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .suspend        = bcm7xxx_suspend,
-       .resume         = bcm7xxx_28nm_afe_config_init,
+       .resume         = bcm7xxx_28nm_resume,
        .driver         = { .owner = THIS_MODULE },
 }, {
        .phy_id         = PHY_ID_BCM7439,
@@ -270,8 +286,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        .config_init    = bcm7xxx_28nm_afe_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .suspend        = bcm7xxx_suspend,
-       .resume         = bcm7xxx_28nm_afe_config_init,
+       .resume         = bcm7xxx_28nm_resume,
        .driver         = { .owner = THIS_MODULE },
 }, {
        .phy_id         = PHY_ID_BCM7445,
@@ -283,21 +298,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        .config_init    = bcm7xxx_28nm_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .suspend        = bcm7xxx_suspend,
-       .resume         = bcm7xxx_28nm_config_init,
-       .driver         = { .owner = THIS_MODULE },
-}, {
-       .name           = "Broadcom BCM7XXX 28nm",
-       .phy_id         = PHY_ID_BCM7XXX_28,
-       .phy_id_mask    = PHY_BCM_OUI_MASK,
-       .features       = PHY_GBIT_FEATURES |
-                         SUPPORTED_Pause | SUPPORTED_Asym_Pause,
-       .flags          = PHY_IS_INTERNAL,
-       .config_init    = bcm7xxx_28nm_config_init,
-       .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
-       .suspend        = bcm7xxx_suspend,
-       .resume         = bcm7xxx_28nm_config_init,
+       .resume         = bcm7xxx_28nm_afe_config_init,
        .driver         = { .owner = THIS_MODULE },
 }, {
        .phy_id         = PHY_BCM_OUI_4,
@@ -331,7 +332,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
        { PHY_ID_BCM7366, 0xfffffff0, },
        { PHY_ID_BCM7439, 0xfffffff0, },
        { PHY_ID_BCM7445, 0xfffffff0, },
-       { PHY_ID_BCM7XXX_28, 0xfffffc00 },
        { PHY_BCM_OUI_4, 0xffff0000 },
        { PHY_BCM_OUI_5, 0xffffff00 },
        { }
index 180c494..a4b0819 100644 (file)
@@ -42,6 +42,22 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
 }
 
 static int smsc_phy_config_init(struct phy_device *phydev)
+{
+       int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+
+       if (rc < 0)
+               return rc;
+
+       /* Enable energy detect mode for this SMSC Transceivers */
+       rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+                      rc | MII_LAN83C185_EDPWRDOWN);
+       if (rc < 0)
+               return rc;
+
+       return smsc_phy_ack_interrupt(phydev);
+}
+
+static int smsc_phy_reset(struct phy_device *phydev)
 {
        int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES);
        if (rc < 0)
@@ -66,18 +82,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
                        rc = phy_read(phydev, MII_BMCR);
                } while (rc & BMCR_RESET);
        }
-
-       rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
-       if (rc < 0)
-               return rc;
-
-       /* Enable energy detect mode for this SMSC Transceivers */
-       rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
-                      rc | MII_LAN83C185_EDPWRDOWN);
-       if (rc < 0)
-               return rc;
-
-       return smsc_phy_ack_interrupt (phydev);
+       return 0;
 }
 
 static int lan911x_config_init(struct phy_device *phydev)
@@ -142,6 +147,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .config_init    = smsc_phy_config_init,
+       .soft_reset     = smsc_phy_reset,
 
        /* IRQ related */
        .ack_interrupt  = smsc_phy_ack_interrupt,
@@ -164,6 +170,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .config_init    = smsc_phy_config_init,
+       .soft_reset     = smsc_phy_reset,
 
        /* IRQ related */
        .ack_interrupt  = smsc_phy_ack_interrupt,
@@ -186,6 +193,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .config_init    = smsc_phy_config_init,
+       .soft_reset     = smsc_phy_reset,
 
        /* IRQ related */
        .ack_interrupt  = smsc_phy_ack_interrupt,
@@ -230,6 +238,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = lan87xx_read_status,
        .config_init    = smsc_phy_config_init,
+       .soft_reset     = smsc_phy_reset,
 
        /* IRQ related */
        .ack_interrupt  = smsc_phy_ack_interrupt,
index f46a24f..79cb831 100644 (file)
@@ -453,7 +453,7 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
                base = dt_mem_next_cell(dt_root_addr_cells, &prop);
                size = dt_mem_next_cell(dt_root_size_cells, &prop);
 
-               if (base && size &&
+               if (size &&
                    early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
                        pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
                                uname, &base, (unsigned long)size / SZ_1M);
index 3e06a69..1471e0a 100644 (file)
@@ -301,16 +301,17 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
        /* Get the reg property (if any) */
        addr = of_get_property(device, "reg", NULL);
 
+       /* Try the new-style interrupts-extended first */
+       res = of_parse_phandle_with_args(device, "interrupts-extended",
+                                       "#interrupt-cells", index, out_irq);
+       if (!res)
+               return of_irq_parse_raw(addr, out_irq);
+
        /* Get the interrupts property */
        intspec = of_get_property(device, "interrupts", &intlen);
-       if (intspec == NULL) {
-               /* Try the new-style interrupts-extended */
-               res = of_parse_phandle_with_args(device, "interrupts-extended",
-                                               "#interrupt-cells", index, out_irq);
-               if (res)
-                       return -EINVAL;
-               return of_irq_parse_raw(addr, out_irq);
-       }
+       if (intspec == NULL)
+               return -EINVAL;
+
        intlen /= sizeof(*intspec);
 
        pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
index d410026..a737cb5 100644 (file)
@@ -27,6 +27,7 @@ static struct selftest_results {
 #define NO_OF_NODES 2
 static struct device_node *nodes[NO_OF_NODES];
 static int last_node_index;
+static bool selftest_live_tree;
 
 #define selftest(result, fmt, ...) { \
        if (!(result)) { \
@@ -630,13 +631,6 @@ static int attach_node_and_children(struct device_node *np)
 {
        struct device_node *next, *root = np, *dup;
 
-       if (!np) {
-               pr_warn("%s: No tree to attach; not running tests\n",
-                       __func__);
-               return -ENODATA;
-       }
-
-
        /* skip root node */
        np = np->child;
        /* storing a copy in temporary node */
@@ -672,12 +666,12 @@ static int attach_node_and_children(struct device_node *np)
 static int __init selftest_data_add(void)
 {
        void *selftest_data;
-       struct device_node *selftest_data_node;
+       struct device_node *selftest_data_node, *np;
        extern uint8_t __dtb_testcases_begin[];
        extern uint8_t __dtb_testcases_end[];
        const int size = __dtb_testcases_end - __dtb_testcases_begin;
 
-       if (!size || !of_allnodes) {
+       if (!size) {
                pr_warn("%s: No testcase data to attach; not running tests\n",
                        __func__);
                return -ENODATA;
@@ -692,6 +686,22 @@ static int __init selftest_data_add(void)
                return -ENOMEM;
        }
        of_fdt_unflatten_tree(selftest_data, &selftest_data_node);
+       if (!selftest_data_node) {
+               pr_warn("%s: No tree to attach; not running tests\n", __func__);
+               return -ENODATA;
+       }
+
+       if (!of_allnodes) {
+               /* enabling flag for removing nodes */
+               selftest_live_tree = true;
+               of_allnodes = selftest_data_node;
+
+               for_each_of_allnodes(np)
+                       __of_attach_node_sysfs(np);
+               of_aliases = of_find_node_by_path("/aliases");
+               of_chosen = of_find_node_by_path("/chosen");
+               return 0;
+       }
 
        /* attach the sub-tree to live tree */
        return attach_node_and_children(selftest_data_node);
@@ -723,6 +733,18 @@ static void selftest_data_remove(void)
        struct device_node *np;
        struct property *prop;
 
+       if (selftest_live_tree) {
+               of_node_put(of_aliases);
+               of_node_put(of_chosen);
+               of_aliases = NULL;
+               of_chosen = NULL;
+               for_each_child_of_node(of_allnodes, np)
+                       detach_node_and_children(np);
+               __of_detach_node_sysfs(of_allnodes);
+               of_allnodes = NULL;
+               return;
+       }
+
        while (last_node_index >= 0) {
                if (nodes[last_node_index]) {
                        np = of_find_node_by_path(nodes[last_node_index]->full_name);
index 2d8a4d0..8922c37 100644 (file)
@@ -1,9 +1,18 @@
 menu "PCI host controller drivers"
        depends on PCI
 
+config PCI_DRA7XX
+       bool "TI DRA7xx PCIe controller"
+       select PCIE_DW
+       depends on OF && HAS_IOMEM && TI_PIPE3
+       help
+        Enables support for the PCIe controller in the DRA7xx SoC.  There
+        are two instances of PCIe controller in DRA7xx.  This controller can
+        act both as EP and RC.  This reuses the Designware core.
+
 config PCI_MVEBU
        bool "Marvell EBU PCIe controller"
-       depends on ARCH_MVEBU || ARCH_DOVE || ARCH_KIRKWOOD
+       depends on ARCH_MVEBU || ARCH_DOVE
        depends on OF
 
 config PCIE_DW
index 0daec79..d0e88f1 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
new file mode 100644 (file)
index 0000000..52b34fe
--- /dev/null
@@ -0,0 +1,458 @@
+/*
+ * pci-dra7xx - PCIe controller driver for TI DRA7xx SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* PCIe controller wrapper DRA7XX configuration registers */
+
+#define        PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN             0x0024
+#define        PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN         0x0028
+#define        ERR_SYS                                         BIT(0)
+#define        ERR_FATAL                                       BIT(1)
+#define        ERR_NONFATAL                                    BIT(2)
+#define        ERR_COR                                         BIT(3)
+#define        ERR_AXI                                         BIT(4)
+#define        ERR_ECRC                                        BIT(5)
+#define        PME_TURN_OFF                                    BIT(8)
+#define        PME_TO_ACK                                      BIT(9)
+#define        PM_PME                                          BIT(10)
+#define        LINK_REQ_RST                                    BIT(11)
+#define        LINK_UP_EVT                                     BIT(12)
+#define        CFG_BME_EVT                                     BIT(13)
+#define        CFG_MSE_EVT                                     BIT(14)
+#define        INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
+                       ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
+                       LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
+
+#define        PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI              0x0034
+#define        PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI          0x0038
+#define        INTA                                            BIT(0)
+#define        INTB                                            BIT(1)
+#define        INTC                                            BIT(2)
+#define        INTD                                            BIT(3)
+#define        MSI                                             BIT(4)
+#define        LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+
+#define        PCIECTRL_DRA7XX_CONF_DEVICE_CMD                 0x0104
+#define        LTSSM_EN                                        0x1
+
+#define        PCIECTRL_DRA7XX_CONF_PHY_CS                     0x010C
+#define        LINK_UP                                         BIT(16)
+
+struct dra7xx_pcie {
+       void __iomem            *base;
+       struct phy              **phy;
+       int                     phy_count;
+       struct device           *dev;
+       struct pcie_port        pp;
+};
+
+#define to_dra7xx_pcie(x)      container_of((x), struct dra7xx_pcie, pp)
+
+static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
+{
+       return readl(pcie->base + offset);
+}
+
+static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
+                                     u32 value)
+{
+       writel(value, pcie->base + offset);
+}
+
+static int dra7xx_pcie_link_up(struct pcie_port *pp)
+{
+       struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+       u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+
+       return !!(reg & LINK_UP);
+}
+
+static int dra7xx_pcie_establish_link(struct pcie_port *pp)
+{
+       u32 reg;
+       unsigned int retries = 1000;
+       struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
+       if (dw_pcie_link_up(pp)) {
+               dev_err(pp->dev, "link is already up\n");
+               return 0;
+       }
+
+       reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+       reg |= LTSSM_EN;
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+       while (retries--) {
+               reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+               if (reg & LINK_UP)
+                       break;
+               usleep_range(10, 20);
+       }
+
+       if (!(reg & LINK_UP)) {
+               dev_err(pp->dev, "link is not up\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
+{
+       struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+                          ~INTERRUPTS);
+       dra7xx_pcie_writel(dra7xx,
+                          PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
+                          ~LEG_EP_INTERRUPTS & ~MSI);
+
+       if (IS_ENABLED(CONFIG_PCI_MSI))
+               dra7xx_pcie_writel(dra7xx,
+                                  PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, MSI);
+       else
+               dra7xx_pcie_writel(dra7xx,
+                                  PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+                                  LEG_EP_INTERRUPTS);
+}
+
+static void dra7xx_pcie_host_init(struct pcie_port *pp)
+{
+       dw_pcie_setup_rc(pp);
+       dra7xx_pcie_establish_link(pp);
+       if (IS_ENABLED(CONFIG_PCI_MSI))
+               dw_pcie_msi_init(pp);
+       dra7xx_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops dra7xx_pcie_host_ops = {
+       .link_up = dra7xx_pcie_link_up,
+       .host_init = dra7xx_pcie_host_init,
+};
+
+static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+                               irq_hw_number_t hwirq)
+{
+       irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+       irq_set_chip_data(irq, domain->host_data);
+       set_irq_flags(irq, IRQF_VALID);
+
+       return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+       .map = dra7xx_pcie_intx_map,
+};
+
+static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+{
+       struct device *dev = pp->dev;
+       struct device_node *node = dev->of_node;
+       struct device_node *pcie_intc_node =  of_get_next_child(node, NULL);
+
+       if (!pcie_intc_node) {
+               dev_err(dev, "No PCIe Intc node found\n");
+               return -ENODEV;
+       }
+
+       pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+                                              &intx_domain_ops, pp);
+       if (!pp->irq_domain) {
+               dev_err(dev, "Failed to get an INTx IRQ domain\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+{
+       struct pcie_port *pp = arg;
+       struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+       u32 reg;
+
+       reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+
+       switch (reg) {
+       case MSI:
+               dw_handle_msi_irq(pp);
+               break;
+       case INTA:
+       case INTB:
+       case INTC:
+       case INTD:
+               generic_handle_irq(irq_find_mapping(pp->irq_domain, ffs(reg)));
+               break;
+       }
+
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
+{
+       struct dra7xx_pcie *dra7xx = arg;
+       u32 reg;
+
+       reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
+
+       if (reg & ERR_SYS)
+               dev_dbg(dra7xx->dev, "System Error\n");
+
+       if (reg & ERR_FATAL)
+               dev_dbg(dra7xx->dev, "Fatal Error\n");
+
+       if (reg & ERR_NONFATAL)
+               dev_dbg(dra7xx->dev, "Non Fatal Error\n");
+
+       if (reg & ERR_COR)
+               dev_dbg(dra7xx->dev, "Correctable Error\n");
+
+       if (reg & ERR_AXI)
+               dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
+
+       if (reg & ERR_ECRC)
+               dev_dbg(dra7xx->dev, "ECRC Error\n");
+
+       if (reg & PME_TURN_OFF)
+               dev_dbg(dra7xx->dev,
+                       "Power Management Event Turn-Off message received\n");
+
+       if (reg & PME_TO_ACK)
+               dev_dbg(dra7xx->dev,
+                       "Power Management Turn-Off Ack message received\n");
+
+       if (reg & PM_PME)
+               dev_dbg(dra7xx->dev,
+                       "PM Power Management Event message received\n");
+
+       if (reg & LINK_REQ_RST)
+               dev_dbg(dra7xx->dev, "Link Request Reset\n");
+
+       if (reg & LINK_UP_EVT)
+               dev_dbg(dra7xx->dev, "Link-up state change\n");
+
+       if (reg & CFG_BME_EVT)
+               dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
+
+       if (reg & CFG_MSE_EVT)
+               dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
+
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
+
+       return IRQ_HANDLED;
+}
+
+static int add_pcie_port(struct dra7xx_pcie *dra7xx,
+                         struct platform_device *pdev)
+{
+       int ret;
+       struct pcie_port *pp;
+       struct resource *res;
+       struct device *dev = &pdev->dev;
+
+       pp = &dra7xx->pp;
+       pp->dev = dev;
+       pp->ops = &dra7xx_pcie_host_ops;
+
+       pp->irq = platform_get_irq(pdev, 1);
+       if (pp->irq < 0) {
+               dev_err(dev, "missing IRQ resource\n");
+               return -EINVAL;
+       }
+
+       ret = devm_request_irq(&pdev->dev, pp->irq,
+                              dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
+                              "dra7-pcie-msi", pp);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to request irq\n");
+               return ret;
+       }
+
+       if (!IS_ENABLED(CONFIG_PCI_MSI)) {
+               ret = dra7xx_pcie_init_irq_domain(pp);
+               if (ret < 0)
+                       return ret;
+       }
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
+       pp->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
+       if (!pp->dbi_base)
+               return -ENOMEM;
+
+       ret = dw_pcie_host_init(pp);
+       if (ret) {
+               dev_err(dra7xx->dev, "failed to initialize host\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int __init dra7xx_pcie_probe(struct platform_device *pdev)
+{
+       u32 reg;
+       int ret;
+       int irq;
+       int i;
+       int phy_count;
+       struct phy **phy;
+       void __iomem *base;
+       struct resource *res;
+       struct dra7xx_pcie *dra7xx;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       char name[10];
+
+       dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
+       if (!dra7xx)
+               return -ENOMEM;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "missing IRQ resource\n");
+               return -EINVAL;
+       }
+
+       ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+                              IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+       if (ret) {
+               dev_err(dev, "failed to request irq\n");
+               return ret;
+       }
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
+       base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       if (!base)
+               return -ENOMEM;
+
+       phy_count = of_property_count_strings(np, "phy-names");
+       if (phy_count < 0) {
+               dev_err(dev, "unable to count phy-names strings\n");
+               return phy_count;
+       }
+
+       phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+       if (!phy)
+               return -ENOMEM;
+
+       for (i = 0; i < phy_count; i++) {
+               snprintf(name, sizeof(name), "pcie-phy%d", i);
+               phy[i] = devm_phy_get(dev, name);
+               if (IS_ERR(phy[i]))
+                       return PTR_ERR(phy[i]);
+
+               ret = phy_init(phy[i]);
+               if (ret < 0)
+                       goto err_phy;
+
+               ret = phy_power_on(phy[i]);
+               if (ret < 0) {
+                       phy_exit(phy[i]);
+                       goto err_phy;
+               }
+       }
+
+       dra7xx->base = base;
+       dra7xx->phy = phy;
+       dra7xx->dev = dev;
+       dra7xx->phy_count = phy_count;
+
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (IS_ERR_VALUE(ret)) {
+               dev_err(dev, "pm_runtime_get_sync failed\n");
+               goto err_phy;
+       }
+
+       reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+       reg &= ~LTSSM_EN;
+       dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+       platform_set_drvdata(pdev, dra7xx);
+
+       ret = add_pcie_port(dra7xx, pdev);
+       if (ret < 0)
+               goto err_add_port;
+
+       return 0;
+
+err_add_port:
+       pm_runtime_put(dev);
+       pm_runtime_disable(dev);
+
+err_phy:
+       while (--i >= 0) {
+               phy_power_off(phy[i]);
+               phy_exit(phy[i]);
+       }
+
+       return ret;
+}
+
+static int __exit dra7xx_pcie_remove(struct platform_device *pdev)
+{
+       struct dra7xx_pcie *dra7xx = platform_get_drvdata(pdev);
+       struct pcie_port *pp = &dra7xx->pp;
+       struct device *dev = &pdev->dev;
+       int count = dra7xx->phy_count;
+
+       if (pp->irq_domain)
+               irq_domain_remove(pp->irq_domain);
+       pm_runtime_put(dev);
+       pm_runtime_disable(dev);
+       while (count--) {
+               phy_power_off(dra7xx->phy[count]);
+               phy_exit(dra7xx->phy[count]);
+       }
+
+       return 0;
+}
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+       { .compatible = "ti,dra7-pcie", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
+
+static struct platform_driver dra7xx_pcie_driver = {
+       .remove         = __exit_p(dra7xx_pcie_remove),
+       .driver = {
+               .name   = "dra7-pcie",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_dra7xx_pcie_match,
+       },
+};
+
+module_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_DESCRIPTION("TI PCIe controller driver");
+MODULE_LICENSE("GPL v2");
index abd6578..0fb0fdb 100644 (file)
@@ -25,6 +25,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
@@ -276,6 +277,7 @@ struct tegra_pcie {
        unsigned int num_supplies;
 
        const struct tegra_pcie_soc_data *soc_data;
+       struct dentry *debugfs;
 };
 
 struct tegra_pcie_port {
@@ -1739,6 +1741,115 @@ static const struct of_device_id tegra_pcie_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
 
+static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
+{
+       struct tegra_pcie *pcie = s->private;
+
+       if (list_empty(&pcie->ports))
+               return NULL;
+
+       seq_printf(s, "Index  Status\n");
+
+       return seq_list_start(&pcie->ports, *pos);
+}
+
+static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       struct tegra_pcie *pcie = s->private;
+
+       return seq_list_next(v, &pcie->ports, pos);
+}
+
+static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
+{
+       bool up = false, active = false;
+       struct tegra_pcie_port *port;
+       unsigned int value;
+
+       port = list_entry(v, struct tegra_pcie_port, list);
+
+       value = readl(port->base + RP_VEND_XP);
+
+       if (value & RP_VEND_XP_DL_UP)
+               up = true;
+
+       value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+       if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+               active = true;
+
+       seq_printf(s, "%2u     ", port->index);
+
+       if (up)
+               seq_printf(s, "up");
+
+       if (active) {
+               if (up)
+                       seq_printf(s, ", ");
+
+               seq_printf(s, "active");
+       }
+
+       seq_printf(s, "\n");
+       return 0;
+}
+
+static const struct seq_operations tegra_pcie_ports_seq_ops = {
+       .start = tegra_pcie_ports_seq_start,
+       .next = tegra_pcie_ports_seq_next,
+       .stop = tegra_pcie_ports_seq_stop,
+       .show = tegra_pcie_ports_seq_show,
+};
+
+static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
+{
+       struct tegra_pcie *pcie = inode->i_private;
+       struct seq_file *s;
+       int err;
+
+       err = seq_open(file, &tegra_pcie_ports_seq_ops);
+       if (err)
+               return err;
+
+       s = file->private_data;
+       s->private = pcie;
+
+       return 0;
+}
+
+static const struct file_operations tegra_pcie_ports_ops = {
+       .owner = THIS_MODULE,
+       .open = tegra_pcie_ports_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
+{
+       struct dentry *file;
+
+       pcie->debugfs = debugfs_create_dir("pcie", NULL);
+       if (!pcie->debugfs)
+               return -ENOMEM;
+
+       file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
+                                  pcie, &tegra_pcie_ports_ops);
+       if (!file)
+               goto remove;
+
+       return 0;
+
+remove:
+       debugfs_remove_recursive(pcie->debugfs);
+       pcie->debugfs = NULL;
+       return -ENOMEM;
+}
+
 static int tegra_pcie_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
@@ -1793,6 +1904,13 @@ static int tegra_pcie_probe(struct platform_device *pdev)
                goto disable_msi;
        }
 
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_pcie_debugfs_init(pcie);
+               if (err < 0)
+                       dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
+                               err);
+       }
+
        platform_set_drvdata(pdev, pcie);
        return 0;
 
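A note on the interface added above: reading the new debugfs file yields an "Index  Status" header followed by one line per root port, reporting "up" and/or "active". A minimal userspace sketch for dumping it; the /sys/kernel/debug path is an assumption, adjust if debugfs is mounted elsewhere.

#include <stdio.h>

int main(void)
{
        /* assumes debugfs is mounted at /sys/kernel/debug */
        FILE *f = fopen("/sys/kernel/debug/pcie/ports", "r");
        char line[128];

        if (!f) {
                perror("pcie/ports");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}
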
index 1eaf4df..52bd3a1 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of_pci.h>
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
+#include <linux/platform_device.h>
 #include <linux/types.h>
 
 #include "pcie-designware.h"
@@ -217,27 +218,47 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
        return 0;
 }
 
+static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+       unsigned int res, bit, val;
+
+       res = (irq / 32) * 12;
+       bit = irq % 32;
+       dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+       val &= ~(1 << bit);
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
 static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
                            unsigned int nvec, unsigned int pos)
 {
-       unsigned int i, res, bit, val;
+       unsigned int i;
 
        for (i = 0; i < nvec; i++) {
                irq_set_msi_desc_off(irq_base, i, NULL);
                clear_bit(pos + i, pp->msi_irq_in_use);
                /* Disable corresponding interrupt on MSI controller */
-               res = ((pos + i) / 32) * 12;
-               bit = (pos + i) % 32;
-               dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
-               val &= ~(1 << bit);
-               dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+               if (pp->ops->msi_clear_irq)
+                       pp->ops->msi_clear_irq(pp, pos + i);
+               else
+                       dw_pcie_msi_clear_irq(pp, pos + i);
        }
 }
 
+static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+       unsigned int res, bit, val;
+
+       res = (irq / 32) * 12;
+       bit = irq % 32;
+       dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+       val |= 1 << bit;
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
 static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 {
-       int res, bit, irq, pos0, pos1, i;
-       u32 val;
+       int irq, pos0, pos1, i;
        struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
 
        if (!pp) {
@@ -281,11 +302,10 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
                }
                set_bit(pos0 + i, pp->msi_irq_in_use);
                /*Enable corresponding interrupt in MSI interrupt controller */
-               res = ((pos0 + i) / 32) * 12;
-               bit = (pos0 + i) % 32;
-               dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
-               val |= 1 << bit;
-               dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+               if (pp->ops->msi_set_irq)
+                       pp->ops->msi_set_irq(pp, pos0 + i);
+               else
+                       dw_pcie_msi_set_irq(pp, pos0 + i);
        }
 
        *pos = pos0;
@@ -353,7 +373,10 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
         */
        desc->msi_attrib.multiple = msgvec;
 
-       msg.address_lo = virt_to_phys((void *)pp->msi_data);
+       if (pp->ops->get_msi_data)
+               msg.address_lo = pp->ops->get_msi_data(pp);
+       else
+               msg.address_lo = virt_to_phys((void *)pp->msi_data);
        msg.address_hi = 0x0;
        msg.data = pos;
        write_msi_msg(irq, &msg);
@@ -396,10 +419,35 @@ static const struct irq_domain_ops msi_domain_ops = {
 int __init dw_pcie_host_init(struct pcie_port *pp)
 {
        struct device_node *np = pp->dev->of_node;
+       struct platform_device *pdev = to_platform_device(pp->dev);
        struct of_pci_range range;
        struct of_pci_range_parser parser;
-       u32 val;
-       int i;
+       struct resource *cfg_res;
+       u32 val, na, ns;
+       const __be32 *addrp;
+       int i, index;
+
+       /* Find the address cell size and the number of cells in order to get
+        * the untranslated address.
+        */
+       of_property_read_u32(np, "#address-cells", &na);
+       ns = of_n_size_cells(np);
+
+       cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+       if (cfg_res) {
+               pp->config.cfg0_size = resource_size(cfg_res) / 2;
+               pp->config.cfg1_size = resource_size(cfg_res) / 2;
+               pp->cfg0_base = cfg_res->start;
+               pp->cfg1_base = cfg_res->start + pp->config.cfg0_size;
+
+               /* Find the untranslated configuration space address */
+               index = of_property_match_string(np, "reg-names", "config");
+               addrp = of_get_address(np, index, false, false);
+               pp->cfg0_mod_base = of_read_number(addrp, ns);
+               pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size;
+       } else {
+               dev_err(pp->dev, "missing *config* reg space\n");
+       }
 
        if (of_pci_range_parser_init(&parser, np)) {
                dev_err(pp->dev, "missing ranges property\n");
@@ -422,17 +470,33 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
                        pp->config.io_size = resource_size(&pp->io);
                        pp->config.io_bus_addr = range.pci_addr;
                        pp->io_base = range.cpu_addr;
+
+                       /* Find the untranslated IO space address */
+                       pp->io_mod_base = of_read_number(parser.range -
+                                                        parser.np + na, ns);
                }
                if (restype == IORESOURCE_MEM) {
                        of_pci_range_to_resource(&range, np, &pp->mem);
                        pp->mem.name = "MEM";
                        pp->config.mem_size = resource_size(&pp->mem);
                        pp->config.mem_bus_addr = range.pci_addr;
+
+                       /* Find the untranslated MEM space address */
+                       pp->mem_mod_base = of_read_number(parser.range -
+                                                         parser.np + na, ns);
                }
                if (restype == 0) {
                        of_pci_range_to_resource(&range, np, &pp->cfg);
                        pp->config.cfg0_size = resource_size(&pp->cfg)/2;
                        pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+                       pp->cfg0_base = pp->cfg.start;
+                       pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+
+                       /* Find the untranslated configuration space address */
+                       pp->cfg0_mod_base = of_read_number(parser.range -
+                                                          parser.np + na, ns);
+                       pp->cfg1_mod_base = pp->cfg0_mod_base +
+                                           pp->config.cfg0_size;
                }
        }
 
@@ -445,8 +509,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
                }
        }
 
-       pp->cfg0_base = pp->cfg.start;
-       pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
        pp->mem_base = pp->mem.start;
 
        pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
@@ -509,9 +571,9 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
        /* Program viewport 0 : OUTBOUND : CFG0 */
        dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
                          PCIE_ATU_VIEWPORT);
-       dw_pcie_writel_rc(pp, pp->cfg0_base, PCIE_ATU_LOWER_BASE);
-       dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32), PCIE_ATU_UPPER_BASE);
-       dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
+       dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1,
                          PCIE_ATU_LIMIT);
        dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
        dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -525,9 +587,9 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
        dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
                          PCIE_ATU_VIEWPORT);
        dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
-       dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
-       dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
-       dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
+       dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1,
                          PCIE_ATU_LIMIT);
        dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
        dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -540,9 +602,9 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
        dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
                          PCIE_ATU_VIEWPORT);
        dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
-       dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
-       dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
-       dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+       dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1,
                          PCIE_ATU_LIMIT);
        dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
        dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
@@ -556,9 +618,9 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
        dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
                          PCIE_ATU_VIEWPORT);
        dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
-       dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
-       dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
-       dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+       dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1,
                          PCIE_ATU_LIMIT);
        dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
        dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
@@ -656,7 +718,11 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
        }
 
        if (bus->number != pp->root_bus_nr)
-               ret = dw_pcie_rd_other_conf(pp, bus, devfn,
+               if (pp->ops->rd_other_conf)
+                       ret = pp->ops->rd_other_conf(pp, bus, devfn,
+                                               where, size, val);
+               else
+                       ret = dw_pcie_rd_other_conf(pp, bus, devfn,
                                                where, size, val);
        else
                ret = dw_pcie_rd_own_conf(pp, where, size, val);
@@ -679,7 +745,11 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        if (bus->number != pp->root_bus_nr)
-               ret = dw_pcie_wr_other_conf(pp, bus, devfn,
+               if (pp->ops->wr_other_conf)
+                       ret = pp->ops->wr_other_conf(pp, bus, devfn,
+                                               where, size, val);
+               else
+                       ret = dw_pcie_wr_other_conf(pp, bus, devfn,
                                                where, size, val);
        else
                ret = dw_pcie_wr_own_conf(pp, where, size, val);
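
The dw_pcie_msi_set_irq()/dw_pcie_msi_clear_irq() defaults factored out above keep the existing arithmetic: MSI vectors are handled 32 per enable register, and each group's register set sits 12 bytes apart, hence res = (irq / 32) * 12 and bit = irq % 32. A small standalone check of that mapping; only the stride visible in the code above is assumed, no further register layout.

#include <stdio.h>

#define MSI_VECTORS_PER_GROUP   32
#define MSI_GROUP_STRIDE        12      /* byte distance between groups, per the code above */

static void msi_vector_to_reg(unsigned int irq, unsigned int *offset,
                              unsigned int *bit)
{
        *offset = (irq / MSI_VECTORS_PER_GROUP) * MSI_GROUP_STRIDE;
        *bit = irq % MSI_VECTORS_PER_GROUP;
}

int main(void)
{
        unsigned int off, bit;

        msi_vector_to_reg(45, &off, &bit);
        /* vector 45 lands in group 1: 12 bytes past PCIE_MSI_INTR0_ENABLE, bit 13 */
        printf("offset=+%u bit=%u\n", off, bit);
        return 0;
}
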
index 77f592f..daf81f9 100644 (file)
@@ -36,11 +36,15 @@ struct pcie_port {
        u8                      root_bus_nr;
        void __iomem            *dbi_base;
        u64                     cfg0_base;
+       u64                     cfg0_mod_base;
        void __iomem            *va_cfg0_base;
        u64                     cfg1_base;
+       u64                     cfg1_mod_base;
        void __iomem            *va_cfg1_base;
        u64                     io_base;
+       u64                     io_mod_base;
        u64                     mem_base;
+       u64                     mem_mod_base;
        struct resource         cfg;
        struct resource         io;
        struct resource         mem;
@@ -61,8 +65,15 @@ struct pcie_host_ops {
                        u32 val, void __iomem *dbi_base);
        int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
        int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+       int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+                       unsigned int devfn, int where, int size, u32 *val);
+       int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+                       unsigned int devfn, int where, int size, u32 val);
        int (*link_up)(struct pcie_port *pp);
        void (*host_init)(struct pcie_port *pp);
+       void (*msi_set_irq)(struct pcie_port *pp, int irq);
+       void (*msi_clear_irq)(struct pcie_port *pp, int irq);
+       u32 (*get_msi_data)(struct pcie_port *pp);
 };
 
 int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
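
These new hooks let SoC glue drivers take over config-space accesses and MSI plumbing where the DesignWare defaults do not apply; any hook left NULL falls back to the built-in dw_pcie_* implementation, as the pcie-designware.c hunks above show. A hedged sketch of a glue driver wiring up just two of them; the my_soc_* names are placeholders, not an existing driver.

#include "pcie-designware.h"

/* hypothetical SoC-specific MSI enable: one bit per vector in the wrapper */
static void my_soc_msi_set_irq(struct pcie_port *pp, int irq)
{
        /* program the SoC-specific MSI enable bit for this vector */
}

/* hypothetical: return the bus address endpoint MSI writes must target */
static u32 my_soc_get_msi_data(struct pcie_port *pp)
{
        return 0;
}

static struct pcie_host_ops my_soc_pcie_host_ops = {
        .msi_set_irq    = my_soc_msi_set_irq,
        .get_msi_data   = my_soc_get_msi_data,
};
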
index a53a689..8c6fd8d 100644 (file)
@@ -620,8 +620,7 @@ static void abx500_gpio_dbg_show_one(struct seq_file *s,
        } else
                seq_printf(s, " %-9s", chip->get(chip, offset) ? "hi" : "lo");
 
-       if (pctldev)
-               mode = abx500_get_mode(pctldev, chip, offset);
+       mode = abx500_get_mode(pctldev, chip, offset);
 
        seq_printf(s, " %s", (mode < 0) ? "unknown" : modes[mode]);
 
index af1ba4f..60464a2 100644 (file)
@@ -497,10 +497,10 @@ static struct at91_pinctrl_mux_ops at91sam9x5_ops = {
 static void at91_pin_dbg(const struct device *dev, const struct at91_pmx_pin *pin)
 {
        if (pin->mux) {
-               dev_dbg(dev, "pio%c%d configured as periph%c with conf = 0x%lu\n",
+               dev_dbg(dev, "pio%c%d configured as periph%c with conf = 0x%lx\n",
                        pin->bank + 'A', pin->pin, pin->mux - 1 + 'A', pin->conf);
        } else {
-               dev_dbg(dev, "pio%c%d configured as gpio with conf = 0x%lu\n",
+               dev_dbg(dev, "pio%c%d configured as gpio with conf = 0x%lx\n",
                        pin->bank + 'A', pin->pin, pin->conf);
        }
 }
index 5e8b2e0..0c372a3 100644 (file)
@@ -438,7 +438,7 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
        int reg, ret, mask;
        unsigned long flags;
        u8 bit;
-       u32 data;
+       u32 data, rmask;
 
        if (iomux_num > 3)
                return -EINVAL;
@@ -478,8 +478,9 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
        spin_lock_irqsave(&bank->slock, flags);
 
        data = (mask << (bit + 16));
+       rmask = data | (data >> 16);
        data |= (mux & mask) << bit;
-       ret = regmap_write(regmap, reg, data);
+       ret = regmap_update_bits(regmap, reg, rmask, data);
 
        spin_unlock_irqrestore(&bank->slock, flags);
 
@@ -634,7 +635,7 @@ static int rk3288_set_drive(struct rockchip_pin_bank *bank, int pin_num,
        struct regmap *regmap;
        unsigned long flags;
        int reg, ret, i;
-       u32 data;
+       u32 data, rmask;
        u8 bit;
 
        rk3288_calc_drv_reg_and_bit(bank, pin_num, &regmap, &reg, &bit);
@@ -657,9 +658,10 @@ static int rk3288_set_drive(struct rockchip_pin_bank *bank, int pin_num,
 
        /* enable the write to the equivalent lower bits */
        data = ((1 << RK3288_DRV_BITS_PER_PIN) - 1) << (bit + 16);
+       rmask = data | (data >> 16);
        data |= (ret << bit);
 
-       ret = regmap_write(regmap, reg, data);
+       ret = regmap_update_bits(regmap, reg, rmask, data);
        spin_unlock_irqrestore(&bank->slock, flags);
 
        return ret;
@@ -722,7 +724,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
        int reg, ret;
        unsigned long flags;
        u8 bit;
-       u32 data;
+       u32 data, rmask;
 
        dev_dbg(info->dev, "setting pull of GPIO%d-%d to %d\n",
                 bank->bank_num, pin_num, pull);
@@ -750,6 +752,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
 
                /* enable the write to the equivalent lower bits */
                data = ((1 << RK3188_PULL_BITS_PER_PIN) - 1) << (bit + 16);
+               rmask = data | (data >> 16);
 
                switch (pull) {
                case PIN_CONFIG_BIAS_DISABLE:
@@ -770,7 +773,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
                        return -EINVAL;
                }
 
-               ret = regmap_write(regmap, reg, data);
+               ret = regmap_update_bits(regmap, reg, rmask, data);
 
                spin_unlock_irqrestore(&bank->slock, flags);
                break;
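
The rmask introduced in the three hunks above pairs every value bit with its counterpart in the upper half-word. Assuming the usual Rockchip scheme where the upper 16 bits of these registers act as per-bit write enables (not spelled out in the diff itself), regmap_update_bits() then touches exactly the enables plus the value bits and leaves the rest of the register alone. A worked example of the layout for a 2-bit mux field at bit 4:

#include <stdio.h>

int main(void)
{
        unsigned int mask = 0x3, bit = 4, mux = 0x2;
        unsigned int data, rmask;

        data = mask << (bit + 16);      /* write-enable bits:  0x00300000 */
        rmask = data | (data >> 16);    /* enables + value:    0x00300030 */
        data |= (mux & mask) << bit;    /* value written:      0x00300020 */

        printf("data=0x%08x rmask=0x%08x\n", data, rmask);
        return 0;
}
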
index a066204..e641b42 100644 (file)
@@ -680,7 +680,7 @@ static struct phy *tegra_xusb_padctl_xlate(struct device *dev,
        if (args->args_count <= 0)
                return ERR_PTR(-EINVAL);
 
-       if (index > ARRAY_SIZE(padctl->phys))
+       if (index >= ARRAY_SIZE(padctl->phys))
                return ERR_PTR(-EINVAL);
 
        return padctl->phys[index];
@@ -930,7 +930,8 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
 
        padctl->provider = devm_of_phy_provider_register(&pdev->dev,
                                                         tegra_xusb_padctl_xlate);
-       if (err < 0) {
+       if (IS_ERR(padctl->provider)) {
+               err = PTR_ERR(padctl->provider);
                dev_err(&pdev->dev, "failed to register PHYs: %d\n", err);
                goto unregister;
        }
index 003bfd8..d7154ed 100644 (file)
@@ -127,14 +127,10 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
        struct irq_chip *chip = irq_data_get_irq_chip(irqd);
        struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
        struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
-       struct samsung_pin_bank_type *bank_type = bank->type;
        struct samsung_pinctrl_drv_data *d = bank->drvdata;
-       unsigned int pin = irqd->hwirq;
-       unsigned int shift = EXYNOS_EINT_CON_LEN * pin;
+       unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
        unsigned int con, trig_type;
        unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
-       unsigned long flags;
-       unsigned int mask;
 
        switch (type) {
        case IRQ_TYPE_EDGE_RISING:
@@ -167,8 +163,32 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
        con |= trig_type << shift;
        writel(con, d->virt_base + reg_con);
 
+       return 0;
+}
+
+static int exynos_irq_request_resources(struct irq_data *irqd)
+{
+       struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+       struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
+       struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+       struct samsung_pin_bank_type *bank_type = bank->type;
+       struct samsung_pinctrl_drv_data *d = bank->drvdata;
+       unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
+       unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
+       unsigned long flags;
+       unsigned int mask;
+       unsigned int con;
+       int ret;
+
+       ret = gpio_lock_as_irq(&bank->gpio_chip, irqd->hwirq);
+       if (ret) {
+               dev_err(bank->gpio_chip.dev, "unable to lock pin %s-%lu IRQ\n",
+                       bank->name, irqd->hwirq);
+               return ret;
+       }
+
        reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC];
-       shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC];
+       shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
        mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
 
        spin_lock_irqsave(&bank->slock, flags);
@@ -180,9 +200,42 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
 
        spin_unlock_irqrestore(&bank->slock, flags);
 
+       exynos_irq_unmask(irqd);
+
        return 0;
 }
 
+static void exynos_irq_release_resources(struct irq_data *irqd)
+{
+       struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+       struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
+       struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+       struct samsung_pin_bank_type *bank_type = bank->type;
+       struct samsung_pinctrl_drv_data *d = bank->drvdata;
+       unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
+       unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
+       unsigned long flags;
+       unsigned int mask;
+       unsigned int con;
+
+       reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC];
+       shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
+       mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
+
+       exynos_irq_mask(irqd);
+
+       spin_lock_irqsave(&bank->slock, flags);
+
+       con = readl(d->virt_base + reg_con);
+       con &= ~(mask << shift);
+       con |= FUNC_INPUT << shift;
+       writel(con, d->virt_base + reg_con);
+
+       spin_unlock_irqrestore(&bank->slock, flags);
+
+       gpio_unlock_as_irq(&bank->gpio_chip, irqd->hwirq);
+}
+
 /*
  * irq_chip for gpio interrupts.
  */
@@ -193,6 +246,8 @@ static struct exynos_irq_chip exynos_gpio_irq_chip = {
                .irq_mask = exynos_irq_mask,
                .irq_ack = exynos_irq_ack,
                .irq_set_type = exynos_irq_set_type,
+               .irq_request_resources = exynos_irq_request_resources,
+               .irq_release_resources = exynos_irq_release_resources,
        },
        .eint_con = EXYNOS_GPIO_ECON_OFFSET,
        .eint_mask = EXYNOS_GPIO_EMASK_OFFSET,
@@ -336,6 +391,8 @@ static struct exynos_irq_chip exynos_wkup_irq_chip = {
                .irq_ack = exynos_irq_ack,
                .irq_set_type = exynos_irq_set_type,
                .irq_set_wake = exynos_wkup_irq_set_wake,
+               .irq_request_resources = exynos_irq_request_resources,
+               .irq_release_resources = exynos_irq_release_resources,
        },
        .eint_con = EXYNOS_WKUP_ECON_OFFSET,
        .eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
index 2b88232..5cedc9d 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/gpio.h>
 
 /* pinmux function number for pin as gpio output line */
+#define FUNC_INPUT     0x0
 #define FUNC_OUTPUT    0x1
 
 /**
index 576d41b..c6e5deb 100644 (file)
@@ -4509,24 +4509,24 @@ static const char * const audio_clk_groups[] = {
 };
 
 static const char * const can0_groups[] = {
-       "can0_data_a",
+       "can0_data",
        "can0_data_b",
        "can0_data_c",
        "can0_data_d",
        "can0_data_e",
        "can0_data_f",
-       "can_clk_a",
+       "can_clk",
        "can_clk_b",
        "can_clk_c",
        "can_clk_d",
 };
 
 static const char * const can1_groups[] = {
-       "can1_data_a",
+       "can1_data",
        "can1_data_b",
        "can1_data_c",
        "can1_data_d",
-       "can_clk_a",
+       "can_clk",
        "can_clk_b",
        "can_clk_c",
        "can_clk_d",
index e4da61b..b062d3d 100644 (file)
@@ -1258,7 +1258,7 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
        int mode = -1;
        int time = -1;
 
-       if (sscanf(buf, "%i", &mode) != 1  || (mode != 2 || mode != 1))
+       if (sscanf(buf, "%i", &mode) != 1 || (mode != 1 && mode != 2))
                return -EINVAL;
 
        /* Set the Keyboard Backlight Mode where:
index 4b66bf0..d2c3592 100644 (file)
@@ -606,6 +606,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
        unsigned int best = 0;
        struct pwm_lookup *p;
        unsigned int match;
+       unsigned int period;
+       enum pwm_polarity polarity;
 
        /* look up via DT first */
        if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
@@ -653,6 +655,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
                if (match > best) {
                        chip = pwmchip_find_by_name(p->provider);
                        index = p->index;
+                       period = p->period;
+                       polarity = p->polarity;
 
                        if (match != 3)
                                best = match;
@@ -668,8 +672,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
        if (IS_ERR(pwm))
                return pwm;
 
-       pwm_set_period(pwm, p->period);
-       pwm_set_polarity(pwm, p->polarity);
+       pwm_set_period(pwm, period);
+       pwm_set_polarity(pwm, polarity);
 
 
        return pwm;
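
The fix above is needed because once the loop over the lookup table finishes, p points at whichever entry was examined last, not at the best match, so period and polarity have to be copied while the cursor is still on the matching entry. The same capture-at-match pattern in miniature; the types and names below are illustrative, not the PWM API.

#include <stddef.h>

struct lookup {
        int score;
        unsigned int period;
};

/* return the period of the highest-scoring entry, copied at match time */
static unsigned int best_period(const struct lookup *tbl, size_t n)
{
        unsigned int period = 0;
        int best = -1;
        size_t i;

        for (i = 0; i < n; i++) {
                if (tbl[i].score > best) {
                        best = tbl[i].score;
                        period = tbl[i].period; /* capture now, tbl[i] is the match */
                }
        }
        return period;
}

int main(void)
{
        const struct lookup tbl[] = { { 1, 100 }, { 3, 250 }, { 2, 400 } };

        return best_period(tbl, 3) == 250 ? 0 : 1;
}
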
index a6d47e5..c43aca6 100644 (file)
@@ -1035,12 +1035,26 @@ static int tty3215_write(struct tty_struct * tty,
                         const unsigned char *buf, int count)
 {
        struct raw3215_info *raw;
+       int i, written;
 
        if (!tty)
                return 0;
        raw = (struct raw3215_info *) tty->driver_data;
-       raw3215_write(raw, buf, count);
-       return count;
+       written = count;
+       while (count > 0) {
+               for (i = 0; i < count; i++)
+                       if (buf[i] == '\t' || buf[i] == '\n')
+                               break;
+               raw3215_write(raw, buf, i);
+               count -= i;
+               buf += i;
+               if (count > 0) {
+                       raw3215_putchar(raw, *buf);
+                       count--;
+                       buf++;
+               }
+       }
+       return written;
 }
 
 /*
@@ -1188,7 +1202,7 @@ static int __init tty3215_init(void)
        driver->subtype = SYSTEM_TYPE_TTY;
        driver->init_termios = tty_std_termios;
        driver->init_termios.c_iflag = IGNBRK | IGNPAR;
-       driver->init_termios.c_oflag = ONLCR | XTABS;
+       driver->init_termios.c_oflag = ONLCR;
        driver->init_termios.c_lflag = ISIG;
        driver->flags = TTY_DRIVER_REAL_RAW;
        tty_set_operations(driver, &tty3215_ops);
index 7ed7a59..0036632 100644 (file)
@@ -559,7 +559,7 @@ sclp_tty_init(void)
        driver->subtype = SYSTEM_TYPE_TTY;
        driver->init_termios = tty_std_termios;
        driver->init_termios.c_iflag = IGNBRK | IGNPAR;
-       driver->init_termios.c_oflag = ONLCR | XTABS;
+       driver->init_termios.c_oflag = ONLCR;
        driver->init_termios.c_lflag = ISIG | ECHO;
        driver->flags = TTY_DRIVER_REAL_RAW;
        tty_set_operations(driver, &sclp_ops);
index df33060..d81f3cc 100644 (file)
@@ -377,6 +377,10 @@ scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
                pool->slab_flags |= SLAB_CACHE_DMA;
                pool->gfp_mask = __GFP_DMA;
        }
+
+       if (hostt->cmd_size)
+               hostt->cmd_pool = pool;
+
        return pool;
 }
 
@@ -421,8 +425,10 @@ out:
 out_free_slab:
        kmem_cache_destroy(pool->cmd_slab);
 out_free_pool:
-       if (hostt->cmd_size)
+       if (hostt->cmd_size) {
                scsi_free_host_cmd_pool(pool);
+               hostt->cmd_pool = NULL;
+       }
        goto out;
 }
 
@@ -444,8 +450,10 @@ static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
        if (!--pool->users) {
                kmem_cache_destroy(pool->cmd_slab);
                kmem_cache_destroy(pool->sense_slab);
-               if (hostt->cmd_size)
+               if (hostt->cmd_size) {
                        scsi_free_host_cmd_pool(pool);
+                       hostt->cmd_pool = NULL;
+               }
        }
        mutex_unlock(&host_cmd_pool_mutex);
 }
index 9c44392..d837dc1 100644 (file)
@@ -1774,7 +1774,7 @@ static void scsi_request_fn(struct request_queue *q)
        blk_requeue_request(q, req);
        atomic_dec(&sdev->device_busy);
 out_delay:
-       if (atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
+       if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
                blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
 
@@ -1808,7 +1808,6 @@ static int scsi_mq_prep_fn(struct request *req)
 
        cmd->tag = req->tag;
 
-       req->cmd = req->__cmd;
        cmd->cmnd = req->cmd;
        cmd->prot_op = SCSI_PROT_NORMAL;
 
index 788ed9b..114203f 100644 (file)
@@ -1,8 +1,7 @@
 #
 # Makefile for the SuperH specific drivers.
 #
-obj-$(CONFIG_SUPERH)                   += intc/
-obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)     += intc/
+obj-$(CONFIG_SH_INTC)                  += intc/
 ifneq ($(CONFIG_COMMON_CLK),y)
 obj-$(CONFIG_HAVE_CLK)                 += clk/
 endif
index 60228fa..6a1b05d 100644 (file)
@@ -1,7 +1,9 @@
 config SH_INTC
-       def_bool y
+       bool
        select IRQ_DOMAIN
 
+if SH_INTC
+
 comment "Interrupt controller options"
 
 config INTC_USERIMASK
@@ -37,3 +39,5 @@ config INTC_MAPPING_DEBUG
          between system IRQs and the per-controller id tables.
 
          If in doubt, say N.
+
+endif
index 9b47e66..0bf0d24 100644 (file)
@@ -790,7 +790,7 @@ static int __init create_log(char *log_name, int size)
        if (unlikely(ret)) {
                pr_err("failed to register misc device for log '%s'!\n",
                                log->misc.name);
-               goto out_free_log;
+               goto out_free_misc_name;
        }
 
        pr_info("created %luK log '%s'\n",
@@ -798,6 +798,9 @@ static int __init create_log(char *log_name, int size)
 
        return 0;
 
+out_free_misc_name:
+       kfree(log->misc.name);
+
 out_free_log:
        kfree(log);
 
index 8bf1eb4..831b7c6 100644 (file)
@@ -1421,22 +1421,16 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
  * @reg: the register to read
  * @value: 16-bit value to write
  */
-static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
+static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
+                           u16 value)
 {
        struct mac_regs __iomem *mac = &adapter->regs->mac;
-       struct phy_device *phydev = adapter->phydev;
        int status = 0;
-       u8 addr;
        u32 delay = 0;
        u32 mii_addr;
        u32 mii_cmd;
        u32 mii_indicator;
 
-       if (!phydev)
-               return -EIO;
-
-       addr = phydev->addr;
-
        /* Save a local copy of the registers we are dealing with so we can
         * set them back
         */
@@ -1631,17 +1625,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
        struct net_device *netdev = bus->priv;
        struct et131x_adapter *adapter = netdev_priv(netdev);
 
-       return et131x_mii_write(adapter, reg, value);
-}
-
-static int et131x_mdio_reset(struct mii_bus *bus)
-{
-       struct net_device *netdev = bus->priv;
-       struct et131x_adapter *adapter = netdev_priv(netdev);
-
-       et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
-
-       return 0;
+       return et131x_mii_write(adapter, phy_addr, reg, value);
 }
 
 /*     et1310_phy_power_switch -       PHY power control
@@ -1656,18 +1640,20 @@ static int et131x_mdio_reset(struct mii_bus *bus)
 static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
 {
        u16 data;
+       struct phy_device *phydev = adapter->phydev;
 
        et131x_mii_read(adapter, MII_BMCR, &data);
        data &= ~BMCR_PDOWN;
        if (down)
                data |= BMCR_PDOWN;
-       et131x_mii_write(adapter, MII_BMCR, data);
+       et131x_mii_write(adapter, phydev->addr, MII_BMCR, data);
 }
 
 /* et131x_xcvr_init - Init the phy if we are setting it into force mode */
 static void et131x_xcvr_init(struct et131x_adapter *adapter)
 {
        u16 lcr2;
+       struct phy_device *phydev = adapter->phydev;
 
        /* Set the LED behavior such that LED 1 indicates speed (off =
         * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
@@ -1688,7 +1674,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter)
                else
                        lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
 
-               et131x_mii_write(adapter, PHY_LED_2, lcr2);
+               et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2);
        }
 }
 
@@ -3643,14 +3629,14 @@ static void et131x_adjust_link(struct net_device *netdev)
 
                        et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
                                         &register18);
-                       et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
-                                        register18 | 0x4);
-                       et131x_mii_write(adapter, PHY_INDEX_REG,
+                       et131x_mii_write(adapter, phydev->addr,
+                                        PHY_MPHY_CONTROL_REG, register18 | 0x4);
+                       et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG,
                                         register18 | 0x8402);
-                       et131x_mii_write(adapter, PHY_DATA_REG,
+                       et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG,
                                         register18 | 511);
-                       et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
-                                        register18);
+                       et131x_mii_write(adapter, phydev->addr,
+                                        PHY_MPHY_CONTROL_REG, register18);
                }
 
                et1310_config_flow_control(adapter);
@@ -3662,7 +3648,8 @@ static void et131x_adjust_link(struct net_device *netdev)
                        et131x_mii_read(adapter, PHY_CONFIG, &reg);
                        reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
                        reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
-                       et131x_mii_write(adapter, PHY_CONFIG, reg);
+                       et131x_mii_write(adapter, phydev->addr, PHY_CONFIG,
+                                        reg);
                }
 
                et131x_set_rx_dma_timer(adapter);
@@ -3675,14 +3662,14 @@ static void et131x_adjust_link(struct net_device *netdev)
 
                        et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
                                         &register18);
-                       et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
-                                        register18 | 0x4);
-                       et131x_mii_write(adapter, PHY_INDEX_REG,
-                                        register18 | 0x8402);
-                       et131x_mii_write(adapter, PHY_DATA_REG,
-                                        register18 | 511);
-                       et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
-                                        register18);
+                       et131x_mii_write(adapter, phydev->addr,
+                                       PHY_MPHY_CONTROL_REG, register18 | 0x4);
+                       et131x_mii_write(adapter, phydev->addr,
+                                       PHY_INDEX_REG, register18 | 0x8402);
+                       et131x_mii_write(adapter, phydev->addr,
+                                       PHY_DATA_REG, register18 | 511);
+                       et131x_mii_write(adapter, phydev->addr,
+                                       PHY_MPHY_CONTROL_REG, register18);
                }
 
                /* Free the packets being actively sent & stopped */
@@ -4644,10 +4631,6 @@ static int et131x_pci_setup(struct pci_dev *pdev,
        /* Copy address into the net_device struct */
        memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
 
-       /* Init variable for counting how long we do not have link status */
-       adapter->boot_coma = 0;
-       et1310_disable_phy_coma(adapter);
-
        rc = -ENOMEM;
 
        /* Setup the mii_bus struct */
@@ -4663,7 +4646,6 @@ static int et131x_pci_setup(struct pci_dev *pdev,
        adapter->mii_bus->priv = netdev;
        adapter->mii_bus->read = et131x_mdio_read;
        adapter->mii_bus->write = et131x_mdio_write;
-       adapter->mii_bus->reset = et131x_mdio_reset;
        adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
                                              GFP_KERNEL);
        if (!adapter->mii_bus->irq)
@@ -4687,6 +4669,10 @@ static int et131x_pci_setup(struct pci_dev *pdev,
        /* Setup et1310 as per the documentation */
        et131x_adapter_setup(adapter);
 
+       /* Init variable for counting how long we do not have link status */
+       adapter->boot_coma = 0;
+       et1310_disable_phy_coma(adapter);
+
        /* We can enable interrupts now
         *
         *  NOTE - Because registration of interrupt handler is done in the
index 6562957..03ab9e0 100644 (file)
@@ -365,6 +365,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                return -ENOMEM;
 
        strncpy(sched->ws_name, name, CFS_WS_NAME_LEN);
+       sched->ws_name[CFS_WS_NAME_LEN - 1] = '\0';
        sched->ws_cptab = cptab;
        sched->ws_cpt = cpt;
 
index 8b19f3c..701c6a7 100644 (file)
@@ -35,7 +35,7 @@
  */
 
 #define DEBUG_SUBSYSTEM S_CLASS
-# include <asm/atomic.h>
+# include <linux/atomic.h>
 
 #include "../include/obd_support.h"
 #include "../include/obd_class.h"
index b8676ac..407a318 100644 (file)
@@ -43,9 +43,11 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
        {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
        /*=== Customer ID ===*/
        /****** 8188EUS ********/
+       {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
        {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
        {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
        {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
+       {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
        {}      /* Terminating entry */
 };
 
index 8fcf8a7..9562cd0 100644 (file)
@@ -150,7 +150,26 @@ int tb_path_activate(struct tb_path *path)
 
        /* Activate hops. */
        for (i = path->path_length - 1; i >= 0; i--) {
-               struct tb_regs_hop hop;
+               struct tb_regs_hop hop = { 0 };
+
+               /*
+                * We do not (currently) tear down paths set up by the firmware.
+                * If a firmware device is unplugged and plugged in again then
+                * it can happen that we reuse some of the hops from the (now
+                * defunct) firmware path. This causes the hotplug operation to
+                * fail (the PCI device does not show up). Clearing the hop
+                * before overwriting it fixes the problem.
+                *
+                * Should be removed once we discover and tear down firmware
+                * paths.
+                */
+               res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
+                                   2 * path->hops[i].in_hop_index, 2);
+               if (res) {
+                       __tb_path_deactivate_hops(path, i);
+                       __tb_path_deallocate_nfc(path, 0);
+                       goto err;
+               }
 
                /* dword 0 */
                hop.next_hop = path->hops[i].next_hop_index;
index d7a3d13..b85983e 100644 (file)
@@ -173,6 +173,7 @@ static int pwm_backlight_parse_dt(struct device *dev,
                data->max_brightness--;
        }
 
+       data->enable_gpio = -EINVAL;
        return 0;
 }
 
index beadd3e..a7b6217 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/list.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/clcd.h>
+#include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/hardirq.h>
 #include <linux/dma-mapping.h>
@@ -650,6 +651,7 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
 {
        struct device_node *endpoint;
        int err;
+       unsigned int bpp;
        u32 max_bandwidth;
        u32 tft_r0b0g0[3];
 
@@ -667,11 +669,22 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
 
        err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth",
                        &max_bandwidth);
-       if (!err)
-               fb->panel->bpp = 8 * max_bandwidth / (fb->panel->mode.xres *
-                               fb->panel->mode.yres * fb->panel->mode.refresh);
-       else
-               fb->panel->bpp = 32;
+       if (!err) {
+               /*
+                * max_bandwidth is in bytes per second and pixclock in
+                * pico-seconds, so the maximum allowed bits per pixel is
+                *   8 * max_bandwidth / (PICOS2KHZ(pixclock) * 1000)
+                * Rearrange this calculation to avoid overflow and then ensure
+                * result is a valid format.
+                */
+               bpp = max_bandwidth / (1000 / 8)
+                       / PICOS2KHZ(fb->panel->mode.pixclock);
+               bpp = rounddown_pow_of_two(bpp);
+               if (bpp > 32)
+                       bpp = 32;
+       } else
+               bpp = 32;
+       fb->panel->bpp = bpp;
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
        fb->panel->cntl |= CNTL_BEBO;
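
As a quick sanity check of the rearranged calculation above (illustrative
numbers only, not taken from the patch): since 1000 / 8 = 125, the code
computes bpp = max_bandwidth / 125 / PICOS2KHZ(pixclock).  For a 25.175 MHz
pixel clock (pixclock roughly 39721 ps, so PICOS2KHZ(pixclock) is about 25175)
and max-memory-bandwidth = 95000000 bytes/s, that gives 95000000 / 125 / 25175,
about 30, which rounddown_pow_of_two() reduces to a valid 16 bpp; the clamp to
32 only matters for much larger bandwidths.
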
index 92640d4..1d8bdb9 100644 (file)
@@ -1102,12 +1102,14 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
        timings = of_get_display_timings(display_np);
        if (!timings) {
                dev_err(dev, "failed to get display timings\n");
+               ret = -EINVAL;
                goto put_display_node;
        }
 
        timings_np = of_find_node_by_name(display_np, "display-timings");
        if (!timings_np) {
                dev_err(dev, "failed to find display-timings node\n");
+               ret = -ENODEV;
                goto put_display_node;
        }
 
index 206a66b..59abdc6 100644 (file)
@@ -273,7 +273,7 @@ static struct chips_init_reg chips_init_xr[] = {
        { 0xa8, 0x00 }
 };
 
-static void __init chips_hw_init(void)
+static void chips_hw_init(void)
 {
        int i;
 
index 788f6b3..10c876c 100644 (file)
@@ -419,7 +419,7 @@ static void lcd_cfg_horizontal_sync(int back_porch, int pulse_width,
 {
        u32 reg;
 
-       reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0xf;
+       reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0x3ff;
        reg |= (((back_porch-1) & 0xff) << 24)
            | (((front_porch-1) & 0xff) << 16)
            | (((pulse_width-1) & 0x3f) << 10);
index 987edf1..5c098d5 100644 (file)
@@ -236,6 +236,7 @@ timingfail:
        if (native_mode)
                of_node_put(native_mode);
        display_timings_release(disp);
+       disp = NULL;
 entryfail:
        kfree(disp);
 dispfail:
index ae63587..97bc62c 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,6 +141,7 @@ struct kioctx {
 
        struct {
                unsigned        tail;
+               unsigned        completed_events;
                spinlock_t      completion_lock;
        } ____cacheline_aligned_in_smp;
 
@@ -857,6 +858,68 @@ out:
        return ret;
 }
 
+/* refill_reqs_available
+ *     Updates the reqs_available reference counts used for tracking the
+ *     number of free slots in the completion ring.  This can be called
+ *     from aio_complete() (to optimistically update reqs_available) or
+ *     from aio_get_req() (the "we're out of events" case).  It must be
+ *     called holding ctx->completion_lock.
+ */
+static void refill_reqs_available(struct kioctx *ctx, unsigned head,
+                                  unsigned tail)
+{
+       unsigned events_in_ring, completed;
+
+       /* Clamp head since userland can write to it. */
+       head %= ctx->nr_events;
+       if (head <= tail)
+               events_in_ring = tail - head;
+       else
+               events_in_ring = ctx->nr_events - (head - tail);
+
+       completed = ctx->completed_events;
+       if (events_in_ring < completed)
+               completed -= events_in_ring;
+       else
+               completed = 0;
+
+       if (!completed)
+               return;
+
+       ctx->completed_events -= completed;
+       put_reqs_available(ctx, completed);
+}
+
+/* user_refill_reqs_available
+ *     Called to refill reqs_available when aio_get_req() encounters an
+ *     out-of-space condition in the completion ring.
+ */
+static void user_refill_reqs_available(struct kioctx *ctx)
+{
+       spin_lock_irq(&ctx->completion_lock);
+       if (ctx->completed_events) {
+               struct aio_ring *ring;
+               unsigned head;
+
+               /* Access of ring->head may race with aio_read_events_ring()
+                * here, but that's okay: whether we read the old version or
+                * the new version, either will be valid.  The important
+                * part is that head cannot pass tail since we prevent
+                * aio_complete() from updating tail by holding
+                * ctx->completion_lock.  Even if head is invalid, the check
+                * against ctx->completed_events below will make sure we do the
+                * safe/right thing.
+                */
+               ring = kmap_atomic(ctx->ring_pages[0]);
+               head = ring->head;
+               kunmap_atomic(ring);
+
+               refill_reqs_available(ctx, head, ctx->tail);
+       }
+
+       spin_unlock_irq(&ctx->completion_lock);
+}
+
 /* aio_get_req
  *     Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
@@ -865,8 +928,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
        struct kiocb *req;
 
-       if (!get_reqs_available(ctx))
-               return NULL;
+       if (!get_reqs_available(ctx)) {
+               user_refill_reqs_available(ctx);
+               if (!get_reqs_available(ctx))
+                       return NULL;
+       }
 
        req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
        if (unlikely(!req))
@@ -925,8 +991,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
        struct kioctx   *ctx = iocb->ki_ctx;
        struct aio_ring *ring;
        struct io_event *ev_page, *event;
+       unsigned tail, pos, head;
        unsigned long   flags;
-       unsigned tail, pos;
 
        /*
         * Special case handling for sync iocbs:
@@ -987,10 +1053,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
        ctx->tail = tail;
 
        ring = kmap_atomic(ctx->ring_pages[0]);
+       head = ring->head;
        ring->tail = tail;
        kunmap_atomic(ring);
        flush_dcache_page(ctx->ring_pages[0]);
 
+       ctx->completed_events++;
+       if (ctx->completed_events > 1)
+               refill_reqs_available(ctx, head, tail);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        pr_debug("added to ring %p at [%u]\n", iocb, tail);
@@ -1005,7 +1075,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
        /* everything turned out well, dispose of the aiocb. */
        kiocb_free(iocb);
-       put_reqs_available(ctx, 1);
 
        /*
         * We have to order our ring_info tail store above and test
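
The head/tail arithmetic in refill_reqs_available() above is plain ring-buffer
occupancy with wraparound.  A minimal standalone sketch of the same math, with
hypothetical names rather than the kernel code:

	/*
	 * How many completion events are still sitting in a ring of
	 * nr_events slots, given head (where userspace consumes) and
	 * tail (where the kernel produces)?  head is clamped first
	 * because userspace can scribble on it.
	 */
	static unsigned ring_events_in_flight(unsigned head, unsigned tail,
					      unsigned nr_events)
	{
		head %= nr_events;
		if (head <= tail)
			return tail - head;               /* no wraparound */
		return nr_events - (head - tail);         /* tail wrapped  */
	}

For example, with nr_events = 8, tail = 2 and head = 6 this yields
8 - (6 - 2) = 4 events still in the ring, so at most
completed_events - 4 slots may be handed back via put_reqs_available().
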
index 5a201d8..fbd76de 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/freezer.h>
-#include <linux/workqueue.h>
 #include "async-thread.h"
 #include "ctree.h"
 
@@ -55,8 +54,39 @@ struct btrfs_workqueue {
        struct __btrfs_workqueue *high;
 };
 
-static inline struct __btrfs_workqueue
-*__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
+static void normal_work_helper(struct btrfs_work *work);
+
+#define BTRFS_WORK_HELPER(name)                                        \
+void btrfs_##name(struct work_struct *arg)                             \
+{                                                                      \
+       struct btrfs_work *work = container_of(arg, struct btrfs_work,  \
+                                              normal_work);            \
+       normal_work_helper(work);                                       \
+}
+
+BTRFS_WORK_HELPER(worker_helper);
+BTRFS_WORK_HELPER(delalloc_helper);
+BTRFS_WORK_HELPER(flush_delalloc_helper);
+BTRFS_WORK_HELPER(cache_helper);
+BTRFS_WORK_HELPER(submit_helper);
+BTRFS_WORK_HELPER(fixup_helper);
+BTRFS_WORK_HELPER(endio_helper);
+BTRFS_WORK_HELPER(endio_meta_helper);
+BTRFS_WORK_HELPER(endio_meta_write_helper);
+BTRFS_WORK_HELPER(endio_raid56_helper);
+BTRFS_WORK_HELPER(rmw_helper);
+BTRFS_WORK_HELPER(endio_write_helper);
+BTRFS_WORK_HELPER(freespace_write_helper);
+BTRFS_WORK_HELPER(delayed_meta_helper);
+BTRFS_WORK_HELPER(readahead_helper);
+BTRFS_WORK_HELPER(qgroup_rescan_helper);
+BTRFS_WORK_HELPER(extent_refs_helper);
+BTRFS_WORK_HELPER(scrub_helper);
+BTRFS_WORK_HELPER(scrubwrc_helper);
+BTRFS_WORK_HELPER(scrubnc_helper);
+
+static struct __btrfs_workqueue *
+__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
                         int thresh)
 {
        struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -232,13 +262,11 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
        spin_unlock_irqrestore(lock, flags);
 }
 
-static void normal_work_helper(struct work_struct *arg)
+static void normal_work_helper(struct btrfs_work *work)
 {
-       struct btrfs_work *work;
        struct __btrfs_workqueue *wq;
        int need_order = 0;
 
-       work = container_of(arg, struct btrfs_work, normal_work);
        /*
         * We should not touch things inside work in the following cases:
         * 1) after work->func() if it has no ordered_free
@@ -262,7 +290,7 @@ static void normal_work_helper(struct work_struct *arg)
                trace_btrfs_all_work_done(work);
 }
 
-void btrfs_init_work(struct btrfs_work *work,
+void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
                     btrfs_func_t func,
                     btrfs_func_t ordered_func,
                     btrfs_func_t ordered_free)
@@ -270,7 +298,7 @@ void btrfs_init_work(struct btrfs_work *work,
        work->func = func;
        work->ordered_func = ordered_func;
        work->ordered_free = ordered_free;
-       INIT_WORK(&work->normal_work, normal_work_helper);
+       INIT_WORK(&work->normal_work, uniq_func);
        INIT_LIST_HEAD(&work->ordered_list);
        work->flags = 0;
 }
index 9c6b66d..e9e31c9 100644 (file)
 
 #ifndef __BTRFS_ASYNC_THREAD_
 #define __BTRFS_ASYNC_THREAD_
+#include <linux/workqueue.h>
 
 struct btrfs_workqueue;
 /* Internal use only */
 struct __btrfs_workqueue;
 struct btrfs_work;
 typedef void (*btrfs_func_t)(struct btrfs_work *arg);
+typedef void (*btrfs_work_func_t)(struct work_struct *arg);
 
 struct btrfs_work {
        btrfs_func_t func;
@@ -38,11 +40,35 @@ struct btrfs_work {
        unsigned long flags;
 };
 
+#define BTRFS_WORK_HELPER_PROTO(name)                                  \
+void btrfs_##name(struct work_struct *arg)
+
+BTRFS_WORK_HELPER_PROTO(worker_helper);
+BTRFS_WORK_HELPER_PROTO(delalloc_helper);
+BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
+BTRFS_WORK_HELPER_PROTO(cache_helper);
+BTRFS_WORK_HELPER_PROTO(submit_helper);
+BTRFS_WORK_HELPER_PROTO(fixup_helper);
+BTRFS_WORK_HELPER_PROTO(endio_helper);
+BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
+BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
+BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
+BTRFS_WORK_HELPER_PROTO(rmw_helper);
+BTRFS_WORK_HELPER_PROTO(endio_write_helper);
+BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
+BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
+BTRFS_WORK_HELPER_PROTO(readahead_helper);
+BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
+BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
+BTRFS_WORK_HELPER_PROTO(scrub_helper);
+BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
+BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
+
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
                                              int flags,
                                              int max_active,
                                              int thresh);
-void btrfs_init_work(struct btrfs_work *work,
+void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
                     btrfs_func_t func,
                     btrfs_func_t ordered_func,
                     btrfs_func_t ordered_free);
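
The BTRFS_WORK_HELPER()/BTRFS_WORK_HELPER_PROTO() pair above gives every work
type its own top-level work_struct entry point instead of one shared
normal_work_helper(), and btrfs_init_work() now takes that per-type function.
A minimal standalone sketch of the same pattern, using hypothetical my_work
names rather than the btrfs code:

	#include <linux/workqueue.h>

	struct my_work {
		struct work_struct normal_work;
		void (*func)(struct my_work *work);
	};

	/* Shared body: recover the wrapper object and run its callback. */
	static void my_work_helper(struct my_work *work)
	{
		work->func(work);
	}

	/* One uniquely named entry point per work type; prototypes would
	 * live in a header, mirroring BTRFS_WORK_HELPER_PROTO().
	 */
	#define MY_WORK_HELPER(name)					\
	void name(struct work_struct *arg)				\
	{								\
		struct my_work *work = container_of(arg,		\
				struct my_work, normal_work);		\
		my_work_helper(work);					\
	}

	MY_WORK_HELPER(my_read_helper);
	MY_WORK_HELPER(my_write_helper);

Initialising a work item with INIT_WORK(&w->normal_work, my_read_helper)
versus my_write_helper then queues each type under its own function address,
which is what the new uniq_func argument to btrfs_init_work() provides for
the btrfs workqueues.
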
index da775bf..a2e90f8 100644 (file)
@@ -1395,8 +1395,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                return -ENOMEM;
 
        async_work->delayed_root = delayed_root;
-       btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root,
-                       NULL, NULL);
+       btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
+                       btrfs_async_run_delayed_root, NULL, NULL);
        async_work->nr = nr;
 
        btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
index d0ed9e6..a1d36e6 100644 (file)
@@ -39,7 +39,6 @@
 #include "btrfs_inode.h"
 #include "volumes.h"
 #include "print-tree.h"
-#include "async-thread.h"
 #include "locking.h"
 #include "tree-log.h"
 #include "free-space-cache.h"
@@ -693,35 +692,41 @@ static void end_workqueue_bio(struct bio *bio, int err)
 {
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;
+       struct btrfs_workqueue *wq;
+       btrfs_work_func_t func;
 
        fs_info = end_io_wq->info;
        end_io_wq->error = err;
-       btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
 
        if (bio->bi_rw & REQ_WRITE) {
-               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
-                       btrfs_queue_work(fs_info->endio_meta_write_workers,
-                                        &end_io_wq->work);
-               else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
-                       btrfs_queue_work(fs_info->endio_freespace_worker,
-                                        &end_io_wq->work);
-               else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-                       btrfs_queue_work(fs_info->endio_raid56_workers,
-                                        &end_io_wq->work);
-               else
-                       btrfs_queue_work(fs_info->endio_write_workers,
-                                        &end_io_wq->work);
+               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
+                       wq = fs_info->endio_meta_write_workers;
+                       func = btrfs_endio_meta_write_helper;
+               } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
+                       wq = fs_info->endio_freespace_worker;
+                       func = btrfs_freespace_write_helper;
+               } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+                       wq = fs_info->endio_raid56_workers;
+                       func = btrfs_endio_raid56_helper;
+               } else {
+                       wq = fs_info->endio_write_workers;
+                       func = btrfs_endio_write_helper;
+               }
        } else {
-               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-                       btrfs_queue_work(fs_info->endio_raid56_workers,
-                                        &end_io_wq->work);
-               else if (end_io_wq->metadata)
-                       btrfs_queue_work(fs_info->endio_meta_workers,
-                                        &end_io_wq->work);
-               else
-                       btrfs_queue_work(fs_info->endio_workers,
-                                        &end_io_wq->work);
+               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+                       wq = fs_info->endio_raid56_workers;
+                       func = btrfs_endio_raid56_helper;
+               } else if (end_io_wq->metadata) {
+                       wq = fs_info->endio_meta_workers;
+                       func = btrfs_endio_meta_helper;
+               } else {
+                       wq = fs_info->endio_workers;
+                       func = btrfs_endio_helper;
+               }
        }
+
+       btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
+       btrfs_queue_work(wq, &end_io_wq->work);
 }
 
 /*
@@ -828,7 +833,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;
 
-       btrfs_init_work(&async->work, run_one_async_start,
+       btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
                        run_one_async_done, run_one_async_free);
 
        async->bio_flags = bio_flags;
@@ -3450,7 +3455,8 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
                btrfs_set_stack_device_generation(dev_item, 0);
                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
-               btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
+               btrfs_set_stack_device_total_bytes(dev_item,
+                                                  dev->disk_total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
index 102ed31..3efe1c3 100644 (file)
@@ -552,7 +552,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
-       btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
+       btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
+                       caching_thread, NULL, NULL);
 
        spin_lock(&cache->lock);
        /*
@@ -2749,8 +2750,8 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
                async->sync = 0;
        init_completion(&async->wait);
 
-       btrfs_init_work(&async->work, delayed_ref_async_start,
-                       NULL, NULL);
+       btrfs_init_work(&async->work, btrfs_extent_refs_helper,
+                       delayed_ref_async_start, NULL, NULL);
 
        btrfs_queue_work(root->fs_info->extent_workers, &async->work);
 
@@ -3586,13 +3587,7 @@ static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
  */
 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
-       /*
-        * we add in the count of missing devices because we want
-        * to make sure that any RAID levels on a degraded FS
-        * continue to be honored.
-        */
-       u64 num_devices = root->fs_info->fs_devices->rw_devices +
-               root->fs_info->fs_devices->missing_devices;
+       u64 num_devices = root->fs_info->fs_devices->rw_devices;
        u64 target;
        u64 tmp;
 
@@ -8440,13 +8435,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
        if (stripped)
                return extended_to_chunk(stripped);
 
-       /*
-        * we add in the count of missing devices because we want
-        * to make sure that any RAID levels on a degraded FS
-        * continue to be honored.
-        */
-       num_devices = root->fs_info->fs_devices->rw_devices +
-               root->fs_info->fs_devices->missing_devices;
+       num_devices = root->fs_info->fs_devices->rw_devices;
 
        stripped = BTRFS_BLOCK_GROUP_RAID0 |
                BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
index 3e11aab..af0359d 100644 (file)
@@ -2532,6 +2532,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
                                        uptodate = 0;
+                               offset += len;
                                continue;
                        }
                }
@@ -4207,8 +4208,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                return -ENOMEM;
        path->leave_spinning = 1;
 
-       start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
-       len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
+       start = round_down(start, BTRFS_I(inode)->root->sectorsize);
+       len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
 
        /*
         * lookup the last file extent.  We're not using i_size here
index d3afac2..36861b7 100644 (file)
@@ -1840,7 +1840,15 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
 {
        if (filp->private_data)
                btrfs_ioctl_trans_end(filp);
-       filemap_flush(inode->i_mapping);
+       /*
+        * ordered_data_close is set by setattr when we are about to truncate
+        * a file from a non-zero size to a zero size.  This tries to
+        * flush down new bytes that may have been written if the
+        * application were using truncate to replace a file in place.
+        */
+       if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+                              &BTRFS_I(inode)->runtime_flags))
+                       filemap_flush(inode->i_mapping);
        return 0;
 }
 
@@ -2088,10 +2096,9 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
                goto out;
        }
 
-       if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
+       if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
                u64 num_bytes;
 
-               path->slots[0]++;
                key.offset = offset;
                btrfs_set_item_key_safe(root, path, &key);
                fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -2216,7 +2223,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                goto out_only_mutex;
        }
 
-       lockstart = round_up(offset , BTRFS_I(inode)->root->sectorsize);
+       lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
        lockend = round_down(offset + len,
                             BTRFS_I(inode)->root->sectorsize) - 1;
        same_page = ((offset >> PAGE_CACHE_SHIFT) ==
@@ -2277,7 +2284,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                                                tail_start + tail_len, 0, 1);
                                if (ret)
                                        goto out_only_mutex;
-                               }
+                       }
                }
        }
 
index 03708ef..9c194bd 100644 (file)
@@ -1096,8 +1096,10 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);
 
-               btrfs_init_work(&async_cow->work, async_cow_start,
-                               async_cow_submit, async_cow_free);
+               btrfs_init_work(&async_cow->work,
+                               btrfs_delalloc_helper,
+                               async_cow_start, async_cow_submit,
+                               async_cow_free);
 
                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
@@ -1881,7 +1883,8 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
 
        SetPageChecked(page);
        page_cache_get(page);
-       btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
+       btrfs_init_work(&fixup->work, btrfs_fixup_helper,
+                       btrfs_writepage_fixup_worker, NULL, NULL);
        fixup->page = page;
        btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
        return -EBUSY;
@@ -2822,7 +2825,8 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
        struct inode *inode = page->mapping->host;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_ordered_extent *ordered_extent = NULL;
-       struct btrfs_workqueue *workers;
+       struct btrfs_workqueue *wq;
+       btrfs_work_func_t func;
 
        trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
 
@@ -2831,13 +2835,17 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
                                            end - start + 1, uptodate))
                return 0;
 
-       btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
+       if (btrfs_is_free_space_inode(inode)) {
+               wq = root->fs_info->endio_freespace_worker;
+               func = btrfs_freespace_write_helper;
+       } else {
+               wq = root->fs_info->endio_write_workers;
+               func = btrfs_endio_write_helper;
+       }
 
-       if (btrfs_is_free_space_inode(inode))
-               workers = root->fs_info->endio_freespace_worker;
-       else
-               workers = root->fs_info->endio_write_workers;
-       btrfs_queue_work(workers, &ordered_extent->work);
+       btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
+                       NULL);
+       btrfs_queue_work(wq, &ordered_extent->work);
 
        return 0;
 }
@@ -4674,6 +4682,11 @@ static void evict_inode_truncate_pages(struct inode *inode)
                clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
                remove_extent_mapping(map_tree, em);
                free_extent_map(em);
+               if (need_resched()) {
+                       write_unlock(&map_tree->lock);
+                       cond_resched();
+                       write_lock(&map_tree->lock);
+               }
        }
        write_unlock(&map_tree->lock);
 
@@ -4696,6 +4709,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
                                 &cached_state, GFP_NOFS);
                free_extent_state(state);
 
+               cond_resched();
                spin_lock(&io_tree->lock);
        }
        spin_unlock(&io_tree->lock);
@@ -5181,6 +5195,42 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
                        iput(inode);
                        inode = ERR_PTR(ret);
                }
+               /*
+                * If orphan cleanup did remove any orphans, it means the tree
+                * was modified and therefore the commit root is not the same as
+                * the current root anymore. This is a problem, because send
+                * uses the commit root and therefore can see inode items that
+                * don't exist in the current root anymore, and for example make
+                * calls to btrfs_iget, which will do tree lookups based on the
+                * current root and not on the commit root. Those lookups will
+                * fail, returning a -ESTALE error, and making send fail with
+                * that error. So make sure a send does not see any orphans we
+                * have just removed, and that it will see the same inodes
+                * regardless of whether a transaction commit happened before
+                * it started (meaning that the commit root will be the same as
+                * the current root) or not.
+                */
+               if (sub_root->node != sub_root->commit_root) {
+                       u64 sub_flags = btrfs_root_flags(&sub_root->root_item);
+
+                       if (sub_flags & BTRFS_ROOT_SUBVOL_RDONLY) {
+                               struct extent_buffer *eb;
+
+                               /*
+                                * Assert we can't have races between dentry
+                                * lookup called through the snapshot creation
+                                * ioctl and the VFS.
+                                */
+                               ASSERT(mutex_is_locked(&dir->i_mutex));
+
+                               down_write(&root->fs_info->commit_root_sem);
+                               eb = sub_root->commit_root;
+                               sub_root->commit_root =
+                                       btrfs_root_node(sub_root);
+                               up_write(&root->fs_info->commit_root_sem);
+                               free_extent_buffer(eb);
+                       }
+               }
        }
 
        return inode;
@@ -5605,6 +5655,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                return ERR_PTR(-ENOMEM);
        }
 
+       /*
+        * For O_TMPFILE, set the link count to 0, so that after this point,
+        * we fill in an inode item with the correct link count.
+        */
+       if (!name)
+               set_nlink(inode, 0);
+
        /*
         * we have to initialize this early, so we can reclaim the inode
         * number if we fail afterwards in this function.
@@ -6097,14 +6154,14 @@ out_fail:
 static int merge_extent_mapping(struct extent_map_tree *em_tree,
                                struct extent_map *existing,
                                struct extent_map *em,
-                               u64 map_start, u64 map_len)
+                               u64 map_start)
 {
        u64 start_diff;
 
        BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
        start_diff = map_start - em->start;
        em->start = map_start;
-       em->len = map_len;
+       em->len = existing->start - em->start;
        if (em->block_start < EXTENT_MAP_LAST_BYTE &&
            !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                em->block_start += start_diff;
@@ -6275,6 +6332,8 @@ next:
                        goto not_found;
                if (start + len <= found_key.offset)
                        goto not_found;
+               if (start > found_key.offset)
+                       goto next;
                em->start = start;
                em->orig_start = start;
                em->len = found_key.offset - start;
@@ -6390,8 +6449,7 @@ insert:
                                                         em->len);
                        if (existing) {
                                err = merge_extent_mapping(em_tree, existing,
-                                                          em, start,
-                                                          root->sectorsize);
+                                                          em, start);
                                free_extent_map(existing);
                                if (err) {
                                        free_extent_map(em);
@@ -7158,7 +7216,8 @@ again:
        if (!ret)
                goto out_test;
 
-       btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL);
+       btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
+                       finish_ordered_fn, NULL, NULL);
        btrfs_queue_work(root->fs_info->endio_write_workers,
                         &ordered->work);
 out_test:
@@ -7306,10 +7365,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        map_length = orig_bio->bi_iter.bi_size;
        ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
                              &map_length, NULL, 0);
-       if (ret) {
-               bio_put(orig_bio);
+       if (ret)
                return -EIO;
-       }
 
        if (map_length >= orig_bio->bi_iter.bi_size) {
                bio = orig_bio;
@@ -7326,6 +7383,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
        if (!bio)
                return -ENOMEM;
+
        bio->bi_private = dip;
        bio->bi_end_io = btrfs_end_dio_bio;
        atomic_inc(&dip->pending_bios);
@@ -7534,7 +7592,8 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
        count = iov_iter_count(iter);
        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                     &BTRFS_I(inode)->runtime_flags))
-               filemap_fdatawrite_range(inode->i_mapping, offset, count);
+               filemap_fdatawrite_range(inode->i_mapping, offset,
+                                        offset + count - 1);
 
        if (rw & WRITE) {
                /*
@@ -8495,7 +8554,9 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
        work->inode = inode;
        work->wait = wait;
        work->delay_iput = delay_iput;
-       btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
+       WARN_ON_ONCE(!inode);
+       btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
+                       btrfs_run_delalloc_work, NULL, NULL);
 
        return work;
 }
@@ -8979,6 +9040,14 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
        if (ret)
                goto out;
 
+       /*
+        * We set the number of links to 0 in btrfs_new_inode(), and here we set
+        * it to 1 because d_tmpfile() will issue a warning if the count is 0,
+        * through:
+        *
+        *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
+        */
+       set_nlink(inode, 1);
        d_tmpfile(dentry, inode);
        mark_inode_dirty(inode);
 
index 47aceb4..fce6fd0 100644 (file)
@@ -711,39 +711,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        if (ret)
                goto fail;
 
-       ret = btrfs_orphan_cleanup(pending_snapshot->snap);
-       if (ret)
-               goto fail;
-
-       /*
-        * If orphan cleanup did remove any orphans, it means the tree was
-        * modified and therefore the commit root is not the same as the
-        * current root anymore. This is a problem, because send uses the
-        * commit root and therefore can see inode items that don't exist
-        * in the current root anymore, and for example make calls to
-        * btrfs_iget, which will do tree lookups based on the current root
-        * and not on the commit root. Those lookups will fail, returning a
-        * -ESTALE error, and making send fail with that error. So make sure
-        * a send does not see any orphans we have just removed, and that it
-        * will see the same inodes regardless of whether a transaction
-        * commit happened before it started (meaning that the commit root
-        * will be the same as the current root) or not.
-        */
-       if (readonly && pending_snapshot->snap->node !=
-           pending_snapshot->snap->commit_root) {
-               trans = btrfs_join_transaction(pending_snapshot->snap);
-               if (IS_ERR(trans) && PTR_ERR(trans) != -ENOENT) {
-                       ret = PTR_ERR(trans);
-                       goto fail;
-               }
-               if (!IS_ERR(trans)) {
-                       ret = btrfs_commit_transaction(trans,
-                                                      pending_snapshot->snap);
-                       if (ret)
-                               goto fail;
-               }
-       }
-
        inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
@@ -3527,7 +3494,8 @@ process_slot:
                        btrfs_mark_buffer_dirty(leaf);
                        btrfs_release_path(path);
 
-                       last_dest_end = new_key.offset + datal;
+                       last_dest_end = ALIGN(new_key.offset + datal,
+                                             root->sectorsize);
                        ret = clone_finish_inode_update(trans, inode,
                                                        last_dest_end,
                                                        destoff, olen);
index 963895c..ac734ec 100644 (file)
@@ -615,6 +615,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
                spin_unlock(&root->ordered_extent_lock);
 
                btrfs_init_work(&ordered->flush_work,
+                               btrfs_flush_delalloc_helper,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(root->fs_info->flush_workers,
index b497498..ded5c60 100644 (file)
@@ -1973,7 +1973,7 @@ static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
                                   elem.seq, &roots);
        btrfs_put_tree_mod_seq(fs_info, &elem);
        if (ret < 0)
-               return ret;
+               goto out;
 
        if (roots->nnodes != 1)
                goto out;
@@ -2720,6 +2720,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
        memset(&fs_info->qgroup_rescan_work, 0,
               sizeof(fs_info->qgroup_rescan_work));
        btrfs_init_work(&fs_info->qgroup_rescan_work,
+                       btrfs_qgroup_rescan_helper,
                        btrfs_qgroup_rescan_worker, NULL, NULL);
 
        if (ret) {
index 4a88f07..0a6b6e4 100644 (file)
@@ -1416,7 +1416,8 @@ cleanup:
 
 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
 {
-       btrfs_init_work(&rbio->work, rmw_work, NULL, NULL);
+       btrfs_init_work(&rbio->work, btrfs_rmw_helper,
+                       rmw_work, NULL, NULL);
 
        btrfs_queue_work(rbio->fs_info->rmw_workers,
                         &rbio->work);
@@ -1424,7 +1425,8 @@ static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
 
 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
 {
-       btrfs_init_work(&rbio->work, read_rebuild_work, NULL, NULL);
+       btrfs_init_work(&rbio->work, btrfs_rmw_helper,
+                       read_rebuild_work, NULL, NULL);
 
        btrfs_queue_work(rbio->fs_info->rmw_workers,
                         &rbio->work);
@@ -1665,7 +1667,8 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
        plug = container_of(cb, struct btrfs_plug_cb, cb);
 
        if (from_schedule) {
-               btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
+               btrfs_init_work(&plug->work, btrfs_rmw_helper,
+                               unplug_work, NULL, NULL);
                btrfs_queue_work(plug->info->rmw_workers,
                                 &plug->work);
                return;
index 09230cf..20408c6 100644 (file)
@@ -798,7 +798,8 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
                /* FIXME we cannot handle this properly right now */
                BUG();
        }
-       btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
+       btrfs_init_work(&rmw->work, btrfs_readahead_helper,
+                       reada_start_machine_worker, NULL, NULL);
        rmw->fs_info = fs_info;
 
        btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
index b6d198f..f4a41f3 100644 (file)
@@ -428,8 +428,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
                sbio->index = i;
                sbio->sctx = sctx;
                sbio->page_count = 0;
-               btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
-                               NULL, NULL);
+               btrfs_init_work(&sbio->work, btrfs_scrub_helper,
+                               scrub_bio_end_io_worker, NULL, NULL);
 
                if (i != SCRUB_BIOS_PER_SCTX - 1)
                        sctx->bios[i]->next_free = i + 1;
@@ -999,8 +999,8 @@ nodatasum_case:
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                scrub_pending_trans_workers_inc(sctx);
-               btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
-                               NULL, NULL);
+               btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
+                               scrub_fixup_nodatasum, NULL, NULL);
                btrfs_queue_work(fs_info->scrub_workers,
                                 &fixup_nodatasum->work);
                goto out;
@@ -1616,7 +1616,8 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err)
        sbio->err = err;
        sbio->bio = bio;
 
-       btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
+       btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
+                        scrub_wr_bio_end_io_worker, NULL, NULL);
        btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
 }
 
@@ -2904,6 +2905,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        struct scrub_ctx *sctx;
        int ret;
        struct btrfs_device *dev;
+       struct rcu_string *name;
 
        if (btrfs_fs_closing(fs_info))
                return -EINVAL;
@@ -2965,6 +2967,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                return -ENODEV;
        }
 
+       if (!is_dev_replace && !readonly && !dev->writeable) {
+               mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+               rcu_read_lock();
+               name = rcu_dereference(dev->name);
+               btrfs_err(fs_info, "scrub: device %s is not writable",
+                         name->str);
+               rcu_read_unlock();
+               return -EROFS;
+       }
+
        mutex_lock(&fs_info->scrub_lock);
        if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
                mutex_unlock(&fs_info->scrub_lock);
@@ -3203,7 +3215,8 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
        nocow_ctx->len = len;
        nocow_ctx->mirror_num = mirror_num;
        nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
-       btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
+       btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
+                       copy_nocow_pages_worker, NULL, NULL);
        INIT_LIST_HEAD(&nocow_ctx->inodes);
        btrfs_queue_work(fs_info->scrub_nocow_workers,
                         &nocow_ctx->work);
index 7869936..12e5355 100644 (file)
@@ -614,7 +614,7 @@ int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
        if (!fs_info->device_dir_kobj)
                return -EINVAL;
 
-       if (one_device) {
+       if (one_device && one_device->bdev) {
                disk = one_device->bdev->bd_part;
                disk_kobj = &part_to_dev(disk)->kobj;
 
index 9e1f2cd..7e0e6e3 100644 (file)
@@ -3298,7 +3298,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
        struct list_head ordered_sums;
        int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
        bool has_extents = false;
-       bool need_find_last_extent = (*last_extent == 0);
+       bool need_find_last_extent = true;
        bool done = false;
 
        INIT_LIST_HEAD(&ordered_sums);
@@ -3352,8 +3352,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                 */
                if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
                        has_extents = true;
-                       if (need_find_last_extent &&
-                           first_key.objectid == (u64)-1)
+                       if (first_key.objectid == (u64)-1)
                                first_key = ins_keys[i];
                } else {
                        need_find_last_extent = false;
@@ -3427,6 +3426,16 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
        if (!has_extents)
                return ret;
 
+       if (need_find_last_extent && *last_extent == first_key.offset) {
+               /*
+                * We don't have any leaves between our current one and the one
+                * we processed before that can have file extent items for our
+                * inode (and have a generation number smaller than our current
+                * transaction id).
+                */
+               need_find_last_extent = false;
+       }
+
        /*
         * Because we use btrfs_search_forward we could skip leaves that were
         * not modified and then assume *last_extent is valid when it really
@@ -3537,7 +3546,7 @@ fill_holes:
                                               0, 0);
                if (ret)
                        break;
-               *last_extent = offset + len;
+               *last_extent = extent_end;
        }
        /*
         * Need to let the callers know we dropped the path so they should
index 6cb82f6..340a92d 100644 (file)
@@ -508,6 +508,44 @@ static noinline int device_list_add(const char *path,
                ret = 1;
                device->fs_devices = fs_devices;
        } else if (!device->name || strcmp(device->name->str, path)) {
+               /*
+                * When the FS is already mounted:
+                * 1. If you are here and device->name is NULL, that means
+                *    this device was missing at the time of the FS mount.
+                * 2. If you are here and device->name is different from
+                *    'path', that means either
+                *      a. The same device disappeared and reappeared with a
+                *         different name, or
+                *      b. The missing-disk-which-was-replaced has now
+                *         reappeared.
+                *
+                * We must allow 1 and 2a above, but 2b would be spurious
+                * and unintentional.
+                *
+                * Further, in case of 1 and 2a above, the disk at 'path'
+                * would have missed some transactions while it was away, and
+                * in case of 2a the stale bdev has to be updated as well.
+                * 2b must never be allowed.
+                */
+
+               /*
+                * For now, don't allow updates to btrfs_fs_device through
+                * the btrfs dev scan CLI after the FS has been mounted.
+                */
+               if (fs_devices->opened) {
+                       return -EBUSY;
+               } else {
+                       /*
+                        * That is, if the FS is _not_ mounted and you are
+                        * here, there is more than one disk with the same
+                        * uuid and devid.  We keep the one with the larger
+                        * generation number, or the last one in if the
+                        * generations are equal.
+                        */
+                       if (found_transid < device->generation)
+                               return -EEXIST;
+               }
+
                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
@@ -519,6 +557,15 @@ static noinline int device_list_add(const char *path,
                }
        }
 
+       /*
+        * Unmount does not free the btrfs_device struct, but it zeroes the
+        * generation along with most of the other members.  So just update
+        * it back.  We need it to pick the disk with the largest generation
+        * (as above).
+        */
+       if (!fs_devices->opened)
+               device->generation = found_transid;
+
        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
@@ -1436,7 +1483,7 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans,
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
-       btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
+       btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
@@ -1671,7 +1718,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        device->fs_devices->total_devices--;
 
        if (device->missing)
-               root->fs_info->fs_devices->missing_devices--;
+               device->fs_devices->missing_devices--;
 
        next_device = list_entry(root->fs_info->fs_devices->devices.next,
                                 struct btrfs_device, dev_list);
@@ -1801,8 +1848,12 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
        if (srcdev->bdev) {
                fs_info->fs_devices->open_devices--;
 
-               /* zero out the old super */
-               btrfs_scratch_superblock(srcdev);
+               /*
+                * zero out the old super if it is not writable
+                * (e.g. seed device)
+                */
+               if (srcdev->writeable)
+                       btrfs_scratch_superblock(srcdev);
        }
 
        call_rcu(&srcdev->rcu, free_device);
@@ -1941,6 +1992,9 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
        fs_devices->seeding = 0;
        fs_devices->num_devices = 0;
        fs_devices->open_devices = 0;
+       fs_devices->missing_devices = 0;
+       fs_devices->num_can_discard = 0;
+       fs_devices->rotating = 0;
        fs_devices->seed = seed_devices;
 
        generate_random_uuid(fs_devices->fsid);
@@ -5800,7 +5854,8 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
        else
                generate_random_uuid(dev->uuid);
 
-       btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);
+       btrfs_init_work(&dev->work, btrfs_submit_helper,
+                       pending_bios_fn, NULL, NULL);
 
        return dev;
 }
index ac4f260..889b984 100644 (file)
@@ -207,6 +207,19 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
+static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
+{
+       struct super_block *sb = file->f_path.dentry->d_sb;
+       struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct TCP_Server_Info *server = tcon->ses->server;
+
+       if (server->ops->fallocate)
+               return server->ops->fallocate(file, tcon, mode, off, len);
+
+       return -EOPNOTSUPP;
+}
+
 static int cifs_permission(struct inode *inode, int mask)
 {
        struct cifs_sb_info *cifs_sb;
@@ -812,8 +825,9 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
        if (!(S_ISREG(inode->i_mode)))
                return -EINVAL;
 
-       /* check if file is oplocked */
-       if (((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
+       /* Check if the file is oplocked if this is a request for a new lease */
+       if (arg == F_UNLCK ||
+           ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
            ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
                return generic_setlease(file, arg, lease);
        else if (tlink_tcon(cfile->tlink)->local_lease &&
@@ -908,6 +922,7 @@ const struct file_operations cifs_file_ops = {
        .unlocked_ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_strict_ops = {
@@ -927,6 +942,7 @@ const struct file_operations cifs_file_strict_ops = {
        .unlocked_ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_direct_ops = {
@@ -947,6 +963,7 @@ const struct file_operations cifs_file_direct_ops = {
 #endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_nobrl_ops = {
@@ -965,6 +982,7 @@ const struct file_operations cifs_file_nobrl_ops = {
        .unlocked_ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_strict_nobrl_ops = {
@@ -983,6 +1001,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
        .unlocked_ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_direct_nobrl_ops = {
@@ -1002,6 +1021,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
 #endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
        .setlease = cifs_setlease,
+       .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_dir_ops = {
index 0012e1e..dfc731b 100644 (file)
@@ -409,6 +409,10 @@ struct smb_version_operations {
        /* get mtu credits */
        int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int,
                                unsigned int *, unsigned int *);
+       /* check if we need to issue closedir */
+       bool (*dir_needs_close)(struct cifsFileInfo *);
+       long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
+                         loff_t);
 };
 
 struct smb_version_values {
@@ -883,6 +887,7 @@ struct cifs_tcon {
                                for this mount even if server would support */
        bool local_lease:1; /* check leases (only) on local system not remote */
        bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
+       bool broken_sparse_sup; /* if server or share does not support sparse */
        bool need_reconnect:1; /* connection reset, tid now invalid */
 #ifdef CONFIG_CIFS_SMB2
        bool print:1;           /* set if connection to printer share */
index 33df36e..5f9822a 100644 (file)
@@ -2253,6 +2253,29 @@ typedef struct {
 /* minimum includes first three fields, and empty FS Name */
 #define MIN_FS_ATTR_INFO_SIZE 12
 
+
+/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
+#define FILE_SUPPORT_INTEGRITY_STREAMS 0x04000000
+#define FILE_SUPPORTS_USN_JOURNAL      0x02000000
+#define FILE_SUPPORTS_OPEN_BY_FILE_ID  0x01000000
+#define FILE_SUPPORTS_EXTENDED_ATTRIBUTES 0x00800000
+#define FILE_SUPPORTS_HARD_LINKS       0x00400000
+#define FILE_SUPPORTS_TRANSACTIONS     0x00200000
+#define FILE_SEQUENTIAL_WRITE_ONCE     0x00100000
+#define FILE_READ_ONLY_VOLUME          0x00080000
+#define FILE_NAMED_STREAMS             0x00040000
+#define FILE_SUPPORTS_ENCRYPTION       0x00020000
+#define FILE_SUPPORTS_OBJECT_IDS       0x00010000
+#define FILE_VOLUME_IS_COMPRESSED      0x00008000
+#define FILE_SUPPORTS_REMOTE_STORAGE   0x00000100
+#define FILE_SUPPORTS_REPARSE_POINTS   0x00000080
+#define FILE_SUPPORTS_SPARSE_FILES     0x00000040
+#define FILE_VOLUME_QUOTAS             0x00000020
+#define FILE_FILE_COMPRESSION          0x00000010
+#define FILE_PERSISTENT_ACLS           0x00000008
+#define FILE_UNICODE_ON_DISK           0x00000004
+#define FILE_CASE_PRESERVED_NAMES      0x00000002
+#define FILE_CASE_SENSITIVE_SEARCH     0x00000001
 typedef struct {
        __le32 Attributes;
        __le32 MaxPathNameComponentLength;
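
These values track the FileSystemAttributes bits from MS-FSCC section 2.5.1 (FileFsAttributeInformation). The one this series cares about is FILE_SUPPORTS_SPARSE_FILES, and, as the smb2ops.c comment further down notes, Samba performs the set-sparse FSCTL without advertising the bit, which is why the code falls back to a per-tcon broken_sparse_sup flag rather than trusting it. Testing the bit is just a mask; a trivial stand-alone example (the attribute word used is made up):

#include <stdint.h>
#include <stdio.h>

#define FILE_SUPPORTS_SPARSE_FILES 0x00000040

int main(void)
{
        uint32_t attrs = 0x000000ff | FILE_SUPPORTS_SPARSE_FILES; /* example only */

        if (attrs & FILE_SUPPORTS_SPARSE_FILES)
                printf("share advertises sparse file support\n");
        else
                printf("sparse support not advertised (may still work, e.g. Samba)\n");
        return 0;
}
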
index 4ab2f79..d5fec92 100644 (file)
@@ -762,7 +762,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
 
        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cifs_file_list_lock);
-       if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
+       if (server->ops->dir_needs_close(cfile)) {
                cfile->invalidHandle = true;
                spin_unlock(&cifs_file_list_lock);
                if (server->ops->close_dir)
index 426d6c6..949ec90 100644 (file)
@@ -1727,6 +1727,12 @@ unlink_target:
                                    target_dentry, to_name);
        }
 
+       /* force revalidate to go get info when needed */
+       CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
+
+       source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
+               target_dir->i_mtime = current_fs_time(source_dir->i_sb);
+
 cifs_rename_exit:
        kfree(info_buf_source);
        kfree(from_name);
index 81340c6..b7415d5 100644 (file)
@@ -574,13 +574,6 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
                cinode->oplock = 0;
 }
 
-static int
-cifs_oplock_break_wait(void *unused)
-{
-       schedule();
-       return signal_pending(current) ? -ERESTARTSYS : 0;
-}
-
 /*
  * We wait for oplock breaks to be processed before we attempt to perform
  * writes.
index b15862e..798c80a 100644 (file)
@@ -593,7 +593,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
                /* close and restart search */
                cifs_dbg(FYI, "search backing up - close and restart search\n");
                spin_lock(&cifs_file_list_lock);
-               if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
+               if (server->ops->dir_needs_close(cfile)) {
                        cfile->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
                        if (server->ops->close)
index 5e8c22d..1a6df4b 100644 (file)
@@ -1015,6 +1015,12 @@ cifs_wp_retry_size(struct inode *inode)
        return CIFS_SB(inode->i_sb)->wsize;
 }
 
+static bool
+cifs_dir_needs_close(struct cifsFileInfo *cfile)
+{
+       return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -1086,6 +1092,7 @@ struct smb_version_operations smb1_operations = {
        .create_mf_symlink = cifs_create_mf_symlink,
        .is_read_op = cifs_is_read_op,
        .wp_retry_size = cifs_wp_retry_size,
+       .dir_needs_close = cifs_dir_needs_close,
 #ifdef CONFIG_CIFS_XATTR
        .query_all_EAs = CIFSSMBQAllEAs,
        .set_EA = CIFSSMBSetEA,
index e31a9df..af59d03 100644 (file)
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
        {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
        {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
-       {STATUS_NO_MORE_FILES, -EIO, "STATUS_NO_MORE_FILES"},
+       {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
        {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
        {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
        {STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"},
@@ -298,7 +298,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_INVALID_PARAMETER, -EINVAL, "STATUS_INVALID_PARAMETER"},
        {STATUS_NO_SUCH_DEVICE, -ENODEV, "STATUS_NO_SUCH_DEVICE"},
        {STATUS_NO_SUCH_FILE, -ENOENT, "STATUS_NO_SUCH_FILE"},
-       {STATUS_INVALID_DEVICE_REQUEST, -EIO, "STATUS_INVALID_DEVICE_REQUEST"},
+       {STATUS_INVALID_DEVICE_REQUEST, -EOPNOTSUPP, "STATUS_INVALID_DEVICE_REQUEST"},
        {STATUS_END_OF_FILE, -ENODATA, "STATUS_END_OF_FILE"},
        {STATUS_WRONG_VOLUME, -EIO, "STATUS_WRONG_VOLUME"},
        {STATUS_NO_MEDIA_IN_DEVICE, -EIO, "STATUS_NO_MEDIA_IN_DEVICE"},
index f2e6ac2..4aa7a0f 100644 (file)
@@ -178,9 +178,24 @@ smb2_check_message(char *buf, unsigned int length)
                /* Windows 7 server returns 24 bytes more */
                if (clc_len + 20 == len && command == SMB2_OPLOCK_BREAK_HE)
                        return 0;
-               /* server can return one byte more */
+               /* server can return one byte more due to implied bcc[0] */
                if (clc_len == 4 + len + 1)
                        return 0;
+
+               /*
+                * MacOS server pads after SMB2.1 write response with 3 bytes
+                * of junk. Other servers match RFC1001 len to actual
+                * SMB2/SMB3 frame length (header + smb2 response specific data)
+                * Log the server error (once), but allow it and continue
+                * since the frame is parseable.
+                */
+               if (clc_len < 4 /* RFC1001 header size */ + len) {
+                       printk_once(KERN_WARNING
+                               "SMB2 server sent bad RFC1001 len %d not %d\n",
+                               len, clc_len - 4);
+                       return 0;
+               }
+
                return 1;
        }
        return 0;
index 77f8aeb..5a48aa2 100644 (file)
@@ -731,11 +731,72 @@ smb2_sync_write(const unsigned int xid, struct cifsFileInfo *cfile,
        return SMB2_write(xid, parms, written, iov, nr_segs);
 }
 
+/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
+static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
+               struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
+{
+       struct cifsInodeInfo *cifsi;
+       int rc;
+
+       cifsi = CIFS_I(inode);
+
+       /* if file already sparse don't bother setting sparse again */
+       if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
+               return true; /* already sparse */
+
+       if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
+               return true; /* already not sparse */
+
+       /*
+        * Can't check for sparse support on share the usual way via the
+        * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
+        * since Samba server doesn't set the flag on the share, yet
+        * supports the set sparse FSCTL and returns sparse correctly
+        * in the file attributes. If we fail setting sparse though we
+        * mark that server does not support sparse files for this share
+        * to avoid repeatedly sending the unsupported fsctl to server
+        * if the file is repeatedly extended.
+        */
+       if (tcon->broken_sparse_sup)
+               return false;
+
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
+                       true /* is_fctl */, &setsparse, 1, NULL, NULL);
+       if (rc) {
+               tcon->broken_sparse_sup = true;
+               cifs_dbg(FYI, "set sparse rc = %d\n", rc);
+               return false;
+       }
+
+       if (setsparse)
+               cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
+       else
+               cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
+
+       return true;
+}
+
 static int
 smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
                   struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
 {
        __le64 eof = cpu_to_le64(size);
+       struct inode *inode;
+
+       /*
+        * If extending file more than one page make sparse. Many Linux fs
+        * make files sparse by default when extending via ftruncate
+        */
+       inode = cfile->dentry->d_inode;
+
+       if (!set_alloc && (size > inode->i_size + 8192)) {
+               __u8 set_sparse = 1;
+
+               /* whether set sparse succeeds or not, extend the file */
+               smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
+       }
+
        return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
                            cfile->fid.volatile_fid, cfile->pid, &eof, false);
 }
@@ -954,6 +1015,105 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        return rc;
 }
 
+static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+                           loff_t offset, loff_t len, bool keep_size)
+{
+       struct inode *inode;
+       struct cifsInodeInfo *cifsi;
+       struct cifsFileInfo *cfile = file->private_data;
+       struct file_zero_data_information fsctl_buf;
+       long rc;
+       unsigned int xid;
+
+       xid = get_xid();
+
+       inode = cfile->dentry->d_inode;
+       cifsi = CIFS_I(inode);
+
+       /* if file not oplocked can't be sure whether asking to extend size */
+       if (!CIFS_CACHE_READ(cifsi))
+               if (keep_size == false)
+                       return -EOPNOTSUPP;
+
+       /* 
+        * Must check if file sparse since fallocate -z (zero range) assumes
+        * non-sparse allocation
+        */
+       if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE))
+               return -EOPNOTSUPP;
+
+       /*
+        * need to make sure we are not asked to extend the file since the SMB3
+        * fsctl does not change the file size. In the future we could change
+        * this to zero the first part of the range then set the file size
+        * which for a non sparse file would zero the newly extended range
+        */
+       if (keep_size == false)
+               if (i_size_read(inode) < offset + len)
+                       return -EOPNOTSUPP;
+
+       cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+       fsctl_buf.FileOffset = cpu_to_le64(offset);
+       fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+                       true /* is_fctl */, (char *)&fsctl_buf,
+                       sizeof(struct file_zero_data_information), NULL, NULL);
+       free_xid(xid);
+       return rc;
+}
+
+static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+                           loff_t offset, loff_t len)
+{
+       struct inode *inode;
+       struct cifsInodeInfo *cifsi;
+       struct cifsFileInfo *cfile = file->private_data;
+       struct file_zero_data_information fsctl_buf;
+       long rc;
+       unsigned int xid;
+       __u8 set_sparse = 1;
+
+       xid = get_xid();
+
+       inode = cfile->dentry->d_inode;
+       cifsi = CIFS_I(inode);
+
+       /* Need to make file sparse, if not already, before freeing range. */
+       /* Consider adding equivalent for compressed since it could also work */
+       if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse))
+               return -EOPNOTSUPP;
+
+       cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+       fsctl_buf.FileOffset = cpu_to_le64(offset);
+       fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+                       true /* is_fctl */, (char *)&fsctl_buf,
+                       sizeof(struct file_zero_data_information), NULL, NULL);
+       free_xid(xid);
+       return rc;
+}
+
+static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
+                          loff_t off, loff_t len)
+{
+       /* KEEP_SIZE already checked for by do_fallocate */
+       if (mode & FALLOC_FL_PUNCH_HOLE)
+               return smb3_punch_hole(file, tcon, off, len);
+       else if (mode & FALLOC_FL_ZERO_RANGE) {
+               if (mode & FALLOC_FL_KEEP_SIZE)
+                       return smb3_zero_range(file, tcon, off, len, true);
+               return smb3_zero_range(file, tcon, off, len, false);
+       }
+
+       return -EOPNOTSUPP;
+}
+
 static void
 smb2_downgrade_oplock(struct TCP_Server_Info *server,
                        struct cifsInodeInfo *cinode, bool set_level2)
@@ -1161,6 +1321,12 @@ smb2_wp_retry_size(struct inode *inode)
                     SMB2_MAX_BUFFER_SIZE);
 }
 
+static bool
+smb2_dir_needs_close(struct cifsFileInfo *cfile)
+{
+       return !cfile->invalidHandle;
+}
+
 struct smb_version_operations smb20_operations = {
        .compare_fids = smb2_compare_fids,
        .setup_request = smb2_setup_request,
@@ -1236,6 +1402,7 @@ struct smb_version_operations smb20_operations = {
        .parse_lease_buf = smb2_parse_lease_buf,
        .clone_range = smb2_clone_range,
        .wp_retry_size = smb2_wp_retry_size,
+       .dir_needs_close = smb2_dir_needs_close,
 };
 
 struct smb_version_operations smb21_operations = {
@@ -1313,6 +1480,7 @@ struct smb_version_operations smb21_operations = {
        .parse_lease_buf = smb2_parse_lease_buf,
        .clone_range = smb2_clone_range,
        .wp_retry_size = smb2_wp_retry_size,
+       .dir_needs_close = smb2_dir_needs_close,
 };
 
 struct smb_version_operations smb30_operations = {
@@ -1393,6 +1561,8 @@ struct smb_version_operations smb30_operations = {
        .clone_range = smb2_clone_range,
        .validate_negotiate = smb3_validate_negotiate,
        .wp_retry_size = smb2_wp_retry_size,
+       .dir_needs_close = smb2_dir_needs_close,
+       .fallocate = smb3_fallocate,
 };
 
 struct smb_version_values smb20_values = {
index 42ebc1a..fa0dd04 100644 (file)
@@ -907,7 +907,8 @@ tcon_exit:
 tcon_error_exit:
        if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
                cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
-               tcon->bad_network_name = true;
+               if (tcon)
+                       tcon->bad_network_name = true;
        }
        goto tcon_exit;
 }
@@ -1224,7 +1225,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 
        cifs_dbg(FYI, "SMB2 IOCTL\n");
 
-       *out_data = NULL;
+       if (out_data != NULL)
+               *out_data = NULL;
+
        /* zero out returned data len, in case of error */
        if (plen)
                *plen = 0;
@@ -2177,6 +2180,10 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
 
        if (rc) {
+               if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
+                       srch_inf->endOfSearch = true;
+                       rc = 0;
+               }
                cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
                goto qdir_exit;
        }
@@ -2214,11 +2221,6 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        else
                cifs_dbg(VFS, "illegal search buffer type\n");
 
-       if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
-               srch_inf->endOfSearch = 1;
-       else
-               srch_inf->endOfSearch = 0;
-
        return rc;
 
 qdir_exit:
index 69f3595..fbe486c 100644 (file)
@@ -573,6 +573,12 @@ struct copychunk_ioctl {
        __u32 Reserved2;
 } __packed;
 
+/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
+struct file_zero_data_information {
+       __le64  FileOffset;
+       __le64  BeyondFinalZero;
+} __packed;
+
 struct copychunk_ioctl_rsp {
        __le32 ChunksWritten;
        __le32 ChunkBytesWritten;
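
file_zero_data_information mirrors FILE_ZERO_DATA_INFORMATION from MS-FSCC: two little-endian 64-bit byte offsets, with the zeroed range being [FileOffset, BeyondFinalZero), which is why smb3_punch_hole() and smb3_zero_range() pass offset and offset + len. A user-space mirror of how the 16-byte payload is filled, with htole64() standing in for the kernel's cpu_to_le64():

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct file_zero_data_information {
        uint64_t FileOffset;        /* start of the range to zero (LE)  */
        uint64_t BeyondFinalZero;   /* first byte NOT zeroed (LE)       */
} __attribute__((packed));

int main(void)
{
        uint64_t offset = 1024 * 1024, len = 64 * 1024;
        struct file_zero_data_information buf = {
                .FileOffset      = htole64(offset),
                .BeyondFinalZero = htole64(offset + len),
        };

        printf("FSCTL_SET_ZERO_DATA payload is %zu bytes\n", sizeof(buf)); /* 16 */
        return 0;
}
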
index 0e538b5..83efa59 100644 (file)
@@ -63,7 +63,7 @@
 #define FSCTL_SET_OBJECT_ID_EXTENDED 0x000900BC /* BB add struct */
 #define FSCTL_CREATE_OR_GET_OBJECT_ID 0x000900C0 /* BB add struct */
 #define FSCTL_SET_SPARSE             0x000900C4 /* BB add struct */
-#define FSCTL_SET_ZERO_DATA          0x000900C8 /* BB add struct */
+#define FSCTL_SET_ZERO_DATA          0x000980C8
 #define FSCTL_SET_ENCRYPTION         0x000900D7 /* BB add struct */
 #define FSCTL_ENCRYPTION_FSCTL_IO    0x000900DB /* BB add struct */
 #define FSCTL_WRITE_RAW_ENCRYPTED    0x000900DF /* BB add struct */
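
The new FSCTL_SET_ZERO_DATA value is not arbitrary: Windows builds these codes with CTL_CODE(DeviceType, Function, Method, Access), i.e. (DeviceType << 16) | (Access << 14) | (Function << 2) | Method, and winioctl.h defines FSCTL_SET_ZERO_DATA as function 50 on FILE_DEVICE_FILE_SYSTEM with METHOD_BUFFERED and FILE_WRITE_DATA access (my reading of the Windows headers, not something in this diff). The old 0x000900C8 encoded FILE_ANY_ACCESS; setting the write-access bits gives 0x000980C8. A quick recomputation:

#include <stdio.h>

#define CTL_CODE(dev, func, method, access) \
        (((dev) << 16) | ((access) << 14) | ((func) << 2) | (method))

#define FILE_DEVICE_FILE_SYSTEM 0x0009
#define METHOD_BUFFERED         0
#define FILE_ANY_ACCESS         0
#define FILE_WRITE_DATA         2

int main(void)
{
        printf("old: 0x%08x\n",
               CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 50, METHOD_BUFFERED,
                        FILE_ANY_ACCESS));   /* prints 0x000900c8 */
        printf("new: 0x%08x\n",
               CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 50, METHOD_BUFFERED,
                        FILE_WRITE_DATA));   /* prints 0x000980c8 */
        return 0;
}
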
index 08cdfe5..622e882 100644 (file)
@@ -2828,8 +2828,9 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
                 */
                overhead += ngroups * (2 + sbi->s_itb_per_group);
 
-               /* Add the journal blocks as well */
-                overhead += sbi->s_journal->j_maxlen;
+               /* Add the internal journal blocks as well */
+               if (sbi->s_journal && !sbi->journal_bdev)
+                       overhead += sbi->s_journal->j_maxlen;
 
                sbi->s_overhead_last = overhead;
                smp_wmb();
index 5b19760..b0c225c 100644 (file)
@@ -1825,7 +1825,7 @@ ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no)
 /*
  * Special error return code only used by dx_probe() and its callers.
  */
-#define ERR_BAD_DX_DIR -75000
+#define ERR_BAD_DX_DIR (-(MAX_ERRNO - 1))
 
 /*
  * Timeout and state flag for lazy initialization inode thread.
@@ -2454,6 +2454,22 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
        up_write(&EXT4_I(inode)->i_data_sem);
 }
 
+/* Update i_size, i_disksize. Requires i_mutex to avoid races with truncate */
+static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
+{
+       int changed = 0;
+
+       if (newsize > inode->i_size) {
+               i_size_write(inode, newsize);
+               changed = 1;
+       }
+       if (newsize > EXT4_I(inode)->i_disksize) {
+               ext4_update_i_disksize(inode, newsize);
+               changed |= 2;
+       }
+       return changed;
+}
+
 struct ext4_group_info {
        unsigned long   bb_state;
        struct rb_root  bb_free_root;
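
The new ERR_BAD_DX_DIR value matters for the namei.c hunks below, where ext4_find_entry() and dx_probe() start returning errors as ERR_PTR() values: an error code can only round-trip through ERR_PTR()/IS_ERR()/PTR_ERR() if it lies within MAX_ERRNO of zero, which -75000 does not and -(MAX_ERRNO - 1) does. As I read it, that is the point of this define change. A user-space mirror of the err.h machinery to show the difference:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)  { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
        long old_code = -75000;              /* old ERR_BAD_DX_DIR */
        long new_code = -(MAX_ERRNO - 1);    /* new ERR_BAD_DX_DIR */

        printf("old: IS_ERR=%d\n", IS_ERR(ERR_PTR(old_code)));  /* 0: lost */
        printf("new: IS_ERR=%d PTR_ERR=%ld\n",
               IS_ERR(ERR_PTR(new_code)),
               PTR_ERR(ERR_PTR(new_code)));                      /* 1, -4094 */
        return 0;
}
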
index 76c2df3..74292a7 100644 (file)
@@ -4665,7 +4665,8 @@ retry:
 }
 
 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
-                                 ext4_lblk_t len, int flags, int mode)
+                                 ext4_lblk_t len, loff_t new_size,
+                                 int flags, int mode)
 {
        struct inode *inode = file_inode(file);
        handle_t *handle;
@@ -4674,8 +4675,10 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
        int retries = 0;
        struct ext4_map_blocks map;
        unsigned int credits;
+       loff_t epos;
 
        map.m_lblk = offset;
+       map.m_len = len;
        /*
         * Don't normalize the request if it can fit in one extent so
         * that it doesn't get unnecessarily split into multiple
@@ -4690,9 +4693,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
        credits = ext4_chunk_trans_blocks(inode, len);
 
 retry:
-       while (ret >= 0 && ret < len) {
-               map.m_lblk = map.m_lblk + ret;
-               map.m_len = len = len - ret;
+       while (ret >= 0 && len) {
                handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
                                            credits);
                if (IS_ERR(handle)) {
@@ -4709,6 +4710,21 @@ retry:
                        ret2 = ext4_journal_stop(handle);
                        break;
                }
+               map.m_lblk += ret;
+               map.m_len = len = len - ret;
+               epos = (loff_t)map.m_lblk << inode->i_blkbits;
+               inode->i_ctime = ext4_current_time(inode);
+               if (new_size) {
+                       if (epos > new_size)
+                               epos = new_size;
+                       if (ext4_update_inode_size(inode, epos) & 0x1)
+                               inode->i_mtime = inode->i_ctime;
+               } else {
+                       if (epos > inode->i_size)
+                               ext4_set_inode_flag(inode,
+                                                   EXT4_INODE_EOFBLOCKS);
+               }
+               ext4_mark_inode_dirty(handle, inode);
                ret2 = ext4_journal_stop(handle);
                if (ret2)
                        break;
@@ -4731,7 +4747,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
        loff_t new_size = 0;
        int ret = 0;
        int flags;
-       int partial;
+       int credits;
+       int partial_begin, partial_end;
        loff_t start, end;
        ext4_lblk_t lblk;
        struct address_space *mapping = inode->i_mapping;
@@ -4771,7 +4788,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
        if (start < offset || end > offset + len)
                return -EINVAL;
-       partial = (offset + len) & ((1 << blkbits) - 1);
+       partial_begin = offset & ((1 << blkbits) - 1);
+       partial_end = (offset + len) & ((1 << blkbits) - 1);
 
        lblk = start >> blkbits;
        max_blocks = (end >> blkbits);
@@ -4805,7 +4823,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
                 * If we have a partial block after EOF we have to allocate
                 * the entire block.
                 */
-               if (partial)
+               if (partial_end)
                        max_blocks += 1;
        }
 
@@ -4813,6 +4831,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
                /* Now release the pages and zero block aligned part of pages*/
                truncate_pagecache_range(inode, start, end - 1);
+               inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
 
                /* Wait all existing dio workers, newcomers will block on i_mutex */
                ext4_inode_block_unlocked_dio(inode);
@@ -4825,13 +4844,22 @@ static long ext4_zero_range(struct file *file, loff_t offset,
                if (ret)
                        goto out_dio;
 
-               ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags,
-                                            mode);
+               ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+                                            flags, mode);
                if (ret)
                        goto out_dio;
        }
+       if (!partial_begin && !partial_end)
+               goto out_dio;
 
-       handle = ext4_journal_start(inode, EXT4_HT_MISC, 4);
+       /*
+        * In worst case we have to writeout two nonadjacent unwritten
+        * blocks and update the inode
+        */
+       credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
+       if (ext4_should_journal_data(inode))
+               credits += 2;
+       handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                ext4_std_error(inode->i_sb, ret);
@@ -4839,12 +4867,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
        }
 
        inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
-
        if (new_size) {
-               if (new_size > i_size_read(inode))
-                       i_size_write(inode, new_size);
-               if (new_size > EXT4_I(inode)->i_disksize)
-                       ext4_update_i_disksize(inode, new_size);
+               ext4_update_inode_size(inode, new_size);
        } else {
                /*
                * Mark that we allocate beyond EOF so the subsequent truncate
@@ -4853,7 +4877,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
                if ((offset + len) > i_size_read(inode))
                        ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
        }
-
        ext4_mark_inode_dirty(handle, inode);
 
        /* Zero out partial block at the edges of the range */
@@ -4880,13 +4903,11 @@ out_mutex:
 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
        struct inode *inode = file_inode(file);
-       handle_t *handle;
        loff_t new_size = 0;
        unsigned int max_blocks;
        int ret = 0;
        int flags;
        ext4_lblk_t lblk;
-       struct timespec tv;
        unsigned int blkbits = inode->i_blkbits;
 
        /* Return error if mode is not supported */
@@ -4937,36 +4958,15 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                        goto out;
        }
 
-       ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, mode);
+       ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+                                    flags, mode);
        if (ret)
                goto out;
 
-       handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
-       if (IS_ERR(handle))
-               goto out;
-
-       tv = inode->i_ctime = ext4_current_time(inode);
-
-       if (new_size) {
-               if (new_size > i_size_read(inode)) {
-                       i_size_write(inode, new_size);
-                       inode->i_mtime = tv;
-               }
-               if (new_size > EXT4_I(inode)->i_disksize)
-                       ext4_update_i_disksize(inode, new_size);
-       } else {
-               /*
-               * Mark that we allocate beyond EOF so the subsequent truncate
-               * can proceed even if the new size is the same as i_size.
-               */
-               if ((offset + len) > i_size_read(inode))
-                       ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
+       if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
+               ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
+                                               EXT4_I(inode)->i_sync_tid);
        }
-       ext4_mark_inode_dirty(handle, inode);
-       if (file->f_flags & O_SYNC)
-               ext4_handle_sync(handle);
-
-       ext4_journal_stop(handle);
 out:
        mutex_unlock(&inode->i_mutex);
        trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
index 367a60c..3aa26e9 100644 (file)
@@ -1055,27 +1055,11 @@ static int ext4_write_end(struct file *file,
        } else
                copied = block_write_end(file, mapping, pos,
                                         len, copied, page, fsdata);
-
        /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hole i_mutex.
-        *
-        * But it's important to update i_size while still holding page lock:
+        * it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
         */
-       if (pos + copied > inode->i_size) {
-               i_size_write(inode, pos + copied);
-               i_size_changed = 1;
-       }
-
-       if (pos + copied > EXT4_I(inode)->i_disksize) {
-               /* We need to mark inode dirty even if
-                * new_i_size is less that inode->i_size
-                * but greater than i_disksize. (hint delalloc)
-                */
-               ext4_update_i_disksize(inode, (pos + copied));
-               i_size_changed = 1;
-       }
+       i_size_changed = ext4_update_inode_size(inode, pos + copied);
        unlock_page(page);
        page_cache_release(page);
 
@@ -1123,7 +1107,7 @@ static int ext4_journalled_write_end(struct file *file,
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
-       loff_t new_i_size;
+       int size_changed = 0;
 
        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
@@ -1146,20 +1130,18 @@ static int ext4_journalled_write_end(struct file *file,
                if (!partial)
                        SetPageUptodate(page);
        }
-       new_i_size = pos + copied;
-       if (new_i_size > inode->i_size)
-               i_size_write(inode, pos+copied);
+       size_changed = ext4_update_inode_size(inode, pos + copied);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
        EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
-               ext4_update_i_disksize(inode, new_i_size);
+       unlock_page(page);
+       page_cache_release(page);
+
+       if (size_changed) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }
 
-       unlock_page(page);
-       page_cache_release(page);
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /* if we have allocated more blocks and copied
                 * less. We will have blocks allocated outside
@@ -2095,6 +2077,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
        struct ext4_map_blocks *map = &mpd->map;
        int err;
        loff_t disksize;
+       int progress = 0;
 
        mpd->io_submit.io_end->offset =
                                ((loff_t)map->m_lblk) << inode->i_blkbits;
@@ -2111,8 +2094,11 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                         * is non-zero, a commit should free up blocks.
                         */
                        if ((err == -ENOMEM) ||
-                           (err == -ENOSPC && ext4_count_free_clusters(sb)))
+                           (err == -ENOSPC && ext4_count_free_clusters(sb))) {
+                               if (progress)
+                                       goto update_disksize;
                                return err;
+                       }
                        ext4_msg(sb, KERN_CRIT,
                                 "Delayed block allocation failed for "
                                 "inode %lu at logical offset %llu with"
@@ -2129,15 +2115,17 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                        *give_up_on_write = true;
                        return err;
                }
+               progress = 1;
                /*
                 * Update buffer state, submit mapped pages, and get us new
                 * extent to map
                 */
                err = mpage_map_and_submit_buffers(mpd);
                if (err < 0)
-                       return err;
+                       goto update_disksize;
        } while (map->m_len);
 
+update_disksize:
        /*
         * Update on-disk size after IO is submitted.  Races with
         * truncate are avoided by checking i_size under i_data_sem.
index 9560277..8b0f9ef 100644 (file)
@@ -1412,6 +1412,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
        int last = first + count - 1;
        struct super_block *sb = e4b->bd_sb;
 
+       if (WARN_ON(count == 0))
+               return;
        BUG_ON(last >= (sb->s_blocksize << 3));
        assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
        /* Don't bother if the block group is corrupt. */
@@ -3221,6 +3223,8 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
        int err;
 
        if (pa == NULL) {
+               if (ac->ac_f_ex.fe_len == 0)
+                       return;
                err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
                if (err) {
                        /*
@@ -3235,6 +3239,7 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
                mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
                               ac->ac_f_ex.fe_len);
                ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+               ext4_mb_unload_buddy(&e4b);
                return;
        }
        if (pa->pa_type == MB_INODE_PA)
index b147a67..90a3cdc 100644 (file)
@@ -1227,7 +1227,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
                                   buffer */
        int num = 0;
        ext4_lblk_t  nblocks;
-       int i, err;
+       int i, err = 0;
        int namelen;
 
        *res_dir = NULL;
@@ -1264,7 +1264,11 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
                 * return.  Otherwise, fall back to doing a search the
                 * old fashioned way.
                 */
-               if (bh || (err != ERR_BAD_DX_DIR))
+               if (err == -ENOENT)
+                       return NULL;
+               if (err && err != ERR_BAD_DX_DIR)
+                       return ERR_PTR(err);
+               if (bh)
                        return bh;
                dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
                               "falling back\n"));
@@ -1295,6 +1299,11 @@ restart:
                                }
                                num++;
                                bh = ext4_getblk(NULL, dir, b++, 0, &err);
+                               if (unlikely(err)) {
+                                       if (ra_max == 0)
+                                               return ERR_PTR(err);
+                                       break;
+                               }
                                bh_use[ra_max] = bh;
                                if (bh)
                                        ll_rw_block(READ | REQ_META | REQ_PRIO,
@@ -1417,6 +1426,8 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
                return ERR_PTR(-ENAMETOOLONG);
 
        bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+       if (IS_ERR(bh))
+               return (struct dentry *) bh;
        inode = NULL;
        if (bh) {
                __u32 ino = le32_to_cpu(de->inode);
@@ -1450,6 +1461,8 @@ struct dentry *ext4_get_parent(struct dentry *child)
        struct buffer_head *bh;
 
        bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
+       if (IS_ERR(bh))
+               return (struct dentry *) bh;
        if (!bh)
                return ERR_PTR(-ENOENT);
        ino = le32_to_cpu(de->inode);
@@ -2727,6 +2740,8 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
 
        retval = -ENOENT;
        bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+       if (IS_ERR(bh))
+               return PTR_ERR(bh);
        if (!bh)
                goto end_rmdir;
 
@@ -2794,6 +2809,8 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
 
        retval = -ENOENT;
        bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+       if (IS_ERR(bh))
+               return PTR_ERR(bh);
        if (!bh)
                goto end_unlink;
 
@@ -3121,6 +3138,8 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
        struct ext4_dir_entry_2 *de;
 
        bh = ext4_find_entry(dir, d_name, &de, NULL);
+       if (IS_ERR(bh))
+               return PTR_ERR(bh);
        if (bh) {
                retval = ext4_delete_entry(handle, dir, de, bh);
                brelse(bh);
@@ -3128,7 +3147,8 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
        return retval;
 }
 
-static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent)
+static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent,
+                              int force_reread)
 {
        int retval;
        /*
@@ -3140,7 +3160,8 @@ static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent)
        if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino ||
            ent->de->name_len != ent->dentry->d_name.len ||
            strncmp(ent->de->name, ent->dentry->d_name.name,
-                   ent->de->name_len)) {
+                   ent->de->name_len) ||
+           force_reread) {
                retval = ext4_find_delete_entry(handle, ent->dir,
                                                &ent->dentry->d_name);
        } else {
@@ -3191,6 +3212,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                .dentry = new_dentry,
                .inode = new_dentry->d_inode,
        };
+       int force_reread;
        int retval;
 
        dquot_initialize(old.dir);
@@ -3202,6 +3224,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                dquot_initialize(new.inode);
 
        old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+       if (IS_ERR(old.bh))
+               return PTR_ERR(old.bh);
        /*
         *  Check for inode number is _not_ due to possible IO errors.
         *  We might rmdir the source, keep it as pwd of some process
@@ -3214,6 +3238,10 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
                                 &new.de, &new.inlined);
+       if (IS_ERR(new.bh)) {
+               retval = PTR_ERR(new.bh);
+               goto end_rename;
+       }
        if (new.bh) {
                if (!new.inode) {
                        brelse(new.bh);
@@ -3246,6 +3274,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (retval)
                        goto end_rename;
        }
+       /*
+        * If we're renaming a file within an inline_data dir and adding or
+        * setting the new dirent causes a conversion from inline_data to
+        * extents/blockmap, we need to force the dirent delete code to
+        * re-read the directory, or else we end up trying to delete a dirent
+        * from what is now the extent tree root (or a block map).
+        */
+       force_reread = (new.dir->i_ino == old.dir->i_ino &&
+                       ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
        if (!new.bh) {
                retval = ext4_add_entry(handle, new.dentry, old.inode);
                if (retval)
@@ -3256,6 +3293,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (retval)
                        goto end_rename;
        }
+       if (force_reread)
+               force_reread = !ext4_test_inode_flag(new.dir,
+                                                    EXT4_INODE_INLINE_DATA);
 
        /*
         * Like most other Unix systems, set the ctime for inodes on a
@@ -3267,7 +3307,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        /*
         * ok, that's it
         */
-       ext4_rename_delete(handle, &old);
+       ext4_rename_delete(handle, &old, force_reread);
 
        if (new.inode) {
                ext4_dec_count(handle, new.inode);
@@ -3330,6 +3370,8 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
                                 &old.de, &old.inlined);
+       if (IS_ERR(old.bh))
+               return PTR_ERR(old.bh);
        /*
         *  Check for inode number is _not_ due to possible IO errors.
         *  We might rmdir the source, keep it as pwd of some process
@@ -3342,6 +3384,10 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
                                 &new.de, &new.inlined);
+       if (IS_ERR(new.bh)) {
+               retval = PTR_ERR(new.bh);
+               goto end_rename;
+       }
 
        /* RENAME_EXCHANGE case: old *and* new must both exist */
        if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
index 32b43ad..0b28b36 100644 (file)
@@ -3181,9 +3181,9 @@ static int set_journal_csum_feature_set(struct super_block *sb)
 
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
-               /* journal checksum v2 */
+               /* journal checksum v3 */
                compat = 0;
-               incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
+               incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
        } else {
                /* journal checksum v1 */
                compat = JBD2_FEATURE_COMPAT_CHECKSUM;
@@ -3205,6 +3205,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
                jbd2_journal_clear_features(sbi->s_journal,
                                JBD2_FEATURE_COMPAT_CHECKSUM, 0,
                                JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
+                               JBD2_FEATURE_INCOMPAT_CSUM_V3 |
                                JBD2_FEATURE_INCOMPAT_CSUM_V2);
        }
 
index 4556ce1..5ddaf86 100644 (file)
@@ -61,7 +61,7 @@ static void isofs_put_super(struct super_block *sb)
        return;
 }
 
-static int isofs_read_inode(struct inode *);
+static int isofs_read_inode(struct inode *, int relocated);
 static int isofs_statfs (struct dentry *, struct kstatfs *);
 
 static struct kmem_cache *isofs_inode_cachep;
@@ -1259,7 +1259,7 @@ out_toomany:
        goto out;
 }
 
-static int isofs_read_inode(struct inode *inode)
+static int isofs_read_inode(struct inode *inode, int relocated)
 {
        struct super_block *sb = inode->i_sb;
        struct isofs_sb_info *sbi = ISOFS_SB(sb);
@@ -1404,7 +1404,7 @@ static int isofs_read_inode(struct inode *inode)
         */
 
        if (!high_sierra) {
-               parse_rock_ridge_inode(de, inode);
+               parse_rock_ridge_inode(de, inode, relocated);
                /* if we want uid/gid set, override the rock ridge setting */
                if (sbi->s_uid_set)
                        inode->i_uid = sbi->s_uid;
@@ -1483,9 +1483,10 @@ static int isofs_iget5_set(struct inode *ino, void *data)
  * offset that point to the underlying meta-data for the inode.  The
  * code below is otherwise similar to the iget() code in
  * include/linux/fs.h */
-struct inode *isofs_iget(struct super_block *sb,
-                        unsigned long block,
-                        unsigned long offset)
+struct inode *__isofs_iget(struct super_block *sb,
+                          unsigned long block,
+                          unsigned long offset,
+                          int relocated)
 {
        unsigned long hashval;
        struct inode *inode;
@@ -1507,7 +1508,7 @@ struct inode *isofs_iget(struct super_block *sb,
                return ERR_PTR(-ENOMEM);
 
        if (inode->i_state & I_NEW) {
-               ret = isofs_read_inode(inode);
+               ret = isofs_read_inode(inode, relocated);
                if (ret < 0) {
                        iget_failed(inode);
                        inode = ERR_PTR(ret);
index 9916723..0ac4c1f 100644 (file)
@@ -107,7 +107,7 @@ extern int iso_date(char *, int);
 
 struct inode;          /* To make gcc happy */
 
-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
+extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
 extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
 extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
 
@@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int
 extern struct buffer_head *isofs_bread(struct inode *, sector_t);
 extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
 
-extern struct inode *isofs_iget(struct super_block *sb,
-                                unsigned long block,
-                                unsigned long offset);
+struct inode *__isofs_iget(struct super_block *sb,
+                          unsigned long block,
+                          unsigned long offset,
+                          int relocated);
+
+static inline struct inode *isofs_iget(struct super_block *sb,
+                                      unsigned long block,
+                                      unsigned long offset)
+{
+       return __isofs_iget(sb, block, offset, 0);
+}
+
+static inline struct inode *isofs_iget_reloc(struct super_block *sb,
+                                            unsigned long block,
+                                            unsigned long offset)
+{
+       return __isofs_iget(sb, block, offset, 1);
+}
 
 /* Because the inode number is no longer relevant to finding the
  * underlying meta-data for an inode, we are free to choose a more
index c0bf424..f488bba 100644 (file)
@@ -288,12 +288,16 @@ eio:
        goto out;
 }
 
+#define RR_REGARD_XA 1
+#define RR_RELOC_DE 2
+
 static int
 parse_rock_ridge_inode_internal(struct iso_directory_record *de,
-                               struct inode *inode, int regard_xa)
+                               struct inode *inode, int flags)
 {
        int symlink_len = 0;
        int cnt, sig;
+       unsigned int reloc_block;
        struct inode *reloc;
        struct rock_ridge *rr;
        int rootflag;
@@ -305,7 +309,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
 
        init_rock_state(&rs, inode);
        setup_rock_ridge(de, inode, &rs);
-       if (regard_xa) {
+       if (flags & RR_REGARD_XA) {
                rs.chr += 14;
                rs.len -= 14;
                if (rs.len < 0)
@@ -485,12 +489,22 @@ repeat:
                                        "relocated directory\n");
                        goto out;
                case SIG('C', 'L'):
-                       ISOFS_I(inode)->i_first_extent =
-                           isonum_733(rr->u.CL.location);
-                       reloc =
-                           isofs_iget(inode->i_sb,
-                                      ISOFS_I(inode)->i_first_extent,
-                                      0);
+                       if (flags & RR_RELOC_DE) {
+                               printk(KERN_ERR
+                                      "ISOFS: Recursive directory relocation "
+                                      "is not supported\n");
+                               goto eio;
+                       }
+                       reloc_block = isonum_733(rr->u.CL.location);
+                       if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
+                           ISOFS_I(inode)->i_iget5_offset == 0) {
+                               printk(KERN_ERR
+                                      "ISOFS: Directory relocation points to "
+                                      "itself\n");
+                               goto eio;
+                       }
+                       ISOFS_I(inode)->i_first_extent = reloc_block;
+                       reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
                        if (IS_ERR(reloc)) {
                                ret = PTR_ERR(reloc);
                                goto out;
@@ -637,9 +651,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
        return rpnt;
 }
 
-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
+int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
+                          int relocated)
 {
-       int result = parse_rock_ridge_inode_internal(de, inode, 0);
+       int flags = relocated ? RR_RELOC_DE : 0;
+       int result = parse_rock_ridge_inode_internal(de, inode, flags);
 
        /*
         * if rockridge flag was reset and we didn't look for attributes
@@ -647,7 +663,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
         */
        if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
            && (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
-               result = parse_rock_ridge_inode_internal(de, inode, 14);
+               result = parse_rock_ridge_inode_internal(de, inode,
+                                                        flags | RR_REGARD_XA);
        }
        return result;
 }
index 6fac743..b73e021 100644 (file)
@@ -97,7 +97,7 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
        struct commit_header *h;
        __u32 csum;
 
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return;
 
        h = (struct commit_header *)(bh->b_data);
@@ -313,11 +313,11 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
        return checksum;
 }
 
-static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
+static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
                                   unsigned long long block)
 {
        tag->t_blocknr = cpu_to_be32(block & (u32)~0);
-       if (tag_bytes > JBD2_TAG_SIZE32)
+       if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT))
                tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 }
 
@@ -327,7 +327,7 @@ static void jbd2_descr_block_csum_set(journal_t *j,
        struct jbd2_journal_block_tail *tail;
        __u32 csum;
 
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return;
 
        tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
@@ -340,12 +340,13 @@ static void jbd2_descr_block_csum_set(journal_t *j,
 static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
                                    struct buffer_head *bh, __u32 sequence)
 {
+       journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
        struct page *page = bh->b_page;
        __u8 *addr;
        __u32 csum32;
        __be32 seq;
 
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return;
 
        seq = cpu_to_be32(sequence);
@@ -355,8 +356,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
                             bh->b_size);
        kunmap_atomic(addr);
 
-       /* We only have space to store the lower 16 bits of the crc32c. */
-       tag->t_checksum = cpu_to_be16(csum32);
+       if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+               tag3->t_checksum = cpu_to_be32(csum32);
+       else
+               tag->t_checksum = cpu_to_be16(csum32);
 }
 /*
  * jbd2_journal_commit_transaction
@@ -396,7 +399,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        LIST_HEAD(io_bufs);
        LIST_HEAD(log_bufs);
 
-       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (jbd2_journal_has_csum_v2or3(journal))
                csum_size = sizeof(struct jbd2_journal_block_tail);
 
        /*
@@ -690,7 +693,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                        tag_flag |= JBD2_FLAG_SAME_UUID;
 
                tag = (journal_block_tag_t *) tagp;
-               write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
+               write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be16(tag_flag);
                jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
                                        commit_transaction->t_tid);
index 67b8e30..19d74d8 100644 (file)
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(__jbd2_debug);
 /* Checksumming functions */
 static int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
 {
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return 1;
 
        return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
@@ -145,7 +145,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
 
 static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
 {
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return 1;
 
        return sb->s_checksum == jbd2_superblock_csum(j, sb);
@@ -153,7 +153,7 @@ static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
 
 static void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
 {
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return;
 
        sb->s_checksum = jbd2_superblock_csum(j, sb);
@@ -1522,21 +1522,29 @@ static int journal_get_superblock(journal_t *journal)
                goto out;
        }
 
-       if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
-           JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+       if (jbd2_journal_has_csum_v2or3(journal) &&
+           JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) {
                /* Can't have checksum v1 and v2 on at the same time! */
                printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
                       "at the same time!\n");
                goto out;
        }
 
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) &&
+           JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
+               /* Can't have checksum v2 and v3 at the same time! */
+               printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 "
+                      "at the same time!\n");
+               goto out;
+       }
+
        if (!jbd2_verify_csum_type(journal, sb)) {
                printk(KERN_ERR "JBD2: Unknown checksum type\n");
                goto out;
        }
 
        /* Load the checksum driver */
-       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+       if (jbd2_journal_has_csum_v2or3(journal)) {
                journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
                if (IS_ERR(journal->j_chksum_driver)) {
                        printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
@@ -1553,7 +1561,7 @@ static int journal_get_superblock(journal_t *journal)
        }
 
        /* Precompute checksum seed for all metadata */
-       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (jbd2_journal_has_csum_v2or3(journal))
                journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
                                                   sizeof(sb->s_uuid));
 
@@ -1813,8 +1821,14 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
        if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
                return 0;
 
-       /* Asking for checksumming v2 and v1?  Only give them v2. */
-       if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
+       /* If enabling v2 checksums, turn on v3 instead */
+       if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) {
+               incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2;
+               incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3;
+       }
+
+       /* Asking for checksumming v3 and v1?  Only give them v3. */
+       if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 &&
            compat & JBD2_FEATURE_COMPAT_CHECKSUM)
                compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
 
@@ -1823,8 +1837,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
 
        sb = journal->j_superblock;
 
-       /* If enabling v2 checksums, update superblock */
-       if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+       /* If enabling v3 checksums, update superblock */
+       if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
                sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
                sb->s_feature_compat &=
                        ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
@@ -1842,8 +1856,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
                }
 
                /* Precompute checksum seed for all metadata */
-               if (JBD2_HAS_INCOMPAT_FEATURE(journal,
-                                             JBD2_FEATURE_INCOMPAT_CSUM_V2))
+               if (jbd2_journal_has_csum_v2or3(journal))
                        journal->j_csum_seed = jbd2_chksum(journal, ~0,
                                                           sb->s_uuid,
                                                           sizeof(sb->s_uuid));
@@ -1852,7 +1865,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
        /* If enabling v1 checksums, downgrade superblock */
        if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
                sb->s_feature_incompat &=
-                       ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
+                       ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 |
+                                    JBD2_FEATURE_INCOMPAT_CSUM_V3);
 
        sb->s_feature_compat    |= cpu_to_be32(compat);
        sb->s_feature_ro_compat |= cpu_to_be32(ro);
@@ -2165,16 +2179,20 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
  */
 size_t journal_tag_bytes(journal_t *journal)
 {
-       journal_block_tag_t tag;
-       size_t x = 0;
+       size_t sz;
+
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+               return sizeof(journal_block_tag3_t);
+
+       sz = sizeof(journal_block_tag_t);
 
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
-               x += sizeof(tag.t_checksum);
+               sz += sizeof(__u16);
 
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
-               return x + JBD2_TAG_SIZE64;
+               return sz;
        else
-               return x + JBD2_TAG_SIZE32;
+               return sz - sizeof(__u32);
 }
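The rewritten journal_tag_bytes() is easiest to check with concrete numbers. Below is a small userspace sketch (not kernel code) mirroring the same arithmetic, assuming the conventional layouts: a 12-byte journal_block_tag_t (32-bit block number, 16-bit checksum, 16-bit flags, 32-bit high half) and a 16-byte journal_block_tag3_t. Struct and flag names are stand-ins.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tag_v2 {                 /* mirrors journal_block_tag_t (12 bytes) */
	uint32_t t_blocknr;
	uint16_t t_checksum;
	uint16_t t_flags;
	uint32_t t_blocknr_high;
};

struct tag_v3 {                 /* mirrors journal_block_tag3_t (16 bytes) */
	uint32_t t_blocknr;
	uint32_t t_flags;
	uint32_t t_blocknr_high;
	uint32_t t_checksum;
};

#define F_64BIT   0x1u
#define F_CSUM_V2 0x2u
#define F_CSUM_V3 0x4u

static size_t tag_bytes(unsigned int features)
{
	size_t sz;

	if (features & F_CSUM_V3)
		return sizeof(struct tag_v3);           /* always 16 */

	sz = sizeof(struct tag_v2);                     /* 12 */
	if (features & F_CSUM_V2)
		sz += sizeof(uint16_t);                 /* v2 keeps its historical +2 */

	if (features & F_64BIT)
		return sz;                              /* 12 or 14 */
	return sz - sizeof(uint32_t);                   /* no high half: 8 or 10 */
}

int main(void)
{
	printf("plain/32-bit: %zu\n", tag_bytes(0));                     /* 8  */
	printf("64-bit:       %zu\n", tag_bytes(F_64BIT));               /* 12 */
	printf("csum v2:      %zu\n", tag_bytes(F_CSUM_V2));             /* 10 */
	printf("csum v2+64:   %zu\n", tag_bytes(F_CSUM_V2 | F_64BIT));   /* 14 */
	printf("csum v3:      %zu\n", tag_bytes(F_CSUM_V3));             /* 16 */
	return 0;
}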
 
 /*
index 3b6bb19..9b329b5 100644 (file)
@@ -181,7 +181,7 @@ static int jbd2_descr_block_csum_verify(journal_t *j,
        __be32 provided;
        __u32 calculated;
 
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return 1;
 
        tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
@@ -205,7 +205,7 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
        int                     nr = 0, size = journal->j_blocksize;
        int                     tag_bytes = journal_tag_bytes(journal);
 
-       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (jbd2_journal_has_csum_v2or3(journal))
                size -= sizeof(struct jbd2_journal_block_tail);
 
        tagp = &bh->b_data[sizeof(journal_header_t)];
@@ -338,10 +338,11 @@ int jbd2_journal_skip_recovery(journal_t *journal)
        return err;
 }
 
-static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
+static inline unsigned long long read_tag_block(journal_t *journal,
+                                               journal_block_tag_t *tag)
 {
        unsigned long long block = be32_to_cpu(tag->t_blocknr);
-       if (tag_bytes > JBD2_TAG_SIZE32)
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
                block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
        return block;
 }
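Passing the journal instead of the precomputed tag size matters because, once csum v2 padding is involved, the tag size no longer reliably tells you whether t_blocknr_high is present on disk; only the 64BIT feature flag does. A tiny standalone sketch of the assembly step:

#include <stdint.h>
#include <stdio.h>

/*
 * The high half of the block number only exists when the 64BIT feature is
 * set, so the decision must come from the feature flag, not the tag size.
 */
static uint64_t make_blocknr(uint32_t lo, uint32_t hi, int has_64bit_feature)
{
	uint64_t block = lo;

	if (has_64bit_feature)
		block |= (uint64_t)hi << 32;
	return block;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)make_blocknr(0x89abcdefu, 0x1u, 1));
	return 0;
}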
@@ -384,7 +385,7 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
        __be32 provided;
        __u32 calculated;
 
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return 1;
 
        h = buf;
@@ -399,17 +400,21 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
 static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
                                      void *buf, __u32 sequence)
 {
+       journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
        __u32 csum32;
        __be32 seq;
 
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return 1;
 
        seq = cpu_to_be32(sequence);
        csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
        csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
 
-       return tag->t_checksum == cpu_to_be16(csum32);
+       if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+               return tag3->t_checksum == cpu_to_be32(csum32);
+       else
+               return tag->t_checksum == cpu_to_be16(csum32);
 }
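The only difference between the two formats in this verifier is the width of the stored checksum: v3 tags keep the full crc32c, v2 tags keep only its low 16 bits. A standalone sketch of that comparison; the real code seeds the checksum with the sequence number and the per-journal seed, while the toy digest below just takes a plain seed argument and is not crc32c.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy 32-bit digest (FNV-1a) standing in for jbd2_chksum()/crc32c. */
static uint32_t toy_csum32(const void *buf, size_t len, uint32_t seed)
{
	const unsigned char *p = buf;
	uint32_t h = seed ^ 2166136261u;

	while (len--)
		h = (h ^ *p++) * 16777619u;
	return h;
}

/* v3 tags store the full 32-bit value; v2 tags keep only the low 16 bits. */
static bool tag_csum_ok(uint32_t stored, const void *blk, size_t len,
			uint32_t seed, bool csum_v3)
{
	uint32_t calc = toy_csum32(blk, len, seed);

	if (csum_v3)
		return stored == calc;
	return (uint16_t)stored == (uint16_t)calc;
}

int main(void)
{
	const char blk[] = "journal block payload";
	uint32_t full = toy_csum32(blk, sizeof(blk), 42);

	printf("v3 ok=%d  v2 ok=%d\n",
	       tag_csum_ok(full, blk, sizeof(blk), 42, true),
	       tag_csum_ok(full & 0xffffu, blk, sizeof(blk), 42, false));
	return 0;
}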
 
 static int do_one_pass(journal_t *journal,
@@ -426,6 +431,7 @@ static int do_one_pass(journal_t *journal,
        int                     tag_bytes = journal_tag_bytes(journal);
        __u32                   crc32_sum = ~0; /* Transactional Checksums */
        int                     descr_csum_size = 0;
+       int                     block_error = 0;
 
        /*
         * First thing is to establish what we expect to find in the log
@@ -512,8 +518,7 @@ static int do_one_pass(journal_t *journal,
                switch(blocktype) {
                case JBD2_DESCRIPTOR_BLOCK:
                        /* Verify checksum first */
-                       if (JBD2_HAS_INCOMPAT_FEATURE(journal,
-                                       JBD2_FEATURE_INCOMPAT_CSUM_V2))
+                       if (jbd2_journal_has_csum_v2or3(journal))
                                descr_csum_size =
                                        sizeof(struct jbd2_journal_block_tail);
                        if (descr_csum_size > 0 &&
@@ -574,7 +579,7 @@ static int do_one_pass(journal_t *journal,
                                        unsigned long long blocknr;
 
                                        J_ASSERT(obh != NULL);
-                                       blocknr = read_tag_block(tag_bytes,
+                                       blocknr = read_tag_block(journal,
                                                                 tag);
 
                                        /* If the block has been
@@ -598,7 +603,8 @@ static int do_one_pass(journal_t *journal,
                                                       "checksum recovering "
                                                       "block %llu in log\n",
                                                       blocknr);
-                                               continue;
+                                               block_error = 1;
+                                               goto skip_write;
                                        }
 
                                        /* Find a buffer for the new
@@ -797,7 +803,8 @@ static int do_one_pass(journal_t *journal,
                                success = -EIO;
                }
        }
-
+       if (block_error && success == 0)
+               success = -EIO;
        return success;
 
  failed:
@@ -811,7 +818,7 @@ static int jbd2_revoke_block_csum_verify(journal_t *j,
        __be32 provided;
        __u32 calculated;
 
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return 1;
 
        tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
index 198c9c1..d5e95a1 100644 (file)
@@ -91,8 +91,8 @@
 #include <linux/list.h>
 #include <linux/init.h>
 #include <linux/bio.h>
-#endif
 #include <linux/log2.h>
+#endif
 
 static struct kmem_cache *jbd2_revoke_record_cache;
 static struct kmem_cache *jbd2_revoke_table_cache;
@@ -597,7 +597,7 @@ static void write_one_revoke_record(journal_t *journal,
        offset = *offsetp;
 
        /* Do we need to leave space at the end for a checksum? */
-       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (jbd2_journal_has_csum_v2or3(journal))
                csum_size = sizeof(struct jbd2_journal_revoke_tail);
 
        /* Make sure we have a descriptor with space left for the record */
@@ -644,7 +644,7 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
        struct jbd2_journal_revoke_tail *tail;
        __u32 csum;
 
-       if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+       if (!jbd2_journal_has_csum_v2or3(j))
                return;
 
        tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
index ba49192..be7cbce 100644 (file)
@@ -116,7 +116,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
                if (atomic_read(&c->io_count) == 0)
                        break;
                ret = nfs_wait_bit_killable(&q.key);
-       } while (atomic_read(&c->io_count) != 0);
+       } while (atomic_read(&c->io_count) != 0 && !ret);
        finish_wait(wq, &q.wait);
        return ret;
 }
@@ -139,26 +139,49 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
+ * @nonblock - if true don't block waiting for lock
  *
  * this lock must be held if modifying the page group list
  *
- * returns result from wait_on_bit_lock: 0 on success, < 0 on error
+ * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
+ * result from wait_on_bit_lock
+ *
+ * NOTE: calling with nonblock=false should always have set the
+ *       lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
+ *       with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
  */
 int
-nfs_page_group_lock(struct nfs_page *req, bool wait)
+nfs_page_group_lock(struct nfs_page *req, bool nonblock)
 {
        struct nfs_page *head = req->wb_head;
-       int ret;
 
        WARN_ON_ONCE(head != head->wb_head);
 
-       do {
-               ret = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
-                       TASK_UNINTERRUPTIBLE);
-       } while (wait && ret != 0);
+       if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
+               return 0;
 
-       WARN_ON_ONCE(ret > 0);
-       return ret;
+       if (!nonblock)
+               return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+                               TASK_UNINTERRUPTIBLE);
+
+       return -EAGAIN;
+}
+
+/*
+ * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
+ * @req - a request in the group
+ *
+ * This is a blocking call to wait for the group lock to be cleared.
+ */
+void
+nfs_page_group_lock_wait(struct nfs_page *req)
+{
+       struct nfs_page *head = req->wb_head;
+
+       WARN_ON_ONCE(head != head->wb_head);
+
+       wait_on_bit(&head->wb_flags, PG_HEADLOCK,
+               TASK_UNINTERRUPTIBLE);
 }
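The new locking scheme splits cleanly into "try to grab the bit; if that fails, either bail out with -EAGAIN or block". A rough userspace analogue, with a C11 atomic flag and a yield loop standing in for test_and_set_bit() and wait_on_bit_lock() (illustration only, not the NFS code):

#include <errno.h>
#include <sched.h>
#include <stdatomic.h>

static atomic_flag group_lock = ATOMIC_FLAG_INIT;

/* Returns 0 when the lock was taken, -EAGAIN when nonblock and it was busy. */
static int group_lock_acquire(int nonblock)
{
	if (!atomic_flag_test_and_set_explicit(&group_lock, memory_order_acquire))
		return 0;                 /* fast path: we got the lock */

	if (nonblock)
		return -EAGAIN;           /* caller drops its locks, waits, retries */

	/* blocking path: spin until the holder clears the flag */
	while (atomic_flag_test_and_set_explicit(&group_lock, memory_order_acquire))
		sched_yield();
	return 0;
}

static void group_lock_release(void)
{
	atomic_flag_clear_explicit(&group_lock, memory_order_release);
}

int main(void)
{
	if (group_lock_acquire(1) == 0)   /* nonblocking try succeeds when free */
		group_lock_release();
	return 0;
}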
 
 /*
@@ -219,7 +242,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 {
        bool ret;
 
-       nfs_page_group_lock(req, true);
+       nfs_page_group_lock(req, false);
        ret = nfs_page_group_sync_on_bit_locked(req, bit);
        nfs_page_group_unlock(req);
 
@@ -701,10 +724,11 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
                     struct nfs_pgio_header *hdr)
 {
        struct nfs_page         *req;
-       struct page             **pages;
+       struct page             **pages,
+                               *last_page;
        struct list_head *head = &desc->pg_list;
        struct nfs_commit_info cinfo;
-       unsigned int pagecount;
+       unsigned int pagecount, pageused;
 
        pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
        if (!nfs_pgarray_set(&hdr->page_array, pagecount))
@@ -712,12 +736,23 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 
        nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
        pages = hdr->page_array.pagevec;
+       last_page = NULL;
+       pageused = 0;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &hdr->pages);
-               *pages++ = req->wb_page;
+
+               if (WARN_ON_ONCE(pageused >= pagecount))
+                       return nfs_pgio_error(desc, hdr);
+
+               if (!last_page || last_page != req->wb_page) {
+                       *pages++ = last_page = req->wb_page;
+                       pageused++;
+               }
        }
+       if (WARN_ON_ONCE(pageused != pagecount))
+               return nfs_pgio_error(desc, hdr);
 
        if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
            (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
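With sub-page requests, several entries on hdr->pages can now reference the same struct page, so the page array must receive each page at most once, and the WARN_ON_ONCE() checks catch any mismatch against the precomputed pagecount. A minimal sketch of that dedup-and-bound pattern (illustrative types, not the NFS structures):

#include <stddef.h>
#include <stdio.h>

struct req { void *page; };

/* Returns how many distinct pages were stored, or -1 on overflow. */
static int fill_page_array(void **pages, size_t pagecount,
			   struct req **reqs, size_t nreqs)
{
	void *last_page = NULL;
	size_t used = 0;

	for (size_t i = 0; i < nreqs; i++) {
		if (reqs[i]->page == last_page)
			continue;               /* same page as the previous request */
		if (used >= pagecount)
			return -1;              /* mirrors the WARN_ON_ONCE() bail-out */
		pages[used++] = last_page = reqs[i]->page;
	}
	return (int)used;
}

int main(void)
{
	struct req a = { (void *)0x1000 }, b = { (void *)0x1000 }, c = { (void *)0x2000 };
	struct req *reqs[] = { &a, &b, &c };    /* two sub-page reqs share one page */
	void *pages[2];

	printf("distinct pages: %d\n", fill_page_array(pages, 2, reqs, 3)); /* 2 */
	return 0;
}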
@@ -788,6 +823,14 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                        return false;
                if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
                        return false;
+               if (req->wb_page == prev->wb_page) {
+                       if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
+                               return false;
+               } else {
+                       if (req->wb_pgbase != 0 ||
+                           prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+                               return false;
+               }
        }
        size = pgio->pg_ops->pg_test(pgio, prev, req);
        WARN_ON_ONCE(size > req->wb_bytes);
@@ -858,13 +901,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
        struct nfs_page *subreq;
        unsigned int bytes_left = 0;
        unsigned int offset, pgbase;
-       int ret;
 
-       ret = nfs_page_group_lock(req, false);
-       if (ret < 0) {
-               desc->pg_error = ret;
-               return 0;
-       }
+       nfs_page_group_lock(req, false);
 
        subreq = req;
        bytes_left = subreq->wb_bytes;
@@ -886,11 +924,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                        if (desc->pg_recoalesce)
                                return 0;
                        /* retry add_request for this subreq */
-                       ret = nfs_page_group_lock(req, false);
-                       if (ret < 0) {
-                               desc->pg_error = ret;
-                               return 0;
-                       }
+                       nfs_page_group_lock(req, false);
                        continue;
                }
 
index e3b5cf2..175d5d0 100644 (file)
@@ -241,7 +241,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req)
        unsigned int pos = 0;
        unsigned int len = nfs_page_length(req->wb_page);
 
-       nfs_page_group_lock(req, true);
+       nfs_page_group_lock(req, false);
 
        do {
                tmp = nfs_page_group_search_locked(req->wb_head, pos);
@@ -478,10 +478,23 @@ try_again:
                return NULL;
        }
 
-       /* lock each request in the page group */
-       ret = nfs_page_group_lock(head, false);
-       if (ret < 0)
+       /* holding inode lock, so always make a non-blocking call to try the
+        * page group lock */
+       ret = nfs_page_group_lock(head, true);
+       if (ret < 0) {
+               spin_unlock(&inode->i_lock);
+
+               if (!nonblock && ret == -EAGAIN) {
+                       nfs_page_group_lock_wait(head);
+                       nfs_release_request(head);
+                       goto try_again;
+               }
+
+               nfs_release_request(head);
                return ERR_PTR(ret);
+       }
+
+       /* lock each request in the page group */
        subreq = head;
        do {
                /*
index 9737cba..83a0600 100644 (file)
@@ -1014,7 +1014,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 
        fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
        if (!fi)
-               goto out_no_entry;
+               goto out_fail;
        cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
        cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
        if (UDF_SB(inode->i_sb)->s_lvid_bh) {
@@ -1036,6 +1036,7 @@ out:
 
 out_no_entry:
        up_write(&iinfo->i_data_sem);
+out_fail:
        inode_dec_link_count(inode);
        iput(inode);
        goto out;
index 6dfd64b..e973540 100644 (file)
@@ -17,6 +17,7 @@
        {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index eb726b9..a1e31f2 100644 (file)
@@ -127,10 +127,9 @@ enum {
        BLK_MQ_RQ_QUEUE_ERROR   = 2,    /* end IO with error */
 
        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
-       BLK_MQ_F_SHOULD_SORT    = 1 << 1,
-       BLK_MQ_F_TAG_SHARED     = 1 << 2,
-       BLK_MQ_F_SG_MERGE       = 1 << 3,
-       BLK_MQ_F_SYSFS_UP       = 1 << 4,
+       BLK_MQ_F_TAG_SHARED     = 1 << 1,
+       BLK_MQ_F_SG_MERGE       = 1 << 2,
+       BLK_MQ_F_SYSFS_UP       = 1 << 3,
 
        BLK_MQ_S_STOPPED        = 0,
        BLK_MQ_S_TAG_ACTIVE     = 1,
index 6f76277..61219b9 100644 (file)
@@ -16,7 +16,6 @@
 #define PHY_ID_BCM7366                 0x600d8490
 #define PHY_ID_BCM7439                 0x600d8480
 #define PHY_ID_BCM7445                 0x600d8510
-#define PHY_ID_BCM7XXX_28              0x600d8400
 
 #define PHY_BCM_OUI_MASK               0xfffffc00
 #define PHY_BCM_OUI_1                  0x00206000
index 6bb5e3f..f0b0edb 100644 (file)
@@ -102,6 +102,15 @@ enum {
        FTRACE_OPS_FL_DELETED                   = 1 << 8,
 };
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* The hash used to know what functions callbacks trace */
+struct ftrace_ops_hash {
+       struct ftrace_hash              *notrace_hash;
+       struct ftrace_hash              *filter_hash;
+       struct mutex                    regex_lock;
+};
+#endif
+
 /*
  * Note, ftrace_ops can be referenced outside of RCU protection.
  * (Although, for perf, the control ops prevent that). If ftrace_ops is
@@ -121,10 +130,9 @@ struct ftrace_ops {
        int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        int                             nr_trampolines;
-       struct ftrace_hash              *notrace_hash;
-       struct ftrace_hash              *filter_hash;
+       struct ftrace_ops_hash          local_hash;
+       struct ftrace_ops_hash          *func_hash;
        struct ftrace_hash              *tramp_hash;
-       struct mutex                    regex_lock;
        unsigned long                   trampoline;
 #endif
 };
index b7ce0c6..c7e17de 100644 (file)
@@ -16,8 +16,6 @@ struct device;
  */
 struct gpio_desc;
 
-#ifdef CONFIG_GPIOLIB
-
 #define GPIOD_FLAGS_BIT_DIR_SET                BIT(0)
 #define GPIOD_FLAGS_BIT_DIR_OUT                BIT(1)
 #define GPIOD_FLAGS_BIT_DIR_VAL                BIT(2)
@@ -34,6 +32,8 @@ enum gpiod_flags {
                          GPIOD_FLAGS_BIT_DIR_VAL,
 };
 
+#ifdef CONFIG_GPIOLIB
+
 /* Acquire and dispose GPIOs */
 struct gpio_desc *__must_check __gpiod_get(struct device *dev,
                                         const char *con_id,
index ea50766..a95efeb 100644 (file)
@@ -577,16 +577,20 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
 }
 #endif /* CONFIG_OF */
 
-#ifdef CONFIG_I2C_ACPI
-int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
-void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
+#ifdef CONFIG_ACPI
 void acpi_i2c_register_devices(struct i2c_adapter *adap);
 #else
 static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI_I2C_OPREGION
+int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
+void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
+#else
 static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
 { }
 static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
 { return 0; }
-#endif
+#endif /* CONFIG_ACPI_I2C_OPREGION */
 
 #endif /* _LINUX_I2C_H */
index d5b50a1..0dae71e 100644 (file)
@@ -159,7 +159,11 @@ typedef struct journal_header_s
  * journal_block_tag (in the descriptor).  The other h_chksum* fields are
  * not used.
  *
- * Checksum v1 and v2 are mutually exclusive features.
+ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
+ * journal_block_tag3_t to store a full 32-bit checksum.  Everything else
+ * is the same as v2.
+ *
+ * Checksum v1, v2, and v3 are mutually exclusive features.
  */
 struct commit_header {
        __be32          h_magic;
@@ -179,6 +183,14 @@ struct commit_header {
  * raw struct shouldn't be used for pointer math or sizeof() - use
  * journal_tag_bytes(journal) instead to compute this.
  */
+typedef struct journal_block_tag3_s
+{
+       __be32          t_blocknr;      /* The on-disk block number */
+       __be32          t_flags;        /* See below */
+       __be32          t_blocknr_high; /* most-significant high 32bits. */
+       __be32          t_checksum;     /* crc32c(uuid+seq+block) */
+} journal_block_tag3_t;
+
 typedef struct journal_block_tag_s
 {
        __be32          t_blocknr;      /* The on-disk block number */
@@ -187,9 +199,6 @@ typedef struct journal_block_tag_s
        __be32          t_blocknr_high; /* most-significant high 32bits. */
 } journal_block_tag_t;
 
-#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
-#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
-
 /* Tail of descriptor block, for checksumming */
 struct jbd2_journal_block_tail {
        __be32          t_checksum;     /* crc32c(uuid+descr_block) */
@@ -284,6 +293,7 @@ typedef struct journal_superblock_s
 #define JBD2_FEATURE_INCOMPAT_64BIT            0x00000002
 #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT     0x00000004
 #define JBD2_FEATURE_INCOMPAT_CSUM_V2          0x00000008
+#define JBD2_FEATURE_INCOMPAT_CSUM_V3          0x00000010
 
 /* Features known to this kernel version: */
 #define JBD2_KNOWN_COMPAT_FEATURES     JBD2_FEATURE_COMPAT_CHECKSUM
@@ -291,7 +301,8 @@ typedef struct journal_superblock_s
 #define JBD2_KNOWN_INCOMPAT_FEATURES   (JBD2_FEATURE_INCOMPAT_REVOKE | \
                                        JBD2_FEATURE_INCOMPAT_64BIT | \
                                        JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
-                                       JBD2_FEATURE_INCOMPAT_CSUM_V2)
+                                       JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
+                                       JBD2_FEATURE_INCOMPAT_CSUM_V3)
 
 #ifdef __KERNEL__
 
@@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y)
 extern int jbd2_journal_blocks_per_page(struct inode *inode);
 extern size_t journal_tag_bytes(journal_t *journal);
 
+static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
+{
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
+           JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+               return 1;
+
+       return 0;
+}
+
 /*
  * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
  * transaction control blocks.
index 6ad2bbc..6c3e06e 100644 (file)
@@ -123,6 +123,7 @@ extern  int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
 extern int nfs_page_group_lock(struct nfs_page *, bool);
+extern void nfs_page_group_lock_wait(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
 
index 3d6003d..a1ba6a5 100644 (file)
@@ -62,6 +62,7 @@ to_seqno_fence(struct fence *fence)
  * @context: the execution context this fence is a part of
  * @seqno_ofs: the offset within @sync_buf
  * @seqno: the sequence # to signal on
+ * @cond: fence wait condition
  * @ops: the fence_ops for operations on this seqno fence
  *
  * This function initializes a struct seqno_fence with passed parameters,
index f1afd60..11d11bc 100644 (file)
@@ -703,9 +703,11 @@ __SYSCALL(__NR_renameat2, sys_renameat2)
 __SYSCALL(__NR_seccomp, sys_seccomp)
 #define __NR_getrandom 278
 __SYSCALL(__NR_getrandom, sys_getrandom)
+#define __NR_memfd_create 279
+__SYSCALL(__NR_memfd_create, sys_memfd_create)
 
 #undef __NR_syscalls
-#define __NR_syscalls 279
+#define __NR_syscalls 280
 
 /*
  * All syscalls below here should go away really,
index 509b2d7..fea6099 100644 (file)
@@ -944,6 +944,7 @@ struct drm_radeon_cs_chunk {
 };
 
 /* drm_radeon_cs_reloc.flags */
+#define RADEON_RELOC_PRIO_MASK         (0xf << 0)
 
 struct drm_radeon_cs_reloc {
        uint32_t                handle;
index 1cf24b3..f9c1ed0 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/mman.h>
+#include <linux/compat.h>
 
 #include "internal.h"
 
@@ -3717,6 +3718,26 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return 0;
 }
 
+#ifdef CONFIG_COMPAT
+static long perf_compat_ioctl(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       switch (_IOC_NR(cmd)) {
+       case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
+       case _IOC_NR(PERF_EVENT_IOC_ID):
+               /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */
+               if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+                       cmd &= ~IOCSIZE_MASK;
+                       cmd |= sizeof(void *) << IOCSIZE_SHIFT;
+               }
+               break;
+       }
+       return perf_ioctl(file, cmd, arg);
+}
+#else
+# define perf_compat_ioctl NULL
+#endif
+
 int perf_event_task_enable(void)
 {
        struct perf_event *event;
@@ -4222,7 +4243,7 @@ static const struct file_operations perf_fops = {
        .read                   = perf_read,
        .poll                   = perf_poll,
        .unlocked_ioctl         = perf_ioctl,
-       .compat_ioctl           = perf_ioctl,
+       .compat_ioctl           = perf_compat_ioctl,
        .mmap                   = perf_mmap,
        .fasync                 = perf_fasync,
 };
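The compat handler registered above only needs to rewrite the size field embedded in the ioctl number: 32-bit userspace encodes a 4-byte pointer there, while the native handler expects sizeof(void *). A standalone sketch of that rewrite, assuming the usual asm-generic encoding with a 14-bit size field at bit 16; the layout constants and the example command are illustrative, not portable definitions:

#include <stdio.h>

#define IOC_SIZESHIFT 16
#define IOC_SIZEMASK  0x3fffu
#define IOCSIZE_MASK  (IOC_SIZEMASK << IOC_SIZESHIFT)

/* Rewrite a 4-byte pointer size to the native pointer size. */
static unsigned int fix_compat_size(unsigned int cmd)
{
	if (((cmd >> IOC_SIZESHIFT) & IOC_SIZEMASK) == 4) {
		cmd &= ~IOCSIZE_MASK;
		cmd |= (unsigned int)sizeof(void *) << IOC_SIZESHIFT;
	}
	return cmd;
}

int main(void)
{
	/* hypothetical command with magic 'x', nr 1 and a 4-byte argument */
	unsigned int cmd = ('x' << 8) | 1 | (4u << IOC_SIZESHIFT);

	printf("0x%08x -> 0x%08x\n", cmd, fix_compat_size(cmd));
	return 0;
}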
index 734e9a7..3995f54 100644 (file)
@@ -1778,7 +1778,18 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
        unsigned long hash, flags = 0;
        struct kretprobe_instance *ri;
 
-       /*TODO: consider to only swap the RA after the last pre_handler fired */
+       /*
+        * To avoid deadlocks, prohibit return probing in NMI contexts,
+        * just skip the probe and increase the (inexact) 'nmissed'
+        * statistical counter, so that the user is informed that
+        * something happened:
+        */
+       if (unlikely(in_nmi())) {
+               rp->nmissed++;
+               return 0;
+       }
+
+       /* TODO: consider to only swap the RA after the last pre_handler fired */
        hash = hash_ptr(current, KPROBE_HASH_BITS);
        raw_spin_lock_irqsave(&rp->lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
index 1654b12..5916a8e 100644 (file)
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-#define INIT_REGEX_LOCK(opsname)       \
-       .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
+#define INIT_OPS_HASH(opsname) \
+       .func_hash              = &opsname.local_hash,                  \
+       .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+#define ASSIGN_OPS_HASH(opsname, val) \
+       .func_hash              = val, \
+       .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 #else
-#define INIT_REGEX_LOCK(opsname)
+#define INIT_OPS_HASH(opsname)
+#define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
 static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+       INIT_OPS_HASH(ftrace_list_end)
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
@@ -140,7 +146,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
-               mutex_init(&ops->regex_lock);
+               mutex_init(&ops->local_hash.regex_lock);
+               ops->func_hash = &ops->local_hash;
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
 #endif
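The structural change here is that the filter/notrace hashes and their regex_lock move into a structure each ftrace_ops owns (local_hash) but only reaches through a pointer (func_hash), so several ops can share one hash, as graph_ops later does with global_ops.local_hash. A toy model of that indirection (illustrative names, not the kernel structures):

#include <stdio.h>

struct toy_hash { int filter_count; int notrace_count; };

struct toy_ops {
	struct toy_hash local_hash;
	struct toy_hash *func_hash;     /* usually &local_hash, may be shared */
};

static void toy_ops_init(struct toy_ops *ops)
{
	ops->func_hash = &ops->local_hash;     /* default: private hash */
}

static void toy_ops_share(struct toy_ops *ops, struct toy_ops *owner)
{
	ops->func_hash = owner->func_hash;     /* piggy-back on another ops' hash */
}

int main(void)
{
	struct toy_ops global = { .func_hash = NULL };
	struct toy_ops graph  = { .func_hash = NULL };

	toy_ops_init(&global);
	toy_ops_init(&graph);
	toy_ops_share(&graph, &global);

	global.func_hash->filter_count = 3;    /* visible through both ops */
	printf("graph sees %d filters\n", graph.func_hash->filter_count);
	return 0;
}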
@@ -899,7 +906,7 @@ static void unregister_ftrace_profiler(void)
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(ftrace_profile_ops)
+       INIT_OPS_HASH(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1081,11 +1088,12 @@ static const struct ftrace_hash empty_hash = {
 #define EMPTY_HASH     ((struct ftrace_hash *)&empty_hash)
 
 static struct ftrace_ops global_ops = {
-       .func                   = ftrace_stub,
-       .notrace_hash           = EMPTY_HASH,
-       .filter_hash            = EMPTY_HASH,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(global_ops)
+       .func                           = ftrace_stub,
+       .local_hash.notrace_hash        = EMPTY_HASH,
+       .local_hash.filter_hash         = EMPTY_HASH,
+       INIT_OPS_HASH(global_ops)
+       .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
+                                         FTRACE_OPS_FL_INITIALIZED,
 };
 
 struct ftrace_page {
@@ -1226,8 +1234,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
        ftrace_ops_init(ops);
-       free_ftrace_hash(ops->filter_hash);
-       free_ftrace_hash(ops->notrace_hash);
+       free_ftrace_hash(ops->func_hash->filter_hash);
+       free_ftrace_hash(ops->func_hash->notrace_hash);
 }
 
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
@@ -1288,9 +1296,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 }
 
 static void
-ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
 static void
-ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 
 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
@@ -1342,13 +1350,13 @@ update:
         * Remove the current set, update the hash and add
         * them back.
         */
-       ftrace_hash_rec_disable(ops, enable);
+       ftrace_hash_rec_disable_modify(ops, enable);
 
        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);
 
-       ftrace_hash_rec_enable(ops, enable);
+       ftrace_hash_rec_enable_modify(ops, enable);
 
        return 0;
 }
@@ -1382,8 +1390,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
                return 0;
 #endif
 
-       filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
-       notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
+       filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+       notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
 
        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
@@ -1503,25 +1511,38 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
 static void ftrace_remove_tramp(struct ftrace_ops *ops,
                                struct dyn_ftrace *rec)
 {
-       struct ftrace_func_entry *entry;
-
-       entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
-       if (!entry)
+       /* If TRAMP is not set, no ops should have a trampoline for this */
+       if (!(rec->flags & FTRACE_FL_TRAMP))
                return;
 
+       rec->flags &= ~FTRACE_FL_TRAMP;
+
+       if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+            !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
+           ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
+               return;
        /*
         * The tramp_hash entry will be removed at time
         * of update.
         */
        ops->nr_trampolines--;
-       rec->flags &= ~FTRACE_FL_TRAMP;
 }
 
-static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
 {
        struct ftrace_ops *op;
 
+       /* If TRAMP is not set, no ops should have a trampoline for this */
+       if (!(rec->flags & FTRACE_FL_TRAMP))
+               return;
+
        do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /*
+                * This function is called to clear other tramps
+                * not the one that is being updated.
+                */
+               if (op == ops)
+                       continue;
                if (op->nr_trampolines)
                        ftrace_remove_tramp(op, rec);
        } while_for_each_ftrace_op(op);
@@ -1554,14 +1575,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
         *   gets inversed.
         */
        if (filter_hash) {
-               hash = ops->filter_hash;
-               other_hash = ops->notrace_hash;
+               hash = ops->func_hash->filter_hash;
+               other_hash = ops->func_hash->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
-               hash = ops->notrace_hash;
-               other_hash = ops->filter_hash;
+               hash = ops->func_hash->notrace_hash;
+               other_hash = ops->func_hash->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
@@ -1622,13 +1643,10 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                /*
                                 * If we are adding another function callback
                                 * to this function, and the previous had a
-                                * trampoline used, then we need to go back to
-                                * the default trampoline.
+                                * custom trampoline in use, then we need to go
+                                * back to the default trampoline.
                                 */
-                               rec->flags &= ~FTRACE_FL_TRAMP;
-
-                               /* remove trampolines from any ops for this rec */
-                               ftrace_clear_tramps(rec);
+                               ftrace_clear_tramps(rec, ops);
                        }
 
                        /*
@@ -1682,6 +1700,41 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
        __ftrace_hash_rec_update(ops, filter_hash, 1);
 }
 
+static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
+                                         int filter_hash, int inc)
+{
+       struct ftrace_ops *op;
+
+       __ftrace_hash_rec_update(ops, filter_hash, inc);
+
+       if (ops->func_hash != &global_ops.local_hash)
+               return;
+
+       /*
+        * If the ops shares the global_ops hash, then we need to update
+        * all ops that are enabled and use this hash.
+        */
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /* Already done */
+               if (op == ops)
+                       continue;
+               if (op->func_hash == &global_ops.local_hash)
+                       __ftrace_hash_rec_update(op, filter_hash, inc);
+       } while_for_each_ftrace_op(op);
+}
+
+static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
+                                          int filter_hash)
+{
+       ftrace_hash_rec_update_modify(ops, filter_hash, 0);
+}
+
+static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
+                                         int filter_hash)
+{
+       ftrace_hash_rec_update_modify(ops, filter_hash, 1);
+}
+
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
        int i;
@@ -1896,8 +1949,8 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
        if (rec->flags & FTRACE_FL_TRAMP) {
                ops = ftrace_find_tramp_ops_new(rec);
                if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
-                       pr_warning("Bad trampoline accounting at: %p (%pS)\n",
-                                   (void *)rec->ip, (void *)rec->ip);
+                       pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
+                               (void *)rec->ip, (void *)rec->ip, rec->flags);
                        /* Ftrace is shutting down, return anything */
                        return (unsigned long)FTRACE_ADDR;
                }
@@ -1964,7 +2017,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
                return ftrace_make_call(rec, ftrace_addr);
 
        case FTRACE_UPDATE_MAKE_NOP:
-               return ftrace_make_nop(NULL, rec, ftrace_addr);
+               return ftrace_make_nop(NULL, rec, ftrace_old_addr);
 
        case FTRACE_UPDATE_MODIFY_CALL:
                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
@@ -2227,7 +2280,10 @@ static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
        } while_for_each_ftrace_rec();
 
        /* The number of recs in the hash must match nr_trampolines */
-       FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+       if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
+               pr_warn("count=%ld trampolines=%d\n",
+                       ops->tramp_hash->count,
+                       ops->nr_trampolines);
 
        return 0;
 }
@@ -2436,8 +2492,8 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
         * Filter_hash being empty will default to trace module.
         * But notrace hash requires a test of individual module functions.
         */
-       return ftrace_hash_empty(ops->filter_hash) &&
-               ftrace_hash_empty(ops->notrace_hash);
+       return ftrace_hash_empty(ops->func_hash->filter_hash) &&
+               ftrace_hash_empty(ops->func_hash->notrace_hash);
 }
 
 /*
@@ -2459,12 +2515,12 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
                return 0;
 
        /* The function must be in the filter */
-       if (!ftrace_hash_empty(ops->filter_hash) &&
-           !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+       if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+           !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
                return 0;
 
        /* If in notrace hash, we ignore it too */
-       if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+       if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
                return 0;
 
        return 1;
@@ -2785,10 +2841,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
        } else {
                rec = &iter->pg->records[iter->idx++];
                if (((iter->flags & FTRACE_ITER_FILTER) &&
-                    !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
+                    !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
 
                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
-                    !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+                    !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
 
                    ((iter->flags & FTRACE_ITER_ENABLED) &&
                     !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -2837,9 +2893,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
         * functions are enabled.
         */
        if ((iter->flags & FTRACE_ITER_FILTER &&
-            ftrace_hash_empty(ops->filter_hash)) ||
+            ftrace_hash_empty(ops->func_hash->filter_hash)) ||
            (iter->flags & FTRACE_ITER_NOTRACE &&
-            ftrace_hash_empty(ops->notrace_hash))) {
+            ftrace_hash_empty(ops->func_hash->notrace_hash))) {
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
@@ -3001,12 +3057,12 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        iter->ops = ops;
        iter->flags = flag;
 
-       mutex_lock(&ops->regex_lock);
+       mutex_lock(&ops->func_hash->regex_lock);
 
        if (flag & FTRACE_ITER_NOTRACE)
-               hash = ops->notrace_hash;
+               hash = ops->func_hash->notrace_hash;
        else
-               hash = ops->filter_hash;
+               hash = ops->func_hash->filter_hash;
 
        if (file->f_mode & FMODE_WRITE) {
                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
@@ -3041,7 +3097,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                file->private_data = iter;
 
  out_unlock:
-       mutex_unlock(&ops->regex_lock);
+       mutex_unlock(&ops->func_hash->regex_lock);
 
        return ret;
 }
@@ -3279,7 +3335,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 {
        .func           = function_trace_probe_call,
        .flags          = FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(trace_probe_ops)
+       INIT_OPS_HASH(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -3342,7 +3398,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
 {
        struct ftrace_func_probe *entry;
-       struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+       struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct ftrace_hash *hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
@@ -3359,7 +3415,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        if (WARN_ON(not))
                return -EINVAL;
 
-       mutex_lock(&trace_probe_ops.regex_lock);
+       mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash) {
@@ -3428,7 +3484,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  out_unlock:
        mutex_unlock(&ftrace_lock);
  out:
-       mutex_unlock(&trace_probe_ops.regex_lock);
+       mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
        free_ftrace_hash(hash);
 
        return count;
@@ -3446,7 +3502,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        struct ftrace_func_entry *rec_entry;
        struct ftrace_func_probe *entry;
        struct ftrace_func_probe *p;
-       struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+       struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct list_head free_list;
        struct ftrace_hash *hash;
        struct hlist_node *tmp;
@@ -3468,7 +3524,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                        return;
        }
 
-       mutex_lock(&trace_probe_ops.regex_lock);
+       mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash)
@@ -3521,7 +3577,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        mutex_unlock(&ftrace_lock);
                
  out_unlock:
-       mutex_unlock(&trace_probe_ops.regex_lock);
+       mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
        free_ftrace_hash(hash);
 }
 
@@ -3717,12 +3773,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
-       mutex_lock(&ops->regex_lock);
+       mutex_lock(&ops->func_hash->regex_lock);
 
        if (enable)
-               orig_hash = &ops->filter_hash;
+               orig_hash = &ops->func_hash->filter_hash;
        else
-               orig_hash = &ops->notrace_hash;
+               orig_hash = &ops->func_hash->notrace_hash;
 
        if (reset)
                hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
@@ -3752,7 +3808,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-       mutex_unlock(&ops->regex_lock);
+       mutex_unlock(&ops->func_hash->regex_lock);
 
        free_ftrace_hash(hash);
        return ret;
@@ -3975,15 +4031,15 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        trace_parser_put(parser);
 
-       mutex_lock(&iter->ops->regex_lock);
+       mutex_lock(&iter->ops->func_hash->regex_lock);
 
        if (file->f_mode & FMODE_WRITE) {
                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
                if (filter_hash)
-                       orig_hash = &iter->ops->filter_hash;
+                       orig_hash = &iter->ops->func_hash->filter_hash;
                else
-                       orig_hash = &iter->ops->notrace_hash;
+                       orig_hash = &iter->ops->func_hash->notrace_hash;
 
                mutex_lock(&ftrace_lock);
                ret = ftrace_hash_move(iter->ops, filter_hash,
@@ -3994,7 +4050,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
                mutex_unlock(&ftrace_lock);
        }
 
-       mutex_unlock(&iter->ops->regex_lock);
+       mutex_unlock(&iter->ops->func_hash->regex_lock);
        free_ftrace_hash(iter->hash);
        kfree(iter);
 
@@ -4611,7 +4667,6 @@ void __init ftrace_init(void)
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4713,7 +4768,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops control_ops = {
        .func   = ftrace_ops_control_func,
        .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-       INIT_REGEX_LOCK(control_ops)
+       INIT_OPS_HASH(control_ops)
 };
 
 static inline void
@@ -5145,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+static struct ftrace_ops graph_ops = {
+       .func                   = ftrace_stub,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
+                                  FTRACE_OPS_FL_INITIALIZED |
+                                  FTRACE_OPS_FL_STUB,
+#ifdef FTRACE_GRAPH_TRAMP_ADDR
+       .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
+#endif
+       ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
+};
+
 static int ftrace_graph_active;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -5307,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
  */
 static void update_function_graph_func(void)
 {
-       if (ftrace_ops_list == &ftrace_list_end ||
-           (ftrace_ops_list == &global_ops &&
-            global_ops.next == &ftrace_list_end))
-               ftrace_graph_entry = __ftrace_graph_entry;
-       else
+       struct ftrace_ops *op;
+       bool do_test = false;
+
+       /*
+        * The graph and global ops share the same set of functions
+        * to test. If any other ops is on the list, then
+        * the graph tracing needs to test if its the function
+        * it should call.
+        */
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op != &global_ops && op != &graph_ops &&
+                   op != &ftrace_list_end) {
+                       do_test = true;
+                       /* in double loop, break out with goto */
+                       goto out;
+               }
+       } while_for_each_ftrace_op(op);
+ out:
+       if (do_test)
                ftrace_graph_entry = ftrace_graph_entry_test;
+       else
+               ftrace_graph_entry = __ftrace_graph_entry;
 }
 
 static struct notifier_block ftrace_suspend_notifier = {
@@ -5353,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        ftrace_graph_entry = ftrace_graph_entry_test;
        update_function_graph_func();
 
-       /* Function graph doesn't use the .func field of global_ops */
-       global_ops.flags |= FTRACE_OPS_FL_STUB;
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-       /* Optimize function graph calling (if implemented by arch) */
-       if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-               global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
-#endif
-
-       ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+       ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
 
 out:
        mutex_unlock(&ftrace_lock);
@@ -5380,12 +5453,7 @@ void unregister_ftrace_graph(void)
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        __ftrace_graph_entry = ftrace_graph_entry_stub;
-       ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
-       global_ops.flags &= ~FTRACE_OPS_FL_STUB;
-#ifdef CONFIG_DYNAMIC_FTRACE
-       if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-               global_ops.trampoline = 0;
-#endif
+       ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
index afb04b9..b38fb2b 100644 (file)
@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
                work = &cpu_buffer->irq_work;
        }
 
-       work->waiters_pending = true;
        poll_wait(filp, &work->waiters, poll_table);
+       work->waiters_pending = true;
+       /*
+        * There's a tight race between setting the waiters_pending and
+        * checking if the ring buffer is empty.  Once the waiters_pending bit
+        * is set, the next event will wake the task up, but we can get stuck
+        * if there's only a single event in.
+        * if there's only a single event in the buffer.
+        * FIXME: Ideally, we need a memory barrier on the writer side as well,
+        * but adding a memory barrier to all events will cause too much of a
+        * performance hit in the fast path.  We only need a memory barrier when
+        * the buffer goes from empty to having content.  But as this race is
+        * extremely small, and it's not a problem if another event comes in, we
+        * will fix it later.
+        */
+       smp_mb();
 
        if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
            (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
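The barrier exists so that "publish waiters_pending" and "check for data" cannot be reordered against the writer's "publish data, then check for waiters". A userspace sketch of the two sides, with C11 seq_cst atomics standing in for smp_mb() and the wakeup machinery (illustration only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool waiters_pending;
static atomic_int  nr_events;

static bool poller_should_sleep(void)
{
	atomic_store(&waiters_pending, true);          /* 1: announce the waiter */
	/* seq_cst store/load ordering plays the role of smp_mb() here */
	return atomic_load(&nr_events) == 0;           /* 2: then check for data */
}

static void writer_add_event(void)
{
	atomic_fetch_add(&nr_events, 1);               /* 1: publish the event */
	if (atomic_load(&waiters_pending)) {           /* 2: then look for waiters */
		atomic_store(&waiters_pending, false);
		/* wake_up(&work->waiters) in the kernel */
	}
}

int main(void)
{
	writer_add_event();
	printf("poller should sleep: %d\n", poller_should_sleep()); /* 0: data present */
	return 0;
}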
index 07c2832..1b233fc 100644 (file)
@@ -892,6 +892,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
         the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
         will test all possible w/w mutex interface abuse with the
         exception of simply not acquiring all the required locks.
+        Note that this feature can introduce significant overhead, so
+        it really should not be enabled in a production or distro kernel,
+        even a debug kernel.  If you are a driver writer, enable it.  If
+        you are a distro, do not.
 
 config DEBUG_LOCK_ALLOC
        bool "Lock debugging: detect incorrect freeing of live locks"
index e4853b5..4b98f89 100644 (file)
@@ -410,9 +410,11 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
                priv->lane2_ops = NULL;
                if (priv->lane_version > 1)
                        priv->lane2_ops = &lane2_ops;
+               rtnl_lock();
                if (dev_set_mtu(dev, mesg->content.config.mtu))
                        pr_info("%s: change_mtu to %d failed\n",
                                dev->name, mesg->content.config.mtu);
+               rtnl_unlock();
                priv->is_proxy = mesg->content.config.is_proxy;
                break;
        case l_flush_tran_id:
index 52c43f9..fc1835c 100644 (file)
@@ -188,7 +188,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 
        /* Reached the end of the list, so insert after 'frag_entry_last'. */
        if (likely(frag_entry_last)) {
-               hlist_add_behind(&frag_entry_last->list, &frag_entry_new->list);
+               hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
                chain->size += skb->len - hdr_size;
                chain->timestamp = jiffies;
                ret = true;
index cb4459b..76b7f5e 100644 (file)
@@ -643,7 +643,7 @@ static int fib6_commit_metrics(struct dst_entry *dst,
        if (dst->flags & DST_HOST) {
                mp = dst_metrics_write_ptr(dst);
        } else {
-               mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+               mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
                if (!mp)
                        return -ENOMEM;
                dst_init_metrics(dst, mp, 0);
index 6d537f0..0375009 100644 (file)
@@ -1444,7 +1444,7 @@ ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
 
                        list_del(&sdata->reserved_chanctx_list);
                        list_move(&sdata->assigned_chanctx_list,
-                                 &new_ctx->assigned_vifs);
+                                 &ctx->assigned_vifs);
                        sdata->reserved_chanctx = NULL;
 
                        ieee80211_vif_chanctx_reservation_complete(sdata);
index fe5cda0..5231652 100644 (file)
@@ -42,6 +42,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
 static int make_writable(struct sk_buff *skb, int write_len)
 {
+       if (!pskb_may_pull(skb, write_len))
+               return -ENOMEM;
+
        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
                return 0;
 
@@ -70,6 +73,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 
        vlan_set_encap_proto(skb, vhdr);
        skb->mac_header += VLAN_HLEN;
+       if (skb_network_offset(skb) < ETH_HLEN)
+               skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_len(skb);
 
        return 0;
index 8d9f804..93896d2 100644 (file)
@@ -632,6 +632,7 @@ static void init_prb_bdqc(struct packet_sock *po,
        p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
        p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 
+       p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
        prb_init_ft_ops(p1, req_u);
        prb_setup_retire_blk_timer(po, tx_ring);
        prb_open_block(p1, pbd);
@@ -1942,6 +1943,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                        if ((int)snaplen < 0)
                                snaplen = 0;
                }
+       } else if (unlikely(macoff + snaplen >
+                           GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
+               u32 nval;
+
+               nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
+               pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
+                           snaplen, nval, macoff);
+               snaplen = nval;
+               if (unlikely((int)snaplen < 0)) {
+                       snaplen = 0;
+                       macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+               }
        }
        spin_lock(&sk->sk_receive_queue.lock);
        h.raw = packet_current_rx_frame(po, skb,
@@ -3783,6 +3796,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
                if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
                        goto out;
+               if (po->tp_version >= TPACKET_V3 &&
+                   (int)(req->tp_block_size -
+                         BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
+                       goto out;
                if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                                        po->tp_reserve))
                        goto out;
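The af_packet changes above add a per-ring max_frame_len for TPACKET_V3 and clamp snaplen against it, so a large frame can never be copied past the end of a ring block (block size minus block header and private area); packet_set_ring() additionally rejects configurations where that usable size would be non-positive. A rough standalone sketch of the clamp, with hypothetical parameters and without the macoff adjustment done in the real code:

    /* Clamp the copy length so macoff + snaplen stays within one ring block. */
    static unsigned int example_clamp_snaplen(unsigned int snaplen,
                                              unsigned int macoff,
                                              unsigned int max_frame_len)
    {
            if (macoff + snaplen > max_frame_len) {
                    if (macoff >= max_frame_len)
                            return 0;                /* headers alone do not fit */
                    snaplen = max_frame_len - macoff;
            }
            return snaplen;
    }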
index eb9580a..cdddf6a 100644 (file)
@@ -29,6 +29,7 @@ struct tpacket_kbdq_core {
        char            *pkblk_start;
        char            *pkblk_end;
        int             kblk_size;
+       unsigned int    max_frame_len;
        unsigned int    knum_blocks;
        uint64_t        knxt_seq_num;
        char            *prev;
index ead5264..762a04b 100644 (file)
@@ -159,7 +159,6 @@ struct cbq_sched_data {
        struct cbq_class        *tx_borrowed;
        int                     tx_len;
        psched_time_t           now;            /* Cached timestamp */
-       psched_time_t           now_rt;         /* Cached real time */
        unsigned int            pmask;
 
        struct hrtimer          delay_timer;
@@ -353,12 +352,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
        int toplevel = q->toplevel;
 
        if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
-               psched_time_t now;
-               psched_tdiff_t incr;
-
-               now = psched_get_time();
-               incr = now - q->now_rt;
-               now = q->now + incr;
+               psched_time_t now = psched_get_time();
 
                do {
                        if (cl->undertime < now) {
@@ -700,8 +694,13 @@ cbq_update(struct cbq_sched_data *q)
        struct cbq_class *this = q->tx_class;
        struct cbq_class *cl = this;
        int len = q->tx_len;
+       psched_time_t now;
 
        q->tx_class = NULL;
+       /* Time integrator. We calculate EOS time
+        * by adding expected packet transmission time.
+        */
+       now = q->now + L2T(&q->link, len);
 
        for ( ; cl; cl = cl->share) {
                long avgidle = cl->avgidle;
@@ -717,7 +716,7 @@ cbq_update(struct cbq_sched_data *q)
                 *      idle = (now - last) - last_pktlen/rate
                 */
 
-               idle = q->now - cl->last;
+               idle = now - cl->last;
                if ((unsigned long)idle > 128*1024*1024) {
                        avgidle = cl->maxidle;
                } else {
@@ -761,7 +760,7 @@ cbq_update(struct cbq_sched_data *q)
                        idle -= L2T(&q->link, len);
                        idle += L2T(cl, len);
 
-                       cl->undertime = q->now + idle;
+                       cl->undertime = now + idle;
                } else {
                        /* Underlimit */
 
@@ -771,7 +770,8 @@ cbq_update(struct cbq_sched_data *q)
                        else
                                cl->avgidle = avgidle;
                }
-               cl->last = q->now;
+               if ((s64)(now - cl->last) > 0)
+                       cl->last = now;
        }
 
        cbq_update_toplevel(q, this, q->tx_borrowed);
@@ -943,31 +943,13 @@ cbq_dequeue(struct Qdisc *sch)
        struct sk_buff *skb;
        struct cbq_sched_data *q = qdisc_priv(sch);
        psched_time_t now;
-       psched_tdiff_t incr;
 
        now = psched_get_time();
-       incr = now - q->now_rt;
-
-       if (q->tx_class) {
-               psched_tdiff_t incr2;
-               /* Time integrator. We calculate EOS time
-                * by adding expected packet transmission time.
-                * If real time is greater, we warp artificial clock,
-                * so that:
-                *
-                * cbq_time = max(real_time, work);
-                */
-               incr2 = L2T(&q->link, q->tx_len);
-               q->now += incr2;
+
+       if (q->tx_class)
                cbq_update(q);
-               if ((incr -= incr2) < 0)
-                       incr = 0;
-               q->now += incr;
-       } else {
-               if (now > q->now)
-                       q->now = now;
-       }
-       q->now_rt = now;
+
+       q->now = now;
 
        for (;;) {
                q->wd_expires = 0;
@@ -1223,7 +1205,6 @@ cbq_reset(struct Qdisc *sch)
        hrtimer_cancel(&q->delay_timer);
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();
-       q->now_rt = q->now;
 
        for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
                q->active[prio] = NULL;
@@ -1407,7 +1388,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        q->delay_timer.function = cbq_undelay;
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();
-       q->now_rt = q->now;
 
        cbq_link_class(&q->link);
 
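Taken together, the sch_cbq.c hunks above drop the second clock (now_rt): q->now is simply set to psched_get_time() on each dequeue, and cbq_update() derives the end-of-transmission estimate locally as q->now plus the link transmission time of the last packet. A minimal sketch of that estimate with hypothetical types; L2T() in the real code comes from the class rate tables:

    #include <linux/types.h>

    struct example_link {
            u64 ns_per_byte;        /* crude stand-in for the rate table lookup */
    };

    /* Expected end-of-transmission time for a packet of pkt_len bytes. */
    static u64 example_eos_time(u64 now, const struct example_link *link,
                                unsigned int pkt_len)
    {
            return now + link->ns_per_byte * pkt_len;
    }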
index 06a9ee6..a88b852 100644 (file)
@@ -813,6 +813,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                else {
                        dst_release(transport->dst);
                        transport->dst = NULL;
+                       ulp_notify = false;
                }
 
                spc_state = SCTP_ADDR_UNREACHABLE;
@@ -1244,7 +1245,7 @@ static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
 {
        u8 score_curr, score_best;
 
-       if (best == NULL)
+       if (best == NULL || curr == best)
                return curr;
 
        score_curr = sctp_trans_score(curr);
@@ -1355,14 +1356,11 @@ static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
                trans_sec = trans_pri;
 
        /* If we failed to find a usable transport, just camp on the
-        * primary or retran, even if they are inactive, if possible
-        * pick a PF iff it's the better choice.
+        * active or pick a PF iff it's the better choice.
         */
        if (trans_pri == NULL) {
-               trans_pri = sctp_trans_elect_best(asoc->peer.primary_path,
-                                                 asoc->peer.retran_path);
-               trans_pri = sctp_trans_elect_best(trans_pri, trans_pf);
-               trans_sec = asoc->peer.primary_path;
+               trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
+               trans_sec = trans_pri;
        }
 
        /* Set the active and retran transports. */
index 3f93454..3087da3 100644 (file)
@@ -179,9 +179,12 @@ static inline int tipc_port_importance(struct tipc_port *port)
        return msg_importance(&port->phdr);
 }
 
-static inline void tipc_port_set_importance(struct tipc_port *port, int imp)
+static inline int tipc_port_set_importance(struct tipc_port *port, int imp)
 {
+       if (imp > TIPC_CRITICAL_IMPORTANCE)
+               return -EINVAL;
        msg_set_importance(&port->phdr, (u32)imp);
+       return 0;
 }
 
 #endif
index 7d423ee..ff8c811 100644 (file)
@@ -1973,7 +1973,7 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
 
        switch (opt) {
        case TIPC_IMPORTANCE:
-               tipc_port_set_importance(port, value);
+               res = tipc_port_set_importance(port, value);
                break;
        case TIPC_SRC_DROPPABLE:
                if (sock->type != SOCK_STREAM)
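The two tipc hunks above turn tipc_port_set_importance() into a checked setter: values above TIPC_CRITICAL_IMPORTANCE now yield -EINVAL, and tipc_setsockopt() propagates that result instead of silently accepting bad input. A minimal standalone sketch of the same idea, storing into a hypothetical field:

    #include <linux/errno.h>
    #include <linux/tipc.h>

    /* Store a user-supplied importance level only if it is in range. */
    static int example_set_importance(u32 *field, u32 imp)
    {
            if (imp > TIPC_CRITICAL_IMPORTANCE)
                    return -EINVAL;
            *field = imp;
            return 0;
    }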
index 16a07cf..70bea94 100755 (executable)
@@ -2085,6 +2085,7 @@ sub dump_function($$) {
     $prototype =~ s/^noinline +//;
     $prototype =~ s/__init +//;
     $prototype =~ s/__init_or_module +//;
+    $prototype =~ s/__meminit +//;
     $prototype =~ s/__must_check +//;
     $prototype =~ s/__weak +//;
     my $define = $prototype =~ s/^#\s*define\s+//; #ak added
index a3386d1..bed745c 100644 (file)
@@ -173,7 +173,7 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
                 * Use filesystem name if filesystem does not support rename()
                 * operation.
                 */
-               if (!inode->i_op->rename)
+               if (!inode->i_op->rename && !inode->i_op->rename2)
                        goto prepend_filesystem_name;
        }
        /* Prepend device name. */
@@ -282,7 +282,8 @@ char *tomoyo_realpath_from_path(struct path *path)
                 * Get local name for filesystems without rename() operation
                 * or dentry without vfsmount.
                 */
-               if (!path->mnt || !inode->i_op->rename)
+               if (!path->mnt ||
+                   (!inode->i_op->rename && !inode->i_op->rename2))
                        pos = tomoyo_get_local_path(path->dentry, buf,
                                                    buf_len - 1);
                /* Get absolute name for the rest. */
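Both tomoyo hunks above relax the "filesystem supports rename" test: this kernel also has the newer ->rename2 inode operation, so a filesystem may implement either callback, and only both being NULL means rename is unsupported. A minimal sketch of the check, as a hypothetical helper:

    #include <linux/fs.h>

    /* True if the filesystem backing this inode implements some form of rename. */
    static bool example_supports_rename(const struct inode *inode)
    {
            return inode->i_op->rename || inode->i_op->rename2;
    }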
index 051d55b..9f404e9 100644 (file)
@@ -684,7 +684,7 @@ int snd_info_card_free(struct snd_card *card)
  * snd_info_get_line - read one line from the procfs buffer
  * @buffer: the procfs buffer
  * @line: the buffer to store
- * @len: the max. buffer size - 1
+ * @len: the max. buffer size
  *
  * Reads one line from the buffer and stores the string.
  *
@@ -704,7 +704,7 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
                        buffer->stop = 1;
                if (c == '\n')
                        break;
-               if (len) {
+               if (len > 1) {
                        len--;
                        *line++ = c;
                }
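The snd_info_get_line() change above fixes an off-by-one: @len is documented as the full buffer size, so the copy loop must stop while more than one byte remains, leaving room for the terminating NUL. A minimal standalone sketch of the same bounded copy, as a hypothetical helper:

    #include <linux/types.h>

    /* Copy one line into dst, writing at most len bytes including the NUL. */
    static void example_copy_line(char *dst, size_t len, const char *src)
    {
            if (!len)
                    return;
            while (*src && *src != '\n') {
                    if (len > 1) {          /* keep one byte for the NUL */
                            len--;
                            *dst++ = *src;
                    }
                    src++;
            }
            *dst = '\0';
    }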
index 4560ca0..2c6fd80 100644 (file)
@@ -142,11 +142,11 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
        },
        [SNDRV_PCM_FORMAT_DSD_U8] = {
                .width = 8, .phys = 8, .le = 1, .signd = 0,
-               .silence = {},
+               .silence = { 0x69 },
        },
        [SNDRV_PCM_FORMAT_DSD_U16_LE] = {
                .width = 16, .phys = 16, .le = 1, .signd = 0,
-               .silence = {},
+               .silence = { 0x69, 0x69 },
        },
        /* FIXME: the following three formats are not defined properly yet */
        [SNDRV_PCM_FORMAT_MPEG] = {
index f2e34e3..5851249 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #ifndef CT20K1REG_H
-#define CT20k1REG_H
+#define CT20K1REG_H
 
 /* 20k1 registers */
 #define        DSPXRAM_START                   0x000000
 #define I2SD_R    0x19L
 
 #endif /* CT20K1REG_H */
-
-
index 07e7609..8371274 100644 (file)
@@ -20,7 +20,7 @@
  */
 
 #ifndef __CA0132_REGS_H
-#define __CA0312_REGS_H
+#define __CA0132_REGS_H
 
 #define DSP_CHIP_OFFSET                0x100000
 #define DSP_DBGCNTL_MODULE_OFFSET      0xE30
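The two header fixes above (ct20k1reg.h and ca0132_regs.h) correct include guards whose #define did not match the #ifndef, which silently defeats the guard and allows double inclusion. The correct shape, with a placeholder macro name:

    #ifndef EXAMPLE_REGS_H
    #define EXAMPLE_REGS_H          /* must match the #ifndef exactly */

    /* ... register definitions ... */

    #endif /* EXAMPLE_REGS_H */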
index 36badba..99d7d7f 100644 (file)
@@ -50,6 +50,8 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
 #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec))
 
 #define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
+#define is_cherryview(codec) ((codec)->vendor_id == 0x80862883)
+#define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
 
 struct hdmi_spec_per_cvt {
        hda_nid_t cvt_nid;
@@ -1459,7 +1461,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
                            mux_idx);
 
        /* configure unused pins to choose other converters */
-       if (is_haswell_plus(codec) || is_valleyview(codec))
+       if (is_haswell_plus(codec) || is_valleyview_plus(codec))
                intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
 
        snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
@@ -1598,7 +1600,8 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
                 *   and this can make HW reset converter selection on a pin.
                 */
                if (eld->eld_valid && !old_eld_valid && per_pin->setup) {
-                       if (is_haswell_plus(codec) || is_valleyview(codec)) {
+                       if (is_haswell_plus(codec) ||
+                               is_valleyview_plus(codec)) {
                                intel_verify_pin_cvt_connect(codec, per_pin);
                                intel_not_share_assigned_cvt(codec, pin_nid,
                                                        per_pin->mux_idx);
@@ -1779,7 +1782,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
        bool non_pcm;
        int pinctl;
 
-       if (is_haswell_plus(codec) || is_valleyview(codec)) {
+       if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
                /* Verify pin:cvt selections to avoid silent audio after S3.
                 * After S3, the audio driver restores pin:cvt selections
                 * but this can happen before gfx is ready and such selection
@@ -2330,9 +2333,8 @@ static int patch_generic_hdmi(struct hda_codec *codec)
                intel_haswell_fixup_enable_dp12(codec);
        }
 
-       if (is_haswell(codec) || is_valleyview(codec)) {
+       if (is_haswell_plus(codec) || is_valleyview_plus(codec))
                codec->depop_delay = 0;
-       }
 
        if (hdmi_parse_codec(codec) < 0) {
                codec->spec = NULL;
index 6b38ec3..d446ac3 100644 (file)
@@ -181,6 +181,8 @@ static void alc_fix_pll(struct hda_codec *codec)
                            spec->pll_coef_idx);
        val = snd_hda_codec_read(codec, spec->pll_nid, 0,
                                 AC_VERB_GET_PROC_COEF, 0);
+       if (val == -1)
+               return;
        snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX,
                            spec->pll_coef_idx);
        snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF,
@@ -2806,6 +2808,8 @@ static void alc286_shutup(struct hda_codec *codec)
 static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
 {
        int val = alc_read_coef_idx(codec, 0x04);
+       if (val == -1)
+               return;
        if (power_up)
                val |= 1 << 11;
        else
@@ -3264,6 +3268,15 @@ static int alc269_resume(struct hda_codec *codec)
        snd_hda_codec_resume_cache(codec);
        alc_inv_dmic_sync(codec, true);
        hda_call_check_power_status(codec, 0x01);
+
+       /* on some machine, the BIOS will clear the codec gpio data when enter
+        * suspend, and won't restore the data after resume, so we restore it
+        * in the driver.
+        */
+       if (spec->gpio_led)
+               snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_SET_GPIO_DATA,
+                           spec->gpio_led);
+
        if (spec->has_alc5505_dsp)
                alc5505_dsp_resume(codec);
 
@@ -4395,6 +4408,7 @@ enum {
        ALC292_FIXUP_TPT440_DOCK,
        ALC283_FIXUP_BXBT2807_MIC,
        ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
+       ALC282_FIXUP_ASPIRE_V5_PINS,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -4842,6 +4856,22 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained_before = true,
                .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
        },
+       [ALC282_FIXUP_ASPIRE_V5_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x12, 0x90a60130 },
+                       { 0x14, 0x90170110 },
+                       { 0x17, 0x40000008 },
+                       { 0x18, 0x411111f0 },
+                       { 0x19, 0x411111f0 },
+                       { 0x1a, 0x411111f0 },
+                       { 0x1b, 0x411111f0 },
+                       { 0x1d, 0x40f89b2d },
+                       { 0x1e, 0x411111f0 },
+                       { 0x21, 0x0321101f },
+                       { },
+               },
+       },
 
 };
 
@@ -4853,6 +4883,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+       SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -5311,27 +5342,30 @@ static void alc269_fill_coef(struct hda_codec *codec)
        if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
                val = alc_read_coef_idx(codec, 0x04);
                /* Power up output pin */
-               alc_write_coef_idx(codec, 0x04, val | (1<<11));
+               if (val != -1)
+                       alc_write_coef_idx(codec, 0x04, val | (1<<11));
        }
 
        if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
                val = alc_read_coef_idx(codec, 0xd);
-               if ((val & 0x0c00) >> 10 != 0x1) {
+               if (val != -1 && (val & 0x0c00) >> 10 != 0x1) {
                        /* Capless ramp up clock control */
                        alc_write_coef_idx(codec, 0xd, val | (1<<10));
                }
                val = alc_read_coef_idx(codec, 0x17);
-               if ((val & 0x01c0) >> 6 != 0x4) {
+               if (val != -1 && (val & 0x01c0) >> 6 != 0x4) {
                        /* Class D power on reset */
                        alc_write_coef_idx(codec, 0x17, val | (1<<7));
                }
        }
 
        val = alc_read_coef_idx(codec, 0xd); /* Class D */
-       alc_write_coef_idx(codec, 0xd, val | (1<<14));
+       if (val != -1)
+               alc_write_coef_idx(codec, 0xd, val | (1<<14));
 
        val = alc_read_coef_idx(codec, 0x4); /* HP */
-       alc_write_coef_idx(codec, 0x4, val | (1<<11));
+       if (val != -1)
+               alc_write_coef_idx(codec, 0x4, val | (1<<11));
 }
 
 /*
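The patch_realtek.c hunks above all guard against failed coefficient reads: alc_read_coef_idx() reports -1 when the underlying verb fails, and feeding that value into a read-modify-write would program a bogus coefficient. A minimal sketch of the guarded pattern as it would sit inside patch_realtek.c, reusing the file-local helpers named in the diff; the index and bit are illustrative:

    /* Set one bit in a codec coefficient, skipping the write if the read failed. */
    static void example_set_coef_bit(struct hda_codec *codec,
                                     unsigned int idx, unsigned int bit)
    {
            int val = alc_read_coef_idx(codec, idx);

            if (val == -1)          /* read failed; do not write garbage back */
                    return;
            alc_write_coef_idx(codec, idx, val | (1 << bit));
    }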
index bd41ee4..2c71f16 100644 (file)
@@ -1278,6 +1278,8 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
        else
                rates = &arizona_48k_bclk_rates[0];
 
+       wl = snd_pcm_format_width(params_format(params));
+
        if (tdm_slots) {
                arizona_aif_dbg(dai, "Configuring for %d %d bit TDM slots\n",
                                tdm_slots, tdm_width);
@@ -1285,6 +1287,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
                channels = tdm_slots;
        } else {
                bclk_target = snd_soc_params_to_bclk(params);
+               tdm_width = wl;
        }
 
        if (chan_limit && chan_limit < channels) {
@@ -1319,8 +1322,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
        arizona_aif_dbg(dai, "BCLK %dHz LRCLK %dHz\n",
                        rates[bclk], rates[bclk] / lrclk);
 
-       wl = snd_pcm_format_width(params_format(params));
-       frame = wl << ARIZONA_AIF1TX_WL_SHIFT | wl;
+       frame = wl << ARIZONA_AIF1TX_WL_SHIFT | tdm_width;
 
        reconfig = arizona_aif_cfg_changed(codec, base, bclk, lrclk, frame);
 
index 163ec38..0c8aefa 100644 (file)
@@ -259,13 +259,13 @@ static const struct soc_enum pcm512x_veds =
                        pcm512x_ramp_step_text);
 
 static const struct snd_kcontrol_new pcm512x_controls[] = {
-SOC_DOUBLE_R_TLV("Playback Digital Volume", PCM512x_DIGITAL_VOLUME_2,
+SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
                 PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
 SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
               PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
 SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
               PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
-SOC_DOUBLE("Playback Digital Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
+SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
           PCM512x_RQMR_SHIFT, 1, 1),
 
 SOC_SINGLE("Deemphasis Switch", PCM512x_DSP, PCM512x_DEMP_SHIFT, 1, 1),
index c28508d..6a6b2ff 100644 (file)
@@ -403,7 +403,8 @@ out:
        return ret;
 }
 
-static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
+static int __davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id,
+                                     int div, bool explicit)
 {
        struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
 
@@ -420,7 +421,8 @@ static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div
                               ACLKXDIV(div - 1), ACLKXDIV_MASK);
                mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG,
                               ACLKRDIV(div - 1), ACLKRDIV_MASK);
-               mcasp->bclk_div = div;
+               if (explicit)
+                       mcasp->bclk_div = div;
                break;
 
        case 2:         /* BCLK/LRCLK ratio */
@@ -434,6 +436,12 @@ static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div
        return 0;
 }
 
+static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id,
+                                   int div)
+{
+       return __davinci_mcasp_set_clkdiv(dai, div_id, div, 1);
+}
+
 static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id,
                                    unsigned int freq, int dir)
 {
@@ -738,7 +746,7 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
                                 "Inaccurate BCLK: %u Hz / %u != %u Hz\n",
                                 mcasp->sysclk_freq, div, bclk_freq);
                }
-               davinci_mcasp_set_clkdiv(cpu_dai, 1, div);
+               __davinci_mcasp_set_clkdiv(cpu_dai, 1, div, 0);
        }
 
        ret = mcasp_common_hw_param(mcasp, substream->stream,
index f54a8fc..f3012b6 100644 (file)
@@ -49,7 +49,6 @@ config SND_SOC_FSL_ESAI
        tristate "Enhanced Serial Audio Interface (ESAI) module support"
        select REGMAP_MMIO
        select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
-       select SND_SOC_FSL_UTILS
        help
          Say Y if you want to add Enhanced Synchronous Audio Interface
          (ESAI) support for the Freescale CPUs.
index 72d154e..a3b29ed 100644 (file)
@@ -18,7 +18,6 @@
 
 #include "fsl_esai.h"
 #include "imx-pcm.h"
-#include "fsl_utils.h"
 
 #define FSL_ESAI_RATES         SNDRV_PCM_RATE_8000_192000
 #define FSL_ESAI_FORMATS       (SNDRV_PCM_FMTBIT_S8 | \
@@ -607,7 +606,6 @@ static struct snd_soc_dai_ops fsl_esai_dai_ops = {
        .hw_params = fsl_esai_hw_params,
        .set_sysclk = fsl_esai_set_dai_sysclk,
        .set_fmt = fsl_esai_set_dai_fmt,
-       .xlate_tdm_slot_mask = fsl_asoc_xlate_tdm_slot_mask,
        .set_tdm_slot = fsl_esai_set_dai_tdm_slot,
 };
 
index 42edc6f..03d0a16 100644 (file)
@@ -246,8 +246,8 @@ static struct sst_acpi_desc sst_acpi_broadwell_desc = {
 };
 
 static struct sst_acpi_mach baytrail_machines[] = {
-       { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-i2s_master" },
-       { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-i2s_master" },
+       { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master" },
+       { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master" },
        {}
 };
 
index 67673a2..b4ad98c 100644 (file)
@@ -817,7 +817,7 @@ static struct sst_dsp_device byt_dev = {
        .ops = &sst_byt_ops,
 };
 
-int sst_byt_dsp_suspend_noirq(struct device *dev, struct sst_pdata *pdata)
+int sst_byt_dsp_suspend_late(struct device *dev, struct sst_pdata *pdata)
 {
        struct sst_byt *byt = pdata->dsp;
 
@@ -826,14 +826,6 @@ int sst_byt_dsp_suspend_noirq(struct device *dev, struct sst_pdata *pdata)
        sst_byt_drop_all(byt);
        dev_dbg(byt->dev, "dsp in reset\n");
 
-       return 0;
-}
-EXPORT_SYMBOL_GPL(sst_byt_dsp_suspend_noirq);
-
-int sst_byt_dsp_suspend_late(struct device *dev, struct sst_pdata *pdata)
-{
-       struct sst_byt *byt = pdata->dsp;
-
        dev_dbg(byt->dev, "free all blocks and unload fw\n");
        sst_fw_unload(byt->fw);
 
index 06a4d20..8faff6d 100644 (file)
@@ -66,7 +66,6 @@ int sst_byt_get_dsp_position(struct sst_byt *byt,
 int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata);
 void sst_byt_dsp_free(struct device *dev, struct sst_pdata *pdata);
 struct sst_dsp *sst_byt_get_dsp(struct sst_byt *byt);
-int sst_byt_dsp_suspend_noirq(struct device *dev, struct sst_pdata *pdata);
 int sst_byt_dsp_suspend_late(struct device *dev, struct sst_pdata *pdata);
 int sst_byt_dsp_boot(struct device *dev, struct sst_pdata *pdata);
 int sst_byt_dsp_wait_for_ready(struct device *dev, struct sst_pdata *pdata);
index 599401c..eab1c7d 100644 (file)
@@ -59,6 +59,9 @@ struct sst_byt_priv_data {
 
        /* DAI data */
        struct sst_byt_pcm_data pcm[BYT_PCM_COUNT];
+
+       /* flag indicating is stream context restore needed after suspend */
+       bool restore_stream;
 };
 
 /* this may get called several times by oss emulation */
@@ -184,7 +187,10 @@ static int sst_byt_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                sst_byt_stream_start(byt, pcm_data->stream, 0);
                break;
        case SNDRV_PCM_TRIGGER_RESUME:
-               schedule_work(&pcm_data->work);
+               if (pdata->restore_stream == true)
+                       schedule_work(&pcm_data->work);
+               else
+                       sst_byt_stream_resume(byt, pcm_data->stream);
                break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                sst_byt_stream_resume(byt, pcm_data->stream);
@@ -193,6 +199,7 @@ static int sst_byt_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                sst_byt_stream_stop(byt, pcm_data->stream);
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
+               pdata->restore_stream = false;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                sst_byt_stream_pause(byt, pcm_data->stream);
                break;
@@ -404,26 +411,10 @@ static const struct snd_soc_component_driver byt_dai_component = {
 };
 
 #ifdef CONFIG_PM
-static int sst_byt_pcm_dev_suspend_noirq(struct device *dev)
-{
-       struct sst_pdata *sst_pdata = dev_get_platdata(dev);
-       int ret;
-
-       dev_dbg(dev, "suspending noirq\n");
-
-       /* at this point all streams will be stopped and context saved */
-       ret = sst_byt_dsp_suspend_noirq(dev, sst_pdata);
-       if (ret < 0) {
-               dev_err(dev, "failed to suspend %d\n", ret);
-               return ret;
-       }
-
-       return ret;
-}
-
 static int sst_byt_pcm_dev_suspend_late(struct device *dev)
 {
        struct sst_pdata *sst_pdata = dev_get_platdata(dev);
+       struct sst_byt_priv_data *priv_data = dev_get_drvdata(dev);
        int ret;
 
        dev_dbg(dev, "suspending late\n");
@@ -434,34 +425,30 @@ static int sst_byt_pcm_dev_suspend_late(struct device *dev)
                return ret;
        }
 
+       priv_data->restore_stream = true;
+
        return ret;
 }
 
 static int sst_byt_pcm_dev_resume_early(struct device *dev)
 {
        struct sst_pdata *sst_pdata = dev_get_platdata(dev);
+       int ret;
 
        dev_dbg(dev, "resume early\n");
 
        /* load fw and boot DSP */
-       return sst_byt_dsp_boot(dev, sst_pdata);
-}
-
-static int sst_byt_pcm_dev_resume(struct device *dev)
-{
-       struct sst_pdata *sst_pdata = dev_get_platdata(dev);
-
-       dev_dbg(dev, "resume\n");
+       ret = sst_byt_dsp_boot(dev, sst_pdata);
+       if (ret)
+               return ret;
 
        /* wait for FW to finish booting */
        return sst_byt_dsp_wait_for_ready(dev, sst_pdata);
 }
 
 static const struct dev_pm_ops sst_byt_pm_ops = {
-       .suspend_noirq = sst_byt_pcm_dev_suspend_noirq,
        .suspend_late = sst_byt_pcm_dev_suspend_late,
        .resume_early = sst_byt_pcm_dev_resume_early,
-       .resume = sst_byt_pcm_dev_resume,
 };
 
 #define SST_BYT_PM_OPS (&sst_byt_pm_ops)
index 0109f6c..a8e0974 100644 (file)
@@ -765,9 +765,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
                          SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
                          SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
 
-#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-                           SNDRV_PCM_FMTBIT_S24_LE |   \
-                           SNDRV_PCM_FMTBIT_S32_LE)
+#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
        .startup        = pxa_ssp_startup,
index 8348352..177bd86 100644 (file)
@@ -2860,12 +2860,14 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        unsigned int reg_val, val;
-       int ret = 0;
 
-       if (e->reg != SND_SOC_NOPM)
-               ret = soc_dapm_read(dapm, e->reg, &reg_val);
-       else
+       if (e->reg != SND_SOC_NOPM) {
+               int ret = soc_dapm_read(dapm, e->reg, &reg_val);
+               if (ret)
+                       return ret;
+       } else {
                reg_val = dapm_kcontrol_get_value(kcontrol);
+       }
 
        val = (reg_val >> e->shift_l) & e->mask;
        ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(e, val);
@@ -2875,7 +2877,7 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
                ucontrol->value.enumerated.item[1] = val;
        }
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_double);
 
index bf06577..5819a27 100644 (file)
@@ -526,8 +526,10 @@ static int assign_guest_irq(struct kvm *kvm,
                dev->irq_requested_type |= guest_irq_type;
                if (dev->ack_notifier.gsi != -1)
                        kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
-       } else
+       } else {
                kvm_free_irq_source_id(kvm, dev->irq_source_id);
+               dev->irq_source_id = -1;
+       }
 
        return r;
 }
index 0df7d4b..714b949 100644 (file)
@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
        return pfn;
 }
 
+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+{
+       unsigned long i;
+
+       for (i = 0; i < npages; ++i)
+               kvm_release_pfn_clean(pfn + i);
+}
+
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
        gfn_t gfn, end_gfn;
@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address:"
                               "iommu failed to map pfn=%llx\n", pfn);
+                       kvm_unpin_pages(kvm, pfn, page_size);
                        goto unmap_pages;
                }
 
@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
        return 0;
 
 unmap_pages:
-       kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
+       kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
        return r;
 }
 
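The two fixes above tighten kvm_iommu_map_pages() error handling: pages pinned for a chunk whose IOMMU mapping then fails are released immediately, and the rollback helper kvm_iommu_put_pages() is passed a page count (gfn - slot->base_gfn), not the absolute gfn reached. A rough standalone sketch of the same shape, built entirely from hypothetical helpers:

    #include <linux/errno.h>

    static int example_pin(unsigned long pfn) { return 0; }
    static void example_unpin(unsigned long pfn) { }
    static int example_map(unsigned long pfn) { return 0; }
    static void example_rollback(unsigned long base, unsigned long done) { }

    /* Map npages starting at base, undoing partial work on failure. */
    static int example_map_range(unsigned long base, unsigned long npages)
    {
            unsigned long i;

            for (i = 0; i < npages; i++) {
                    if (example_pin(base + i))
                            goto rollback;
                    if (example_map(base + i)) {
                            example_unpin(base + i);   /* drop the pin just taken */
                            goto rollback;
                    }
            }
            return 0;

    rollback:
            example_rollback(base, i);   /* i is a count of completed pages, not a pfn */
            return -EFAULT;
    }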
@@ -266,14 +275,6 @@ out_unlock:
        return r;
 }
 
-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
-{
-       unsigned long i;
-
-       for (i = 0; i < npages; ++i)
-               kvm_release_pfn_clean(pfn + i);
-}
-
 static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
 {