Merge tag 'xtensa-20141109' of git://github.com/czankel/xtensa-linux
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 12 Nov 2014 23:21:52 +0000 (15:21 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 12 Nov 2014 23:21:52 +0000 (15:21 -0800)
Pull Xtensa fixes from Chris Zankel:
 - fix umount syscall
 - fix ISS and xtfpga Kconfig dependencies so that more randconfigs are
   buildable
 - add seccomp, getrandom, and memfd_create syscalls
 - add defconfigs for KC705 and SMP LX200
 - implement pgprot_noncached

* tag 'xtensa-20141109' of git://github.com/czankel/xtensa-linux:
  xtensa: xtfpga: add lx200 SMP DTS and defconfig
  xtensa: xtfpga: add generic KC705 board config
  xtensa: re-wire umount syscall to sys_oldumount
  xtensa: xtfpga: only select ethoc when ethernet is available
  xtensa: add seccomp, getrandom, and memfd_create syscalls
  xtensa: ISS: add BLOCK dependency to BLK_DEV_SIMDISK
  xtensa: implement pgprot_noncached
  xtensa/uapi: Add definition of TIOC[SG]RS485
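For context on the pgprot_noncached item above: drivers consume the helper the same way they do on other architectures. A minimal sketch (not from this pull; foo_mmap is a made-up driver hook) of an mmap handler that maps device memory uncached into user space:

    /* Assumes <linux/fs.h> and <linux/mm.h>; illustrative only. */
    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }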

38 files changed:
drivers/char/hw_random/pseries-rng.c
drivers/crypto/caam/key_gen.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_transport.c
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/qat/qat_common/qat_crypto.c
drivers/crypto/qat/qat_dh895xcc/adf_admin.c
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xcc/adf_isr.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/si.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/ibmpowernv.c
drivers/hwmon/pwm-fan.c
drivers/mfd/max77693.c
drivers/mfd/rtsx_pcr.c
drivers/mfd/stmpe.h
drivers/mfd/twl4030-power.c
drivers/mfd/viperboard.c
drivers/thermal/imx_thermal.c
drivers/thermal/int340x_thermal/int3403_thermal.c
drivers/thermal/samsung/exynos_tmu_data.c
drivers/thermal/samsung/exynos_tmu_data.h
include/linux/mfd/max77693-private.h
include/linux/ring_buffer.h
init/main.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c

diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 6226aa0..bcf86f9 100644
 #include <asm/vio.h>
 
 
-static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
+       u64 buffer[PLPAR_HCALL_BUFSIZE];
+       size_t size = max < 8 ? max : 8;
        int rc;
 
-       rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+       rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
        if (rc != H_SUCCESS) {
                pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
                return -EIO;
        }
+       memcpy(data, buffer, size);
 
        /* The hypervisor interface returns 64 bits */
-       return 8;
+       return size;
 }
 
 /**
@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
 
 static struct hwrng pseries_rng = {
        .name           = KBUILD_MODNAME,
-       .data_read      = pseries_rng_data_read,
+       .read           = pseries_rng_read,
 };
 
 static int __init pseries_rng_probe(struct vio_dev *dev,
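The hunk above ports the driver from the old ->data_read hook to the current ->read interface, which hands the callback the caller's buffer and its size. A hedged sketch of that contract (example_rng_read and the sample value are invented):

    static int example_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
    {
            u32 sample = 0x12345678;              /* stand-in entropy source */
            size_t n = min(max, sizeof(sample));  /* never overrun the caller's buffer */

            memcpy(data, &sample, n);
            return n;                             /* bytes produced, or -errno on error */
    }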
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 871703c..e1eaf4f 100644
@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
        u32 *desc;
        struct split_key_result result;
        dma_addr_t dma_addr_in, dma_addr_out;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
        if (!desc) {
                dev_err(jrdev, "unable to allocate key input memory\n");
-               return -ENOMEM;
+               return ret;
        }
 
-       init_job_desc(desc, 0);
-
        dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
                                     DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, dma_addr_in)) {
                dev_err(jrdev, "unable to map key input memory\n");
-               kfree(desc);
-               return -ENOMEM;
+               goto out_free;
        }
+
+       dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+                                     DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, dma_addr_out)) {
+               dev_err(jrdev, "unable to map key output memory\n");
+               goto out_unmap_in;
+       }
+
+       init_job_desc(desc, 0);
        append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
 
        /* Sets MDHA up into an HMAC-INIT */
@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
         * FIFO_STORE with the explicit split-key content store
         * (0x26 output type)
         */
-       dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
-                                     DMA_FROM_DEVICE);
-       if (dma_mapping_error(jrdev, dma_addr_out)) {
-               dev_err(jrdev, "unable to map key output memory\n");
-               kfree(desc);
-               return -ENOMEM;
-       }
        append_fifo_store(desc, dma_addr_out, split_key_len,
                          LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
 
@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 
        dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
                         DMA_FROM_DEVICE);
+out_unmap_in:
        dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
-
+out_free:
        kfree(desc);
-
        return ret;
 }
 EXPORT_SYMBOL(gen_split_key);
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 9282381..fe7b3f0 100644
@@ -198,8 +198,7 @@ struct adf_accel_dev {
        struct dentry *debugfs_dir;
        struct list_head list;
        struct module *owner;
-       uint8_t accel_id;
-       uint8_t numa_node;
        struct adf_accel_pci accel_pci_dev;
+       uint8_t accel_id;
 } __packed;
 #endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 5f3fa45..9dd2cb7 100644
@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
-                       ring->inflights = kzalloc_node(sizeof(atomic_t),
-                                                      GFP_KERNEL,
-                                                      accel_dev->numa_node);
+                       ring->inflights =
+                               kzalloc_node(sizeof(atomic_t),
+                                            GFP_KERNEL,
+                                            dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
        int i, ret;
 
        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
-                               accel_dev->numa_node);
+                               dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;
 
        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
-       etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+       etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+                                      dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index f2e2f15..9e9619c 100644
@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        if (unlikely(!n))
                return -EINVAL;
 
-       bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+       bufl = kmalloc_node(sz, GFP_ATOMIC,
+                           dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;
 
@@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                goto err;
 
        for_each_sg(assoc, sg, assoc_n, i) {
+               if (!sg->length)
+                       continue;
                bufl->bufers[bufs].addr = dma_map_single(dev,
                                                         sg_virt(sg),
                                                         sg->length,
@@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                struct qat_alg_buf *bufers;
 
                buflout = kmalloc_node(sz, GFP_ATOMIC,
-                                      inst->accel_dev->numa_node);
+                                      dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err;
                bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 0d59bcb..828f2a6 100644
@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 
        list_for_each(itr, adf_devmgr_get_head()) {
                accel_dev = list_entry(itr, struct adf_accel_dev, list);
-               if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+               if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
+                       dev_to_node(&GET_DEV(accel_dev)) < 0)
+                               && adf_dev_started(accel_dev))
                        break;
                accel_dev = NULL;
        }
        if (!accel_dev) {
-               pr_err("QAT: Could not find device on give node\n");
+               pr_err("QAT: Could not find device on node %d\n", node);
                accel_dev = adf_devmgr_get_first();
        }
        if (!accel_dev || !adf_dev_started(accel_dev))
@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 
        for (i = 0; i < num_inst; i++) {
                inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
-                                   accel_dev->numa_node);
+                                   dev_to_node(&GET_DEV(accel_dev)));
                if (!inst)
                        goto err;
 
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
index 978d6c5..53c491b 100644
@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
        uint64_t reg_val;
 
        admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
-                            accel_dev->numa_node);
+                            dev_to_node(&GET_DEV(accel_dev)));
        if (!admin)
                return -ENOMEM;
        admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 0d0435a..948f66b 100644
@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
        kfree(accel_dev);
 }
 
-static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
-{
-       unsigned int bus_per_cpu = 0;
-       struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
-
-       if (!c->phys_proc_id)
-               return 0;
-
-       bus_per_cpu = 256 / (c->phys_proc_id + 1);
-
-       if (bus_per_cpu != 0)
-               return pdev->bus->number / bus_per_cpu;
-       return 0;
-}
-
 static int qat_dev_start(struct adf_accel_dev *accel_dev)
 {
        int cpus = num_online_cpus();
@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        void __iomem *pmisc_bar_addr = NULL;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       uint8_t node;
        int ret;
 
        switch (ent->device) {
@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return -ENODEV;
        }
 
-       node = adf_get_dev_node_id(pdev);
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /* If the accelerator is connected to a node with no memory
+                * there is no point in using the accelerator since the remote
+                * memory transaction will be very slow. */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
        if (!accel_dev)
                return -ENOMEM;
 
-       accel_dev->numa_node = node;
        INIT_LIST_HEAD(&accel_dev->crypto_list);
 
        /* Add accel device to accel table.
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        accel_dev->owner = THIS_MODULE;
        /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
        if (!hw_data) {
                ret = -ENOMEM;
                goto out_err;
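These QAT hunks drop the driver's home-grown numa_node bookkeeping in favour of the node the PCI core already records for the device; the remaining QAT hunk below follows the same pattern. A minimal sketch of that idiom (foo_dev and foo_alloc are placeholders):

    static struct foo_dev *foo_alloc(struct pci_dev *pdev)
    {
            /* dev_to_node() returns NUMA_NO_NODE (-1) when the node is unknown,
             * in which case kzalloc_node() behaves like a plain kzalloc(). */
            return kzalloc_node(sizeof(struct foo_dev), GFP_KERNEL,
                                dev_to_node(&pdev->dev));
    }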
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index 67ec61e..d96ee21 100644
@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
        uint32_t msix_num_entries = hw_data->num_banks + 1;
 
        entries = kzalloc_node(msix_num_entries * sizeof(*entries),
-                              GFP_KERNEL, accel_dev->numa_node);
+                              GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
        if (!entries)
                return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 055d5e7..2318b4c 100644
@@ -986,6 +986,15 @@ static int i915_pm_freeze(struct device *dev)
        return i915_drm_freeze(drm_dev);
 }
 
+static int i915_pm_freeze_late(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       struct drm_i915_private *dev_priv = drm_dev->dev_private;
+
+       return intel_suspend_complete(dev_priv);
+}
+
 static int i915_pm_thaw_early(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -1570,6 +1579,7 @@ static const struct dev_pm_ops i915_pm_ops = {
        .resume_early = i915_pm_resume_early,
        .resume = i915_pm_resume,
        .freeze = i915_pm_freeze,
+       .freeze_late = i915_pm_freeze_late,
        .thaw_early = i915_pm_thaw_early,
        .thaw = i915_pm_thaw,
        .poweroff = i915_pm_poweroff,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b672b84..728938f 100644
@@ -1902,6 +1902,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
              GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
              GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
+       if (!USES_PPGTT(dev_priv->dev))
+               /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
+                * so RTL will always use the value corresponding to
+                * pat_sel = 000".
+                * So let's disable cache for GGTT to avoid screen corruptions.
+                * MOCS still can be used though.
+                * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
+                * before this patch, i.e. the same uncached + snooping access
+                * like on gen6/7 seems to be in effect.
+                * - So this just fixes blitter/render access. Again it looks
+                * like it's not just uncached access, but uncached + snooping.
+                * So we can still hold onto all our assumptions wrt cpu
+                * clflushing on LLC machines.
+                */
+               pat = GEN8_PPAT(0, GEN8_PPAT_UC);
+
        /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
         * write would work. */
        I915_WRITE(GEN8_PRIVATE_PAT, pat);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 0e018cb..41b3be2 100644
@@ -1098,12 +1098,25 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
+       int min;
 
        WARN_ON(panel->backlight.max == 0);
 
+       /*
+        * XXX: If the vbt value is 255, it makes min equal to max, which leads
+        * to problems. There are such machines out there. Either our
+        * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
+        * against this by letting the minimum be at most (arbitrarily chosen)
+        * 25% of the max.
+        */
+       min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
+       if (min != dev_priv->vbt.backlight.min_brightness) {
+               DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
+                             dev_priv->vbt.backlight.min_brightness, min);
+       }
+
        /* vbt value is a coefficient in range [0..255] */
-       return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
-                    0, panel->backlight.max);
+       return scale(min, 0, 255, 0, panel->backlight.max);
 }
 
 static int bdw_setup_backlight(struct intel_connector *connector)
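The clamp above feeds the driver's existing scale() helper, which maps the VBT coefficient linearly onto the panel's PWM range. A rough standalone illustration of that arithmetic (rescale() and all values below are invented for the example):

    #include <stdio.h>

    /* Linear rescale of v from [smin, smax] onto [dmin, dmax]. */
    static unsigned int rescale(unsigned int v, unsigned int smin, unsigned int smax,
                                unsigned int dmin, unsigned int dmax)
    {
            return dmin + (v - smin) * (dmax - dmin) / (smax - smin);
    }

    int main(void)
    {
            unsigned int vbt_min = 255;                      /* bogus VBT value */
            unsigned int min = vbt_min > 64 ? 64 : vbt_min;  /* clamp to ~25% of 255 */
            unsigned int max_duty = 937;                     /* example backlight.max */

            printf("min duty = %u of %u\n", rescale(min, 0, 255, 0, max_duty), max_duty);
            return 0;
    }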
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 377afa5..89c01fa 100644
@@ -4313,8 +4313,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
        /* init the CE partitions.  CE only used for gfx on CIK */
        radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
        radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
-       radeon_ring_write(ring, 0xc000);
-       radeon_ring_write(ring, 0xc000);
+       radeon_ring_write(ring, 0x8000);
+       radeon_ring_write(ring, 0x8000);
 
        /* setup clear context state */
        radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
@@ -9447,6 +9447,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev)
        u32 num_heads = 0, lb_size;
        int i;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        for (i = 0; i < rdev->num_crtc; i++) {
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 4e8432d..d748963 100644
@@ -667,17 +667,20 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        struct radeon_ib ib;
        unsigned i;
+       unsigned index;
        int r;
-       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;
+       u64 gpu_addr;
 
-       if (!ptr) {
-               DRM_ERROR("invalid vram scratch pointer\n");
-               return -EINVAL;
-       }
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               index = R600_WB_DMA_RING_TEST_OFFSET;
+       else
+               index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+       gpu_addr = rdev->wb.gpu_addr + index;
 
        tmp = 0xCAFEDEAD;
-       writel(tmp, ptr);
+       rdev->wb.wb[index/4] = cpu_to_le32(tmp);
 
        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
@@ -686,8 +689,8 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
 
        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-       ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-       ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;
@@ -704,7 +707,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = readl(ptr);
+               tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f37d39d..85995b4 100644
@@ -2345,6 +2345,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
        u32 num_heads = 0, lb_size;
        int i;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        for (i = 0; i < rdev->num_crtc; i++) {
@@ -2552,6 +2555,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                        tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
                                        WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 10f8be0..b53b31a 100644
@@ -3207,6 +3207,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
        uint32_t pixel_bytes1 = 0;
        uint32_t pixel_bytes2 = 0;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled) {
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index aabc343..cf0df45 100644
@@ -338,17 +338,17 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        struct radeon_ib ib;
        unsigned i;
+       unsigned index;
        int r;
-       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;
+       u64 gpu_addr;
 
-       if (!ptr) {
-               DRM_ERROR("invalid vram scratch pointer\n");
-               return -EINVAL;
-       }
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               index = R600_WB_DMA_RING_TEST_OFFSET;
+       else
+               index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
 
-       tmp = 0xCAFEDEAD;
-       writel(tmp, ptr);
+       gpu_addr = rdev->wb.gpu_addr + index;
 
        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
@@ -357,8 +357,8 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        }
 
        ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
-       ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
-       ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
        ib.ptr[3] = 0xDEADBEEF;
        ib.length_dw = 4;
 
@@ -374,7 +374,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
-               tmp = readl(ptr);
+               tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f6db46..9acb1c3 100644
@@ -879,6 +879,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
        u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
        /* FIXME: implement full support */
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3462b64..0a2d36e 100644
@@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
        u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
        u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8a477bf..c55d653 100644
@@ -1277,6 +1277,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eeea5b6..7d5083d 100644
@@ -2384,6 +2384,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
        u32 num_heads = 0, lb_size;
        int i;
 
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        radeon_update_display_priority(rdev);
 
        for (i = 0; i < rdev->num_crtc; i++) {
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index fcdbde4..3057dfc 100644
@@ -234,7 +234,7 @@ static const struct pci_device_id fam15h_power_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
-       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index d2bf2c9..6a30eee 100644
@@ -181,7 +181,7 @@ static int __init populate_attr_groups(struct platform_device *pdev)
 
        opal = of_find_node_by_path("/ibm,opal/sensors");
        if (!opal) {
-               dev_err(&pdev->dev, "Opal node 'sensors' not found\n");
+               dev_dbg(&pdev->dev, "Opal node 'sensors' not found\n");
                return -ENODEV;
        }
 
@@ -335,7 +335,9 @@ static int __init ibmpowernv_init(void)
 
        err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe);
        if (err) {
-               pr_err("Platfrom driver probe failed\n");
+               if (err != -ENODEV)
+                       pr_err("Platform driver probe failed (%d)\n", err);
+
                goto exit_device_del;
        }
 
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 823c877..1991d90 100644
@@ -161,10 +161,17 @@ static int pwm_fan_suspend(struct device *dev)
 static int pwm_fan_resume(struct device *dev)
 {
        struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+       unsigned long duty;
+       int ret;
 
-       if (ctx->pwm_value)
-               return pwm_enable(ctx->pwm);
-       return 0;
+       if (ctx->pwm_value == 0)
+               return 0;
+
+       duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);
+       ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
+       if (ret)
+               return ret;
+       return pwm_enable(ctx->pwm);
 }
 #endif
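The resume path above now recomputes the duty cycle from the stored pwm_value before re-enabling the PWM, instead of only calling pwm_enable(). A standalone sketch of that arithmetic (the period and pwm_value are example numbers; MAX_PWM is assumed to be the driver's 255 full-scale value):

    #include <stdio.h>

    #define MAX_PWM 255
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long period = 40000;   /* PWM period in ns (example) */
            unsigned long pwm_value = 128;  /* roughly 50% requested fan speed */
            unsigned long duty = DIV_ROUND_UP(pwm_value * (period - 1), MAX_PWM);

            printf("duty = %lu ns of a %lu ns period\n", duty, period);
            return 0;
    }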
 
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cf008f4..711773e 100644
@@ -240,7 +240,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                goto err_irq_charger;
        }
 
-       ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
+       ret = regmap_add_irq_chip(max77693->regmap_muic, max77693->irq,
                                IRQF_ONESHOT | IRQF_SHARED |
                                IRQF_TRIGGER_FALLING, 0,
                                &max77693_muic_irq_chip,
@@ -250,6 +250,17 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                goto err_irq_muic;
        }
 
+       /* Unmask interrupts from all blocks in interrupt source register */
+       ret = regmap_update_bits(max77693->regmap,
+                               MAX77693_PMIC_REG_INTSRC_MASK,
+                               SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL);
+       if (ret < 0) {
+               dev_err(max77693->dev,
+                       "Could not unmask interrupts in INTSRC: %d\n",
+                       ret);
+               goto err_intsrc;
+       }
+
        pm_runtime_set_active(max77693->dev);
 
        ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
@@ -261,6 +272,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
 
 err_mfd:
        mfd_remove_devices(max77693->dev);
+err_intsrc:
        regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
 err_irq_muic:
        regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index f2643c2..30f7ca8 100644
@@ -947,6 +947,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
        mutex_unlock(&pcr->pcr_mutex);
 }
 
+#ifdef CONFIG_PM
 static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
 {
        if (pcr->ops->turn_off_led)
@@ -961,6 +962,7 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
        if (pcr->ops->force_power_down)
                pcr->ops->force_power_down(pcr, pm_state);
 }
+#endif
 
 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 {
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 2d045f2..bee0abf 100644
@@ -269,7 +269,7 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE24XX_REG_CHIP_ID          0x80
 #define STMPE24XX_REG_IEGPIOR_LSB      0x18
 #define STMPE24XX_REG_ISGPIOR_MSB      0x19
-#define STMPE24XX_REG_GPMR_LSB         0xA5
+#define STMPE24XX_REG_GPMR_LSB         0xA4
 #define STMPE24XX_REG_GPSR_LSB         0x85
 #define STMPE24XX_REG_GPCR_LSB         0x88
 #define STMPE24XX_REG_GPDR_LSB         0x8B
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index cf92a6d..50f9091 100644
@@ -44,6 +44,15 @@ static u8 twl4030_start_script_address = 0x2b;
 #define PWR_DEVSLP             BIT(1)
 #define PWR_DEVOFF             BIT(0)
 
+/* Register bits for CFG_P1_TRANSITION (also for P2 and P3) */
+#define STARTON_SWBUG          BIT(7)  /* Start on watchdog */
+#define STARTON_VBUS           BIT(5)  /* Start on VBUS */
+#define STARTON_VBAT           BIT(4)  /* Start on battery insert */
+#define STARTON_RTC            BIT(3)  /* Start on RTC */
+#define STARTON_USB            BIT(2)  /* Start on USB host */
+#define STARTON_CHG            BIT(1)  /* Start on charger */
+#define STARTON_PWON           BIT(0)  /* Start on PWRON button */
+
 #define SEQ_OFFSYNC            (1 << 0)
 
 #define PHY_TO_OFF_PM_MASTER(p)                (p - 0x36)
@@ -606,6 +615,44 @@ twl4030_power_configure_resources(const struct twl4030_power_data *pdata)
        return 0;
 }
 
+static int twl4030_starton_mask_and_set(u8 bitmask, u8 bitvalues)
+{
+       u8 regs[3] = { TWL4030_PM_MASTER_CFG_P1_TRANSITION,
+                      TWL4030_PM_MASTER_CFG_P2_TRANSITION,
+                      TWL4030_PM_MASTER_CFG_P3_TRANSITION, };
+       u8 val;
+       int i, err;
+
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
+       if (err)
+               goto relock;
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+                              TWL4030_PM_MASTER_KEY_CFG2,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
+       if (err)
+               goto relock;
+
+       for (i = 0; i < sizeof(regs); i++) {
+               err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER,
+                                     &val, regs[i]);
+               if (err)
+                       break;
+               val = (~bitmask & val) | (bitmask & bitvalues);
+               err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+                                      val, regs[i]);
+               if (err)
+                       break;
+       }
+
+       if (err)
+               pr_err("TWL4030 Register access failed: %i\n", err);
+
+relock:
+       return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+                               TWL4030_PM_MASTER_PROTECT_KEY);
+}
+
 /*
  * In master mode, start the power off sequence.
  * After a successful execution, TWL shuts down the power to the SoC
@@ -615,6 +662,11 @@ void twl4030_power_off(void)
 {
        int err;
 
+       /* Disable start on charger or VBUS as it can break poweroff */
+       err = twl4030_starton_mask_and_set(STARTON_VBUS | STARTON_CHG, 0);
+       if (err)
+               pr_err("TWL4030 Unable to configure start-up\n");
+
        err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
                               TWL4030_PM_MASTER_P1_SW_EVENTS);
        if (err)
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
index e00f534..3c2b8f9 100644
@@ -93,8 +93,9 @@ static int vprbrd_probe(struct usb_interface *interface,
                 version >> 8, version & 0xff,
                 vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
 
-       ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
-                               ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
+       ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO,
+                               vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0,
+                               NULL);
        if (ret != 0) {
                dev_err(&interface->dev, "Failed to add mfd devices to core.");
                goto error;
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 461bf3d..5a1f107 100644
@@ -459,6 +459,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
        int measure_freq;
        int ret;
 
+       if (!cpufreq_get_current_driver()) {
+               dev_dbg(&pdev->dev, "no cpufreq driver!");
+               return -EPROBE_DEFER;
+       }
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -521,6 +525,30 @@ static int imx_thermal_probe(struct platform_device *pdev)
                return ret;
        }
 
+       data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(data->thermal_clk)) {
+               ret = PTR_ERR(data->thermal_clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to get thermal clk: %d\n", ret);
+               cpufreq_cooling_unregister(data->cdev);
+               return ret;
+       }
+
+       /*
+        * Thermal sensor needs clk on to get correct value, normally
+        * we should enable its clk before taking measurement and disable
+        * clk after measurement is done, but if alarm function is enabled,
+        * hardware will auto measure the temperature periodically, so we
+        * need to keep the clk always on for alarm function.
+        */
+       ret = clk_prepare_enable(data->thermal_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+               cpufreq_cooling_unregister(data->cdev);
+               return ret;
+       }
+
        data->tz = thermal_zone_device_register("imx_thermal_zone",
                                                IMX_TRIP_NUM,
                                                BIT(IMX_TRIP_PASSIVE), data,
@@ -531,26 +559,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
                ret = PTR_ERR(data->tz);
                dev_err(&pdev->dev,
                        "failed to register thermal zone device %d\n", ret);
+               clk_disable_unprepare(data->thermal_clk);
                cpufreq_cooling_unregister(data->cdev);
                return ret;
        }
 
-       data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(data->thermal_clk)) {
-               dev_warn(&pdev->dev, "failed to get thermal clk!\n");
-       } else {
-               /*
-                * Thermal sensor needs clk on to get correct value, normally
-                * we should enable its clk before taking measurement and disable
-                * clk after measurement is done, but if alarm function is enabled,
-                * hardware will auto measure the temperature periodically, so we
-                * need to keep the clk always on for alarm function.
-                */
-               ret = clk_prepare_enable(data->thermal_clk);
-               if (ret)
-                       dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
-       }
-
        /* Enable measurements at ~ 10 Hz */
        regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
        measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index d20dba9..6e9fb62 100644
@@ -92,7 +92,13 @@ static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
        if (ACPI_FAILURE(status))
                return -EIO;
 
-       *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET);
+       /*
+        * Thermal hysteresis represents a temperature difference.
+        * Kelvin and Celsius have same degree size. So the
+        * conversion here between tenths of degree Kelvin unit
+        * and Milli-Celsius unit is just to multiply 100.
+        */
+       *temp = hyst * 100;
 
        return 0;
 }
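The conversion above holds because hysteresis is a temperature difference: the 273.15 K offset cancels, leaving only the factor between tenths of a degree and milli-degrees. A small standalone check (the 20 deci-Kelvin input is an invented example):

    #include <assert.h>

    int main(void)
    {
            unsigned long hyst = 20;            /* ACPI value in tenths of a degree */
            unsigned long milli_c = hyst * 100; /* same rule as the driver change */

            assert(milli_c == 2000);            /* 2.0 degrees of hysteresis */
            return 0;
    }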
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 2683d28..1724f6c 100644
@@ -264,7 +264,6 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
 static const struct exynos_tmu_registers exynos5260_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
-       .tmu_ctrl = EXYNOS_TMU_REG_CONTROL1,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
        .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
        .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index 65e2ea6..63de598 100644
@@ -75,7 +75,6 @@
 #define EXYNOS_MAX_TRIGGER_PER_REG     4
 
 /* Exynos5260 specific */
-#define EXYNOS_TMU_REG_CONTROL1                        0x24
 #define EXYNOS5260_TMU_REG_INTEN               0xC0
 #define EXYNOS5260_TMU_REG_INTSTAT             0xC4
 #define EXYNOS5260_TMU_REG_INTCLEAR            0xC8
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index fc17d56..582e67f 100644
@@ -330,6 +330,13 @@ enum max77693_irq_source {
        MAX77693_IRQ_GROUP_NR,
 };
 
+#define SRC_IRQ_CHARGER                        BIT(0)
+#define SRC_IRQ_TOP                    BIT(1)
+#define SRC_IRQ_FLASH                  BIT(2)
+#define SRC_IRQ_MUIC                   BIT(3)
+#define SRC_IRQ_ALL                    (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
+                                               | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
+
 #define LED_IRQ_FLED2_OPEN             BIT(0)
 #define LED_IRQ_FLED2_SHORT            BIT(1)
 #define LED_IRQ_FLED1_OPEN             BIT(2)
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 49a4d6f..e2c13cd 100644
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
        __ring_buffer_alloc((size), (flags), &__key);   \
 })
 
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table);
 
diff --git a/init/main.c b/init/main.c
index 800a0da..321d0ce 100644
@@ -544,7 +544,7 @@ asmlinkage __visible void __init start_kernel(void)
                                  static_command_line, __start___param,
                                  __stop___param - __start___param,
                                  -1, -1, &unknown_bootoption);
-       if (after_dashes)
+       if (!IS_ERR_OR_NULL(after_dashes))
                parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
                           set_init_arg);
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2d75c94..a56e07c 100644
@@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * ring_buffer_wait - wait for input to the ring buffer
  * @buffer: buffer to wait on
  * @cpu: the cpu buffer to wait on
+ * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 {
-       struct ring_buffer_per_cpu *cpu_buffer;
+       struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
        DEFINE_WAIT(wait);
        struct rb_irq_work *work;
+       int ret = 0;
 
        /*
         * Depending on what the caller is waiting for, either any
@@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
        }
 
 
-       prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+       while (true) {
+               prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 
-       /*
-        * The events can happen in critical sections where
-        * checking a work queue can cause deadlocks.
-        * After adding a task to the queue, this flag is set
-        * only to notify events to try to wake up the queue
-        * using irq_work.
-        *
-        * We don't clear it even if the buffer is no longer
-        * empty. The flag only causes the next event to run
-        * irq_work to do the work queue wake up. The worse
-        * that can happen if we race with !trace_empty() is that
-        * an event will cause an irq_work to try to wake up
-        * an empty queue.
-        *
-        * There's no reason to protect this flag either, as
-        * the work queue and irq_work logic will do the necessary
-        * synchronization for the wake ups. The only thing
-        * that is necessary is that the wake up happens after
-        * a task has been queued. It's OK for spurious wake ups.
-        */
-       work->waiters_pending = true;
+               /*
+                * The events can happen in critical sections where
+                * checking a work queue can cause deadlocks.
+                * After adding a task to the queue, this flag is set
+                * only to notify events to try to wake up the queue
+                * using irq_work.
+                *
+                * We don't clear it even if the buffer is no longer
+                * empty. The flag only causes the next event to run
+                * irq_work to do the work queue wake up. The worse
+                * that can happen if we race with !trace_empty() is that
+                * an event will cause an irq_work to try to wake up
+                * an empty queue.
+                *
+                * There's no reason to protect this flag either, as
+                * the work queue and irq_work logic will do the necessary
+                * synchronization for the wake ups. The only thing
+                * that is necessary is that the wake up happens after
+                * a task has been queued. It's OK for spurious wake ups.
+                */
+               work->waiters_pending = true;
+
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
+               if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+                       break;
+
+               if (cpu != RING_BUFFER_ALL_CPUS &&
+                   !ring_buffer_empty_cpu(buffer, cpu)) {
+                       unsigned long flags;
+                       bool pagebusy;
+
+                       if (!full)
+                               break;
+
+                       raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+                       pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+                       raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+                       if (!pagebusy)
+                               break;
+               }
 
-       if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
-           (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
                schedule();
+       }
 
        finish_wait(&work->waiters, &wait);
-       return 0;
+
+       return ret;
 }
 
 /**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8a52839..92f4a6c 100644
@@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static int wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter, bool full)
 {
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;
 
-       return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+       return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+                               full);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp)
 
                mutex_unlock(&iter->mutex);
 
-               ret = wait_on_pipe(iter);
+               ret = wait_on_pipe(iter, false);
 
                mutex_lock(&iter->mutex);
 
                if (ret)
                        return ret;
-
-               if (signal_pending(current))
-                       return -EINTR;
        }
 
        return 1;
@@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                                goto out_unlock;
                        }
                        mutex_unlock(&trace_types_lock);
-                       ret = wait_on_pipe(iter);
+                       ret = wait_on_pipe(iter, false);
                        mutex_lock(&trace_types_lock);
                        if (ret) {
                                size = ret;
                                goto out_unlock;
                        }
-                       if (signal_pending(current)) {
-                               size = -EINTR;
-                               goto out_unlock;
-                       }
                        goto again;
                }
                size = 0;
@@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        };
        struct buffer_ref *ref;
        int entries, size, i;
-       ssize_t ret;
+       ssize_t ret = 0;
 
        mutex_lock(&trace_types_lock);
 
@@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                int r;
 
                ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-               if (!ref)
+               if (!ref) {
+                       ret = -ENOMEM;
                        break;
+               }
 
                ref->ref = 1;
                ref->buffer = iter->trace_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (!ref->page) {
+                       ret = -ENOMEM;
                        kfree(ref);
                        break;
                }
@@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
        /* did we read anything? */
        if (!spd.nr_pages) {
+               if (ret)
+                       goto out;
+
                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
                        ret = -EAGAIN;
                        goto out;
                }
                mutex_unlock(&trace_types_lock);
-               ret = wait_on_pipe(iter);
+               ret = wait_on_pipe(iter, true);
                mutex_lock(&trace_types_lock);
                if (ret)
                        goto out;
-               if (signal_pending(current)) {
-                       ret = -EINTR;
-                       goto out;
-               }
+
                goto again;
        }