Merge branch 'akpm' (patches from Andrew)
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 15 Aug 2015 00:05:26 +0000 (17:05 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 15 Aug 2015 00:05:26 +0000 (17:05 -0700)
Merge fixes from Andrew Morton:
 "11 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  Update maintainers for DRM STI driver
  mm: cma: mark cma_bitmap_maxno() inline in header
  zram: fix pool name truncation
  memory-hotplug: fix wrong edge when hot add a new node
  .mailmap: Andrey Ryabinin has moved
  ipc/sem.c: update/correct memory barriers
  mm/hwpoison: fix panic due to split huge zero page
  ipc,sem: remove uneeded sem_undo_list lock usage in exit_sem()
  ipc,sem: fix use after free on IPC_RMID after a task using same semaphore set exits
  mm/hwpoison: fix fail isolate hugetlbfs page w/ refcount held
  mm/hwpoison: fix page refcount of unknown non LRU page

15 files changed:
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_cqm.c
drivers/clk/pxa/clk-pxa3xx.c
drivers/clocksource/sh_cmt.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/locking/qspinlock_paravirt.h
tools/perf/config/Makefile
tools/perf/util/stat-shadow.c

arch/x86/kernel/cpu/perf_event_intel.c
index b9826a9..6326ae2 100644
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
-                       return NOTIFY_BAD;
+                       goto err;
        }
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
                cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
                if (!cpuc->constraint_list)
-                       return NOTIFY_BAD;
+                       goto err_shared_regs;
 
                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-               if (!cpuc->excl_cntrs) {
-                       kfree(cpuc->constraint_list);
-                       kfree(cpuc->shared_regs);
-                       return NOTIFY_BAD;
-               }
+               if (!cpuc->excl_cntrs)
+                       goto err_constraint_list;
+
                cpuc->excl_thread_id = 0;
        }
 
        return NOTIFY_OK;
+
+err_constraint_list:
+       kfree(cpuc->constraint_list);
+       cpuc->constraint_list = NULL;
+
+err_shared_regs:
+       kfree(cpuc->shared_regs);
+       cpuc->shared_regs = NULL;
+
+err:
+       return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
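
The hunk above converts the failure paths of intel_pmu_cpu_prepare() from open-coded kfree() calls into a single unwind ladder. Below is a minimal sketch of that goto-unwind idiom, with a hypothetical struct and plain calloc()/free() standing in for the kernel helpers; it is illustrative only, not the driver's code.

#include <stdlib.h>

struct cpu_state {
	void *shared_regs;
	void *constraint_list;
};

/* Allocate both buffers; on failure, unwind in reverse order. */
static int cpu_state_prepare(struct cpu_state *s, size_t n)
{
	s->shared_regs = calloc(1, n);
	if (!s->shared_regs)
		goto err;

	s->constraint_list = calloc(1, n);
	if (!s->constraint_list)
		goto err_shared_regs;

	return 0;			/* the patch returns NOTIFY_OK here  */

err_shared_regs:
	free(s->shared_regs);
	s->shared_regs = NULL;
err:
	return -1;			/* the patch returns NOTIFY_BAD here */
}
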
arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 63eb68b..377e8f8 100644
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
        cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
        struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
        unsigned int cpu  = (unsigned long)hcpu;
 
        switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               intel_cqm_cpu_prepare(cpu);
-               break;
        case CPU_DOWN_PREPARE:
                intel_cqm_cpu_exit(cpu);
                break;
        case CPU_STARTING:
+               intel_cqm_cpu_starting(cpu);
                cqm_pick_event_reader(cpu);
                break;
        }
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
                goto out;
 
        for_each_online_cpu(i) {
-               intel_cqm_cpu_prepare(i);
+               intel_cqm_cpu_starting(i);
                cqm_pick_event_reader(i);
        }
 
drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1e..ac03ba4 100644
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
 PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
 PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
 
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
 #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
                    div_hp, bit, is_lp, flags)                          \
        PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
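
The one-line clk-pxa3xx fix swaps which register the macro returns: clock-enable bits above 31 belong to the second bank. A tiny standalone illustration of selecting a register bank by bit index follows; the register names are invented, not the driver's.

#include <stdint.h>

static uint32_t bank_a, bank_b;		/* stand-ins for the two enable registers */

/* Bits 0-31 live in bank A, bits 32-63 in bank B. */
static uint32_t *cken_reg(unsigned int bit)
{
	return (bit > 31) ? &bank_b : &bank_a;
}

static void cken_set(unsigned int bit)
{
	*cken_reg(bit) |= 1u << (bit & 31);
}
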
drivers/clocksource/sh_cmt.c
index b8ff3c6..c96de14 100644
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
        pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
 }
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
        sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 }
drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8..2a65235 100644
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
        spin_lock_init(&ctx->lock);
        platform_set_drvdata(pdev, ctx);
 
-       pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
        ret = exynos_drm_ippdrv_register(ippdrv);
drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2..f1c6b76 100644
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e2864..4a00990 100644
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
 {
        struct hdmi_context *hdata = ctx_from_connector(connector);
        struct edid *edid;
+       int ret;
 
        if (!hdata->ddc_adpt)
                return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
 
        drm_mode_connector_update_edid_property(connector, edid);
 
-       return drm_add_edid_modes(connector, edid);
+       ret = drm_add_edid_modes(connector, edid);
+
+       kfree(edid);
+
+       return ret;
 }
 
 static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db..4706b56 100644
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
        /* handling VSYNC */
        if (val & MXR_INT_STATUS_VSYNC) {
+               /* vsync interrupt use different bit for read and clear */
+               val |= MXR_INT_CLEAR_VSYNC;
+               val &= ~MXR_INT_STATUS_VSYNC;
+
                /* interlace scan need to check shadow register */
                if (ctx->interlace) {
                        base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
 out:
        /* clear interrupts */
-       if (~val & MXR_INT_EN_VSYNC) {
-               /* vsync interrupt use different bit for read and clear */
-               val &= ~MXR_INT_EN_VSYNC;
-               val |= MXR_INT_CLEAR_VSYNC;
-       }
        mixer_reg_write(res, MXR_INT_STATUS, val);
 
        spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
        }
 
        /* enable vsync interrupt */
-       mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
-                       MXR_INT_EN_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
 
        return 0;
 }
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
        struct mixer_context *mixer_ctx = crtc->ctx;
        struct mixer_resources *res = &mixer_ctx->mixer_res;
 
+       if (!mixer_ctx->powered) {
+               mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+               return;
+       }
+
        /* disable vsync interrupt */
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
+       if (ctx->int_en & MXR_INT_EN_VSYNC)
+               mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
        mixer_win_reset(ctx);
 }
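
Both mixer hunks turn on the same detail: the VSYNC interrupt is reported through one bit of the status register but acknowledged through a different one, so the handler must translate the bit before writing the value back. A stripped-down, hypothetical sketch of that pattern (bit positions made up for illustration):

#include <stdint.h>

#define INT_STATUS_VSYNC	(1u << 0)	/* set by hardware when VSYNC fires */
#define INT_CLEAR_VSYNC		(1u << 11)	/* write 1 here to acknowledge it   */

/* Acknowledge VSYNC: drop the status bit, raise the clear bit, write back. */
static void ack_vsync(volatile uint32_t *status_reg)
{
	uint32_t val = *status_reg;

	if (val & INT_STATUS_VSYNC) {
		val &= ~INT_STATUS_VSYNC;
		val |= INT_CLEAR_VSYNC;
		*status_reg = val;
	}
}
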
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 52c22b0..e10f964 100644
@@ -165,31 +165,15 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
        return 0;
 }
 
-static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
-{
-       struct nvkm_object *obj = (void *)chan;
-       struct gk104_fifo_priv *priv = (void *)obj->engine;
-
-       nv_wr32(priv, 0x002634, chan->base.chid);
-       if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
-               nv_error(priv, "channel %d [%s] kick timeout\n",
-                        chan->base.chid, nvkm_client_name(chan));
-               return -EBUSY;
-       }
-
-       return 0;
-}
-
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                          struct nvkm_object *object)
 {
        struct nvkm_bar *bar = nvkm_bar(parent);
+       struct gk104_fifo_priv *priv = (void *)parent->engine;
        struct gk104_fifo_base *base = (void *)parent->parent;
        struct gk104_fifo_chan *chan = (void *)parent;
        u32 addr;
-       int ret;
 
        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_SW    : return 0;
@@ -204,9 +188,13 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       ret = gk104_fifo_chan_kick(chan);
-       if (ret && suspend)
-               return ret;
+       nv_wr32(priv, 0x002634, chan->base.chid);
+       if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+               nv_error(priv, "channel %d [%s] kick timeout\n",
+                        chan->base.chid, nvkm_client_name(chan));
+               if (suspend)
+                       return -EBUSY;
+       }
 
        if (addr) {
                nv_wo32(base, addr + 0x00, 0x00000000);
@@ -331,7 +319,6 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
                gk104_fifo_runlist_update(priv, chan->engine);
        }
 
-       gk104_fifo_chan_kick(chan);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
        return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8da..97ad3bc 100644
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
                                     true, NULL);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_err_nores;
 
        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        vmw_resource_relocations_free(&sw_context->res_relocations);
 
        vmw_fifo_commit(dev_priv, command_size);
+       mutex_unlock(&dev_priv->binding_mutex);
 
        vmw_query_bo_switch_commit(dev_priv, sw_context);
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                DRM_ERROR("Fence submission error. Syncing.\n");
 
        vmw_resource_list_unreserve(&sw_context->resource_list, false);
-       mutex_unlock(&dev_priv->binding_mutex);
 
        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                    (void *) fence);
kernel/events/core.c
index d3dae34..e6feb51 100644
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
        perf_pmu_disable(event->pmu);
 
-       event->tstamp_running += tstamp - event->tstamp_stopped;
-
        perf_set_shadow_time(event, ctx, tstamp);
 
        perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
                goto out;
        }
 
+       event->tstamp_running += tstamp - event->tstamp_stopped;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
                perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-       struct perf_event_context *ctx = event->ctx;
-       int ret = 0, active;
+struct period_event {
+       struct perf_event *event;
        u64 value;
+};
 
-       if (!is_sampling_event(event))
-               return -EINVAL;
-
-       if (copy_from_user(&value, arg, sizeof(value)))
-               return -EFAULT;
-
-       if (!value)
-               return -EINVAL;
+static int __perf_event_period(void *info)
+{
+       struct period_event *pe = info;
+       struct perf_event *event = pe->event;
+       struct perf_event_context *ctx = event->ctx;
+       u64 value = pe->value;
+       bool active;
 
-       raw_spin_lock_irq(&ctx->lock);
+       raw_spin_lock(&ctx->lock);
        if (event->attr.freq) {
-               if (value > sysctl_perf_event_sample_rate) {
-                       ret = -EINVAL;
-                       goto unlock;
-               }
-
                event->attr.sample_freq = value;
        } else {
                event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
                event->pmu->start(event, PERF_EF_RELOAD);
                perf_pmu_enable(ctx->pmu);
        }
+       raw_spin_unlock(&ctx->lock);
 
-unlock:
+       return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+       struct period_event pe = { .event = event, };
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task;
+       u64 value;
+
+       if (!is_sampling_event(event))
+               return -EINVAL;
+
+       if (copy_from_user(&value, arg, sizeof(value)))
+               return -EFAULT;
+
+       if (!value)
+               return -EINVAL;
+
+       if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+               return -EINVAL;
+
+       task = ctx->task;
+       pe.value = value;
+
+       if (!task) {
+               cpu_function_call(event->cpu, __perf_event_period, &pe);
+               return 0;
+       }
+
+retry:
+       if (!task_function_call(task, __perf_event_period, &pe))
+               return 0;
+
+       raw_spin_lock_irq(&ctx->lock);
+       if (ctx->is_active) {
+               raw_spin_unlock_irq(&ctx->lock);
+               task = ctx->task;
+               goto retry;
+       }
+
+       __perf_event_period(&pe);
        raw_spin_unlock_irq(&ctx->lock);
 
-       return ret;
+       return 0;
 }
 
 static const struct file_operations perf_fops;
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+       /* only the parent has fasync state */
+       if (event->parent)
+               event = event->parent;
+       return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
        ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
-               kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+               kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
                event->pending_kill = 0;
        }
 }
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
        else
                perf_event_output(event, data, regs);
 
-       if (event->fasync && event->pending_kill) {
+       if (*perf_event_fasync(event) && event->pending_kill) {
                event->pending_wakeup = 1;
                irq_work_queue(&event->pending);
        }
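
The last two core.c hunks route SIGIO delivery through the parent event, because only the parent carries fasync state. Reduced to a generic sketch (the names below are placeholders, not the perf API), the delegate-to-the-parent accessor looks like this:

#include <stddef.h>

struct event {
	struct event *parent;	/* NULL for a top-level (parent) event   */
	void *fasync;		/* async-notification state, parent-only */
};

/* Only the parent owns fasync state; child events forward to it. */
static void **event_fasync(struct event *e)
{
	if (e->parent)
		e = e->parent;
	return &e->fasync;
}
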
kernel/events/ring_buffer.c
index b2be01b..c8aa3f7 100644
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
                rb->aux_priv = NULL;
        }
 
-       for (pg = 0; pg < rb->aux_nr_pages; pg++)
-               rb_free_aux_page(rb, pg);
+       if (rb->aux_nr_pages) {
+               for (pg = 0; pg < rb->aux_nr_pages; pg++)
+                       rb_free_aux_page(rb, pg);
 
-       kfree(rb->aux_pages);
-       rb->aux_nr_pages = 0;
+               kfree(rb->aux_pages);
+               rb->aux_nr_pages = 0;
+       }
 }
 
 void rb_free_aux(struct ring_buffer *rb)
kernel/locking/qspinlock_paravirt.h
index 04ab181..df19ae4 100644
@@ -4,6 +4,7 @@
 
 #include <linux/hash.h>
 #include <linux/bootmem.h>
+#include <linux/debug_locks.h>
 
 /*
  * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
        struct __qspinlock *l = (void *)lock;
        struct pv_node *node;
+       u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
 
        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
-       if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+       if (likely(lockval == _Q_LOCKED_VAL))
                return;
 
+       if (unlikely(lockval != _Q_SLOW_VAL)) {
+               if (debug_locks_silent)
+                       return;
+               WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+               return;
+       }
+
        /*
         * Since the above failed to release, this must be the SLOW path.
         * Therefore start by looking up the blocked node and unhashing it.
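
The qspinlock change hoists the unlock cmpxchg result into a local so the unexpected-value case can be reported instead of falling through silently. A user-space sketch of that capture-then-classify pattern, using the GCC/Clang __sync builtin in place of the kernel's cmpxchg() (values and names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define LOCKED_VAL	1
#define SLOW_VAL	3

static uint8_t lock_byte = LOCKED_VAL;

static void pv_unlock(void)
{
	/* Swap LOCKED_VAL -> 0 atomically, remembering what was really there. */
	uint8_t old = __sync_val_compare_and_swap(&lock_byte, LOCKED_VAL, 0);

	if (old == LOCKED_VAL)
		return;				/* fast path: lock released        */

	if (old != SLOW_VAL) {
		fprintf(stderr, "corrupted lock value 0x%x\n", (unsigned int)old);
		return;				/* neither fast nor slow: complain */
	}

	/* Slow path: a waiter is parked; unhash it and hand the lock over (omitted). */
}
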
tools/perf/config/Makefile
index 094ddae..d31fac1 100644
@@ -638,7 +638,7 @@ ifndef DESTDIR
 prefix ?= $(HOME)
 endif
 bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
 mandir = share/man
 infodir = share/info
 perfexecdir = libexec/perf-core
tools/perf/util/stat-shadow.c
index 53e8bb7..2a5d8d7 100644
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
        else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-               update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+               update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, TRANSACTION_START))
                update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
                                " #   %5.2f%% aborted cycles         ",
                                100.0 * ((total2-avg) / total));
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / transaction   ", ratio);
        } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / elision       ", ratio);