mm: make lru_add_drain_all() selective
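
lru_add_drain_all() currently calls schedule_on_each_cpu(), which queues
drain work on every online CPU whether or not that CPU has anything cached
in its per-cpu pagevecs.  Instead, check each CPU first and schedule (and
later flush) the work only where the lru_add, lru_rotate or lru_deactivate
pagevec is non-empty, or where need_activate_page_drain() reports pending
pages.  The function can no longer fail, so its return type changes from
int to void.
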
diff --git a/mm/swap.c b/mm/swap.c
index c899502..759c3ca 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -432,6 +432,12 @@ static void activate_page_drain(int cpu)
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
 }
 
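+/* True if @cpu's activate_page pagevec holds pages waiting to be drained. */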
+static bool need_activate_page_drain(int cpu)
+{
+       return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
+}
+
 void activate_page(struct page *page)
 {
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -449,6 +455,12 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
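+/* Without SMP there is no activate_page pagevec, so never request a drain. */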
+static bool need_activate_page_drain(int cpu)
+{
+       return false;
+}
+
 void activate_page(struct page *page)
 {
        struct zone *zone = page_zone(page);
@@ -701,12 +713,40 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
        lru_add_drain();
 }
 
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
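+/* Per-cpu work items: each targeted CPU runs lru_add_drain() on itself. */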
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
+void lru_add_drain_all(void)
 {
-       return schedule_on_each_cpu(lru_add_drain_per_cpu);
+       static DEFINE_MUTEX(lock);
+       static struct cpumask has_work;
+       int cpu;
+
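+       /* One drain pass at a time: has_work and the work items are shared. */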
+       mutex_lock(&lock);
+       get_online_cpus();
+       cpumask_clear(&has_work);
+
+       for_each_online_cpu(cpu) {
+               struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
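+               /* Kick this CPU only if one of its pagevecs is non-empty. */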
+               if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
+                   pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
+                   pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+                   need_activate_page_drain(cpu)) {
+                       INIT_WORK(work, lru_add_drain_per_cpu);
+                       schedule_work_on(cpu, work);
+                       cpumask_set_cpu(cpu, &has_work);
+               }
+       }
+
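+       /* Wait only for the CPUs that actually had work scheduled. */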
+       for_each_cpu(cpu, &has_work)
+               flush_work(&per_cpu(lru_add_drain_work, cpu));
+
+       put_online_cpus();
+       mutex_unlock(&lock);
 }
 
 /*