Merge branch 'akpm' (Andrew's patch-bomb)
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 29 Mar 2012 00:19:27 +0000 (17:19 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 29 Mar 2012 00:19:28 +0000 (17:19 -0700)
Merge third batch of patches from Andrew Morton:
 - Some MM stragglers
 - core SMP library cleanups (on_each_cpu_mask)
 - Some IPI optimisations
 - kexec
 - kdump
 - IPMI
 - the radix-tree iterator work
 - various other misc bits.

 "That'll do for -rc1.  I still have ~10 patches for 3.4, will send
  those along when they've baked a little more."

* emailed from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
  backlight: fix typo in tosa_lcd.c
  crc32: add help text for the algorithm select option
  mm: move hugepage test examples to tools/testing/selftests/vm
  mm: move slabinfo.c to tools/vm
  mm: move page-types.c from Documentation to tools/vm
  selftests/Makefile: make `run_tests' depend on `all'
  selftests: launch individual selftests from the main Makefile
  radix-tree: use iterators in find_get_pages* functions
  radix-tree: rewrite gang lookup using iterator
  radix-tree: introduce bit-optimized iterator
  fs/proc/namespaces.c: prevent crash when ns_entries[] is empty
  nbd: rename the nbd_device variable from lo to nbd
  pidns: add reboot_pid_ns() to handle the reboot syscall
  sysctl: use bitmap library functions
  ipmi: use locks on watchdog timeout set on reboot
  ipmi: simplify locking
  ipmi: fix message handling during panics
  ipmi: use a tasklet for handling received messages
  ipmi: increase KCS timeouts
  ipmi: decrease the IPMI message transaction time in interrupt mode
  ...

15 files changed:
arch/ia64/kernel/acpi.c
arch/ia64/kernel/irq_ia64.c
arch/ia64/kernel/mca.c
arch/ia64/kernel/setup.c
arch/ia64/kernel/smp.c
arch/ia64/kernel/smpboot.c
arch/x86/kernel/setup.c
drivers/block/nbd.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_watchdog.c
include/linux/mm.h
kernel/kexec.c
kernel/sysctl.c
mm/slub.c

diff --combined arch/ia64/kernel/acpi.c
@@@ -50,6 -50,7 +50,6 @@@
  #include <asm/iosapic.h>
  #include <asm/machvec.h>
  #include <asm/page.h>
 -#include <asm/system.h>
  #include <asm/numa.h>
  #include <asm/sal.h>
  #include <asm/cyclone.h>
@@@ -843,7 -844,7 +843,7 @@@ early_param("additional_cpus", setup_ad
   * are onlined, or offlined. The reason is per-cpu data-structures
   * are allocated by some modules at init time, and dont expect to
   * do this dynamically on cpu arrival/departure.
-  * cpu_present_map on the other hand can change dynamically.
+  * cpu_present_mask on the other hand can change dynamically.
   * In case when cpu_hotplug is not compiled, then we resort to current
   * behaviour, which is cpu_possible == cpu_present.
   * - Ashok Raj
@@@ -921,7 -922,7 +921,7 @@@ static int __cpuinit _acpi_map_lsapic(a
  
        acpi_map_cpu2node(handle, cpu, physid);
  
-       cpu_set(cpu, cpu_present_map);
+       set_cpu_present(cpu, true);
        ia64_cpu_to_sapicid[cpu] = physid;
  
        acpi_processor_set_pdc(handle);
@@@ -940,7 -941,7 +940,7 @@@ EXPORT_SYMBOL(acpi_map_lsapic)
  int acpi_unmap_lsapic(int cpu)
  {
        ia64_cpu_to_sapicid[cpu] = -1;
-       cpu_clear(cpu, cpu_present_map);
+       set_cpu_present(cpu, false);
  
  #ifdef CONFIG_ACPI_NUMA
        /* NUMA specific cleanup's */
diff --combined arch/ia64/kernel/irq_ia64.c
@@@ -39,6 -39,7 +39,6 @@@
  #include <asm/hw_irq.h>
  #include <asm/machvec.h>
  #include <asm/pgtable.h>
 -#include <asm/system.h>
  #include <asm/tlbflush.h>
  
  #ifdef CONFIG_PERFMON
@@@ -117,7 -118,7 +117,7 @@@ static inline int find_unassigned_vecto
        cpumask_t mask;
        int pos, vector;
  
-       cpus_and(mask, domain, cpu_online_map);
+       cpumask_and(&mask, &domain, cpu_online_mask);
        if (cpus_empty(mask))
                return -EINVAL;
  
@@@ -140,7 -141,7 +140,7 @@@ static int __bind_irq_vector(int irq, i
        BUG_ON((unsigned)irq >= NR_IRQS);
        BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
  
-       cpus_and(mask, domain, cpu_online_map);
+       cpumask_and(&mask, &domain, cpu_online_mask);
        if (cpus_empty(mask))
                return -EINVAL;
        if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
@@@ -178,7 -179,7 +178,7 @@@ static void __clear_irq_vector(int irq
        BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
        vector = cfg->vector;
        domain = cfg->domain;
-       cpus_and(mask, cfg->domain, cpu_online_map);
+       cpumask_and(&mask, &cfg->domain, cpu_online_mask);
        for_each_cpu_mask(cpu, mask)
                per_cpu(vector_irq, cpu)[vector] = -1;
        cfg->vector = IRQ_VECTOR_UNASSIGNED;
@@@ -321,7 -322,7 +321,7 @@@ void irq_complete_move(unsigned irq
        if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
                return;
  
-       cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+       cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
        cfg->move_cleanup_count = cpus_weight(cleanup_mask);
        for_each_cpu_mask(i, cleanup_mask)
                platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
diff --combined arch/ia64/kernel/mca.c
@@@ -92,6 -92,7 +92,6 @@@
  #include <asm/meminit.h>
  #include <asm/page.h>
  #include <asm/ptrace.h>
 -#include <asm/system.h>
  #include <asm/sal.h>
  #include <asm/mca.h>
  #include <asm/kexec.h>
@@@ -1514,7 -1515,8 +1514,8 @@@ static voi
  ia64_mca_cmc_poll (unsigned long dummy)
  {
        /* Trigger a CMC interrupt cascade  */
-       platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+       platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
+                                                       IA64_IPI_DM_INT, 0);
  }
  
  /*
@@@ -1590,7 -1592,8 +1591,8 @@@ static voi
  ia64_mca_cpe_poll (unsigned long dummy)
  {
        /* Trigger a CPE interrupt cascade  */
-       platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+       platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
+                                                       IA64_IPI_DM_INT, 0);
  }
  
  #endif /* CONFIG_ACPI */
diff --combined arch/ia64/kernel/setup.c
@@@ -59,6 -59,7 +59,6 @@@
  #include <asm/sections.h>
  #include <asm/setup.h>
  #include <asm/smp.h>
 -#include <asm/system.h>
  #include <asm/tlbflush.h>
  #include <asm/unistd.h>
  #include <asm/hpsim.h>
@@@ -485,7 -486,7 +485,7 @@@ mark_bsp_online (void
  {
  #ifdef CONFIG_SMP
        /* If we register an early console, allow CPU 0 to printk */
-       cpu_set(smp_processor_id(), cpu_online_map);
+       set_cpu_online(smp_processor_id(), true);
  #endif
  }
  
diff --combined arch/ia64/kernel/smp.c
@@@ -44,6 -44,7 +44,6 @@@
  #include <asm/processor.h>
  #include <asm/ptrace.h>
  #include <asm/sal.h>
 -#include <asm/system.h>
  #include <asm/tlbflush.h>
  #include <asm/unistd.h>
  #include <asm/mca.h>
@@@ -76,7 -77,7 +76,7 @@@ stop_this_cpu(void
        /*
         * Remove this CPU:
         */
-       cpu_clear(smp_processor_id(), cpu_online_map);
+       set_cpu_online(smp_processor_id(), false);
        max_xtp();
        local_irq_disable();
        cpu_halt();
diff --combined arch/ia64/kernel/smpboot.c
@@@ -55,6 -55,7 +55,6 @@@
  #include <asm/processor.h>
  #include <asm/ptrace.h>
  #include <asm/sal.h>
 -#include <asm/system.h>
  #include <asm/tlbflush.h>
  #include <asm/unistd.h>
  #include <asm/sn/arch.h>
@@@ -400,7 -401,7 +400,7 @@@ smp_callin (void
        /* Setup the per cpu irq handling data structures */
        __setup_vector_irq(cpuid);
        notify_cpu_starting(cpuid);
-       cpu_set(cpuid, cpu_online_map);
+       set_cpu_online(cpuid, true);
        per_cpu(cpu_state, cpuid) = CPU_ONLINE;
        spin_unlock(&vector_lock);
        ipi_call_unlock_irq();
@@@ -547,7 -548,7 +547,7 @@@ do_rest
        if (!cpu_isset(cpu, cpu_callin_map)) {
                printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
                ia64_cpu_to_sapicid[cpu] = -1;
-               cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
+               set_cpu_online(cpu, false);  /* was set in smp_callin() */
                return -EINVAL;
        }
        return 0;
@@@ -577,8 -578,7 +577,7 @@@ smp_build_cpu_map (void
        }
  
        ia64_cpu_to_sapicid[0] = boot_cpu_id;
-       cpus_clear(cpu_present_map);
-       set_cpu_present(0, true);
+       init_cpu_present(cpumask_of(0));
        set_cpu_possible(0, true);
        for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
                sapicid = smp_boot_data.cpu_phys_id[i];
@@@ -605,10 -605,6 +604,6 @@@ smp_prepare_cpus (unsigned int max_cpus
  
        smp_setup_percpu_timer();
  
-       /*
-        * We have the boot CPU online for sure.
-        */
-       cpu_set(0, cpu_online_map);
        cpu_set(0, cpu_callin_map);
  
        local_cpu_data->loops_per_jiffy = loops_per_jiffy;
  
  void __devinit smp_prepare_boot_cpu(void)
  {
-       cpu_set(smp_processor_id(), cpu_online_map);
+       set_cpu_online(smp_processor_id(), true);
        cpu_set(smp_processor_id(), cpu_callin_map);
        set_numa_node(cpu_to_node_map[smp_processor_id()]);
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@@ -689,7 -685,7 +684,7 @@@ int migrate_platform_irqs(unsigned int 
                        /*
                         * Now re-target the CPEI to a different processor
                         */
-                       new_cpei_cpu = any_online_cpu(cpu_online_map);
+                       new_cpei_cpu = cpumask_any(cpu_online_mask);
                        mask = cpumask_of(new_cpei_cpu);
                        set_cpei_target_cpu(new_cpei_cpu);
                        data = irq_get_irq_data(ia64_cpe_irq);
@@@ -731,10 -727,10 +726,10 @@@ int __cpu_disable(void
                        return -EBUSY;
        }
  
-       cpu_clear(cpu, cpu_online_map);
+       set_cpu_online(cpu, false);
  
        if (migrate_platform_irqs(cpu)) {
-               cpu_set(cpu, cpu_online_map);
+               set_cpu_online(cpu, true);
                return -EBUSY;
        }
  
diff --combined arch/x86/kernel/setup.c
@@@ -90,6 -90,7 +90,6 @@@
  #include <asm/processor.h>
  #include <asm/bugs.h>
  
 -#include <asm/system.h>
  #include <asm/vsyscall.h>
  #include <asm/cpu.h>
  #include <asm/desc.h>
@@@ -508,15 -509,6 +508,6 @@@ static void __init memblock_x86_reserve
  
  #ifdef CONFIG_KEXEC
  
- static inline unsigned long long get_total_mem(void)
- {
-       unsigned long long total;
-       total = max_pfn - min_low_pfn;
-       return total << PAGE_SHIFT;
- }
  /*
   * Keep the crash kernel below this limit.  On 32 bits earlier kernels
   * would limit the kernel to the low 512 MiB due to mapping restrictions.
@@@ -535,7 -527,7 +526,7 @@@ static void __init reserve_crashkernel(
        unsigned long long crash_size, crash_base;
        int ret;
  
-       total_mem = get_total_mem();
+       total_mem = memblock_phys_mem_size();
  
        ret = parse_crashkernel(boot_command_line, total_mem,
                        &crash_size, &crash_base);
diff --combined drivers/block/nbd.c
  #include <linux/kthread.h>
  
  #include <asm/uaccess.h>
 -#include <asm/system.h>
  #include <asm/types.h>
  
  #include <linux/nbd.h>
  
- #define LO_MAGIC 0x68797548
+ #define NBD_MAGIC 0x68797548
  
  #ifdef NDEBUG
  #define dprintk(flags, fmt...)
@@@ -115,7 -116,7 +115,7 @@@ static void nbd_end_request(struct requ
        spin_unlock_irqrestore(q->queue_lock, flags);
  }
  
- static void sock_shutdown(struct nbd_device *lo, int lock)
+ static void sock_shutdown(struct nbd_device *nbd, int lock)
  {
        /* Forcibly shutdown the socket causing all listeners
         * to error
         * there should be a more generic interface rather than
         * calling socket ops directly here */
        if (lock)
-               mutex_lock(&lo->tx_lock);
-       if (lo->sock) {
-               dev_warn(disk_to_dev(lo->disk), "shutting down socket\n");
-               kernel_sock_shutdown(lo->sock, SHUT_RDWR);
-               lo->sock = NULL;
+               mutex_lock(&nbd->tx_lock);
+       if (nbd->sock) {
+               dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
+               kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
+               nbd->sock = NULL;
        }
        if (lock)
-               mutex_unlock(&lo->tx_lock);
+               mutex_unlock(&nbd->tx_lock);
  }
  
  static void nbd_xmit_timeout(unsigned long arg)
  /*
   *  Send or receive packet.
   */
- static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
+ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
                int msg_flags)
  {
-       struct socket *sock = lo->sock;
+       struct socket *sock = nbd->sock;
        int result;
        struct msghdr msg;
        struct kvec iov;
        sigset_t blocked, oldset;
  
        if (unlikely(!sock)) {
-               dev_err(disk_to_dev(lo->disk),
+               dev_err(disk_to_dev(nbd->disk),
                        "Attempted %s on closed socket in sock_xmit\n",
                        (send ? "send" : "recv"));
                return -EINVAL;
                if (send) {
                        struct timer_list ti;
  
-                       if (lo->xmit_timeout) {
+                       if (nbd->xmit_timeout) {
                                init_timer(&ti);
                                ti.function = nbd_xmit_timeout;
                                ti.data = (unsigned long)current;
-                               ti.expires = jiffies + lo->xmit_timeout;
+                               ti.expires = jiffies + nbd->xmit_timeout;
                                add_timer(&ti);
                        }
                        result = kernel_sendmsg(sock, &msg, &iov, 1, size);
-                       if (lo->xmit_timeout)
+                       if (nbd->xmit_timeout)
                                del_timer_sync(&ti);
                } else
                        result = kernel_recvmsg(sock, &msg, &iov, 1, size,
                                task_pid_nr(current), current->comm,
                                dequeue_signal_lock(current, &current->blocked, &info));
                        result = -EINTR;
-                       sock_shutdown(lo, !send);
+                       sock_shutdown(nbd, !send);
                        break;
                }
  
        return result;
  }
  
- static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec,
+ static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
                int flags)
  {
        int result;
        void *kaddr = kmap(bvec->bv_page);
-       result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags);
+       result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
+                          bvec->bv_len, flags);
        kunmap(bvec->bv_page);
        return result;
  }
  
  /* always call with the tx_lock held */
- static int nbd_send_req(struct nbd_device *lo, struct request *req)
+ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
  {
        int result, flags;
        struct nbd_request request;
        memcpy(request.handle, &req, sizeof(req));
  
        dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
-                       lo->disk->disk_name, req,
+                       nbd->disk->disk_name, req,
                        nbdcmd_to_ascii(nbd_cmd(req)),
                        (unsigned long long)blk_rq_pos(req) << 9,
                        blk_rq_bytes(req));
-       result = sock_xmit(lo, 1, &request, sizeof(request),
+       result = sock_xmit(nbd, 1, &request, sizeof(request),
                        (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
-               dev_err(disk_to_dev(lo->disk),
+               dev_err(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
                goto error_out;
        }
                        if (!rq_iter_last(req, iter))
                                flags = MSG_MORE;
                        dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-                                       lo->disk->disk_name, req, bvec->bv_len);
-                       result = sock_send_bvec(lo, bvec, flags);
+                                       nbd->disk->disk_name, req, bvec->bv_len);
+                       result = sock_send_bvec(nbd, bvec, flags);
                        if (result <= 0) {
-                               dev_err(disk_to_dev(lo->disk),
+                               dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                goto error_out;
@@@ -282,25 -284,25 +283,25 @@@ error_out
        return -EIO;
  }
  
- static struct request *nbd_find_request(struct nbd_device *lo,
+ static struct request *nbd_find_request(struct nbd_device *nbd,
                                        struct request *xreq)
  {
        struct request *req, *tmp;
        int err;
  
-       err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
+       err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
        if (unlikely(err))
                goto out;
  
-       spin_lock(&lo->queue_lock);
-       list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) {
+       spin_lock(&nbd->queue_lock);
+       list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
                if (req != xreq)
                        continue;
                list_del_init(&req->queuelist);
-               spin_unlock(&lo->queue_lock);
+               spin_unlock(&nbd->queue_lock);
                return req;
        }
-       spin_unlock(&lo->queue_lock);
+       spin_unlock(&nbd->queue_lock);
  
        err = -ENOENT;
  
@@@ -308,78 -310,78 +309,78 @@@ out
        return ERR_PTR(err);
  }
  
- static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec)
+ static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
  {
        int result;
        void *kaddr = kmap(bvec->bv_page);
-       result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len,
+       result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
                        MSG_WAITALL);
        kunmap(bvec->bv_page);
        return result;
  }
  
  /* NULL returned = something went wrong, inform userspace */
- static struct request *nbd_read_stat(struct nbd_device *lo)
+ static struct request *nbd_read_stat(struct nbd_device *nbd)
  {
        int result;
        struct nbd_reply reply;
        struct request *req;
  
        reply.magic = 0;
-       result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL);
+       result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
        if (result <= 0) {
-               dev_err(disk_to_dev(lo->disk),
+               dev_err(disk_to_dev(nbd->disk),
                        "Receive control failed (result %d)\n", result);
                goto harderror;
        }
  
        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
-               dev_err(disk_to_dev(lo->disk), "Wrong magic (0x%lx)\n",
+               dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                                (unsigned long)ntohl(reply.magic));
                result = -EPROTO;
                goto harderror;
        }
  
-       req = nbd_find_request(lo, *(struct request **)reply.handle);
+       req = nbd_find_request(nbd, *(struct request **)reply.handle);
        if (IS_ERR(req)) {
                result = PTR_ERR(req);
                if (result != -ENOENT)
                        goto harderror;
  
-               dev_err(disk_to_dev(lo->disk), "Unexpected reply (%p)\n",
+               dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
                        reply.handle);
                result = -EBADR;
                goto harderror;
        }
  
        if (ntohl(reply.error)) {
-               dev_err(disk_to_dev(lo->disk), "Other side returned error (%d)\n",
+               dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                req->errors++;
                return req;
        }
  
        dprintk(DBG_RX, "%s: request %p: got reply\n",
-                       lo->disk->disk_name, req);
+                       nbd->disk->disk_name, req);
        if (nbd_cmd(req) == NBD_CMD_READ) {
                struct req_iterator iter;
                struct bio_vec *bvec;
  
                rq_for_each_segment(bvec, req, iter) {
-                       result = sock_recv_bvec(lo, bvec);
+                       result = sock_recv_bvec(nbd, bvec);
                        if (result <= 0) {
-                               dev_err(disk_to_dev(lo->disk), "Receive data failed (result %d)\n",
+                               dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                req->errors++;
                                return req;
                        }
                        dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-                               lo->disk->disk_name, req, bvec->bv_len);
+                               nbd->disk->disk_name, req, bvec->bv_len);
                }
        }
        return req;
  harderror:
-       lo->harderror = result;
+       nbd->harderror = result;
        return NULL;
  }
  
@@@ -397,48 -399,48 +398,48 @@@ static struct device_attribute pid_att
        .show = pid_show,
  };
  
- static int nbd_do_it(struct nbd_device *lo)
+ static int nbd_do_it(struct nbd_device *nbd)
  {
        struct request *req;
        int ret;
  
-       BUG_ON(lo->magic != LO_MAGIC);
+       BUG_ON(nbd->magic != NBD_MAGIC);
  
-       lo->pid = task_pid_nr(current);
-       ret = device_create_file(disk_to_dev(lo->disk), &pid_attr);
+       nbd->pid = task_pid_nr(current);
+       ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        if (ret) {
-               dev_err(disk_to_dev(lo->disk), "device_create_file failed!\n");
-               lo->pid = 0;
+               dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+               nbd->pid = 0;
                return ret;
        }
  
-       while ((req = nbd_read_stat(lo)) != NULL)
+       while ((req = nbd_read_stat(nbd)) != NULL)
                nbd_end_request(req);
  
-       device_remove_file(disk_to_dev(lo->disk), &pid_attr);
-       lo->pid = 0;
+       device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
+       nbd->pid = 0;
        return 0;
  }
  
- static void nbd_clear_que(struct nbd_device *lo)
+ static void nbd_clear_que(struct nbd_device *nbd)
  {
        struct request *req;
  
-       BUG_ON(lo->magic != LO_MAGIC);
+       BUG_ON(nbd->magic != NBD_MAGIC);
  
        /*
-        * Because we have set lo->sock to NULL under the tx_lock, all
+        * Because we have set nbd->sock to NULL under the tx_lock, all
         * modifications to the list must have completed by now.  For
         * the same reason, the active_req must be NULL.
         *
         * As a consequence, we don't need to take the spin lock while
         * purging the list here.
         */
-       BUG_ON(lo->sock);
-       BUG_ON(lo->active_req);
+       BUG_ON(nbd->sock);
+       BUG_ON(nbd->active_req);
  
-       while (!list_empty(&lo->queue_head)) {
-               req = list_entry(lo->queue_head.next, struct request,
+       while (!list_empty(&nbd->queue_head)) {
+               req = list_entry(nbd->queue_head.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
  }
  
  
- static void nbd_handle_req(struct nbd_device *lo, struct request *req)
+ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
  {
        if (req->cmd_type != REQ_TYPE_FS)
                goto error_out;
        nbd_cmd(req) = NBD_CMD_READ;
        if (rq_data_dir(req) == WRITE) {
                nbd_cmd(req) = NBD_CMD_WRITE;
-               if (lo->flags & NBD_READ_ONLY) {
-                       dev_err(disk_to_dev(lo->disk),
+               if (nbd->flags & NBD_READ_ONLY) {
+                       dev_err(disk_to_dev(nbd->disk),
                                "Write on read-only\n");
                        goto error_out;
                }
  
        req->errors = 0;
  
-       mutex_lock(&lo->tx_lock);
-       if (unlikely(!lo->sock)) {
-               mutex_unlock(&lo->tx_lock);
-               dev_err(disk_to_dev(lo->disk),
+       mutex_lock(&nbd->tx_lock);
+       if (unlikely(!nbd->sock)) {
+               mutex_unlock(&nbd->tx_lock);
+               dev_err(disk_to_dev(nbd->disk),
                        "Attempted send on closed socket\n");
                goto error_out;
        }
  
-       lo->active_req = req;
+       nbd->active_req = req;
  
-       if (nbd_send_req(lo, req) != 0) {
-               dev_err(disk_to_dev(lo->disk), "Request send failed\n");
+       if (nbd_send_req(nbd, req) != 0) {
+               dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
                req->errors++;
                nbd_end_request(req);
        } else {
-               spin_lock(&lo->queue_lock);
-               list_add(&req->queuelist, &lo->queue_head);
-               spin_unlock(&lo->queue_lock);
+               spin_lock(&nbd->queue_lock);
+               list_add(&req->queuelist, &nbd->queue_head);
+               spin_unlock(&nbd->queue_lock);
        }
  
-       lo->active_req = NULL;
-       mutex_unlock(&lo->tx_lock);
-       wake_up_all(&lo->active_wq);
+       nbd->active_req = NULL;
+       mutex_unlock(&nbd->tx_lock);
+       wake_up_all(&nbd->active_wq);
  
        return;
  
@@@ -497,28 -499,28 +498,28 @@@ error_out
  
  static int nbd_thread(void *data)
  {
-       struct nbd_device *lo = data;
+       struct nbd_device *nbd = data;
        struct request *req;
  
        set_user_nice(current, -20);
-       while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
+       while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
                /* wait for something to do */
-               wait_event_interruptible(lo->waiting_wq,
+               wait_event_interruptible(nbd->waiting_wq,
                                         kthread_should_stop() ||
-                                        !list_empty(&lo->waiting_queue));
+                                        !list_empty(&nbd->waiting_queue));
  
                /* extract request */
-               if (list_empty(&lo->waiting_queue))
+               if (list_empty(&nbd->waiting_queue))
                        continue;
  
-               spin_lock_irq(&lo->queue_lock);
-               req = list_entry(lo->waiting_queue.next, struct request,
+               spin_lock_irq(&nbd->queue_lock);
+               req = list_entry(nbd->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
-               spin_unlock_irq(&lo->queue_lock);
+               spin_unlock_irq(&nbd->queue_lock);
  
                /* handle request */
-               nbd_handle_req(lo, req);
+               nbd_handle_req(nbd, req);
        }
        return 0;
  }
  /*
   * We always wait for result of write, for now. It would be nice to make it optional
   * in future
-  * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
+  * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
   *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
   */
  
@@@ -535,19 -537,19 +536,19 @@@ static void do_nbd_request(struct reque
        struct request *req;
        
        while ((req = blk_fetch_request(q)) != NULL) {
-               struct nbd_device *lo;
+               struct nbd_device *nbd;
  
                spin_unlock_irq(q->queue_lock);
  
                dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
                                req->rq_disk->disk_name, req, req->cmd_type);
  
-               lo = req->rq_disk->private_data;
+               nbd = req->rq_disk->private_data;
  
-               BUG_ON(lo->magic != LO_MAGIC);
+               BUG_ON(nbd->magic != NBD_MAGIC);
  
-               if (unlikely(!lo->sock)) {
-                       dev_err(disk_to_dev(lo->disk),
+               if (unlikely(!nbd->sock)) {
+                       dev_err(disk_to_dev(nbd->disk),
                                "Attempted send on closed socket\n");
                        req->errors++;
                        nbd_end_request(req);
                        continue;
                }
  
-               spin_lock_irq(&lo->queue_lock);
-               list_add_tail(&req->queuelist, &lo->waiting_queue);
-               spin_unlock_irq(&lo->queue_lock);
+               spin_lock_irq(&nbd->queue_lock);
+               list_add_tail(&req->queuelist, &nbd->waiting_queue);
+               spin_unlock_irq(&nbd->queue_lock);
  
-               wake_up(&lo->waiting_wq);
+               wake_up(&nbd->waiting_wq);
  
                spin_lock_irq(q->queue_lock);
        }
  
  /* Must be called with tx_lock held */
  
- static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
  {
        switch (cmd) {
        case NBD_DISCONNECT: {
                struct request sreq;
  
-               dev_info(disk_to_dev(lo->disk), "NBD_DISCONNECT\n");
+               dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
  
                blk_rq_init(NULL, &sreq);
                sreq.cmd_type = REQ_TYPE_SPECIAL;
                nbd_cmd(&sreq) = NBD_CMD_DISC;
-               if (!lo->sock)
+               if (!nbd->sock)
                        return -EINVAL;
-               nbd_send_req(lo, &sreq);
+               nbd_send_req(nbd, &sreq);
                  return 0;
        }
   
        case NBD_CLEAR_SOCK: {
                struct file *file;
  
-               lo->sock = NULL;
-               file = lo->file;
-               lo->file = NULL;
-               nbd_clear_que(lo);
-               BUG_ON(!list_empty(&lo->queue_head));
+               nbd->sock = NULL;
+               file = nbd->file;
+               nbd->file = NULL;
+               nbd_clear_que(nbd);
+               BUG_ON(!list_empty(&nbd->queue_head));
                if (file)
                        fput(file);
                return 0;
  
        case NBD_SET_SOCK: {
                struct file *file;
-               if (lo->file)
+               if (nbd->file)
                        return -EBUSY;
                file = fget(arg);
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
                        if (S_ISSOCK(inode->i_mode)) {
-                               lo->file = file;
-                               lo->sock = SOCKET_I(inode);
+                               nbd->file = file;
+                               nbd->sock = SOCKET_I(inode);
                                if (max_part > 0)
                                        bdev->bd_invalidated = 1;
                                return 0;
        }
  
        case NBD_SET_BLKSIZE:
-               lo->blksize = arg;
-               lo->bytesize &= ~(lo->blksize-1);
-               bdev->bd_inode->i_size = lo->bytesize;
-               set_blocksize(bdev, lo->blksize);
-               set_capacity(lo->disk, lo->bytesize >> 9);
+               nbd->blksize = arg;
+               nbd->bytesize &= ~(nbd->blksize-1);
+               bdev->bd_inode->i_size = nbd->bytesize;
+               set_blocksize(bdev, nbd->blksize);
+               set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;
  
        case NBD_SET_SIZE:
-               lo->bytesize = arg & ~(lo->blksize-1);
-               bdev->bd_inode->i_size = lo->bytesize;
-               set_blocksize(bdev, lo->blksize);
-               set_capacity(lo->disk, lo->bytesize >> 9);
+               nbd->bytesize = arg & ~(nbd->blksize-1);
+               bdev->bd_inode->i_size = nbd->bytesize;
+               set_blocksize(bdev, nbd->blksize);
+               set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;
  
        case NBD_SET_TIMEOUT:
-               lo->xmit_timeout = arg * HZ;
+               nbd->xmit_timeout = arg * HZ;
                return 0;
  
        case NBD_SET_SIZE_BLOCKS:
-               lo->bytesize = ((u64) arg) * lo->blksize;
-               bdev->bd_inode->i_size = lo->bytesize;
-               set_blocksize(bdev, lo->blksize);
-               set_capacity(lo->disk, lo->bytesize >> 9);
+               nbd->bytesize = ((u64) arg) * nbd->blksize;
+               bdev->bd_inode->i_size = nbd->bytesize;
+               set_blocksize(bdev, nbd->blksize);
+               set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;
  
        case NBD_DO_IT: {
                struct file *file;
                int error;
  
-               if (lo->pid)
+               if (nbd->pid)
                        return -EBUSY;
-               if (!lo->file)
+               if (!nbd->file)
                        return -EINVAL;
  
-               mutex_unlock(&lo->tx_lock);
+               mutex_unlock(&nbd->tx_lock);
  
-               thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
+               thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
                if (IS_ERR(thread)) {
-                       mutex_lock(&lo->tx_lock);
+                       mutex_lock(&nbd->tx_lock);
                        return PTR_ERR(thread);
                }
                wake_up_process(thread);
-               error = nbd_do_it(lo);
+               error = nbd_do_it(nbd);
                kthread_stop(thread);
  
-               mutex_lock(&lo->tx_lock);
+               mutex_lock(&nbd->tx_lock);
                if (error)
                        return error;
-               sock_shutdown(lo, 0);
-               file = lo->file;
-               lo->file = NULL;
-               nbd_clear_que(lo);
-               dev_warn(disk_to_dev(lo->disk), "queue cleared\n");
+               sock_shutdown(nbd, 0);
+               file = nbd->file;
+               nbd->file = NULL;
+               nbd_clear_que(nbd);
+               dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
                if (file)
                        fput(file);
-               lo->bytesize = 0;
+               nbd->bytesize = 0;
                bdev->bd_inode->i_size = 0;
-               set_capacity(lo->disk, 0);
+               set_capacity(nbd->disk, 0);
                if (max_part > 0)
                        ioctl_by_bdev(bdev, BLKRRPART, 0);
-               return lo->harderror;
+               return nbd->harderror;
        }
  
        case NBD_CLEAR_QUE:
                 * This is for compatibility only.  The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
-               BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
+               BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head));
                return 0;
  
        case NBD_PRINT_DEBUG:
-               dev_info(disk_to_dev(lo->disk),
+               dev_info(disk_to_dev(nbd->disk),
                        "next = %p, prev = %p, head = %p\n",
-                       lo->queue_head.next, lo->queue_head.prev,
-                       &lo->queue_head);
+                       nbd->queue_head.next, nbd->queue_head.prev,
+                       &nbd->queue_head);
                return 0;
        }
        return -ENOTTY;
  static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
  {
-       struct nbd_device *lo = bdev->bd_disk->private_data;
+       struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;
  
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
  
-       BUG_ON(lo->magic != LO_MAGIC);
+       BUG_ON(nbd->magic != NBD_MAGIC);
  
        /* Anyone capable of this syscall can do *real bad* things */
        dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
-                       lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
+               nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
  
-       mutex_lock(&lo->tx_lock);
-       error = __nbd_ioctl(bdev, lo, cmd, arg);
-       mutex_unlock(&lo->tx_lock);
+       mutex_lock(&nbd->tx_lock);
+       error = __nbd_ioctl(bdev, nbd, cmd, arg);
+       mutex_unlock(&nbd->tx_lock);
  
        return error;
  }
@@@ -804,7 -806,7 +805,7 @@@ static int __init nbd_init(void
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].file = NULL;
-               nbd_dev[i].magic = LO_MAGIC;
+               nbd_dev[i].magic = NBD_MAGIC;
                nbd_dev[i].flags = 0;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
diff --combined drivers/char/ipmi/ipmi_msghandler.c
@@@ -33,6 -33,7 +33,6 @@@
  
  #include <linux/module.h>
  #include <linux/errno.h>
 -#include <asm/system.h>
  #include <linux/poll.h>
  #include <linux/sched.h>
  #include <linux/seq_file.h>
@@@ -45,6 -46,7 +45,7 @@@
  #include <linux/init.h>
  #include <linux/proc_fs.h>
  #include <linux/rcupdate.h>
+ #include <linux/interrupt.h>
  
  #define PFX "IPMI message handler: "
  
@@@ -52,6 -54,8 +53,8 @@@
  
  static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
  static int ipmi_init_msghandler(void);
+ static void smi_recv_tasklet(unsigned long);
+ static void handle_new_recv_msgs(ipmi_smi_t intf);
  
  static int initialized;
  
@@@ -354,12 -358,15 +357,15 @@@ struct ipmi_smi 
        int curr_seq;
  
        /*
-        * Messages that were delayed for some reason (out of memory,
-        * for instance), will go in here to be processed later in a
-        * periodic timer interrupt.
+        * Messages queued for delivery.  If delivery fails (out of memory
+        * for instance), They will stay in here to be processed later in a
+        * periodic timer interrupt.  The tasklet is for handling received
+        * messages directly from the handler.
         */
        spinlock_t       waiting_msgs_lock;
        struct list_head waiting_msgs;
+       atomic_t         watchdog_pretimeouts_to_deliver;
+       struct tasklet_struct recv_tasklet;
  
        /*
         * The list of command receivers that are registered for commands
@@@ -492,6 -499,8 +498,8 @@@ static void clean_up_interface_data(ipm
        struct cmd_rcvr  *rcvr, *rcvr2;
        struct list_head list;
  
+       tasklet_kill(&intf->recv_tasklet);
        free_smi_msg_list(&intf->waiting_msgs);
        free_recv_msg_list(&intf->waiting_events);
  
@@@ -2785,12 -2794,17 +2793,17 @@@ channel_handler(ipmi_smi_t intf, struc
        return;
  }
  
- void ipmi_poll_interface(ipmi_user_t user)
+ static void ipmi_poll(ipmi_smi_t intf)
  {
-       ipmi_smi_t intf = user->intf;
        if (intf->handlers->poll)
                intf->handlers->poll(intf->send_info);
+       /* In case something came in */
+       handle_new_recv_msgs(intf);
+ }
+ void ipmi_poll_interface(ipmi_user_t user)
+ {
+       ipmi_poll(user->intf);
  }
  EXPORT_SYMBOL(ipmi_poll_interface);
  
@@@ -2859,6 -2873,10 +2872,10 @@@ int ipmi_register_smi(struct ipmi_smi_h
  #endif
        spin_lock_init(&intf->waiting_msgs_lock);
        INIT_LIST_HEAD(&intf->waiting_msgs);
+       tasklet_init(&intf->recv_tasklet,
+                    smi_recv_tasklet,
+                    (unsigned long) intf);
+       atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
        spin_lock_init(&intf->events_lock);
        INIT_LIST_HEAD(&intf->waiting_events);
        intf->waiting_events_count = 0;
@@@ -3621,11 -3639,11 +3638,11 @@@ static int handle_bmc_rsp(ipmi_smi_
  }
  
  /*
-  * Handle a new message.  Return 1 if the message should be requeued,
+  * Handle a received message.  Return 1 if the message should be requeued,
   * 0 if the message should be freed, or -1 if the message should not
   * be freed or requeued.
   */
- static int handle_new_recv_msg(ipmi_smi_t          intf,
+ static int handle_one_recv_msg(ipmi_smi_t          intf,
                               struct ipmi_smi_msg *msg)
  {
        int requeue;
        return requeue;
  }
  
+ /*
+  * If there are messages in the queue or pretimeouts, handle them.
+  */
+ static void handle_new_recv_msgs(ipmi_smi_t intf)
+ {
+       struct ipmi_smi_msg  *smi_msg;
+       unsigned long        flags = 0;
+       int                  rv;
+       int                  run_to_completion = intf->run_to_completion;
+       /* See if any waiting messages need to be processed. */
+       if (!run_to_completion)
+               spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+       while (!list_empty(&intf->waiting_msgs)) {
+               smi_msg = list_entry(intf->waiting_msgs.next,
+                                    struct ipmi_smi_msg, link);
+               list_del(&smi_msg->link);
+               if (!run_to_completion)
+                       spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+               rv = handle_one_recv_msg(intf, smi_msg);
+               if (!run_to_completion)
+                       spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+               if (rv == 0) {
+                       /* Message handled */
+                       ipmi_free_smi_msg(smi_msg);
+               } else if (rv < 0) {
+                       /* Fatal error on the message, del but don't free. */
+               } else {
+                       /*
+                        * To preserve message order, quit if we
+                        * can't handle a message.
+                        */
+                       list_add(&smi_msg->link, &intf->waiting_msgs);
+                       break;
+               }
+       }
+       if (!run_to_completion)
+               spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+       /*
+        * If the pretimout count is non-zero, decrement one from it and
+        * deliver pretimeouts to all the users.
+        */
+       if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+               ipmi_user_t user;
+               rcu_read_lock();
+               list_for_each_entry_rcu(user, &intf->users, link) {
+                       if (user->handler->ipmi_watchdog_pretimeout)
+                               user->handler->ipmi_watchdog_pretimeout(
+                                       user->handler_data);
+               }
+               rcu_read_unlock();
+       }
+ }
+ static void smi_recv_tasklet(unsigned long val)
+ {
+       handle_new_recv_msgs((ipmi_smi_t) val);
+ }
  /* Handle a new message from the lower layer. */
  void ipmi_smi_msg_received(ipmi_smi_t          intf,
                           struct ipmi_smi_msg *msg)
  {
        unsigned long flags = 0; /* keep us warning-free. */
-       int           rv;
        int           run_to_completion;
  
  
        run_to_completion = intf->run_to_completion;
        if (!run_to_completion)
                spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-       if (!list_empty(&intf->waiting_msgs)) {
-               list_add_tail(&msg->link, &intf->waiting_msgs);
-               if (!run_to_completion)
-                       spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-               goto out;
-       }
+       list_add_tail(&msg->link, &intf->waiting_msgs);
        if (!run_to_completion)
                spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
  
-       rv = handle_new_recv_msg(intf, msg);
-       if (rv > 0) {
-               /*
-                * Could not handle the message now, just add it to a
-                * list to handle later.
-                */
-               run_to_completion = intf->run_to_completion;
-               if (!run_to_completion)
-                       spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-               list_add_tail(&msg->link, &intf->waiting_msgs);
-               if (!run_to_completion)
-                       spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-       } else if (rv == 0) {
-               ipmi_free_smi_msg(msg);
-       }
+       tasklet_schedule(&intf->recv_tasklet);
   out:
        return;
  }
@@@ -3874,16 -3932,8 +3931,8 @@@ EXPORT_SYMBOL(ipmi_smi_msg_received)
  
  void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
  {
-       ipmi_user_t user;
-       rcu_read_lock();
-       list_for_each_entry_rcu(user, &intf->users, link) {
-               if (!user->handler->ipmi_watchdog_pretimeout)
-                       continue;
-               user->handler->ipmi_watchdog_pretimeout(user->handler_data);
-       }
-       rcu_read_unlock();
+       atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+       tasklet_schedule(&intf->recv_tasklet);
  }
  EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
  
@@@ -3997,28 -4047,12 +4046,12 @@@ static void ipmi_timeout_handler(long t
        ipmi_smi_t           intf;
        struct list_head     timeouts;
        struct ipmi_recv_msg *msg, *msg2;
-       struct ipmi_smi_msg  *smi_msg, *smi_msg2;
        unsigned long        flags;
        int                  i;
  
        rcu_read_lock();
        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-               /* See if any waiting messages need to be processed. */
-               spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-               list_for_each_entry_safe(smi_msg, smi_msg2,
-                                        &intf->waiting_msgs, link) {
-                       if (!handle_new_recv_msg(intf, smi_msg)) {
-                               list_del(&smi_msg->link);
-                               ipmi_free_smi_msg(smi_msg);
-                       } else {
-                               /*
-                                * To preserve message order, quit if we
-                                * can't handle a message.
-                                */
-                               break;
-                       }
-               }
-               spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+               tasklet_schedule(&intf->recv_tasklet);
  
                /*
                 * Go through the seq table and find any messages that
@@@ -4172,12 -4206,48 +4205,48 @@@ EXPORT_SYMBOL(ipmi_free_recv_msg)
  
  #ifdef CONFIG_IPMI_PANIC_EVENT
  
+ static atomic_t panic_done_count = ATOMIC_INIT(0);
  static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
  {
+       atomic_dec(&panic_done_count);
  }
  
  static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
  {
+       atomic_dec(&panic_done_count);
+ }
+ /*
+  * Inside a panic, send a message and wait for a response.
+  */
+ static void ipmi_panic_request_and_wait(ipmi_smi_t           intf,
+                                       struct ipmi_addr     *addr,
+                                       struct kernel_ipmi_msg *msg)
+ {
+       struct ipmi_smi_msg  smi_msg;
+       struct ipmi_recv_msg recv_msg;
+       int rv;
+       smi_msg.done = dummy_smi_done_handler;
+       recv_msg.done = dummy_recv_done_handler;
+       atomic_add(2, &panic_done_count);
+       rv = i_ipmi_request(NULL,
+                           intf,
+                           addr,
+                           0,
+                           msg,
+                           intf,
+                           &smi_msg,
+                           &recv_msg,
+                           0,
+                           intf->channels[0].address,
+                           intf->channels[0].lun,
+                           0, 1); /* Don't retry, and don't wait. */
+       if (rv)
+               atomic_sub(2, &panic_done_count);
+       while (atomic_read(&panic_done_count) != 0)
+               ipmi_poll(intf);
  }
  
  #ifdef CONFIG_IPMI_PANIC_STRING
@@@ -4216,8 -4286,6 +4285,6 @@@ static void send_panic_events(char *str
        unsigned char                     data[16];
        struct ipmi_system_interface_addr *si;
        struct ipmi_addr                  addr;
-       struct ipmi_smi_msg               smi_msg;
-       struct ipmi_recv_msg              recv_msg;
  
        si = (struct ipmi_system_interface_addr *) &addr;
        si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
                data[7] = str[2];
        }
  
-       smi_msg.done = dummy_smi_done_handler;
-       recv_msg.done = dummy_recv_done_handler;
        /* For every registered interface, send the event. */
        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
                if (!intf->handlers)
                intf->run_to_completion = 1;
                /* Send the event announcing the panic. */
                intf->handlers->set_run_to_completion(intf->send_info, 1);
-               i_ipmi_request(NULL,
-                              intf,
-                              &addr,
-                              0,
-                              &msg,
-                              intf,
-                              &smi_msg,
-                              &recv_msg,
-                              0,
-                              intf->channels[0].address,
-                              intf->channels[0].lun,
-                              0, 1); /* Don't retry, and don't wait. */
+               ipmi_panic_request_and_wait(intf, &addr, &msg);
        }
  
  #ifdef CONFIG_IPMI_PANIC_STRING
                msg.data = NULL;
                msg.data_len = 0;
                intf->null_user_handler = device_id_fetcher;
-               i_ipmi_request(NULL,
-                              intf,
-                              &addr,
-                              0,
-                              &msg,
-                              intf,
-                              &smi_msg,
-                              &recv_msg,
-                              0,
-                              intf->channels[0].address,
-                              intf->channels[0].lun,
-                              0, 1); /* Don't retry, and don't wait. */
+               ipmi_panic_request_and_wait(intf, &addr, &msg);
  
                if (intf->local_event_generator) {
                        /* Request the event receiver from the local MC. */
                        msg.data = NULL;
                        msg.data_len = 0;
                        intf->null_user_handler = event_receiver_fetcher;
-                       i_ipmi_request(NULL,
-                                      intf,
-                                      &addr,
-                                      0,
-                                      &msg,
-                                      intf,
-                                      &smi_msg,
-                                      &recv_msg,
-                                      0,
-                                      intf->channels[0].address,
-                                      intf->channels[0].lun,
-                                      0, 1); /* no retry, and no wait. */
+                       ipmi_panic_request_and_wait(intf, &addr, &msg);
                }
                intf->null_user_handler = NULL;
  
                        strncpy(data+5, p, 11);
                        p += size;
  
-                       i_ipmi_request(NULL,
-                                      intf,
-                                      &addr,
-                                      0,
-                                      &msg,
-                                      intf,
-                                      &smi_msg,
-                                      &recv_msg,
-                                      0,
-                                      intf->channels[0].address,
-                                      intf->channels[0].lun,
-                                      0, 1); /* no retry, and no wait. */
+                       ipmi_panic_request_and_wait(intf, &addr, &msg);
                }
        }
  #endif /* CONFIG_IPMI_PANIC_STRING */
diff --combined drivers/char/ipmi/ipmi_si_intf.c
@@@ -41,6 -41,7 +41,6 @@@
  
  #include <linux/module.h>
  #include <linux/moduleparam.h>
 -#include <asm/system.h>
  #include <linux/sched.h>
  #include <linux/seq_file.h>
  #include <linux/timer.h>
@@@ -170,7 -171,6 +170,6 @@@ struct smi_info 
        struct si_sm_handlers  *handlers;
        enum si_type           si_type;
        spinlock_t             si_lock;
-       spinlock_t             msg_lock;
        struct list_head       xmit_msgs;
        struct list_head       hp_xmit_msgs;
        struct ipmi_smi_msg    *curr_msg;
@@@ -319,16 -319,8 +318,8 @@@ static int register_xaction_notifier(st
  static void deliver_recv_msg(struct smi_info *smi_info,
                             struct ipmi_smi_msg *msg)
  {
-       /* Deliver the message to the upper layer with the lock
-          released. */
-       if (smi_info->run_to_completion) {
-               ipmi_smi_msg_received(smi_info->intf, msg);
-       } else {
-               spin_unlock(&(smi_info->si_lock));
-               ipmi_smi_msg_received(smi_info->intf, msg);
-               spin_lock(&(smi_info->si_lock));
-       }
+       /* Deliver the message to the upper layer. */
+       ipmi_smi_msg_received(smi_info->intf, msg);
  }
  
  static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@@ -357,13 -349,6 +348,6 @@@ static enum si_sm_result start_next_msg
        struct timeval t;
  #endif
  
-       /*
-        * No need to save flags, we aleady have interrupts off and we
-        * already hold the SMI lock.
-        */
-       if (!smi_info->run_to_completion)
-               spin_lock(&(smi_info->msg_lock));
        /* Pick the high priority queue first. */
        if (!list_empty(&(smi_info->hp_xmit_msgs))) {
                entry = smi_info->hp_xmit_msgs.next;
                rv = SI_SM_CALL_WITHOUT_DELAY;
        }
   out:
-       if (!smi_info->run_to_completion)
-               spin_unlock(&(smi_info->msg_lock));
        return rv;
  }
  
@@@ -480,9 -462,7 +461,7 @@@ static void handle_flags(struct smi_inf
  
                start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
-               spin_unlock(&(smi_info->si_lock));
                ipmi_smi_watchdog_pretimeout(smi_info->intf);
-               spin_lock(&(smi_info->si_lock));
        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
                /* Messages available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
@@@ -888,19 -868,6 +867,6 @@@ static void sender(void                
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
  #endif
  
-       /*
-        * last_timeout_jiffies is updated here to avoid
-        * smi_timeout() handler passing very large time_diff
-        * value to smi_event_handler() that causes
-        * the send command to abort.
-        */
-       smi_info->last_timeout_jiffies = jiffies;
-       mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
-       if (smi_info->thread)
-               wake_up_process(smi_info->thread);
        if (smi_info->run_to_completion) {
                /*
                 * If we are running to completion, then throw it in
                return;
        }
  
-       spin_lock_irqsave(&smi_info->msg_lock, flags);
+       spin_lock_irqsave(&smi_info->si_lock, flags);
        if (priority > 0)
                list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
        else
                list_add_tail(&msg->link, &smi_info->xmit_msgs);
-       spin_unlock_irqrestore(&smi_info->msg_lock, flags);
  
-       spin_lock_irqsave(&smi_info->si_lock, flags);
-       if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
+       if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+               /*
+                * last_timeout_jiffies is updated here to avoid
+                * smi_timeout() handler passing very large time_diff
+                * value to smi_event_handler() that causes
+                * the send command to abort.
+                */
+               smi_info->last_timeout_jiffies = jiffies;
+               mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+               if (smi_info->thread)
+                       wake_up_process(smi_info->thread);
                start_next_msg(smi_info);
+               smi_event_handler(smi_info, 0);
+       }
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
  }
  
@@@ -1033,16 -1013,19 +1012,19 @@@ static int ipmi_thread(void *data
  static void poll(void *send_info)
  {
        struct smi_info *smi_info = send_info;
-       unsigned long flags;
+       unsigned long flags = 0;
+       int run_to_completion = smi_info->run_to_completion;
  
        /*
         * Make sure there is some delay in the poll loop so we can
         * drive time forward and timeout things.
         */
        udelay(10);
-       spin_lock_irqsave(&smi_info->si_lock, flags);
+       if (!run_to_completion)
+               spin_lock_irqsave(&smi_info->si_lock, flags);
        smi_event_handler(smi_info, 10);
-       spin_unlock_irqrestore(&smi_info->si_lock, flags);
+       if (!run_to_completion)
+               spin_unlock_irqrestore(&smi_info->si_lock, flags);
  }
  
  static void request_events(void *send_info)
@@@ -1679,10 -1662,8 +1661,8 @@@ static struct smi_info *smi_info_alloc(
  {
        struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
  
-       if (info) {
+       if (info)
                spin_lock_init(&info->si_lock);
-               spin_lock_init(&info->msg_lock);
-       }
        return info;
  }
  
diff --combined drivers/char/ipmi/ipmi_watchdog.c
  #endif
  
  static DEFINE_MUTEX(ipmi_watchdog_mutex);
 -static int nowayout = WATCHDOG_NOWAYOUT;
 +static bool nowayout = WATCHDOG_NOWAYOUT;
  
  static ipmi_user_t watchdog_user;
  static int watchdog_ifnum;
@@@ -320,7 -320,7 +320,7 @@@ module_param(start_now, int, 0444)
  MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
                 "soon as the driver is loaded.");
  
 -module_param(nowayout, int, 0644);
 +module_param(nowayout, bool, 0644);
  MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
                 "(default=CONFIG_WATCHDOG_NOWAYOUT)");
  
@@@ -520,6 -520,7 +520,7 @@@ static void panic_halt_ipmi_heartbeat(v
        msg.cmd = IPMI_WDOG_RESET_TIMER;
        msg.data = NULL;
        msg.data_len = 0;
+       atomic_add(2, &panic_done_count);
        rv = ipmi_request_supply_msgs(watchdog_user,
                                      (struct ipmi_addr *) &addr,
                                      0,
                                      &panic_halt_heartbeat_smi_msg,
                                      &panic_halt_heartbeat_recv_msg,
                                      1);
-       if (!rv)
-               atomic_add(2, &panic_done_count);
+       if (rv)
+               atomic_sub(2, &panic_done_count);
  }
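
The counter is now reserved before the request is issued and released only if
submission fails, so a completion that runs immediately (as it can during a panic,
when everything is polled synchronously) can never be observed before the
increment.  A self-contained sketch of that pattern, with hypothetical demo_*
names standing in for the real request and completion paths:

    #include <linux/atomic.h>

    static atomic_t demo_done_count = ATOMIC_INIT(0);

    /* Stand-in for the real request call; 0 means the request was queued. */
    static int demo_submit(void)
    {
        return 0;
    }

    static void demo_issue_request(void)
    {
        int rv;

        atomic_add(2, &demo_done_count);         /* one send + one receive completion */
        rv = demo_submit();
        if (rv)
            atomic_sub(2, &demo_done_count);     /* nothing will complete, undo */
    }

    /* Each completion callback does this exactly once. */
    static void demo_completion(void)
    {
        atomic_dec(&demo_done_count);
    }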
  
  static struct ipmi_smi_msg panic_halt_smi_msg = {
@@@ -553,16 -554,18 +554,18 @@@ static void panic_halt_ipmi_set_timeout
        /* Wait for the messages to be free. */
        while (atomic_read(&panic_done_count) != 0)
                ipmi_poll_interface(watchdog_user);
+       atomic_add(2, &panic_done_count);
        rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
                                &panic_halt_recv_msg,
                                &send_heartbeat_now);
-       if (!rv) {
-               atomic_add(2, &panic_done_count);
-               if (send_heartbeat_now)
-                       panic_halt_ipmi_heartbeat();
-       } else
+       if (rv) {
+               atomic_sub(2, &panic_done_count);
                printk(KERN_WARNING PFX
                       "Unable to extend the watchdog timeout.");
+       } else {
+               if (send_heartbeat_now)
+                       panic_halt_ipmi_heartbeat();
+       }
        while (atomic_read(&panic_done_count) != 0)
                ipmi_poll_interface(watchdog_user);
  }
@@@ -1164,7 -1167,7 +1167,7 @@@ static int wdog_reboot_handler(struct n
                if (code == SYS_POWER_OFF || code == SYS_HALT) {
                        /* Disable the WDT if we are shutting down. */
                        ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
-                       panic_halt_ipmi_set_timeout();
+                       ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
                } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
                        /* Set a long timer to let the reboot happens, but
                           reboot if it hangs, but only if the watchdog
                        timeout = 120;
                        pretimeout = 0;
                        ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
-                       panic_halt_ipmi_set_timeout();
+                       ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
                }
        }
        return NOTIFY_OK;
diff --combined include/linux/mm.h
@@@ -954,7 -954,7 +954,7 @@@ extern void truncate_pagecache(struct i
  extern void truncate_setsize(struct inode *inode, loff_t newsize);
  extern int vmtruncate(struct inode *inode, loff_t offset);
  extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
+ void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
  int truncate_inode_page(struct address_space *mapping, struct page *page);
  int generic_error_remove_page(struct address_space *mapping, struct page *page);
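
A hedged usage sketch for the newly declared helper: the arguments are assumed to
be byte offsets with an inclusive end, matching truncate_inode_pages_range(), so a
caller punching a hole of len bytes at offset start would presumably do:

    /* hypothetical hole-punch caller, not taken from this patch */
    truncate_pagecache_range(inode, start, start + len - 1);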
  
@@@ -1258,8 -1258,6 +1258,8 @@@ static inline void pgtable_page_dtor(st
  extern void free_area_init(unsigned long * zones_size);
  extern void free_area_init_node(int nid, unsigned long * zones_size,
                unsigned long zone_start_pfn, unsigned long *zholes_size);
 +extern void free_initmem(void);
 +
  #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  /*
   * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
diff --combined kernel/kexec.c
@@@ -37,6 -37,7 +37,6 @@@
  #include <asm/page.h>
  #include <asm/uaccess.h>
  #include <asm/io.h>
 -#include <asm/system.h>
  #include <asm/sections.h>
  
  /* Per cpu memory for storing cpu states in case of system crash. */
@@@ -1358,6 -1359,10 +1358,10 @@@ static int __init parse_crashkernel_sim
  
        if (*cur == '@')
                *crash_base = memparse(cur+1, &cur);
+       else if (*cur != ' ' && *cur != '\0') {
+               pr_warning("crashkernel: unrecognized char\n");
+               return -EINVAL;
+       }
  
        return 0;
  }
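
With this check, trailing junk after the size (or after the optional @base) now
fails instead of being silently ignored.  Illustrative command lines, not taken
from the patch:

    crashkernel=128M@16M     parsed: size 128M, base 16M
    crashkernel=128M         parsed: size 128M, base left at 0 (chosen later)
    crashkernel=128Mfoo      now rejected with -EINVAL (previously treated as 128M)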
@@@ -1461,7 -1466,9 +1465,9 @@@ static int __init crash_save_vmcoreinfo
  
        VMCOREINFO_SYMBOL(init_uts_ns);
        VMCOREINFO_SYMBOL(node_online_map);
+ #ifdef CONFIG_MMU
        VMCOREINFO_SYMBOL(swapper_pg_dir);
+ #endif
        VMCOREINFO_SYMBOL(_stext);
        VMCOREINFO_SYMBOL(vmlist);
  
diff --combined kernel/sysctl.c
@@@ -23,6 -23,7 +23,7 @@@
  #include <linux/swap.h>
  #include <linux/slab.h>
  #include <linux/sysctl.h>
+ #include <linux/bitmap.h>
  #include <linux/signal.h>
  #include <linux/printk.h>
  #include <linux/proc_fs.h>
@@@ -68,9 -69,6 +69,9 @@@
  #include <asm/stacktrace.h>
  #include <asm/io.h>
  #endif
 +#ifdef CONFIG_SPARC
 +#include <asm/setup.h>
 +#endif
  #ifdef CONFIG_BSD_PROCESS_ACCT
  #include <linux/acct.h>
  #endif
@@@ -145,6 -143,7 +146,6 @@@ static const int cap_last_cap = CAP_LAS
  #include <linux/inotify.h>
  #endif
  #ifdef CONFIG_SPARC
 -#include <asm/system.h>
  #endif
  
  #ifdef CONFIG_SPARC64
@@@ -2395,9 -2394,7 +2396,7 @@@ int proc_do_large_bitmap(struct ctl_tab
                                }
                        }
  
-                       while (val_a <= val_b)
-                               set_bit(val_a++, tmp_bitmap);
+                       bitmap_set(tmp_bitmap, val_a, val_b - val_a + 1);
                        first = 0;
                        proc_skip_char(&kbuf, &left, '\n');
                }
                        if (*ppos)
                                bitmap_or(bitmap, bitmap, tmp_bitmap, bitmap_len);
                        else
-                               memcpy(bitmap, tmp_bitmap,
-                                       BITS_TO_LONGS(bitmap_len) * sizeof(unsigned long));
+                               bitmap_copy(bitmap, tmp_bitmap, bitmap_len);
                }
                kfree(tmp_bitmap);
                *lenp -= left;
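
The open-coded set_bit loop and the BITS_TO_LONGS()*sizeof(long) memcpy are
replaced by bitmap library helpers, which take a length in bits.  A small
self-contained illustration of the two calls (demo_* names are made up):

    #include <linux/bitmap.h>
    #include <linux/types.h>

    #define DEMO_BITS 1024

    static DECLARE_BITMAP(demo_tmp, DEMO_BITS);
    static DECLARE_BITMAP(demo_dst, DEMO_BITS);

    static void demo_bitmap_ops(unsigned int val_a, unsigned int val_b)
    {
        /* set bits val_a..val_b inclusive, like the removed while loop */
        bitmap_set(demo_tmp, val_a, val_b - val_a + 1);

        /* copy a whole bitmap by bit count, replacing the open-coded memcpy */
        bitmap_copy(demo_dst, demo_tmp, DEMO_BITS);
    }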
diff --combined mm/slub.c
+++ b/mm/slub.c
@@@ -29,7 -29,6 +29,7 @@@
  #include <linux/math64.h>
  #include <linux/fault-inject.h>
  #include <linux/stacktrace.h>
 +#include <linux/prefetch.h>
  
  #include <trace/events/kmem.h>
  
@@@ -270,11 -269,6 +270,11 @@@ static inline void *get_freepointer(str
        return *(void **)(object + s->offset);
  }
  
 +static void prefetch_freepointer(const struct kmem_cache *s, void *object)
 +{
 +      prefetch(object + s->offset);
 +}
 +
  static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
  {
        void *p;
@@@ -1566,7 -1560,6 +1566,7 @@@ static void *get_partial_node(struct km
                } else {
                        page->freelist = t;
                        available = put_cpu_partial(s, page, 0);
 +                      stat(s, CPU_PARTIAL_NODE);
                }
                if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
                        break;
@@@ -1990,7 -1983,6 +1990,7 @@@ int put_cpu_partial(struct kmem_cache *
                                local_irq_restore(flags);
                                pobjects = 0;
                                pages = 0;
 +                              stat(s, CPU_PARTIAL_DRAIN);
                        }
                }
  
                page->next = oldpage;
  
        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
 -      stat(s, CPU_PARTIAL_FREE);
        return pobjects;
  }
  
@@@ -2035,9 -2028,17 +2035,17 @@@ static void flush_cpu_slab(void *d
        __flush_cpu_slab(s, smp_processor_id());
  }
  
+ static bool has_cpu_slab(int cpu, void *info)
+ {
+       struct kmem_cache *s = info;
+       struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+       return !!(c->page);
+ }
  static void flush_all(struct kmem_cache *s)
  {
-       on_each_cpu(flush_cpu_slab, s, 1);
+       on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
  }
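
on_each_cpu_cond() is one of the new SMP helpers merged here: the condition
callback runs (without an IPI) for every online CPU, and the work function is
then sent only to the CPUs for which it returned true, so CPUs with nothing
cached are no longer interrupted.  A hedged usage sketch with made-up demo_*
names:

    #include <linux/smp.h>
    #include <linux/percpu.h>
    #include <linux/gfp.h>

    static DEFINE_PER_CPU(int, demo_pending);

    static bool demo_cpu_has_work(int cpu, void *info)
    {
        /* cheap check, runs on the calling CPU once per online CPU */
        return per_cpu(demo_pending, cpu) != 0;
    }

    static void demo_do_work(void *info)
    {
        /* runs, via IPI, only on CPUs where demo_cpu_has_work() returned true */
        this_cpu_write(demo_pending, 0);
    }

    static void demo_flush_all(void *info)
    {
        /* wait=1: return only after every selected CPU has run demo_do_work() */
        on_each_cpu_cond(demo_cpu_has_work, demo_do_work, info, 1, GFP_ATOMIC);
    }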
  
  /*
@@@ -2326,8 -2327,6 +2334,8 @@@ redo
                object = __slab_alloc(s, gfpflags, node, addr, c);
  
        else {
 +              void *next_object = get_freepointer_safe(s, object);
 +
                /*
                 * The cmpxchg will only match if there was no additional
                 * operation and if we are on the right processor.
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                object, tid,
 -                              get_freepointer_safe(s, object), next_tid(tid)))) {
 +                              next_object, next_tid(tid)))) {
  
                        note_cmpxchg_failure("slab_alloc", s, tid);
                        goto redo;
                }
 +              prefetch_freepointer(s, next_object);
                stat(s, ALLOC_FASTPATH);
        }
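
The fastpath now reads the next free pointer once, feeds that value to the
cmpxchg, and prefetches the cache line holding *its* free pointer, warming it for
the allocation that will likely follow.  A stripped-down, single-threaded sketch
of the idea (no this_cpu_cmpxchg_double(), hypothetical demo_* names):

    #include <linux/prefetch.h>

    /* Each free object stores a pointer to the next free object at 'offset'. */
    static inline void *demo_freelist_pop(void **freelist, unsigned int offset)
    {
        void *object = *freelist;
        void *next;

        if (!object)
            return NULL;

        next = *(void **)((char *)object + offset);
        *freelist = next;   /* the real fastpath swaps this in with a cmpxchg */

        /* prefetch is only a hint, so a NULL 'next' is harmless */
        prefetch((char *)next + offset);
        return object;
    }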
  
@@@ -2485,10 -2483,9 +2493,10 @@@ static void __slab_free(struct kmem_cac
                 * If we just froze the page then put it onto the
                 * per cpu partial list.
                 */
 -              if (new.frozen && !was_frozen)
 +              if (new.frozen && !was_frozen) {
                        put_cpu_partial(s, page, 1);
 -
 +                      stat(s, CPU_PARTIAL_FREE);
 +              }
                /*
                 * The list lock was not taken therefore no list
                 * activity can be necessary.
@@@ -3950,14 -3947,13 +3958,14 @@@ struct kmem_cache *kmem_cache_create(co
                if (kmem_cache_open(s, n,
                                size, align, flags, ctor)) {
                        list_add(&s->list, &slab_caches);
 +                      up_write(&slub_lock);
                        if (sysfs_slab_add(s)) {
 +                              down_write(&slub_lock);
                                list_del(&s->list);
                                kfree(n);
                                kfree(s);
                                goto err;
                        }
 -                      up_write(&slub_lock);
                        return s;
                }
                kfree(n);
@@@ -5081,8 -5077,6 +5089,8 @@@ STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpx
  STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
  STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
  STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
 +STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
 +STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
  #endif
  
  static struct attribute *slab_attrs[] = {
        &cmpxchg_double_cpu_fail_attr.attr,
        &cpu_partial_alloc_attr.attr,
        &cpu_partial_free_attr.attr,
 +      &cpu_partial_node_attr.attr,
 +      &cpu_partial_drain_attr.attr,
  #endif
  #ifdef CONFIG_FAILSLAB
        &failslab_attr.attr,